Gentoo Websites Logo
Go to: Gentoo Home Documentation Forums Lists Bugs Planet Store Wiki Get Gentoo!
View | Details | Raw Unified | Return to bug 275047 | Differences between
and this patch

Collapse All | Expand All

(-)AtomArg.py (+16 lines)
Line 0 Link Here
1
from _emerge.DependencyArg import DependencyArg
2
try:
3
	import portage
4
except ImportError:
5
	from os import path as osp
6
	import sys
7
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
8
	import portage
9
class AtomArg(DependencyArg):
	"""A dependency argument wrapping a single package atom.

	The ``atom`` keyword is normalized to a ``portage.dep.Atom``
	instance, and ``self.set`` exposes it as a one-element tuple.
	"""

	def __init__(self, atom=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		# Coerce anything that is not already an Atom (typically a
		# plain string) into a portage.dep.Atom instance.
		if isinstance(atom, portage.dep.Atom):
			self.atom = atom
		else:
			self.atom = portage.dep.Atom(atom)
		self.set = (self.atom, )
16
(-)RepoDisplay.py (+61 lines)
Line 0 Link Here
1
from portage.output import teal
2
import os
3
class RepoDisplay(object):
	"""Assigns short numeric display indices to repository paths.

	Repositories are numbered in order of first use via repoStr();
	PORTDIR is pre-registered so that it always receives index 0.
	A path that cannot be matched is displayed as "?".
	"""

	def __init__(self, roots):
		# roots: mapping of root -> root_config; each root contributes
		# its PORTDIR and PORTDIR_OVERLAY paths to the known-repo list.
		self._shown_repos = {}
		self._unknown_repo = False
		repo_paths = set()
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			if portdir:
				repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			if overlays:
				repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		# Symlink-resolved paths, parallel to self._repo_paths.
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			if portdir:
				self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		"""Return the display index (as a string) for the repository at
		repo_path_real (a symlink-resolved path), or "?" when the path
		is empty or not a known repository."""
		real_index = -1
		if repo_path_real:
			try:
				real_index = self._repo_paths_real.index(repo_path_real)
			except ValueError:
				# Fix: list.index() raises ValueError for a missing
				# value rather than returning -1, so an unknown repo
				# path previously crashed here instead of being
				# reported via the "?" branch below.
				pass
		if real_index == -1:
			s = "?"
			self._unknown_repo = True
		else:
			shown_repos = self._shown_repos
			repo_paths = self._repo_paths
			repo_path = repo_paths[real_index]
			index = shown_repos.get(repo_path)
			if index is None:
				# First time this repo is shown; assign the next index.
				index = len(shown_repos)
				shown_repos[repo_path] = index
			s = str(index)
		return s

	def __str__(self):
		output = []
		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
		# Invert the path -> index mapping into an index-ordered list.
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.iteritems():
			show_repo_paths[repo_index] = repo_path
		if show_repo_paths:
			for index, repo_path in enumerate(show_repo_paths):
				output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		if unknown_repo:
			output.append(" "+teal("[?]") + \
				" indicates that the source repository could not be determined\n")
		return "".join(output)
61
(-)TaskSequence.py (+40 lines)
Line 0 Link Here
1
from _emerge.CompositeTask import CompositeTask
2
from _emerge.AsynchronousTask import AsynchronousTask
3
import os
4
from collections import deque
5
class TaskSequence(CompositeTask):
	"""
	A collection of tasks that executes sequentially. Each task
	must have an addExitListener() method that can be used as
	a means to trigger movement from one task to the next.
	"""

	__slots__ = ("_task_queue",)

	def __init__(self, **kwargs):
		AsynchronousTask.__init__(self, **kwargs)
		self._task_queue = deque()

	def add(self, task):
		# Tasks run in FIFO order.
		self._task_queue.append(task)

	def _start(self):
		self._start_next_task()

	def cancel(self):
		# Discard everything that has not started yet; the currently
		# running task is cancelled by the parent class.
		self._task_queue.clear()
		CompositeTask.cancel(self)

	def _start_next_task(self):
		next_task = self._task_queue.popleft()
		self._start_task(next_task, self._task_exit_handler)

	def _task_exit_handler(self, task):
		if self._default_exit(task) != os.EX_OK:
			# The task failed, so abort the remainder of the sequence.
			self.wait()
			return
		if self._task_queue:
			self._start_next_task()
			return
		# Final task finished successfully.
		self._final_exit(task)
		self.wait()
40
(-)UnmergeDepPriority.py (+31 lines)
Line 0 Link Here
1
from _emerge.AbstractDepPriority import AbstractDepPriority
2
class UnmergeDepPriority(AbstractDepPriority):
	"""
	Combination of properties           Priority  Category

	runtime                                0       HARD
	runtime_post                          -1       HARD
	buildtime                             -2       SOFT
	(none of the above)                   -2       SOFT
	"""

	__slots__ = ("optional", "satisfied",)

	MAX    =  0
	SOFT   = -2
	MIN    = -2

	def __int__(self):
		# Hardest first: runtime deps rank highest, runtime_post is
		# slightly softer.
		if self.runtime:
			return 0
		if self.runtime_post:
			return -1
		# Per the table above, buildtime and the no-property case
		# share the soft minimum.
		return -2

	def __str__(self):
		if self.__int__() > self.SOFT:
			return "hard"
		return "soft"
31
(-)AbstractDepPriority.py (+26 lines)
Line 0 Link Here
1
from _emerge.SlotObject import SlotObject
2
class AbstractDepPriority(SlotObject):
	"""Base class for dependency priorities.

	Subclasses supply __int__(); every rich comparison simply compares
	that integer value against the other operand.
	"""

	__slots__ = ("buildtime", "runtime", "runtime_post")

	def __lt__(self, other):
		return int(self) < other

	def __le__(self, other):
		return int(self) <= other

	def __eq__(self, other):
		return int(self) == other

	def __ne__(self, other):
		return int(self) != other

	def __gt__(self, other):
		return int(self) > other

	def __ge__(self, other):
		return int(self) >= other

	def copy(self):
		# A shallow copy suffices: slot values are plain flags.
		import copy
		return copy.copy(self)
26
(-)__init__.py (-3138 / +31 lines)
Lines 3-22 Link Here
3
# Distributed under the terms of the GNU General Public License v2
3
# Distributed under the terms of the GNU General Public License v2
4
# $Id$
4
# $Id$
5
5
6
import array
7
import codecs
8
from collections import deque
9
import fcntl
10
import formatter
6
import formatter
11
import logging
7
import logging
12
import pwd
8
import pwd
13
import select
9
import select
14
import shlex
10
import shlex
15
import shutil
16
import signal
11
import signal
17
import sys
12
import sys
18
import textwrap
13
import textwrap
19
import urlparse
20
import weakref
14
import weakref
21
import gc
15
import gc
22
import os, stat
16
import os, stat
Lines 34-40 Link Here
34
28
35
import _emerge.help
29
import _emerge.help
36
import portage.xpak, commands, errno, re, socket, time
30
import portage.xpak, commands, errno, re, socket, time
37
from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
31
from portage.output import blue, bold, colorize, darkblue, darkgreen, green, \
38
	nc_len, red, teal, turquoise, xtermTitle, \
32
	nc_len, red, teal, turquoise, xtermTitle, \
39
	xtermTitleReset, yellow
33
	xtermTitleReset, yellow
40
from portage.output import create_color_func
34
from portage.output import create_color_func
Lines 59-68 Link Here
59
53
60
from itertools import chain, izip
54
from itertools import chain, izip
61
55
62
try:
56
from _emerge.SlotObject import SlotObject
63
	import cPickle as pickle
57
from _emerge.DepPriority import DepPriority
64
except ImportError:
58
from _emerge.BlockerDepPriority import BlockerDepPriority
65
	import pickle
59
from _emerge.UnmergeDepPriority import UnmergeDepPriority
60
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
61
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
62
from _emerge.Task import Task
63
from _emerge.Blocker import Blocker
64
from _emerge.PollConstants import PollConstants
65
from _emerge.AsynchronousTask import AsynchronousTask
66
from _emerge.CompositeTask import CompositeTask
67
from _emerge.EbuildFetcher import EbuildFetcher
68
from _emerge.EbuildBuild import EbuildBuild
69
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
70
from _emerge.EbuildPhase import EbuildPhase
71
from _emerge.Binpkg import Binpkg
72
from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
73
from _emerge.PackageMerge import PackageMerge
74
from _emerge.DependencyArg import DependencyArg
75
from _emerge.AtomArg import AtomArg
76
from _emerge.PackageArg import PackageArg
77
from _emerge.SetArg import SetArg
78
from _emerge.Dependency import Dependency
79
from _emerge.BlockerCache import BlockerCache
80
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
81
from _emerge.RepoDisplay import RepoDisplay
82
from _emerge.UseFlagDisplay import UseFlagDisplay
83
from _emerge.PollSelectAdapter import PollSelectAdapter
84
from _emerge.SequentialTaskQueue import SequentialTaskQueue
85
from _emerge.ProgressHandler import ProgressHandler
66
86
67
try:
87
try:
68
	from cStringIO import StringIO
88
	from cStringIO import StringIO
Lines 869-1160 Link Here
869
		else:
889
		else:
870
			yield flag
890
			yield flag
871
891
872
class SlotObject(object):
	"""Base class for objects that declare attributes via __slots__.

	The constructor initializes every slot declared anywhere in the
	subclass hierarchy from keyword arguments; missing ones become
	None. SlotObject's own __weakref__ slot is excluded.
	"""

	__slots__ = ("__weakref__",)

	def __init__(self, **kwargs):
		stack = [self.__class__]
		while stack:
			cls = stack.pop()
			if cls is SlotObject:
				# The base class only declares __weakref__; skip it.
				continue
			stack.extend(cls.__bases__)
			for attr in getattr(cls, "__slots__", None) or ():
				setattr(self, attr, kwargs.get(attr, None))

	def copy(self):
		"""
		Create a new instance and copy all attributes
		defined from __slots__ (including those from
		inherited classes).
		"""
		clone = self.__class__()

		stack = [self.__class__]
		while stack:
			cls = stack.pop()
			if cls is SlotObject:
				continue
			stack.extend(cls.__bases__)
			for attr in getattr(cls, "__slots__", None) or ():
				setattr(clone, attr, getattr(self, attr))

		return clone
910
911
class AbstractDepPriority(SlotObject):
912
	__slots__ = ("buildtime", "runtime", "runtime_post")
913
914
	def __lt__(self, other):
915
		return self.__int__() < other
916
917
	def __le__(self, other):
918
		return self.__int__() <= other
919
920
	def __eq__(self, other):
921
		return self.__int__() == other
922
923
	def __ne__(self, other):
924
		return self.__int__() != other
925
926
	def __gt__(self, other):
927
		return self.__int__() > other
928
929
	def __ge__(self, other):
930
		return self.__int__() >= other
931
932
	def copy(self):
933
		import copy
934
		return copy.copy(self)
935
936
class DepPriority(AbstractDepPriority):
937
938
	__slots__ = ("satisfied", "optional", "rebuild")
939
940
	def __int__(self):
941
		"""
942
		Note: These priorities are only used for measuring hardness
943
		in the circular dependency display via digraph.debug_print(),
944
		and nothing more. For actual merge order calculations, the
945
		measures defined by the DepPriorityNormalRange and
946
		DepPrioritySatisfiedRange classes are used.
947
948
		Attributes                            Hardness
949
950
		buildtime                               0
951
		runtime                                -1
952
		runtime_post                           -2
953
		optional                               -3
954
		(none of the above)                    -4
955
956
		"""
957
958
		if self.buildtime:
959
			return 0
960
		if self.runtime:
961
			return -1
962
		if self.runtime_post:
963
			return -2
964
		if self.optional:
965
			return -3
966
		return -4
967
968
	def __str__(self):
969
		if self.optional:
970
			return "optional"
971
		if self.buildtime:
972
			return "buildtime"
973
		if self.runtime:
974
			return "runtime"
975
		if self.runtime_post:
976
			return "runtime_post"
977
		return "soft"
978
979
class BlockerDepPriority(DepPriority):
980
	__slots__ = ()
981
	def __int__(self):
982
		return 0
983
984
	def __str__(self):
985
		return 'blocker'
986
987
BlockerDepPriority.instance = BlockerDepPriority()
988
989
class UnmergeDepPriority(AbstractDepPriority):
990
	__slots__ = ("optional", "satisfied",)
991
	"""
992
	Combination of properties           Priority  Category
993
994
	runtime                                0       HARD
995
	runtime_post                          -1       HARD
996
	buildtime                             -2       SOFT
997
	(none of the above)                   -2       SOFT
998
	"""
999
1000
	MAX    =  0
1001
	SOFT   = -2
1002
	MIN    = -2
1003
1004
	def __int__(self):
1005
		if self.runtime:
1006
			return 0
1007
		if self.runtime_post:
1008
			return -1
1009
		if self.buildtime:
1010
			return -2
1011
		return -2
1012
1013
	def __str__(self):
1014
		myvalue = self.__int__()
1015
		if myvalue > self.SOFT:
1016
			return "hard"
1017
		return "soft"
1018
1019
class DepPriorityNormalRange(object):
1020
	"""
1021
	DepPriority properties              Index      Category
1022
1023
	buildtime                                      HARD
1024
	runtime                                3       MEDIUM
1025
	runtime_post                           2       MEDIUM_SOFT
1026
	optional                               1       SOFT
1027
	(none of the above)                    0       NONE
1028
	"""
1029
	MEDIUM      = 3
1030
	MEDIUM_SOFT = 2
1031
	SOFT        = 1
1032
	NONE        = 0
1033
1034
	@classmethod
1035
	def _ignore_optional(cls, priority):
1036
		if priority.__class__ is not DepPriority:
1037
			return False
1038
		return bool(priority.optional)
1039
1040
	@classmethod
1041
	def _ignore_runtime_post(cls, priority):
1042
		if priority.__class__ is not DepPriority:
1043
			return False
1044
		return bool(priority.optional or priority.runtime_post)
1045
1046
	@classmethod
1047
	def _ignore_runtime(cls, priority):
1048
		if priority.__class__ is not DepPriority:
1049
			return False
1050
		return not priority.buildtime
1051
1052
	ignore_medium      = _ignore_runtime
1053
	ignore_medium_soft = _ignore_runtime_post
1054
	ignore_soft        = _ignore_optional
1055
1056
DepPriorityNormalRange.ignore_priority = (
1057
	None,
1058
	DepPriorityNormalRange._ignore_optional,
1059
	DepPriorityNormalRange._ignore_runtime_post,
1060
	DepPriorityNormalRange._ignore_runtime
1061
)
1062
1063
class DepPrioritySatisfiedRange(object):
1064
	"""
1065
	DepPriority                         Index      Category
1066
1067
	not satisfied and buildtime                    HARD
1068
	not satisfied and runtime              7       MEDIUM
1069
	not satisfied and runtime_post         6       MEDIUM_SOFT
1070
	satisfied and buildtime and rebuild    5       SOFT
1071
	satisfied and buildtime                4       SOFT
1072
	satisfied and runtime                  3       SOFT
1073
	satisfied and runtime_post             2       SOFT
1074
	optional                               1       SOFT
1075
	(none of the above)                    0       NONE
1076
	"""
1077
	MEDIUM      = 7
1078
	MEDIUM_SOFT = 6
1079
	SOFT        = 5
1080
	NONE        = 0
1081
1082
	@classmethod
1083
	def _ignore_optional(cls, priority):
1084
		if priority.__class__ is not DepPriority:
1085
			return False
1086
		return bool(priority.optional)
1087
1088
	@classmethod
1089
	def _ignore_satisfied_runtime_post(cls, priority):
1090
		if priority.__class__ is not DepPriority:
1091
			return False
1092
		if priority.optional:
1093
			return True
1094
		if not priority.satisfied:
1095
			return False
1096
		return bool(priority.runtime_post)
1097
1098
	@classmethod
1099
	def _ignore_satisfied_runtime(cls, priority):
1100
		if priority.__class__ is not DepPriority:
1101
			return False
1102
		if priority.optional:
1103
			return True
1104
		if not priority.satisfied:
1105
			return False
1106
		return not priority.buildtime
1107
1108
	@classmethod
1109
	def _ignore_satisfied_buildtime(cls, priority):
1110
		if priority.__class__ is not DepPriority:
1111
			return False
1112
		if priority.optional:
1113
			return True
1114
		if not priority.satisfied:
1115
			return False
1116
		if priority.buildtime:
1117
			return not priority.rebuild
1118
		return True
1119
1120
	@classmethod
1121
	def _ignore_satisfied_buildtime_rebuild(cls, priority):
1122
		if priority.__class__ is not DepPriority:
1123
			return False
1124
		if priority.optional:
1125
			return True
1126
		return bool(priority.satisfied)
1127
1128
	@classmethod
1129
	def _ignore_runtime_post(cls, priority):
1130
		if priority.__class__ is not DepPriority:
1131
			return False
1132
		return bool(priority.optional or \
1133
			priority.satisfied or \
1134
			priority.runtime_post)
1135
1136
	@classmethod
1137
	def _ignore_runtime(cls, priority):
1138
		if priority.__class__ is not DepPriority:
1139
			return False
1140
		return bool(priority.satisfied or \
1141
			not priority.buildtime)
1142
1143
	ignore_medium      = _ignore_runtime
1144
	ignore_medium_soft = _ignore_runtime_post
1145
	ignore_soft        = _ignore_satisfied_buildtime_rebuild
1146
1147
DepPrioritySatisfiedRange.ignore_priority = (
1148
	None,
1149
	DepPrioritySatisfiedRange._ignore_optional,
1150
	DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1151
	DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1152
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1153
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1154
	DepPrioritySatisfiedRange._ignore_runtime_post,
1155
	DepPrioritySatisfiedRange._ignore_runtime
1156
)
1157
1158
def _find_deep_system_runtime_deps(graph):
892
def _find_deep_system_runtime_deps(graph):
1159
	deep_system_deps = set()
893
	deep_system_deps = set()
1160
	node_stack = []
894
	node_stack = []
Lines 1533-1590 Link Here
1533
			shown_licenses.add(l)
1267
			shown_licenses.add(l)
1534
	return have_eapi_mask
1268
	return have_eapi_mask
1535
1269
1536
class Task(SlotObject):
1537
	__slots__ = ("_hash_key", "_hash_value")
1538
1539
	def _get_hash_key(self):
1540
		hash_key = getattr(self, "_hash_key", None)
1541
		if hash_key is None:
1542
			raise NotImplementedError(self)
1543
		return hash_key
1544
1545
	def __eq__(self, other):
1546
		return self._get_hash_key() == other
1547
1548
	def __ne__(self, other):
1549
		return self._get_hash_key() != other
1550
1551
	def __hash__(self):
1552
		hash_value = getattr(self, "_hash_value", None)
1553
		if hash_value is None:
1554
			self._hash_value = hash(self._get_hash_key())
1555
		return self._hash_value
1556
1557
	def __len__(self):
1558
		return len(self._get_hash_key())
1559
1560
	def __getitem__(self, key):
1561
		return self._get_hash_key()[key]
1562
1563
	def __iter__(self):
1564
		return iter(self._get_hash_key())
1565
1566
	def __contains__(self, key):
1567
		return key in self._get_hash_key()
1568
1569
	def __str__(self):
1570
		return str(self._get_hash_key())
1571
1572
class Blocker(Task):
1573
1574
	__hash__ = Task.__hash__
1575
	__slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1576
1577
	def __init__(self, **kwargs):
1578
		Task.__init__(self, **kwargs)
1579
		self.cp = portage.dep_getkey(self.atom)
1580
1581
	def _get_hash_key(self):
1582
		hash_key = getattr(self, "_hash_key", None)
1583
		if hash_key is None:
1584
			self._hash_key = \
1585
				("blocks", self.root, self.atom, self.eapi)
1586
		return self._hash_key
1587
1588
class Package(Task):
1270
class Package(Task):
1589
1271
1590
	__hash__ = Task.__hash__
1272
	__hash__ = Task.__hash__
Lines 1757-3383 Link Here
1757
				v = 0
1439
				v = 0
1758
		self._pkg.mtime = v
1440
		self._pkg.mtime = v
1759
1441
1760
class EbuildFetchonly(SlotObject):
1761
1762
	__slots__ = ("fetch_all", "pkg", "pretend", "settings")
1763
1764
	def execute(self):
1765
		settings = self.settings
1766
		pkg = self.pkg
1767
		portdb = pkg.root_config.trees["porttree"].dbapi
1768
		ebuild_path = portdb.findname(pkg.cpv)
1769
		settings.setcpv(pkg)
1770
		debug = settings.get("PORTAGE_DEBUG") == "1"
1771
		restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1772
1773
		if restrict_fetch:
1774
			rval = self._execute_with_builddir()
1775
		else:
1776
			rval = portage.doebuild(ebuild_path, "fetch",
1777
				settings["ROOT"], settings, debug=debug,
1778
				listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1779
				mydbapi=portdb, tree="porttree")
1780
1781
			if rval != os.EX_OK:
1782
				msg = "Fetch failed for '%s'" % (pkg.cpv,)
1783
				eerror(msg, phase="unpack", key=pkg.cpv)
1784
1785
		return rval
1786
1787
	def _execute_with_builddir(self):
1788
		# To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1789
		# ensuring sane $PWD (bug #239560) and storing elog
1790
		# messages. Use a private temp directory, in order
1791
		# to avoid locking the main one.
1792
		settings = self.settings
1793
		global_tmpdir = settings["PORTAGE_TMPDIR"]
1794
		from tempfile import mkdtemp
1795
		try:
1796
			private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1797
		except OSError, e:
1798
			if e.errno != portage.exception.PermissionDenied.errno:
1799
				raise
1800
			raise portage.exception.PermissionDenied(global_tmpdir)
1801
		settings["PORTAGE_TMPDIR"] = private_tmpdir
1802
		settings.backup_changes("PORTAGE_TMPDIR")
1803
		try:
1804
			retval = self._execute()
1805
		finally:
1806
			settings["PORTAGE_TMPDIR"] = global_tmpdir
1807
			settings.backup_changes("PORTAGE_TMPDIR")
1808
			shutil.rmtree(private_tmpdir)
1809
		return retval
1810
1811
	def _execute(self):
1812
		settings = self.settings
1813
		pkg = self.pkg
1814
		root_config = pkg.root_config
1815
		portdb = root_config.trees["porttree"].dbapi
1816
		ebuild_path = portdb.findname(pkg.cpv)
1817
		debug = settings.get("PORTAGE_DEBUG") == "1"
1818
		retval = portage.doebuild(ebuild_path, "fetch",
1819
			self.settings["ROOT"], self.settings, debug=debug,
1820
			listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1821
			mydbapi=portdb, tree="porttree")
1822
1823
		if retval != os.EX_OK:
1824
			msg = "Fetch failed for '%s'" % (pkg.cpv,)
1825
			eerror(msg, phase="unpack", key=pkg.cpv)
1826
1827
		portage.elog.elog_process(self.pkg.cpv, self.settings)
1828
		return retval
1829
1830
class PollConstants(object):

	"""
	Provides POLL* constants that are equivalent to those from the
	select module, for use by PollSelectAdapter.
	"""

	names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
	v = 1
	for k in names:
		# Use the select module's value when available; otherwise fall
		# back to v, which doubles each iteration so the fallbacks stay
		# distinct bit flags on platforms lacking some POLL* constants.
		# NOTE(review): assigning through locals() inside a class body
		# is CPython-specific behavior that this loop relies on.
		locals()[k] = getattr(select, k, v)
		v *= 2
	del k, v
1843
1844
class AsynchronousTask(SlotObject):
1845
	"""
1846
	Subclasses override _wait() and _poll() so that calls
1847
	to public methods can be wrapped for implementing
1848
	hooks such as exit listener notification.
1849
1850
	Sublasses should call self.wait() to notify exit listeners after
1851
	the task is complete and self.returncode has been set.
1852
	"""
1853
1854
	__slots__ = ("background", "cancelled", "returncode") + \
1855
		("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1856
1857
	def start(self):
1858
		"""
1859
		Start an asynchronous task and then return as soon as possible.
1860
		"""
1861
		self._start_hook()
1862
		self._start()
1863
1864
	def _start(self):
1865
		raise NotImplementedError(self)
1866
1867
	def isAlive(self):
1868
		return self.returncode is None
1869
1870
	def poll(self):
1871
		self._wait_hook()
1872
		return self._poll()
1873
1874
	def _poll(self):
1875
		return self.returncode
1876
1877
	def wait(self):
1878
		if self.returncode is None:
1879
			self._wait()
1880
		self._wait_hook()
1881
		return self.returncode
1882
1883
	def _wait(self):
1884
		return self.returncode
1885
1886
	def cancel(self):
1887
		self.cancelled = True
1888
		self.wait()
1889
1890
	def addStartListener(self, f):
1891
		"""
1892
		The function will be called with one argument, a reference to self.
1893
		"""
1894
		if self._start_listeners is None:
1895
			self._start_listeners = []
1896
		self._start_listeners.append(f)
1897
1898
	def removeStartListener(self, f):
1899
		if self._start_listeners is None:
1900
			return
1901
		self._start_listeners.remove(f)
1902
1903
	def _start_hook(self):
1904
		if self._start_listeners is not None:
1905
			start_listeners = self._start_listeners
1906
			self._start_listeners = None
1907
1908
			for f in start_listeners:
1909
				f(self)
1910
1911
	def addExitListener(self, f):
1912
		"""
1913
		The function will be called with one argument, a reference to self.
1914
		"""
1915
		if self._exit_listeners is None:
1916
			self._exit_listeners = []
1917
		self._exit_listeners.append(f)
1918
1919
	def removeExitListener(self, f):
1920
		if self._exit_listeners is None:
1921
			if self._exit_listener_stack is not None:
1922
				self._exit_listener_stack.remove(f)
1923
			return
1924
		self._exit_listeners.remove(f)
1925
1926
	def _wait_hook(self):
1927
		"""
1928
		Call this method after the task completes, just before returning
1929
		the returncode from wait() or poll(). This hook is
1930
		used to trigger exit listeners when the returncode first
1931
		becomes available.
1932
		"""
1933
		if self.returncode is not None and \
1934
			self._exit_listeners is not None:
1935
1936
			# This prevents recursion, in case one of the
1937
			# exit handlers triggers this method again by
1938
			# calling wait(). Use a stack that gives
1939
			# removeExitListener() an opportunity to consume
1940
			# listeners from the stack, before they can get
1941
			# called below. This is necessary because a call
1942
			# to one exit listener may result in a call to
1943
			# removeExitListener() for another listener on
1944
			# the stack. That listener needs to be removed
1945
			# from the stack since it would be inconsistent
1946
			# to call it after it has been been passed into
1947
			# removeExitListener().
1948
			self._exit_listener_stack = self._exit_listeners
1949
			self._exit_listeners = None
1950
1951
			self._exit_listener_stack.reverse()
1952
			while self._exit_listener_stack:
1953
				self._exit_listener_stack.pop()(self)
1954
1955
class AbstractPollTask(AsynchronousTask):
1956
1957
	__slots__ = ("scheduler",) + \
1958
		("_registered",)
1959
1960
	_bufsize = 4096
1961
	_exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1962
	_registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1963
		_exceptional_events
1964
1965
	def _unregister(self):
1966
		raise NotImplementedError(self)
1967
1968
	def _unregister_if_appropriate(self, event):
1969
		if self._registered:
1970
			if event & self._exceptional_events:
1971
				self._unregister()
1972
				self.cancel()
1973
			elif event & PollConstants.POLLHUP:
1974
				self._unregister()
1975
				self.wait()
1976
1977
class PipeReader(AbstractPollTask):
1978
1979
	"""
1980
	Reads output from one or more files and saves it in memory,
1981
	for retrieval via the getvalue() method. This is driven by
1982
	the scheduler's poll() loop, so it runs entirely within the
1983
	current process.
1984
	"""
1985
1986
	__slots__ = ("input_files",) + \
1987
		("_read_data", "_reg_ids")
1988
1989
	def _start(self):
1990
		self._reg_ids = set()
1991
		self._read_data = []
1992
		for k, f in self.input_files.iteritems():
1993
			fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1994
				fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1995
			self._reg_ids.add(self.scheduler.register(f.fileno(),
1996
				self._registered_events, self._output_handler))
1997
		self._registered = True
1998
1999
	def isAlive(self):
2000
		return self._registered
2001
2002
	def cancel(self):
2003
		if self.returncode is None:
2004
			self.returncode = 1
2005
			self.cancelled = True
2006
		self.wait()
2007
2008
	def _wait(self):
2009
		if self.returncode is not None:
2010
			return self.returncode
2011
2012
		if self._registered:
2013
			self.scheduler.schedule(self._reg_ids)
2014
			self._unregister()
2015
2016
		self.returncode = os.EX_OK
2017
		return self.returncode
2018
2019
	def getvalue(self):
2020
		"""Retrieve the entire contents"""
2021
		if sys.hexversion >= 0x3000000:
2022
			return bytes().join(self._read_data)
2023
		return "".join(self._read_data)
2024
2025
	def close(self):
2026
		"""Free the memory buffer."""
2027
		self._read_data = None
2028
2029
	def _output_handler(self, fd, event):
2030
2031
		if event & PollConstants.POLLIN:
2032
2033
			for f in self.input_files.itervalues():
2034
				if fd == f.fileno():
2035
					break
2036
2037
			buf = array.array('B')
2038
			try:
2039
				buf.fromfile(f, self._bufsize)
2040
			except EOFError:
2041
				pass
2042
2043
			if buf:
2044
				self._read_data.append(buf.tostring())
2045
			else:
2046
				self._unregister()
2047
				self.wait()
2048
2049
		self._unregister_if_appropriate(event)
2050
		return self._registered
2051
2052
	def _unregister(self):
2053
		"""
2054
		Unregister from the scheduler and close open files.
2055
		"""
2056
2057
		self._registered = False
2058
2059
		if self._reg_ids is not None:
2060
			for reg_id in self._reg_ids:
2061
				self.scheduler.unregister(reg_id)
2062
			self._reg_ids = None
2063
2064
		if self.input_files is not None:
2065
			for f in self.input_files.itervalues():
2066
				f.close()
2067
			self.input_files = None
2068
2069
class CompositeTask(AsynchronousTask):
2070
2071
	__slots__ = ("scheduler",) + ("_current_task",)
2072
2073
	def isAlive(self):
2074
		return self._current_task is not None
2075
2076
	def cancel(self):
2077
		self.cancelled = True
2078
		if self._current_task is not None:
2079
			self._current_task.cancel()
2080
2081
	def _poll(self):
2082
		"""
2083
		This does a loop calling self._current_task.poll()
2084
		repeatedly as long as the value of self._current_task
2085
		keeps changing. It calls poll() a maximum of one time
2086
		for a given self._current_task instance. This is useful
2087
		since calling poll() on a task can trigger advance to
2088
		the next task could eventually lead to the returncode
2089
		being set in cases when polling only a single task would
2090
		not have the same effect.
2091
		"""
2092
2093
		prev = None
2094
		while True:
2095
			task = self._current_task
2096
			if task is None or task is prev:
2097
				# don't poll the same task more than once
2098
				break
2099
			task.poll()
2100
			prev = task
2101
2102
		return self.returncode
2103
2104
	def _wait(self):
2105
2106
		prev = None
2107
		while True:
2108
			task = self._current_task
2109
			if task is None:
2110
				# don't wait for the same task more than once
2111
				break
2112
			if task is prev:
2113
				# Before the task.wait() method returned, an exit
2114
				# listener should have set self._current_task to either
2115
				# a different task or None. Something is wrong.
2116
				raise AssertionError("self._current_task has not " + \
2117
					"changed since calling wait", self, task)
2118
			task.wait()
2119
			prev = task
2120
2121
		return self.returncode
2122
2123
	def _assert_current(self, task):
2124
		"""
2125
		Raises an AssertionError if the given task is not the
2126
		same one as self._current_task. This can be useful
2127
		for detecting bugs.
2128
		"""
2129
		if task is not self._current_task:
2130
			raise AssertionError("Unrecognized task: %s" % (task,))
2131
2132
	def _default_exit(self, task):
2133
		"""
2134
		Calls _assert_current() on the given task and then sets the
2135
		composite returncode attribute if task.returncode != os.EX_OK.
2136
		If the task failed then self._current_task will be set to None.
2137
		Subclasses can use this as a generic task exit callback.
2138
2139
		@rtype: int
2140
		@returns: The task.returncode attribute.
2141
		"""
2142
		self._assert_current(task)
2143
		if task.returncode != os.EX_OK:
2144
			self.returncode = task.returncode
2145
			self._current_task = None
2146
		return task.returncode
2147
2148
	def _final_exit(self, task):
2149
		"""
2150
		Assumes that task is the final task of this composite task.
2151
		Calls _default_exit() and sets self.returncode to the task's
2152
		returncode and sets self._current_task to None.
2153
		"""
2154
		self._default_exit(task)
2155
		self._current_task = None
2156
		self.returncode = task.returncode
2157
		return self.returncode
2158
2159
	def _default_final_exit(self, task):
2160
		"""
2161
		This calls _final_exit() and then wait().
2162
2163
		Subclasses can use this as a generic final task exit callback.
2164
2165
		"""
2166
		self._final_exit(task)
2167
		return self.wait()
2168
2169
	def _start_task(self, task, exit_handler):
2170
		"""
2171
		Register exit handler for the given task, set it
2172
		as self._current_task, and call task.start().
2173
2174
		Subclasses can use this as a generic way to start
2175
		a task.
2176
2177
		"""
2178
		task.addExitListener(exit_handler)
2179
		self._current_task = task
2180
		task.start()
2181
2182
class TaskSequence(CompositeTask):
	"""
	Runs a queue of tasks one after another. Every queued task must
	provide an addExitListener() method, which is what drives the
	hand-off from one task to the next.
	"""

	__slots__ = ("_task_queue",)

	def __init__(self, **kwargs):
		# NOTE: initializes via AsynchronousTask, bypassing any
		# CompositeTask.__init__ — preserved from the original code.
		AsynchronousTask.__init__(self, **kwargs)
		self._task_queue = deque()

	def add(self, task):
		"""Append a task to the end of the queue."""
		self._task_queue.append(task)

	def _start(self):
		self._start_next_task()

	def cancel(self):
		# Drop everything that hasn't started yet, then cancel
		# whatever is currently running.
		self._task_queue.clear()
		CompositeTask.cancel(self)

	def _start_next_task(self):
		self._start_task(self._task_queue.popleft(),
			self._task_exit_handler)

	def _task_exit_handler(self, task):
		"""Advance to the next queued task, or finish on failure or
		when the queue is exhausted."""
		if self._default_exit(task) != os.EX_OK:
			self.wait()
			return
		if self._task_queue:
			self._start_next_task()
			return
		self._final_exit(task)
		self.wait()
class SubProcess(AbstractPollTask):
	"""
	Base class for tasks backed by a forked child process. Tracks the
	child pid, a scheduler registration id, and any pipe files used to
	monitor the child from the scheduler's poll() loop.
	"""

	__slots__ = ("pid",) + \
		("_files", "_reg_id")

	# A file descriptor is required for the scheduler to monitor changes from
	# inside a poll() loop. When logging is not enabled, create a pipe just to
	# serve this purpose alone.
	_dummy_pipe_fd = 9

	def _poll(self):
		"""
		Non-blocking poll for child exit. Returns the current
		returncode (None while the child is still running). While this
		task is registered with the scheduler, waitpid() is skipped and
		the current returncode is returned as-is.
		"""
		if self.returncode is not None:
			return self.returncode
		if self.pid is None:
			return self.returncode
		if self._registered:
			return self.returncode

		try:
			retval = os.waitpid(self.pid, os.WNOHANG)
		except OSError, e:
			if e.errno != errno.ECHILD:
				raise
			del e
			# ECHILD: the child was already reaped elsewhere. Treat it
			# as a failure with a fabricated nonzero wait status.
			retval = (self.pid, 1)

		if retval == (0, 0):
			# WNOHANG with no state change: still running.
			return None
		self._set_returncode(retval)
		return self.returncode

	def cancel(self):
		"""
		Send SIGTERM to the child if it is still alive, then wait for
		it to exit. Returns the final returncode.
		"""
		if self.isAlive():
			try:
				os.kill(self.pid, signal.SIGTERM)
			except OSError, e:
				# ESRCH: the process is already gone; nothing to kill.
				if e.errno != errno.ESRCH:
					raise
				del e

		self.cancelled = True
		if self.pid is not None:
			self.wait()
		return self.returncode

	def isAlive(self):
		# Alive means a child was spawned and has not been reaped yet.
		return self.pid is not None and \
			self.returncode is None

	def _wait(self):
		"""
		Block until the child exits and return its returncode. If this
		task is still registered, first hand control to the scheduler
		for our registration id, then unregister before reaping.
		"""
		if self.returncode is not None:
			return self.returncode

		if self._registered:
			self.scheduler.schedule(self._reg_id)
			self._unregister()
			if self.returncode is not None:
				return self.returncode

		try:
			wait_retval = os.waitpid(self.pid, 0)
		except OSError, e:
			if e.errno != errno.ECHILD:
				raise
			del e
			# Child already reaped elsewhere: fabricate a failure status.
			self._set_returncode((self.pid, 1))
		else:
			self._set_returncode(wait_retval)

		return self.returncode

	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""
		self._registered = False

		if self._reg_id is not None:
			self.scheduler.unregister(self._reg_id)
			self._reg_id = None

		if self._files is not None:
			for f in self._files.itervalues():
				f.close()
			self._files = None

	def _set_returncode(self, wait_retval):
		"""
		Convert a (pid, status) pair from os.waitpid() into
		self.returncode. The 16-bit wait status packs the exit code in
		the high byte and the terminating signal (if any) in the low
		byte.
		"""
		retval = wait_retval[1]

		if retval != os.EX_OK:
			if retval & 0xff:
				# Killed by a signal: shift the signal number into the
				# exit-code byte so the result is nonzero.
				retval = (retval & 0xff) << 8
			else:
				# Normal exit: extract the exit status byte.
				retval = retval >> 8

		self.returncode = retval
class SpawnProcess(SubProcess):

	"""
	Constructor keyword args are passed into portage.process.spawn().
	The required "args" keyword argument will be passed as the first
	spawn() argument.
	"""

	# Attribute names copied into the spawn() keyword arguments when set.
	_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
		"uid", "gid", "groups", "umask", "logfile",
		"path_lookup", "pre_exec")

	__slots__ = ("args",) + \
		_spawn_kwarg_names

	_file_names = ("log", "process", "stdout")
	_files_dict = slot_dict_class(_file_names, prefix="")

	def _start(self):
		"""
		Spawn the child via self._spawn(), wiring up stdio, optional
		log-file tee-ing, and a pipe that lets the scheduler monitor
		the child. The output handler is registered with the scheduler
		before spawning.
		"""
		if self.cancelled:
			return

		if self.fd_pipes is None:
			self.fd_pipes = {}
		fd_pipes = self.fd_pipes
		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.itervalues():
			if fd == sys.stdout.fileno():
				sys.stdout.flush()
			if fd == sys.stderr.fileno():
				sys.stderr.flush()

		logfile = self.logfile
		self._files = self._files_dict()
		files = self._files

		# Master end is read by this process; slave end goes to the child.
		master_fd, slave_fd = self._pipe(fd_pipes)
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		null_input = None
		fd_pipes_orig = fd_pipes.copy()
		if self.background:
			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = open('/dev/null', 'rb')
			fd_pipes[0] = null_input.fileno()
		else:
			fd_pipes[0] = fd_pipes_orig[0]

		files.process = os.fdopen(master_fd, 'rb')
		if logfile is not None:
			# Logging enabled: the child's stdout/stderr go through our
			# pipe and _output_handler tees them into the log file.
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

			files.log = open(logfile, mode='ab')
			portage.util.apply_secpass_permissions(logfile,
				uid=portage.portage_uid, gid=portage.portage_gid,
				mode=0660)

			if not self.background:
				files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')

			output_handler = self._output_handler

		else:

			# Create a dummy pipe so the scheduler can monitor
			# the process from inside a poll() loop.
			fd_pipes[self._dummy_pipe_fd] = slave_fd
			if self.background:
				fd_pipes[1] = slave_fd
				fd_pipes[2] = slave_fd
			output_handler = self._dummy_handler

		# Copy any non-None spawn-related attributes into kwargs.
		kwargs = {}
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
			if v is not None:
				kwargs[k] = v

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		# logfile is handled here via the pipe, not by spawn() itself.
		kwargs.pop("logfile", None)

		self._reg_id = self.scheduler.register(files.process.fileno(),
			self._registered_events, output_handler)
		self._registered = True

		retval = self._spawn(self.args, **kwargs)

		os.close(slave_fd)
		if null_input is not None:
			null_input.close()

		if isinstance(retval, int):
			# spawn failed
			self._unregister()
			self.returncode = retval
			self.wait()
			return

		self.pid = retval[0]
		# NOTE(review): this task appears to take over reaping duties
		# from portage.process's global pid tracking — confirm.
		portage.process.spawned_pids.remove(self.pid)

	def _pipe(self, fd_pipes):
		"""
		@type fd_pipes: dict
		@param fd_pipes: pipes from which to copy terminal size if desired.
		"""
		return os.pipe()

	def _spawn(self, args, **kwargs):
		"""Execute args via portage.process.spawn(); subclasses override
		this to customize how the command is executed."""
		return portage.process.spawn(args, **kwargs)

	def _output_handler(self, fd, event):
		"""
		Scheduler callback for the master side of the output pipe: copy
		any available output to the log file and (when not running in
		the background) to stdout. EOF triggers unregistration and
		wait().
		"""
		if event & PollConstants.POLLIN:

			files = self._files
			buf = array.array('B')
			try:
				buf.fromfile(files.process, self._bufsize)
			except EOFError:
				pass

			if buf:
				if not self.background:
					write_successful = False
					failures = 0
					while True:
						try:
							if not write_successful:
								buf.tofile(files.stdout)
								write_successful = True
							files.stdout.flush()
							break
						except IOError, e:
							if e.errno != errno.EAGAIN:
								raise
							del e
							failures += 1
							if failures > 50:
								# Avoid a potentially infinite loop. In
								# most cases, the failure count is zero
								# and it's unlikely to exceed 1.
								raise

							# This means that a subprocess has put an inherited
							# stdio file descriptor (typically stdin) into
							# O_NONBLOCK mode. This is not acceptable (see bug
							# #264435), so revert it. We need to use a loop
							# here since there's a race condition due to
							# parallel processes being able to change the
							# flags on the inherited file descriptor.
							# TODO: When possible, avoid having child processes
							# inherit stdio file descriptors from portage
							# (maybe it can't be avoided with
							# PROPERTIES=interactive).
							fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
								fcntl.fcntl(files.stdout.fileno(),
								fcntl.F_GETFL) ^ os.O_NONBLOCK)

				buf.tofile(files.log)
				files.log.flush()
			else:
				# Empty read means EOF: the child closed its end.
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered

	def _dummy_handler(self, fd, event):
		"""
		This method is mainly interested in detecting EOF, since
		the only purpose of the pipe is to allow the scheduler to
		monitor the process from inside a poll() loop.
		"""
		if event & PollConstants.POLLIN:

			buf = array.array('B')
			try:
				buf.fromfile(self._files.process, self._bufsize)
			except EOFError:
				pass

			if buf:
				pass
			else:
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered
class MiscFunctionsProcess(SpawnProcess):
	"""
	Spawns misc-functions.sh with an existing ebuild environment.
	"""

	__slots__ = ("commands", "phase", "pkg", "settings")

	def _start(self):
		"""Build the misc-functions.sh command line and start it via
		SpawnProcess._start()."""
		settings = self.settings
		# Drop any stale EBUILD_PHASE from the environment — presumably
		# so misc-functions.sh doesn't inherit it; TODO confirm.
		settings.pop("EBUILD_PHASE", None)
		portage_bin_path = settings["PORTAGE_BIN_PATH"]
		misc_sh_binary = os.path.join(portage_bin_path,
			os.path.basename(portage.const.MISC_SH_BINARY))

		self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
		self.logfile = settings.get("PORTAGE_LOG_FILE")

		# Remove any stale exit-status file before running, so
		# _set_returncode() below sees only this run's status.
		portage._doebuild_exit_status_unlink(
			settings.get("EBUILD_EXIT_STATUS_FILE"))

		SpawnProcess._start(self)

	def _spawn(self, args, **kwargs):
		"""Spawn through portage.spawn() so the ebuild environment held
		in self.settings is applied to the child."""
		settings = self.settings
		debug = settings.get("PORTAGE_DEBUG") == "1"
		return portage.spawn(" ".join(args), settings,
			debug=debug, **kwargs)

	def _set_returncode(self, wait_retval):
		# After the normal wait-status conversion, consult the ebuild
		# exit-status file via portage to catch additional failures.
		SpawnProcess._set_returncode(self, wait_retval)
		self.returncode = portage._doebuild_exit_status_check_and_log(
			self.settings, self.phase, self.returncode)
class EbuildFetcher(SpawnProcess):
	"""
	Fetches a package's distfiles by spawning `ebuild <file> fetch`
	(or fetchall) as a child process. In non-prefetch mode the build
	dir is locked for the duration so elog messages can be collected.
	"""

	__slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
		("_build_dir",)

	def _start(self):
		"""Prepare the fetch environment and command line, then start
		the child via SpawnProcess._start()."""
		root_config = self.pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(self.pkg.cpv)
		settings = self.config_pool.allocate()
		settings.setcpv(self.pkg)

		# In prefetch mode, logging goes to emerge-fetch.log and the builddir
		# should not be touched since otherwise it could interfere with
		# another instance of the same cpv concurrently being built for a
		# different $ROOT (currently, builds only cooperate with prefetchers
		# that are spawned for the same $ROOT).
		if not self.prefetch:
			self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
			self._build_dir.lock()
			self._build_dir.clean_log()
			portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
			if self.logfile is None:
				self.logfile = settings.get("PORTAGE_LOG_FILE")

		phase = "fetch"
		if self.fetchall:
			phase = "fetchall"

		# If any incremental variables have been overridden
		# via the environment, those values need to be passed
		# along here so that they are correctly considered by
		# the config instance in the subproccess.
		fetch_env = os.environ.copy()

		nocolor = settings.get("NOCOLOR")
		if nocolor is not None:
			fetch_env["NOCOLOR"] = nocolor

		fetch_env["PORTAGE_NICENESS"] = "0"
		if self.prefetch:
			fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

		ebuild_binary = os.path.join(
			settings["PORTAGE_BIN_PATH"], "ebuild")

		fetch_args = [ebuild_binary, ebuild_path, phase]
		debug = settings.get("PORTAGE_DEBUG") == "1"
		if debug:
			fetch_args.append("--debug")

		self.args = fetch_args
		self.env = fetch_env
		SpawnProcess._start(self)

	def _pipe(self, fd_pipes):
		"""When appropriate, use a pty so that fetcher progress bars,
		like wget has, will work properly."""
		if self.background or not sys.stdout.isatty():
			# When the output only goes to a log file,
			# there's no point in creating a pty.
			return os.pipe()
		stdout_pipe = fd_pipes.get(1)
		got_pty, master_fd, slave_fd = \
			portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
		return (master_fd, slave_fd)

	def _set_returncode(self, wait_retval):
		"""Convert the wait status, emit an eerror elog message on
		fetch failure, then flush elog messages and release the build
		dir lock (non-prefetch mode only)."""
		SpawnProcess._set_returncode(self, wait_retval)
		# Collect elog messages that might have been
		# created by the pkg_nofetch phase.
		if self._build_dir is not None:
			# Skip elog messages for prefetch, in order to avoid duplicates.
			if not self.prefetch and self.returncode != os.EX_OK:
				elog_out = None
				if self.logfile is not None:
					if self.background:
						elog_out = open(self.logfile, 'a')
				msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
				if self.logfile is not None:
					msg += ", Log file:"
				eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
				if self.logfile is not None:
					eerror(" '%s'" % (self.logfile,),
						phase="unpack", key=self.pkg.cpv, out=elog_out)
				if elog_out is not None:
					elog_out.close()
			if not self.prefetch:
				portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
			# (Removed an unused local: `features = self._build_dir.settings.features`
			# was assigned but never read.)
			if self.returncode == os.EX_OK:
				self._build_dir.clean_log()
			self._build_dir.unlock()
			self.config_pool.deallocate(self._build_dir.settings)
			self._build_dir = None
class EbuildBuildDir(SlotObject):
	"""
	Manages locking of a package's build directory (and, while locking
	or removing it, the parent category directory) so that concurrent
	processes working on the same cpv do not collide.
	"""

	__slots__ = ("dir_path", "pkg", "settings",
		"locked", "_catdir", "_lock_obj")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self.locked = False

	def lock(self):
		"""
		This raises an AlreadyLocked exception if lock() is called
		while a lock is already held. In order to avoid this, call
		unlock() or check whether the "locked" attribute is True
		or False before calling lock().
		"""
		if self._lock_obj is not None:
			raise self.AlreadyLocked((self._lock_obj,))

		dir_path = self.dir_path
		if dir_path is None:
			# No explicit dir_path: derive PORTAGE_BUILDDIR by running
			# doebuild_environment() for the "setup" phase.
			root_config = self.pkg.root_config
			portdb = root_config.trees["porttree"].dbapi
			ebuild_path = portdb.findname(self.pkg.cpv)
			settings = self.settings
			settings.setcpv(self.pkg)
			debug = settings.get("PORTAGE_DEBUG") == "1"
			use_cache = 1 # always true
			portage.doebuild_environment(ebuild_path, "setup", root_config.root,
				self.settings, debug, use_cache, portdb)
			dir_path = self.settings["PORTAGE_BUILDDIR"]

		catdir = os.path.dirname(dir_path)
		self._catdir = catdir

		# 070 is an octal literal: group rwx only.
		portage.util.ensure_dirs(os.path.dirname(catdir),
			gid=portage.portage_gid,
			mode=070, mask=0)
		catdir_lock = None
		try:
			# Hold the category dir lock while creating the category
			# dir and taking the build dir lock.
			catdir_lock = portage.locks.lockdir(catdir)
			portage.util.ensure_dirs(catdir,
				gid=portage.portage_gid,
				mode=070, mask=0)
			self._lock_obj = portage.locks.lockdir(dir_path)
		finally:
			self.locked = self._lock_obj is not None
			if catdir_lock is not None:
				portage.locks.unlockdir(catdir_lock)

	def clean_log(self):
		"""Discard existing log."""
		settings = self.settings

		for x in ('.logid', 'temp/build.log'):
			try:
				os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
			except OSError:
				pass

	def unlock(self):
		"""
		Release the build dir lock (no-op when not locked) and attempt
		to remove the now-possibly-empty category directory, ignoring
		the expected missing/non-empty errors.
		"""
		if self._lock_obj is None:
			return

		portage.locks.unlockdir(self._lock_obj)
		self._lock_obj = None
		self.locked = False

		catdir = self._catdir
		catdir_lock = None
		try:
			catdir_lock = portage.locks.lockdir(catdir)
		finally:
			if catdir_lock:
				try:
					os.rmdir(catdir)
				except OSError, e:
					if e.errno not in (errno.ENOENT,
						errno.ENOTEMPTY, errno.EEXIST):
						raise
					del e
				portage.locks.unlockdir(catdir_lock)

	class AlreadyLocked(portage.exception.PortageException):
		pass
class EbuildBuild(CompositeTask):
	"""
	Composite task that builds a single ebuild: optionally waits on a
	background prefetcher, fetches distfiles, runs the build phases,
	optionally creates a binary package, and finally merges via
	install().
	"""

	__slots__ = ("args_set", "config_pool", "find_blockers",
		"ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
		"prefetcher", "settings", "world_atom") + \
		("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")

	def _start(self):
		"""Initialize build settings, then either wait for a running
		prefetcher or proceed directly to _prefetch_exit()."""
		logger = self.logger
		opts = self.opts
		pkg = self.pkg
		settings = self.settings
		world_atom = self.world_atom
		root_config = pkg.root_config
		tree = "porttree"
		self._tree = tree
		portdb = root_config.trees[tree].dbapi
		settings.setcpv(pkg)
		settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
		ebuild_path = portdb.findname(self.pkg.cpv)
		self._ebuild_path = ebuild_path

		prefetcher = self.prefetcher
		if prefetcher is None:
			pass
		elif not prefetcher.isAlive():
			# Prefetcher already finished (or never ran): make sure it
			# is cancelled before proceeding.
			prefetcher.cancel()
		elif prefetcher.poll() is None:
			# Prefetcher still running: wait for it to exit before
			# starting our own fetch.
			waiting_msg = "Fetching files " + \
				"in the background. " + \
				"To view fetch progress, run `tail -f " + \
				"/var/log/emerge-fetch.log` in another " + \
				"terminal."
			msg_prefix = colorize("GOOD", " * ")
			from textwrap import wrap
			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
				for line in wrap(waiting_msg, 65))
			if not self.background:
				writemsg(waiting_msg, noiselevel=-1)

			self._current_task = prefetcher
			prefetcher.addExitListener(self._prefetch_exit)
			return

		self._prefetch_exit(prefetcher)

	def _prefetch_exit(self, prefetcher):
		"""Start the real fetch, or in --fetchonly mode run
		EbuildFetchonly synchronously and finish."""
		opts = self.opts
		pkg = self.pkg
		settings = self.settings

		if opts.fetchonly:
				fetcher = EbuildFetchonly(
					fetch_all=opts.fetch_all_uri,
					pkg=pkg, pretend=opts.pretend,
					settings=settings)
				retval = fetcher.execute()
				self.returncode = retval
				self.wait()
				return

		fetcher = EbuildFetcher(config_pool=self.config_pool,
			fetchall=opts.fetch_all_uri,
			fetchonly=opts.fetchonly,
			background=self.background,
			pkg=pkg, scheduler=self.scheduler)

		self._start_task(fetcher, self._fetch_exit)

	def _fetch_exit(self, fetcher):
		"""Handle fetch completion: record or remove the fetch log,
		then lock the build dir and start the build phases (with
		binary-package creation when requested or forced)."""
		opts = self.opts
		pkg = self.pkg

		fetch_failed = False
		if opts.fetchonly:
			fetch_failed = self._final_exit(fetcher) != os.EX_OK
		else:
			fetch_failed = self._default_exit(fetcher) != os.EX_OK

		if fetch_failed and fetcher.logfile is not None and \
			os.path.exists(fetcher.logfile):
			self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile

		if not fetch_failed and fetcher.logfile is not None:
			# Fetch was successful, so remove the fetch log.
			try:
				os.unlink(fetcher.logfile)
			except OSError:
				pass

		if fetch_failed or opts.fetchonly:
			self.wait()
			return

		logger = self.logger
		opts = self.opts
		pkg_count = self.pkg_count
		scheduler = self.scheduler
		settings = self.settings
		features = settings.features
		ebuild_path = self._ebuild_path
		system_set = pkg.root_config.sets["system"]

		self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
		self._build_dir.lock()

		# Cleaning is triggered before the setup
		# phase, in portage.doebuild().
		msg = " === (%s of %s) Cleaning (%s::%s)" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
		short_msg = "emerge: (%s of %s) %s Clean" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
		logger.log(msg, short_msg=short_msg)

		#buildsyspkg: Check if we need to _force_ binary package creation
		self._issyspkg = "buildsyspkg" in features and \
				system_set.findAtomForPackage(pkg) and \
				not opts.buildpkg

		if opts.buildpkg or self._issyspkg:

			self._buildpkg = True

			msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
			short_msg = "emerge: (%s of %s) %s Compile" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
			logger.log(msg, short_msg=short_msg)

		else:
			msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
			short_msg = "emerge: (%s of %s) %s Compile" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
			logger.log(msg, short_msg=short_msg)

		build = EbuildExecuter(background=self.background, pkg=pkg,
			scheduler=scheduler, settings=settings)
		self._start_task(build, self._build_exit)

	def _unlock_builddir(self):
		"""Flush elog messages and release the build dir lock."""
		portage.elog.elog_process(self.pkg.cpv, self.settings)
		self._build_dir.unlock()

	def _build_exit(self, build):
		"""After the build phases: finish immediately, or go on to
		binary-package creation when self._buildpkg is set."""
		if self._default_exit(build) != os.EX_OK:
			self._unlock_builddir()
			self.wait()
			return

		opts = self.opts
		buildpkg = self._buildpkg

		if not buildpkg:
			self._final_exit(build)
			self.wait()
			return

		if self._issyspkg:
			msg = ">>> This is a system package, " + \
				"let's pack a rescue tarball.\n"

			log_path = self.settings.get("PORTAGE_LOG_FILE")
			if log_path is not None:
				log_file = open(log_path, 'a')
				try:
					log_file.write(msg)
				finally:
					log_file.close()

			if not self.background:
				portage.writemsg_stdout(msg, noiselevel=-1)

		packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
			scheduler=self.scheduler, settings=self.settings)

		self._start_task(packager, self._buildpkg_exit)

	def _buildpkg_exit(self, packager):
		"""
		Released build dir lock when there is a failure or
		when in buildpkgonly mode. Otherwise, the lock will
		be released when merge() is called.
		"""

		if self._default_exit(packager) != os.EX_OK:
			self._unlock_builddir()
			self.wait()
			return

		if self.opts.buildpkgonly:
			# Need to call "clean" phase for buildpkgonly mode
			portage.elog.elog_process(self.pkg.cpv, self.settings)
			phase = "clean"
			clean_phase = EbuildPhase(background=self.background,
				pkg=self.pkg, phase=phase,
				scheduler=self.scheduler, settings=self.settings,
				tree=self._tree)
			self._start_task(clean_phase, self._clean_exit)
			return

		# Continue holding the builddir lock until
		# after the package has been installed.
		self._current_task = None
		self.returncode = packager.returncode
		self.wait()

	def _clean_exit(self, clean_phase):
		"""Final-exit handler for the buildpkgonly clean phase."""
		if self._final_exit(clean_phase) != os.EX_OK or \
			self.opts.buildpkgonly:
			self._unlock_builddir()
		self.wait()

	def install(self):
		"""
		Install the package and then clean up and release locks.
		Only call this after the build has completed successfully
		and neither fetchonly nor buildpkgonly mode are enabled.
		"""
		# NOTE(review): the local `find_blockers` below is unused — the
		# keyword argument reads self.find_blockers directly.
		find_blockers = self.find_blockers
		ldpath_mtimes = self.ldpath_mtimes
		logger = self.logger
		pkg = self.pkg
		pkg_count = self.pkg_count
		settings = self.settings
		world_atom = self.world_atom
		ebuild_path = self._ebuild_path
		tree = self._tree

		merge = EbuildMerge(find_blockers=self.find_blockers,
			ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
			pkg_count=pkg_count, pkg_path=ebuild_path,
			scheduler=self.scheduler,
			settings=settings, tree=tree, world_atom=world_atom)

		msg = " === (%s of %s) Merging (%s::%s)" % \
			(pkg_count.curval, pkg_count.maxval,
			pkg.cpv, ebuild_path)
		short_msg = "emerge: (%s of %s) %s Merge" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
		logger.log(msg, short_msg=short_msg)

		try:
			rval = merge.execute()
		finally:
			# Always release the build dir lock, even if merge fails.
			self._unlock_builddir()

		return rval
class EbuildExecuter(CompositeTask):
	"""
	Runs the build phases of an ebuild in order: clean, setup, unpack,
	then the remaining phases (prepare/configure/compile/test/install)
	as a TaskSequence. The setup and (for live ebuilds) unpack phases
	go through the scheduler's dedicated scheduling hooks.
	"""

	__slots__ = ("pkg", "scheduler", "settings") + ("_tree",)

	# Phases run after unpack; prepare/configure are skipped for EAPI 0/1.
	_phases = ("prepare", "configure", "compile", "test", "install")

	# Eclasses whose ebuilds fetch sources at unpack time; their
	# unpack phases are serialized via scheduleUnpack().
	_live_eclasses = frozenset([
		"bzr",
		"cvs",
		"darcs",
		"git",
		"mercurial",
		"subversion"
	])

	def _start(self):
		self._tree = "porttree"
		pkg = self.pkg
		phase = "clean"
		clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
			scheduler=self.scheduler, settings=self.settings, tree=self._tree)
		self._start_task(clean_phase, self._clean_phase_exit)

	def _clean_phase_exit(self, clean_phase):
		"""After clean: prepare the build dirs and hand the setup
		phase to the scheduler via scheduleSetup()."""
		if self._default_exit(clean_phase) != os.EX_OK:
			self.wait()
			return

		pkg = self.pkg
		scheduler = self.scheduler
		settings = self.settings
		cleanup = 1

		# This initializes PORTAGE_LOG_FILE.
		portage.prepare_build_dirs(pkg.root, settings, cleanup)

		setup_phase = EbuildPhase(background=self.background,
			pkg=pkg, phase="setup", scheduler=scheduler,
			settings=settings, tree=self._tree)

		setup_phase.addExitListener(self._setup_exit)
		self._current_task = setup_phase
		self.scheduler.scheduleSetup(setup_phase)

	def _setup_exit(self, setup_phase):
		"""After setup: start the unpack phase, serialized through the
		scheduler for live ebuilds."""
		if self._default_exit(setup_phase) != os.EX_OK:
			self.wait()
			return

		unpack_phase = EbuildPhase(background=self.background,
			pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
			settings=self.settings, tree=self._tree)

		if self._live_eclasses.intersection(self.pkg.inherited):
			# Serialize $DISTDIR access for live ebuilds since
			# otherwise they can interfere with each other.

			unpack_phase.addExitListener(self._unpack_exit)
			self._current_task = unpack_phase
			self.scheduler.scheduleUnpack(unpack_phase)

		else:
			self._start_task(unpack_phase, self._unpack_exit)

	def _unpack_exit(self, unpack_phase):
		"""After unpack: queue the remaining build phases as a
		TaskSequence and run it to completion."""
		if self._default_exit(unpack_phase) != os.EX_OK:
			self.wait()
			return

		ebuild_phases = TaskSequence(scheduler=self.scheduler)

		pkg = self.pkg
		phases = self._phases
		eapi = pkg.metadata["EAPI"]
		if eapi in ("0", "1"):
			# skip src_prepare and src_configure
			phases = phases[2:]

		for phase in phases:
			ebuild_phases.add(EbuildPhase(background=self.background,
				pkg=self.pkg, phase=phase, scheduler=self.scheduler,
				settings=self.settings, tree=self._tree))

		self._start_task(ebuild_phases, self._default_final_exit)
class EbuildMetadataPhase(SubProcess):

	"""
	Asynchronous interface for the ebuild "depend" phase which is
	used to extract metadata from the ebuild.
	"""

	__slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
		"ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
		("_raw_metadata",)

	_file_names = ("ebuild",)
	_files_dict = slot_dict_class(_file_names, prefix="")
	# Fixed fd in the child to which the metadata is written; we read
	# it from the master end of a pipe.
	_metadata_fd = 9

	def _start(self):
		"""
		Determine the EAPI (from the file name or ebuild head when the
		corresponding FEATURES are enabled), then spawn the "depend"
		phase via portage.doebuild() with a pipe registered on the
		scheduler to collect the metadata output. Unsupported EAPIs
		short-circuit through metadata_callback() without spawning.
		"""
		settings = self.settings
		settings.setcpv(self.cpv)
		ebuild_path = self.ebuild_path

		eapi = None
		if 'parse-eapi-glep-55' in settings.features:
			pf, eapi = portage._split_ebuild_name_glep55(
				os.path.basename(ebuild_path))
		if eapi is None and \
			'parse-eapi-ebuild-head' in settings.features:
			eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
				mode='r', encoding='utf_8', errors='replace'))

		if eapi is not None:
			if not portage.eapi_is_supported(eapi):
				# Unsupported EAPI: report it via the callback and
				# finish without spawning the depend phase.
				self.metadata_callback(self.cpv, self.ebuild_path,
					self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
				self.returncode = os.EX_OK
				self.wait()
				return

			settings.configdict['pkg']['EAPI'] = eapi

		debug = settings.get("PORTAGE_DEBUG") == "1"
		master_fd = None
		slave_fd = None
		fd_pipes = None
		if self.fd_pipes is not None:
			fd_pipes = self.fd_pipes.copy()
		else:
			fd_pipes = {}

		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.itervalues():
			if fd == sys.stdout.fileno():
				sys.stdout.flush()
			if fd == sys.stderr.fileno():
				sys.stderr.flush()

		fd_pipes_orig = fd_pipes.copy()
		self._files = self._files_dict()
		files = self._files

		master_fd, slave_fd = os.pipe()
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		fd_pipes[self._metadata_fd] = slave_fd

		self._raw_metadata = []
		files.ebuild = os.fdopen(master_fd, 'r')
		self._reg_id = self.scheduler.register(files.ebuild.fileno(),
			self._registered_events, self._output_handler)
		self._registered = True

		retval = portage.doebuild(ebuild_path, "depend",
			settings["ROOT"], settings, debug,
			mydbapi=self.portdb, tree="porttree",
			fd_pipes=fd_pipes, returnpid=True)

		os.close(slave_fd)

		if isinstance(retval, int):
			# doebuild failed before spawning
			self._unregister()
			self.returncode = retval
			self.wait()
			return

		self.pid = retval[0]
		portage.process.spawned_pids.remove(self.pid)

	def _output_handler(self, fd, event):
		"""Scheduler callback: accumulate metadata output from the
		pipe; an empty read (EOF) ends the task."""
		if event & PollConstants.POLLIN:
			self._raw_metadata.append(self._files.ebuild.read())
			if not self._raw_metadata[-1]:
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered

	def _set_returncode(self, wait_retval):
		"""On success, parse the raw metadata (one line per auxdb key)
		and deliver it via metadata_callback()."""
		SubProcess._set_returncode(self, wait_retval)
		if self.returncode == os.EX_OK:
			metadata_lines = "".join(self._raw_metadata).splitlines()
			if len(portage.auxdbkeys) != len(metadata_lines):
				# Don't trust bash's returncode if the
				# number of lines is incorrect.
				self.returncode = 1
			else:
				metadata = izip(portage.auxdbkeys, metadata_lines)
				self.metadata = self.metadata_callback(self.cpv,
					self.ebuild_path, self.repo_path, metadata,
					self.ebuild_mtime)
class EbuildProcess(SpawnProcess):
3197
3198
	__slots__ = ("phase", "pkg", "settings", "tree")
3199
3200
	def _start(self):
3201
		# Don't open the log file during the clean phase since the
3202
		# open file can result in an nfs lock on $T/build.log which
3203
		# prevents the clean phase from removing $T.
3204
		if self.phase not in ("clean", "cleanrm"):
3205
			self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3206
		SpawnProcess._start(self)
3207
3208
	def _pipe(self, fd_pipes):
3209
		stdout_pipe = fd_pipes.get(1)
3210
		got_pty, master_fd, slave_fd = \
3211
			portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3212
		return (master_fd, slave_fd)
3213
3214
	def _spawn(self, args, **kwargs):
3215
3216
		root_config = self.pkg.root_config
3217
		tree = self.tree
3218
		mydbapi = root_config.trees[tree].dbapi
3219
		settings = self.settings
3220
		ebuild_path = settings["EBUILD"]
3221
		debug = settings.get("PORTAGE_DEBUG") == "1"
3222
3223
		rval = portage.doebuild(ebuild_path, self.phase,
3224
			root_config.root, settings, debug,
3225
			mydbapi=mydbapi, tree=tree, **kwargs)
3226
3227
		return rval
3228
3229
	def _set_returncode(self, wait_retval):
3230
		SpawnProcess._set_returncode(self, wait_retval)
3231
3232
		if self.phase not in ("clean", "cleanrm"):
3233
			self.returncode = portage._doebuild_exit_status_check_and_log(
3234
				self.settings, self.phase, self.returncode)
3235
3236
		if self.phase == "test" and self.returncode != os.EX_OK and \
3237
			"test-fail-continue" in self.settings.features:
3238
			self.returncode = os.EX_OK
3239
3240
		portage._post_phase_userpriv_perms(self.settings)
3241
3242
class EbuildPhase(CompositeTask):
3243
3244
	__slots__ = ("background", "pkg", "phase",
3245
		"scheduler", "settings", "tree")
3246
3247
	_post_phase_cmds = portage._post_phase_cmds
3248
3249
	def _start(self):
3250
3251
		ebuild_process = EbuildProcess(background=self.background,
3252
			pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3253
			settings=self.settings, tree=self.tree)
3254
3255
		self._start_task(ebuild_process, self._ebuild_exit)
3256
3257
	def _ebuild_exit(self, ebuild_process):
3258
3259
		if self.phase == "install":
3260
			out = None
3261
			log_path = self.settings.get("PORTAGE_LOG_FILE")
3262
			log_file = None
3263
			if self.background and log_path is not None:
3264
				log_file = open(log_path, 'a')
3265
				out = log_file
3266
			try:
3267
				portage._check_build_log(self.settings, out=out)
3268
			finally:
3269
				if log_file is not None:
3270
					log_file.close()
3271
3272
		if self._default_exit(ebuild_process) != os.EX_OK:
3273
			self.wait()
3274
			return
3275
3276
		settings = self.settings
3277
3278
		if self.phase == "install":
3279
			portage._post_src_install_chost_fix(settings)
3280
			portage._post_src_install_uid_fix(settings)
3281
3282
		post_phase_cmds = self._post_phase_cmds.get(self.phase)
3283
		if post_phase_cmds is not None:
3284
			post_phase = MiscFunctionsProcess(background=self.background,
3285
				commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3286
				scheduler=self.scheduler, settings=settings)
3287
			self._start_task(post_phase, self._post_phase_exit)
3288
			return
3289
3290
		self.returncode = ebuild_process.returncode
3291
		self._current_task = None
3292
		self.wait()
3293
3294
	def _post_phase_exit(self, post_phase):
3295
		if self._final_exit(post_phase) != os.EX_OK:
3296
			writemsg("!!! post %s failed; exiting.\n" % self.phase,
3297
				noiselevel=-1)
3298
		self._current_task = None
3299
		self.wait()
3300
		return
3301
3302
class EbuildBinpkg(EbuildProcess):
	"""
	Runs the "package" ebuild phase to build a binary package and, on
	success, injects the result into the binary package tree.
	This assumes that src_install() has successfully completed.
	"""
	__slots__ = ("_binpkg_tmpfile",)

	def _start(self):
		self.phase = "package"
		self.tree = "porttree"
		pkg = self.pkg
		root_config = pkg.root_config
		bintree = root_config.trees["bintree"]
		settings = self.settings

		# Keep an installed copy of this package from being clobbered
		# while the archive is written.
		bintree.prevent_collision(pkg.cpv)
		# Write to a pid-suffixed temporary file so a partially written
		# archive never appears under the final name.
		binpkg_tmpfile = os.path.join(bintree.pkgdir,
			pkg.cpv + ".tbz2." + str(os.getpid()))
		self._binpkg_tmpfile = binpkg_tmpfile
		settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
		settings.backup_changes("PORTAGE_BINPKG_TMPFILE")

		try:
			EbuildProcess._start(self)
		finally:
			# The child inherits its own copy of the environment, so the
			# variable can be dropped from our settings immediately.
			settings.pop("PORTAGE_BINPKG_TMPFILE", None)

	def _set_returncode(self, wait_retval):
		EbuildProcess._set_returncode(self, wait_retval)

		pkg = self.pkg
		bintree = pkg.root_config.trees["bintree"]
		binpkg_tmpfile = self._binpkg_tmpfile
		if self.returncode == os.EX_OK:
			# Publish the completed archive in the binary package tree.
			bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3339
3340
class EbuildMerge(SlotObject):
3341
3342
	__slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3343
		"pkg", "pkg_count", "pkg_path", "pretend",
3344
		"scheduler", "settings", "tree", "world_atom")
3345
3346
	def execute(self):
3347
		root_config = self.pkg.root_config
3348
		settings = self.settings
3349
		retval = portage.merge(settings["CATEGORY"],
3350
			settings["PF"], settings["D"],
3351
			os.path.join(settings["PORTAGE_BUILDDIR"],
3352
			"build-info"), root_config.root, settings,
3353
			myebuild=settings["EBUILD"],
3354
			mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3355
			vartree=root_config.trees["vartree"],
3356
			prev_mtimes=self.ldpath_mtimes,
3357
			scheduler=self.scheduler,
3358
			blockers=self.find_blockers)
3359
3360
		if retval == os.EX_OK:
3361
			self.world_atom(self.pkg)
3362
			self._log_success()
3363
3364
		return retval
3365
3366
	def _log_success(self):
3367
		pkg = self.pkg
3368
		pkg_count = self.pkg_count
3369
		pkg_path = self.pkg_path
3370
		logger = self.logger
3371
		if "noclean" not in self.settings.features:
3372
			short_msg = "emerge: (%s of %s) %s Clean Post" % \
3373
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
3374
			logger.log((" === (%s of %s) " + \
3375
				"Post-Build Cleaning (%s::%s)") % \
3376
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3377
				short_msg=short_msg)
3378
		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3379
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3380
3381
class PackageUninstall(AsynchronousTask):
1442
class PackageUninstall(AsynchronousTask):
3382
1443
3383
	__slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
1444
	__slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
Lines 3414-3947 Link Here
3414
			finally:
1475
			finally:
3415
				f.close()
1476
				f.close()
3416
1477
3417
class Binpkg(CompositeTask):
3418
3419
	__slots__ = ("find_blockers",
3420
		"ldpath_mtimes", "logger", "opts",
3421
		"pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3422
		("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3423
		"_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3424
3425
	def _writemsg_level(self, msg, level=0, noiselevel=0):
3426
3427
		if not self.background:
3428
			portage.util.writemsg_level(msg,
3429
				level=level, noiselevel=noiselevel)
3430
3431
		log_path = self.settings.get("PORTAGE_LOG_FILE")
3432
		if  log_path is not None:
3433
			f = open(log_path, 'a')
3434
			try:
3435
				f.write(msg)
3436
			finally:
3437
				f.close()
3438
3439
	def _start(self):
3440
3441
		pkg = self.pkg
3442
		settings = self.settings
3443
		settings.setcpv(pkg)
3444
		self._tree = "bintree"
3445
		self._bintree = self.pkg.root_config.trees[self._tree]
3446
		self._verify = not self.opts.pretend
3447
3448
		dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3449
			"portage", pkg.category, pkg.pf)
3450
		self._build_dir = EbuildBuildDir(dir_path=dir_path,
3451
			pkg=pkg, settings=settings)
3452
		self._image_dir = os.path.join(dir_path, "image")
3453
		self._infloc = os.path.join(dir_path, "build-info")
3454
		self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3455
		settings["EBUILD"] = self._ebuild_path
3456
		debug = settings.get("PORTAGE_DEBUG") == "1"
3457
		portage.doebuild_environment(self._ebuild_path, "setup",
3458
			settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3459
		settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3460
3461
		# The prefetcher has already completed or it
3462
		# could be running now. If it's running now,
3463
		# wait for it to complete since it holds
3464
		# a lock on the file being fetched. The
3465
		# portage.locks functions are only designed
3466
		# to work between separate processes. Since
3467
		# the lock is held by the current process,
3468
		# use the scheduler and fetcher methods to
3469
		# synchronize with the fetcher.
3470
		prefetcher = self.prefetcher
3471
		if prefetcher is None:
3472
			pass
3473
		elif not prefetcher.isAlive():
3474
			prefetcher.cancel()
3475
		elif prefetcher.poll() is None:
3476
3477
			waiting_msg = ("Fetching '%s' " + \
3478
				"in the background. " + \
3479
				"To view fetch progress, run `tail -f " + \
3480
				"/var/log/emerge-fetch.log` in another " + \
3481
				"terminal.") % prefetcher.pkg_path
3482
			msg_prefix = colorize("GOOD", " * ")
3483
			from textwrap import wrap
3484
			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3485
				for line in wrap(waiting_msg, 65))
3486
			if not self.background:
3487
				writemsg(waiting_msg, noiselevel=-1)
3488
3489
			self._current_task = prefetcher
3490
			prefetcher.addExitListener(self._prefetch_exit)
3491
			return
3492
3493
		self._prefetch_exit(prefetcher)
3494
3495
	def _prefetch_exit(self, prefetcher):
3496
3497
		pkg = self.pkg
3498
		pkg_count = self.pkg_count
3499
		if not (self.opts.pretend or self.opts.fetchonly):
3500
			self._build_dir.lock()
3501
			# If necessary, discard old log so that we don't
3502
			# append to it.
3503
			self._build_dir.clean_log()
3504
			# Initialze PORTAGE_LOG_FILE.
3505
			portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3506
		fetcher = BinpkgFetcher(background=self.background,
3507
			logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3508
			pretend=self.opts.pretend, scheduler=self.scheduler)
3509
		pkg_path = fetcher.pkg_path
3510
		self._pkg_path = pkg_path
3511
3512
		if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3513
3514
			msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3515
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3516
			short_msg = "emerge: (%s of %s) %s Fetch" % \
3517
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
3518
			self.logger.log(msg, short_msg=short_msg)
3519
			self._start_task(fetcher, self._fetcher_exit)
3520
			return
3521
3522
		self._fetcher_exit(fetcher)
3523
3524
	def _fetcher_exit(self, fetcher):
3525
3526
		# The fetcher only has a returncode when
3527
		# --getbinpkg is enabled.
3528
		if fetcher.returncode is not None:
3529
			self._fetched_pkg = True
3530
			if self._default_exit(fetcher) != os.EX_OK:
3531
				self._unlock_builddir()
3532
				self.wait()
3533
				return
3534
3535
		if self.opts.pretend:
3536
			self._current_task = None
3537
			self.returncode = os.EX_OK
3538
			self.wait()
3539
			return
3540
3541
		verifier = None
3542
		if self._verify:
3543
			logfile = None
3544
			if self.background:
3545
				logfile = self.settings.get("PORTAGE_LOG_FILE")
3546
			verifier = BinpkgVerifier(background=self.background,
3547
				logfile=logfile, pkg=self.pkg)
3548
			self._start_task(verifier, self._verifier_exit)
3549
			return
3550
3551
		self._verifier_exit(verifier)
3552
3553
	def _verifier_exit(self, verifier):
3554
		if verifier is not None and \
3555
			self._default_exit(verifier) != os.EX_OK:
3556
			self._unlock_builddir()
3557
			self.wait()
3558
			return
3559
3560
		logger = self.logger
3561
		pkg = self.pkg
3562
		pkg_count = self.pkg_count
3563
		pkg_path = self._pkg_path
3564
3565
		if self._fetched_pkg:
3566
			self._bintree.inject(pkg.cpv, filename=pkg_path)
3567
3568
		if self.opts.fetchonly:
3569
			self._current_task = None
3570
			self.returncode = os.EX_OK
3571
			self.wait()
3572
			return
3573
3574
		msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3575
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3576
		short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3577
			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
3578
		logger.log(msg, short_msg=short_msg)
3579
3580
		phase = "clean"
3581
		settings = self.settings
3582
		ebuild_phase = EbuildPhase(background=self.background,
3583
			pkg=pkg, phase=phase, scheduler=self.scheduler,
3584
			settings=settings, tree=self._tree)
3585
3586
		self._start_task(ebuild_phase, self._clean_exit)
3587
3588
	def _clean_exit(self, clean_phase):
3589
		if self._default_exit(clean_phase) != os.EX_OK:
3590
			self._unlock_builddir()
3591
			self.wait()
3592
			return
3593
3594
		dir_path = self._build_dir.dir_path
3595
3596
		infloc = self._infloc
3597
		pkg = self.pkg
3598
		pkg_path = self._pkg_path
3599
3600
		dir_mode = 0755
3601
		for mydir in (dir_path, self._image_dir, infloc):
3602
			portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3603
				gid=portage.data.portage_gid, mode=dir_mode)
3604
3605
		# This initializes PORTAGE_LOG_FILE.
3606
		portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3607
		self._writemsg_level(">>> Extracting info\n")
3608
3609
		pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3610
		check_missing_metadata = ("CATEGORY", "PF")
3611
		missing_metadata = set()
3612
		for k in check_missing_metadata:
3613
			v = pkg_xpak.getfile(k)
3614
			if not v:
3615
				missing_metadata.add(k)
3616
3617
		pkg_xpak.unpackinfo(infloc)
3618
		for k in missing_metadata:
3619
			if k == "CATEGORY":
3620
				v = pkg.category
3621
			elif k == "PF":
3622
				v = pkg.pf
3623
			else:
3624
				continue
3625
3626
			f = open(os.path.join(infloc, k), 'wb')
3627
			try:
3628
				f.write(v + "\n")
3629
			finally:
3630
				f.close()
3631
3632
		# Store the md5sum in the vdb.
3633
		f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3634
		try:
3635
			f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3636
		finally:
3637
			f.close()
3638
3639
		# This gives bashrc users an opportunity to do various things
3640
		# such as remove binary packages after they're installed.
3641
		settings = self.settings
3642
		settings.setcpv(self.pkg)
3643
		settings["PORTAGE_BINPKG_FILE"] = pkg_path
3644
		settings.backup_changes("PORTAGE_BINPKG_FILE")
3645
3646
		phase = "setup"
3647
		setup_phase = EbuildPhase(background=self.background,
3648
			pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3649
			settings=settings, tree=self._tree)
3650
3651
		setup_phase.addExitListener(self._setup_exit)
3652
		self._current_task = setup_phase
3653
		self.scheduler.scheduleSetup(setup_phase)
3654
3655
	def _setup_exit(self, setup_phase):
3656
		if self._default_exit(setup_phase) != os.EX_OK:
3657
			self._unlock_builddir()
3658
			self.wait()
3659
			return
3660
3661
		extractor = BinpkgExtractorAsync(background=self.background,
3662
			image_dir=self._image_dir,
3663
			pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3664
		self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3665
		self._start_task(extractor, self._extractor_exit)
3666
3667
	def _extractor_exit(self, extractor):
3668
		if self._final_exit(extractor) != os.EX_OK:
3669
			self._unlock_builddir()
3670
			writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3671
				noiselevel=-1)
3672
		self.wait()
3673
3674
	def _unlock_builddir(self):
3675
		if self.opts.pretend or self.opts.fetchonly:
3676
			return
3677
		portage.elog.elog_process(self.pkg.cpv, self.settings)
3678
		self._build_dir.unlock()
3679
3680
	def install(self):
3681
3682
		# This gives bashrc users an opportunity to do various things
3683
		# such as remove binary packages after they're installed.
3684
		settings = self.settings
3685
		settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3686
		settings.backup_changes("PORTAGE_BINPKG_FILE")
3687
3688
		merge = EbuildMerge(find_blockers=self.find_blockers,
3689
			ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3690
			pkg=self.pkg, pkg_count=self.pkg_count,
3691
			pkg_path=self._pkg_path, scheduler=self.scheduler,
3692
			settings=settings, tree=self._tree, world_atom=self.world_atom)
3693
3694
		try:
3695
			retval = merge.execute()
3696
		finally:
3697
			settings.pop("PORTAGE_BINPKG_FILE", None)
3698
			self._unlock_builddir()
3699
		return retval
3700
3701
class BinpkgFetcher(SpawnProcess):
3702
3703
	__slots__ = ("pkg", "pretend",
3704
		"locked", "pkg_path", "_lock_obj")
3705
3706
	def __init__(self, **kwargs):
3707
		SpawnProcess.__init__(self, **kwargs)
3708
		pkg = self.pkg
3709
		self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3710
3711
	def _start(self):
3712
3713
		if self.cancelled:
3714
			return
3715
3716
		pkg = self.pkg
3717
		pretend = self.pretend
3718
		bintree = pkg.root_config.trees["bintree"]
3719
		settings = bintree.settings
3720
		use_locks = "distlocks" in settings.features
3721
		pkg_path = self.pkg_path
3722
3723
		if not pretend:
3724
			portage.util.ensure_dirs(os.path.dirname(pkg_path))
3725
			if use_locks:
3726
				self.lock()
3727
		exists = os.path.exists(pkg_path)
3728
		resume = exists and os.path.basename(pkg_path) in bintree.invalids
3729
		if not (pretend or resume):
3730
			# Remove existing file or broken symlink.
3731
			try:
3732
				os.unlink(pkg_path)
3733
			except OSError:
3734
				pass
3735
3736
		# urljoin doesn't work correctly with
3737
		# unrecognized protocols like sftp
3738
		if bintree._remote_has_index:
3739
			rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3740
			if not rel_uri:
3741
				rel_uri = pkg.cpv + ".tbz2"
3742
			uri = bintree._remote_base_uri.rstrip("/") + \
3743
				"/" + rel_uri.lstrip("/")
3744
		else:
3745
			uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3746
				"/" + pkg.pf + ".tbz2"
3747
3748
		if pretend:
3749
			portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3750
			self.returncode = os.EX_OK
3751
			self.wait()
3752
			return
3753
3754
		protocol = urlparse.urlparse(uri)[0]
3755
		fcmd_prefix = "FETCHCOMMAND"
3756
		if resume:
3757
			fcmd_prefix = "RESUMECOMMAND"
3758
		fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3759
		if not fcmd:
3760
			fcmd = settings.get(fcmd_prefix)
3761
3762
		fcmd_vars = {
3763
			"DISTDIR" : os.path.dirname(pkg_path),
3764
			"URI"     : uri,
3765
			"FILE"    : os.path.basename(pkg_path)
3766
		}
3767
3768
		fetch_env = dict(settings.iteritems())
3769
		fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3770
			for x in shlex.split(fcmd)]
3771
3772
		if self.fd_pipes is None:
3773
			self.fd_pipes = {}
3774
		fd_pipes = self.fd_pipes
3775
3776
		# Redirect all output to stdout since some fetchers like
3777
		# wget pollute stderr (if portage detects a problem then it
3778
		# can send it's own message to stderr).
3779
		fd_pipes.setdefault(0, sys.stdin.fileno())
3780
		fd_pipes.setdefault(1, sys.stdout.fileno())
3781
		fd_pipes.setdefault(2, sys.stdout.fileno())
3782
3783
		self.args = fetch_args
3784
		self.env = fetch_env
3785
		SpawnProcess._start(self)
3786
3787
	def _set_returncode(self, wait_retval):
3788
		SpawnProcess._set_returncode(self, wait_retval)
3789
		if self.returncode == os.EX_OK:
3790
			# If possible, update the mtime to match the remote package if
3791
			# the fetcher didn't already do it automatically.
3792
			bintree = self.pkg.root_config.trees["bintree"]
3793
			if bintree._remote_has_index:
3794
				remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3795
				if remote_mtime is not None:
3796
					try:
3797
						remote_mtime = long(remote_mtime)
3798
					except ValueError:
3799
						pass
3800
					else:
3801
						try:
3802
							local_mtime = long(os.stat(self.pkg_path).st_mtime)
3803
						except OSError:
3804
							pass
3805
						else:
3806
							if remote_mtime != local_mtime:
3807
								try:
3808
									os.utime(self.pkg_path,
3809
										(remote_mtime, remote_mtime))
3810
								except OSError:
3811
									pass
3812
3813
		if self.locked:
3814
			self.unlock()
3815
3816
	def lock(self):
3817
		"""
3818
		This raises an AlreadyLocked exception if lock() is called
3819
		while a lock is already held. In order to avoid this, call
3820
		unlock() or check whether the "locked" attribute is True
3821
		or False before calling lock().
3822
		"""
3823
		if self._lock_obj is not None:
3824
			raise self.AlreadyLocked((self._lock_obj,))
3825
3826
		self._lock_obj = portage.locks.lockfile(
3827
			self.pkg_path, wantnewlockfile=1)
3828
		self.locked = True
3829
3830
	class AlreadyLocked(portage.exception.PortageException):
3831
		pass
3832
3833
	def unlock(self):
3834
		if self._lock_obj is None:
3835
			return
3836
		portage.locks.unlockfile(self._lock_obj)
3837
		self._lock_obj = None
3838
		self.locked = False
3839
3840
class BinpkgVerifier(AsynchronousTask):
3841
	__slots__ = ("logfile", "pkg",)
3842
3843
	def _start(self):
3844
		"""
3845
		Note: Unlike a normal AsynchronousTask.start() method,
3846
		this one does all work is synchronously. The returncode
3847
		attribute will be set before it returns.
3848
		"""
3849
3850
		pkg = self.pkg
3851
		root_config = pkg.root_config
3852
		bintree = root_config.trees["bintree"]
3853
		rval = os.EX_OK
3854
		stdout_orig = sys.stdout
3855
		stderr_orig = sys.stderr
3856
		log_file = None
3857
		if self.background and self.logfile is not None:
3858
			log_file = open(self.logfile, 'a')
3859
		try:
3860
			if log_file is not None:
3861
				sys.stdout = log_file
3862
				sys.stderr = log_file
3863
			try:
3864
				bintree.digestCheck(pkg)
3865
			except portage.exception.FileNotFound:
3866
				writemsg("!!! Fetching Binary failed " + \
3867
					"for '%s'\n" % pkg.cpv, noiselevel=-1)
3868
				rval = 1
3869
			except portage.exception.DigestException, e:
3870
				writemsg("\n!!! Digest verification failed:\n",
3871
					noiselevel=-1)
3872
				writemsg("!!! %s\n" % e.value[0],
3873
					noiselevel=-1)
3874
				writemsg("!!! Reason: %s\n" % e.value[1],
3875
					noiselevel=-1)
3876
				writemsg("!!! Got: %s\n" % e.value[2],
3877
					noiselevel=-1)
3878
				writemsg("!!! Expected: %s\n" % e.value[3],
3879
					noiselevel=-1)
3880
				rval = 1
3881
			if rval != os.EX_OK:
3882
				pkg_path = bintree.getname(pkg.cpv)
3883
				head, tail = os.path.split(pkg_path)
3884
				temp_filename = portage._checksum_failure_temp_file(head, tail)
3885
				writemsg("File renamed to '%s'\n" % (temp_filename,),
3886
					noiselevel=-1)
3887
		finally:
3888
			sys.stdout = stdout_orig
3889
			sys.stderr = stderr_orig
3890
			if log_file is not None:
3891
				log_file.close()
3892
3893
		self.returncode = rval
3894
		self.wait()
3895
3896
class BinpkgPrefetcher(CompositeTask):
3897
3898
	__slots__ = ("pkg",) + \
3899
		("pkg_path", "_bintree",)
3900
3901
	def _start(self):
3902
		self._bintree = self.pkg.root_config.trees["bintree"]
3903
		fetcher = BinpkgFetcher(background=self.background,
3904
			logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3905
			scheduler=self.scheduler)
3906
		self.pkg_path = fetcher.pkg_path
3907
		self._start_task(fetcher, self._fetcher_exit)
3908
3909
	def _fetcher_exit(self, fetcher):
3910
3911
		if self._default_exit(fetcher) != os.EX_OK:
3912
			self.wait()
3913
			return
3914
3915
		verifier = BinpkgVerifier(background=self.background,
3916
			logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3917
		self._start_task(verifier, self._verifier_exit)
3918
3919
	def _verifier_exit(self, verifier):
3920
		if self._default_exit(verifier) != os.EX_OK:
3921
			self.wait()
3922
			return
3923
3924
		self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3925
3926
		self._current_task = None
3927
		self.returncode = os.EX_OK
3928
		self.wait()
3929
3930
class BinpkgExtractorAsync(SpawnProcess):
3931
3932
	__slots__ = ("image_dir", "pkg", "pkg_path")
3933
3934
	_shell_binary = portage.const.BASH_BINARY
3935
3936
	def _start(self):
3937
		self.args = [self._shell_binary, "-c",
3938
			"bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3939
			(portage._shell_quote(self.pkg_path),
3940
			portage._shell_quote(self.image_dir))]
3941
3942
		self.env = self.pkg.root_config.settings.environ()
3943
		SpawnProcess._start(self)
3944
3945
class MergeListItem(CompositeTask):
1478
class MergeListItem(CompositeTask):
3946
1479
3947
	"""
1480
	"""
Lines 4079-4325 Link Here
4079
		retval = self._install_task.install()
1612
		retval = self._install_task.install()
4080
		return retval
1613
		return retval
4081
1614
4082
class PackageMerge(AsynchronousTask):
4083
	"""
4084
	TODO: Implement asynchronous merge so that the scheduler can
4085
	run while a merge is executing.
4086
	"""
4087
4088
	__slots__ = ("merge",)
4089
4090
	def _start(self):
4091
4092
		pkg = self.merge.pkg
4093
		pkg_count = self.merge.pkg_count
4094
4095
		if pkg.installed:
4096
			action_desc = "Uninstalling"
4097
			preposition = "from"
4098
			counter_str = ""
4099
		else:
4100
			action_desc = "Installing"
4101
			preposition = "to"
4102
			counter_str = "(%s of %s) " % \
4103
				(colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4104
				colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4105
4106
		msg = "%s %s%s" % \
4107
			(action_desc,
4108
			counter_str,
4109
			colorize("GOOD", pkg.cpv))
4110
4111
		if pkg.root != "/":
4112
			msg += " %s %s" % (preposition, pkg.root)
4113
4114
		if not self.merge.build_opts.fetchonly and \
4115
			not self.merge.build_opts.pretend and \
4116
			not self.merge.build_opts.buildpkgonly:
4117
			self.merge.statusMessage(msg)
4118
4119
		self.returncode = self.merge.merge()
4120
		self.wait()
4121
4122
class DependencyArg(object):
	"""
	Base class for command-line dependency arguments. Holds the raw
	argument string and the root configuration it applies to.
	"""

	def __init__(self, arg=None, root_config=None):
		# Keep both values exactly as given; subclasses derive
		# additional attributes (atom, set, name) from them.
		self.root_config = root_config
		self.arg = arg

	def __str__(self):
		# Render the raw argument; "%s" applies str() to any object.
		return "%s" % (self.arg,)
4129
4130
class AtomArg(DependencyArg):
4131
	def __init__(self, atom=None, **kwargs):
4132
		DependencyArg.__init__(self, **kwargs)
4133
		self.atom = atom
4134
		if not isinstance(self.atom, portage.dep.Atom):
4135
			self.atom = portage.dep.Atom(self.atom)
4136
		self.set = (self.atom, )
4137
4138
class PackageArg(DependencyArg):
4139
	def __init__(self, package=None, **kwargs):
4140
		DependencyArg.__init__(self, **kwargs)
4141
		self.package = package
4142
		self.atom = portage.dep.Atom("=" + package.cpv)
4143
		self.set = (self.atom, )
4144
4145
class SetArg(DependencyArg):
4146
	def __init__(self, set=None, **kwargs):
4147
		DependencyArg.__init__(self, **kwargs)
4148
		self.set = set
4149
		self.name = self.arg[len(SETPREFIX):]
4150
4151
class Dependency(SlotObject):
4152
	__slots__ = ("atom", "blocker", "depth",
4153
		"parent", "onlydeps", "priority", "root")
4154
	def __init__(self, **kwargs):
4155
		SlotObject.__init__(self, **kwargs)
4156
		if self.priority is None:
4157
			self.priority = DepPriority()
4158
		if self.depth is None:
4159
			self.depth = 0
4160
4161
class BlockerCache(portage.cache.mappings.MutableMapping):
4162
	"""This caches blockers of installed packages so that dep_check does not
4163
	have to be done for every single installed package on every invocation of
4164
	emerge.  The cache is invalidated whenever it is detected that something
4165
	has changed that might alter the results of dep_check() calls:
4166
		1) the set of installed packages (including COUNTER) has changed
4167
		2) the old-style virtuals have changed
4168
	"""
4169
4170
	# Number of uncached packages to trigger cache update, since
4171
	# it's wasteful to update it for every vdb change.
4172
	_cache_threshold = 5
4173
4174
	class BlockerData(object):
4175
4176
		__slots__ = ("__weakref__", "atoms", "counter")
4177
4178
		def __init__(self, counter, atoms):
4179
			self.counter = counter
4180
			self.atoms = atoms
4181
4182
	def __init__(self, myroot, vardb):
4183
		self._vardb = vardb
4184
		self._virtuals = vardb.settings.getvirtuals()
4185
		self._cache_filename = os.path.join(myroot,
4186
			portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4187
		self._cache_version = "1"
4188
		self._cache_data = None
4189
		self._modified = set()
4190
		self._load()
4191
4192
	def _load(self):
4193
		try:
4194
			f = open(self._cache_filename, mode='rb')
4195
			mypickle = pickle.Unpickler(f)
4196
			try:
4197
				mypickle.find_global = None
4198
			except AttributeError:
4199
				# TODO: If py3k, override Unpickler.find_class().
4200
				pass
4201
			self._cache_data = mypickle.load()
4202
			f.close()
4203
			del f
4204
		except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4205
			if isinstance(e, pickle.UnpicklingError):
4206
				writemsg("!!! Error loading '%s': %s\n" % \
4207
					(self._cache_filename, str(e)), noiselevel=-1)
4208
			del e
4209
4210
		cache_valid = self._cache_data and \
4211
			isinstance(self._cache_data, dict) and \
4212
			self._cache_data.get("version") == self._cache_version and \
4213
			isinstance(self._cache_data.get("blockers"), dict)
4214
		if cache_valid:
4215
			# Validate all the atoms and counters so that
4216
			# corruption is detected as soon as possible.
4217
			invalid_items = set()
4218
			for k, v in self._cache_data["blockers"].iteritems():
4219
				if not isinstance(k, basestring):
4220
					invalid_items.add(k)
4221
					continue
4222
				try:
4223
					if portage.catpkgsplit(k) is None:
4224
						invalid_items.add(k)
4225
						continue
4226
				except portage.exception.InvalidData:
4227
					invalid_items.add(k)
4228
					continue
4229
				if not isinstance(v, tuple) or \
4230
					len(v) != 2:
4231
					invalid_items.add(k)
4232
					continue
4233
				counter, atoms = v
4234
				if not isinstance(counter, (int, long)):
4235
					invalid_items.add(k)
4236
					continue
4237
				if not isinstance(atoms, (list, tuple)):
4238
					invalid_items.add(k)
4239
					continue
4240
				invalid_atom = False
4241
				for atom in atoms:
4242
					if not isinstance(atom, basestring):
4243
						invalid_atom = True
4244
						break
4245
					if atom[:1] != "!" or \
4246
						not portage.isvalidatom(
4247
						atom, allow_blockers=True):
4248
						invalid_atom = True
4249
						break
4250
				if invalid_atom:
4251
					invalid_items.add(k)
4252
					continue
4253
4254
			for k in invalid_items:
4255
				del self._cache_data["blockers"][k]
4256
			if not self._cache_data["blockers"]:
4257
				cache_valid = False
4258
4259
		if not cache_valid:
4260
			self._cache_data = {"version":self._cache_version}
4261
			self._cache_data["blockers"] = {}
4262
			self._cache_data["virtuals"] = self._virtuals
4263
		self._modified.clear()
4264
4265
	def flush(self):
4266
		"""If the current user has permission and the internal blocker cache
4267
		been updated, save it to disk and mark it unmodified.  This is called
4268
		by emerge after it has proccessed blockers for all installed packages.
4269
		Currently, the cache is only written if the user has superuser
4270
		privileges (since that's required to obtain a lock), but all users
4271
		have read access and benefit from faster blocker lookups (as long as
4272
		the entire cache is still valid).  The cache is stored as a pickled
4273
		dict object with the following format:
4274
4275
		{
4276
			version : "1",
4277
			"blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4278
			"virtuals" : vardb.settings.getvirtuals()
4279
		}
4280
		"""
4281
		if len(self._modified) >= self._cache_threshold and \
4282
			secpass >= 2:
4283
			try:
4284
				f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4285
				pickle.dump(self._cache_data, f, protocol=2)
4286
				f.close()
4287
				portage.util.apply_secpass_permissions(
4288
					self._cache_filename, gid=portage.portage_gid, mode=0644)
4289
			except (IOError, OSError), e:
4290
				pass
4291
			self._modified.clear()
4292
4293
	def __setitem__(self, cpv, blocker_data):
4294
		"""
4295
		Update the cache and mark it as modified for a future call to
4296
		self.flush().
4297
4298
		@param cpv: Package for which to cache blockers.
4299
		@type cpv: String
4300
		@param blocker_data: An object with counter and atoms attributes.
4301
		@type blocker_data: BlockerData
4302
		"""
4303
		self._cache_data["blockers"][cpv] = \
4304
			(blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4305
		self._modified.add(cpv)
4306
4307
	def __iter__(self):
4308
		if self._cache_data is None:
4309
			# triggered by python-trace
4310
			return iter([])
4311
		return iter(self._cache_data["blockers"])
4312
4313
	def __delitem__(self, cpv):
4314
		del self._cache_data["blockers"][cpv]
4315
4316
	def __getitem__(self, cpv):
4317
		"""
4318
		@rtype: BlockerData
4319
		@returns: An object with counter and atoms attributes.
4320
		"""
4321
		return self.BlockerData(*self._cache_data["blockers"][cpv])
4322
4323
class BlockerDB(object):
1615
class BlockerDB(object):
4324
1616
4325
	def __init__(self, root_config):
1617
	def __init__(self, root_config):
Lines 4455-4593 Link Here
4455
	msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
1747
	msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4456
	writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
1748
	writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4457
1749
4458
class PackageVirtualDbapi(portage.dbapi):
	"""
	In-memory dbapi that mirrors what the installed-package database
	will look like as a merge list is applied: injecting a package
	replaces any existing package occupying the same slot. Unlike
	fakedbapi, the values stored here are Package instances (supplied
	through cpv_inject() and cpv_remove()).
	"""

	def __init__(self, settings):
		portage.dbapi.__init__(self)
		self.settings = settings
		# Caches match() results, keyed by the original dep string.
		self._match_cache = {}
		# cp -> list of Package instances
		self._cp_map = {}
		# cpv -> Package instance
		self._cpv_map = {}

	def clear(self):
		"""
		Discard every stored package and any cached match results.
		"""
		if self._cpv_map:
			self._clear_cache()
			self._cp_map.clear()
			self._cpv_map.clear()

	def copy(self):
		# Shallow-copy the maps, but give the clone its own cp lists so
		# that later cpv_inject()/cpv_remove() calls don't leak across.
		clone = PackageVirtualDbapi(self.settings)
		clone._match_cache = self._match_cache.copy()
		clone._cp_map = self._cp_map.copy()
		for cp, pkgs in clone._cp_map.iteritems():
			clone._cp_map[cp] = pkgs[:]
		clone._cpv_map = self._cpv_map.copy()
		return clone

	def __iter__(self):
		return self._cpv_map.itervalues()

	def __contains__(self, item):
		candidate = self._cpv_map.get(item.cpv)
		return candidate is not None and candidate == item

	def get(self, item, default=None):
		cpv = getattr(item, "cpv", None)
		if cpv is None:
			# Allow lookup by a (type_name, root, cpv, operation) tuple.
			if len(item) != 4:
				return default
			type_name, root, cpv, operation = item

		candidate = self._cpv_map.get(cpv)
		if candidate is not None and candidate == item:
			return candidate
		return default

	def match_pkgs(self, atom):
		# Like match(), but returns Package instances instead of cpvs.
		return [self._cpv_map[cpv] for cpv in self.match(atom)]

	def _clear_cache(self):
		# Invalidate both the category cache inherited from dbapi and
		# our own match() cache.
		if self._categories is not None:
			self._categories = None
		if self._match_cache:
			self._match_cache = {}

	def match(self, origdep, use_cache=1):
		cached = self._match_cache.get(origdep)
		if cached is not None:
			return cached[:]
		matches = portage.dbapi.match(self, origdep, use_cache=use_cache)
		self._match_cache[origdep] = matches
		return matches[:]

	def cpv_exists(self, cpv):
		return cpv in self._cpv_map

	def cp_list(self, mycp, use_cache=1):
		# cp_list() doesn't expand old-style virtuals, so a cached
		# match() result is only reusable when it literally starts with
		# the requested cp.
		cached = self._match_cache.get(mycp)
		if cached and cached[0].startswith(mycp):
			return cached[:]
		pkgs = self._cp_map.get(mycp)
		if pkgs is None:
			cpv_list = []
		else:
			cpv_list = [pkg.cpv for pkg in pkgs]
		self._cpv_sort_ascending(cpv_list)
		# Don't cache an empty result for an old-style virtual, since
		# match() may still resolve it.
		if cpv_list or not mycp.startswith("virtual/"):
			self._match_cache[mycp] = cpv_list
		return cpv_list[:]

	def cp_all(self):
		return list(self._cp_map)

	def cpv_all(self):
		return list(self._cpv_map)

	def cpv_inject(self, pkg):
		"""
		Add pkg, displacing any existing package with the same cpv or
		occupying the same slot.
		"""
		same_cp = self._cp_map.get(pkg.cp)
		if same_cp is None:
			same_cp = []
			self._cp_map[pkg.cp] = same_cp
		existing = self._cpv_map.get(pkg.cpv)
		if existing is not None:
			if existing == pkg:
				return
			self.cpv_remove(existing)
		for existing in same_cp:
			if existing.slot_atom == pkg.slot_atom:
				if existing == pkg:
					return
				self.cpv_remove(existing)
				break
		same_cp.append(pkg)
		self._cpv_map[pkg.cpv] = pkg
		self._clear_cache()

	def cpv_remove(self, pkg):
		stored = self._cpv_map.get(pkg.cpv)
		if stored != pkg:
			raise KeyError(pkg)
		self._cp_map[pkg.cp].remove(pkg)
		del self._cpv_map[pkg.cpv]
		self._clear_cache()

	def aux_get(self, cpv, wants):
		metadata = self._cpv_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]

	def aux_update(self, cpv, values):
		self._cpv_map[cpv].metadata.update(values)
		self._clear_cache()
4590
4591
class depgraph(object):
1750
class depgraph(object):
4592
1751
4593
	pkg_tree_map = RootConfig.pkg_tree_map
1752
	pkg_tree_map = RootConfig.pkg_tree_map
Lines 9290-9354 Link Here
9290
			metadata = self._cpv_pkg_map[cpv].metadata
6449
			metadata = self._cpv_pkg_map[cpv].metadata
9291
			return [metadata.get(x, "") for x in wants]
6450
			return [metadata.get(x, "") for x in wants]
9292
6451
9293
class RepoDisplay(object):
	"""
	Assigns small display indices to configured repositories (PORTDIR
	plus PORTDIR_OVERLAY entries) and renders a legend mapping each
	shown index back to its repository path.
	"""

	def __init__(self, roots):
		# repo path -> display index, in the order paths were shown
		self._shown_repos = {}
		self._unknown_repo = False
		repo_paths = set()
		for root_config in roots.itervalues():
			portdir = root_config.settings.get("PORTDIR")
			if portdir:
				repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			if overlays:
				repo_paths.update(overlays.split())
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.itervalues():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			if portdir:
				self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		"""
		Return the display index of the given repository as a string,
		or "?" if the path is empty or not a configured repository.
		"""
		real_index = -1
		if repo_path_real:
			try:
				real_index = self._repo_paths_real.index(repo_path_real)
			except ValueError:
				# list.index() raises ValueError for unknown paths;
				# previously this propagated instead of falling
				# through to the "?" branch below.
				real_index = -1
		if real_index == -1:
			s = "?"
			self._unknown_repo = True
		else:
			shown_repos = self._shown_repos
			repo_path = self._repo_paths[real_index]
			index = shown_repos.get(repo_path)
			if index is None:
				index = len(shown_repos)
				shown_repos[repo_path] = index
			s = str(index)
		return s

	def __str__(self):
		output = []
		shown_repos = self._shown_repos
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
		# Invert the path -> index map into an index-ordered list.
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.iteritems():
			show_repo_paths[repo_index] = repo_path
		if show_repo_paths:
			for index, repo_path in enumerate(show_repo_paths):
				output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		if self._unknown_repo:
			output.append(" "+teal("[?]") + \
				" indicates that the source repository could not be determined\n")
		return "".join(output)
9351
9352
class PackageCounters(object):
6452
class PackageCounters(object):
9353
6453
9354
	def __init__(self):
6454
	def __init__(self):
Lines 9421-9616 Link Here
9421
					(self.blocks - self.blocks_satisfied))
6521
					(self.blocks - self.blocks_satisfied))
9422
		return "".join(myoutput)
6522
		return "".join(myoutput)
9423
6523
9424
class UseFlagDisplay(object):
	"""
	Formats a single USE flag for display: enabled flags are red,
	disabled flags are blue with a leading '-', and forced flags are
	wrapped in parentheses.
	"""

	__slots__ = ('name', 'enabled', 'forced')

	def __init__(self, name, enabled, forced):
		self.name = name
		self.enabled = enabled
		self.forced = forced

	def __str__(self):
		if self.enabled:
			flag = red(self.name)
		else:
			flag = blue('-' + self.name)
		if self.forced:
			flag = '(%s)' % flag
		return flag

	def _cmp_combined(a, b):
		"""
		Sort by name, combining enabled and disabled flags.
		"""
		return (a.name > b.name) - (a.name < b.name)

	sort_combined = cmp_sort_key(_cmp_combined)
	del _cmp_combined

	def _cmp_separated(a, b):
		"""
		Sort by name, separating enabled flags from disabled flags.
		"""
		enabled_diff = b.enabled - a.enabled
		if enabled_diff:
			return enabled_diff
		return (a.name > b.name) - (a.name < b.name)

	sort_separated = cmp_sort_key(_cmp_separated)
	del _cmp_separated
9464
9465
class PollSelectAdapter(PollConstants):

	"""
	Emulate the poll() object interface on top of select(), for
	platforms where poll() is unavailable.
	"""

	def __init__(self):
		# fd -> eventmask for every registered descriptor
		self._registered = {}
		self._select_args = [[], [], []]

	def register(self, fd, *args):
		"""
		Only POLLIN is currently supported!
		"""
		if len(args) > 1:
			raise TypeError(
				"register expected at most 2 arguments, got " + \
				repr(1 + len(args)))

		eventmask = PollConstants.POLLIN | \
			PollConstants.POLLPRI | PollConstants.POLLOUT
		if args:
			eventmask = args[0]

		self._registered[fd] = eventmask
		# Invalidate the cached select() argument list.
		self._select_args = None

	def unregister(self, fd):
		self._select_args = None
		del self._registered[fd]

	def poll(self, *args):
		if len(args) > 1:
			raise TypeError(
				"poll expected at most 2 arguments, got " + \
				repr(1 + len(args)))

		timeout = None
		if args:
			timeout = args[0]

		select_args = self._select_args
		if select_args is None:
			select_args = [self._registered.keys(), [], []]

		if timeout is not None:
			select_args = select_args[:]
			# Translate poll() timeout semantics to select():
			# poll() takes milliseconds and blocks indefinitely when
			# the timeout is omitted, negative, or None; select()
			# takes seconds and blocks indefinitely when the timeout
			# is omitted.
			if timeout < 0:
				timeout = None
			if timeout is not None:
				select_args.append(timeout / 1000)

		readable = select.select(*select_args)[0]
		# Only POLLIN events are reported (see register()).
		return [(fd, PollConstants.POLLIN) for fd in readable]
9532
9533
class SequentialTaskQueue(SlotObject):
	"""
	A FIFO queue of asynchronous tasks that starts at most max_jobs
	tasks concurrently (a max_jobs of True means unlimited).
	"""

	__slots__ = ("max_jobs", "running_tasks") + \
		("_dirty", "_scheduling", "_task_queue")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self._task_queue = deque()
		self.running_tasks = set()
		if self.max_jobs is None:
			self.max_jobs = 1
		self._dirty = True

	def add(self, task):
		self._task_queue.append(task)
		self._dirty = True

	def addFront(self, task):
		# Queue the task ahead of everything already waiting.
		self._task_queue.appendleft(task)
		self._dirty = True

	def schedule(self):
		"""
		Start queued tasks up to the max_jobs limit. Returns True if
		any state changed (tasks started or dropped as cancelled),
		False otherwise.
		"""
		if not self._dirty:
			return False

		if not self:
			return False

		if self._scheduling:
			# Ignore any recursive schedule() calls triggered via
			# self._task_exit().
			return False

		self._scheduling = True

		pending = self._task_queue
		running = self.running_tasks
		limit = self.max_jobs
		state_changed = False

		while pending and \
			(limit is True or len(running) < limit):
			task = pending.popleft()
			if not getattr(task, "cancelled", None):
				running.add(task)
				task.addExitListener(self._task_exit)
				task.start()
			state_changed = True

		self._dirty = False
		self._scheduling = False

		return state_changed

	def _task_exit(self, task):
		"""
		Since we can always rely on exit listeners being called, the set
		of running tasks is always pruned automatically and there is
		never any need to actively prune it.
		"""
		self.running_tasks.remove(task)
		if self._task_queue:
			self._dirty = True

	def clear(self):
		# Drop queued tasks and cancel anything currently running.
		self._task_queue.clear()
		running = self.running_tasks
		while running:
			task = running.pop()
			task.removeExitListener(self._task_exit)
			task.cancel()
		self._dirty = False

	def __nonzero__(self):
		return bool(self._task_queue or self.running_tasks)

	def __len__(self):
		return len(self._task_queue) + len(self.running_tasks)
9613
9614
_can_poll_device = None
6525
_can_poll_device = None
9615
6526
9616
def can_poll_device():
6527
def can_poll_device():
Lines 10215-10238 Link Here
10215
		if self.xterm_titles:
7126
		if self.xterm_titles:
10216
			xtermTitle(" ".join(plain_output.split()))
7127
			xtermTitle(" ".join(plain_output.split()))
10217
7128
10218
class ProgressHandler(object):
	"""
	Rate-limited progress reporter: display() is invoked at most once
	per min_latency seconds as onProgress() updates arrive.
	"""
	def __init__(self):
		self.curval = 0
		self.maxval = 0
		self._last_update = 0
		# Minimum number of seconds between display() calls.
		self.min_latency = 0.2

	def onProgress(self, maxval, curval):
		self.maxval = maxval
		self.curval = curval
		now = time.time()
		if now - self._last_update >= self.min_latency:
			self._last_update = now
			self.display()

	def display(self):
		# Subclasses must render self.curval / self.maxval.
		raise NotImplementedError(self)
10235
10236
class Scheduler(PollScheduler):
7129
class Scheduler(PollScheduler):
10237
7130
10238
	_opts_ignore_blockers = \
7131
	_opts_ignore_blockers = \
(-)SequentialTaskQueue.py (+83 lines)
Line 0 Link Here
1
from _emerge.SlotObject import SlotObject
2
from collections import deque
3
class SequentialTaskQueue(SlotObject):
	"""
	A FIFO queue of asynchronous tasks that starts at most max_jobs
	tasks concurrently (a max_jobs of True means unlimited).
	"""

	__slots__ = ("max_jobs", "running_tasks") + \
		("_dirty", "_scheduling", "_task_queue")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self._task_queue = deque()
		self.running_tasks = set()
		if self.max_jobs is None:
			self.max_jobs = 1
		self._dirty = True

	def add(self, task):
		self._task_queue.append(task)
		self._dirty = True

	def addFront(self, task):
		# Queue the task ahead of everything already waiting.
		self._task_queue.appendleft(task)
		self._dirty = True

	def schedule(self):
		"""
		Start queued tasks up to the max_jobs limit. Returns True if
		any state changed (tasks started or dropped as cancelled),
		False otherwise.
		"""
		if not self._dirty:
			return False

		if not self:
			return False

		if self._scheduling:
			# Ignore any recursive schedule() calls triggered via
			# self._task_exit().
			return False

		self._scheduling = True

		pending = self._task_queue
		running = self.running_tasks
		limit = self.max_jobs
		state_changed = False

		while pending and \
			(limit is True or len(running) < limit):
			task = pending.popleft()
			if not getattr(task, "cancelled", None):
				running.add(task)
				task.addExitListener(self._task_exit)
				task.start()
			state_changed = True

		self._dirty = False
		self._scheduling = False

		return state_changed

	def _task_exit(self, task):
		"""
		Since we can always rely on exit listeners being called, the set
		of running tasks is always pruned automatically and there is
		never any need to actively prune it.
		"""
		self.running_tasks.remove(task)
		if self._task_queue:
			self._dirty = True

	def clear(self):
		# Drop queued tasks and cancel anything currently running.
		self._task_queue.clear()
		running = self.running_tasks
		while running:
			task = running.pop()
			task.removeExitListener(self._task_exit)
			task.cancel()
		self._dirty = False

	def __nonzero__(self):
		return bool(self._task_queue or self.running_tasks)

	def __len__(self):
		return len(self._task_queue) + len(self.running_tasks)
83
(-)BinpkgExtractorAsync.py (+23 lines)
Line 0 Link Here
1
from _emerge.SpawnProcess import SpawnProcess
2
try:
3
	import portage
4
except ImportError:
5
	from os import path as osp
6
	import sys
7
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
8
	import portage
9
class BinpkgExtractorAsync(SpawnProcess):
	"""
	Spawns a bash pipeline that decompresses a binary package and
	unpacks it into the image directory.
	"""

	__slots__ = ("image_dir", "pkg", "pkg_path")

	# bash is used so the decompress|tar pipeline runs as one child.
	_shell_binary = portage.const.BASH_BINARY

	def _start(self):
		# Decompress and unpack into the image directory in a single
		# pipeline, preserving file permissions (tar -p).
		pkg_path_q = portage._shell_quote(self.pkg_path)
		image_dir_q = portage._shell_quote(self.image_dir)
		self.args = [self._shell_binary, "-c",
			"bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
			(pkg_path_q, image_dir_q)]

		self.env = self.pkg.root_config.settings.environ()
		SpawnProcess._start(self)
23
(-)BlockerCache.py (+176 lines)
Line 0 Link Here
1
from portage.util import writemsg
2
from portage.data import secpass
3
try:
4
	import portage
5
except ImportError:
6
	from os import path as osp
7
	import sys
8
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
9
	import portage
10
try:
11
	import cPickle as pickle
12
except ImportError:
13
	import pickle
14
import os
15
class BlockerCache(portage.cache.mappings.MutableMapping):
16
	"""This caches blockers of installed packages so that dep_check does not
17
	have to be done for every single installed package on every invocation of
18
	emerge.  The cache is invalidated whenever it is detected that something
19
	has changed that might alter the results of dep_check() calls:
20
		1) the set of installed packages (including COUNTER) has changed
21
		2) the old-style virtuals have changed
22
	"""
23
24
	# Number of uncached packages to trigger cache update, since
25
	# it's wasteful to update it for every vdb change.
26
	_cache_threshold = 5
27
28
	class BlockerData(object):
29
30
		__slots__ = ("__weakref__", "atoms", "counter")
31
32
		def __init__(self, counter, atoms):
33
			self.counter = counter
34
			self.atoms = atoms
35
36
	def __init__(self, myroot, vardb):
37
		self._vardb = vardb
38
		self._virtuals = vardb.settings.getvirtuals()
39
		self._cache_filename = os.path.join(myroot,
40
			portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
41
		self._cache_version = "1"
42
		self._cache_data = None
43
		self._modified = set()
44
		self._load()
45
46
	def _load(self):
47
		try:
48
			f = open(self._cache_filename, mode='rb')
49
			mypickle = pickle.Unpickler(f)
50
			try:
51
				mypickle.find_global = None
52
			except AttributeError:
53
				# TODO: If py3k, override Unpickler.find_class().
54
				pass
55
			self._cache_data = mypickle.load()
56
			f.close()
57
			del f
58
		except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
59
			if isinstance(e, pickle.UnpicklingError):
60
				writemsg("!!! Error loading '%s': %s\n" % \
61
					(self._cache_filename, str(e)), noiselevel=-1)
62
			del e
63
64
		cache_valid = self._cache_data and \
65
			isinstance(self._cache_data, dict) and \
66
			self._cache_data.get("version") == self._cache_version and \
67
			isinstance(self._cache_data.get("blockers"), dict)
68
		if cache_valid:
69
			# Validate all the atoms and counters so that
70
			# corruption is detected as soon as possible.
71
			invalid_items = set()
72
			for k, v in self._cache_data["blockers"].iteritems():
73
				if not isinstance(k, basestring):
74
					invalid_items.add(k)
75
					continue
76
				try:
77
					if portage.catpkgsplit(k) is None:
78
						invalid_items.add(k)
79
						continue
80
				except portage.exception.InvalidData:
81
					invalid_items.add(k)
82
					continue
83
				if not isinstance(v, tuple) or \
84
					len(v) != 2:
85
					invalid_items.add(k)
86
					continue
87
				counter, atoms = v
88
				if not isinstance(counter, (int, long)):
89
					invalid_items.add(k)
90
					continue
91
				if not isinstance(atoms, (list, tuple)):
92
					invalid_items.add(k)
93
					continue
94
				invalid_atom = False
95
				for atom in atoms:
96
					if not isinstance(atom, basestring):
97
						invalid_atom = True
98
						break
99
					if atom[:1] != "!" or \
100
						not portage.isvalidatom(
101
						atom, allow_blockers=True):
102
						invalid_atom = True
103
						break
104
				if invalid_atom:
105
					invalid_items.add(k)
106
					continue
107
108
			for k in invalid_items:
109
				del self._cache_data["blockers"][k]
110
			if not self._cache_data["blockers"]:
111
				cache_valid = False
112
113
		if not cache_valid:
114
			self._cache_data = {"version":self._cache_version}
115
			self._cache_data["blockers"] = {}
116
			self._cache_data["virtuals"] = self._virtuals
117
		self._modified.clear()
118
119
	def flush(self):
120
		"""If the current user has permission and the internal blocker cache
121
		been updated, save it to disk and mark it unmodified.  This is called
122
		by emerge after it has proccessed blockers for all installed packages.
123
		Currently, the cache is only written if the user has superuser
124
		privileges (since that's required to obtain a lock), but all users
125
		have read access and benefit from faster blocker lookups (as long as
126
		the entire cache is still valid).  The cache is stored as a pickled
127
		dict object with the following format:
128
129
		{
130
			version : "1",
131
			"blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
132
			"virtuals" : vardb.settings.getvirtuals()
133
		}
134
		"""
135
		if len(self._modified) >= self._cache_threshold and \
136
			secpass >= 2:
137
			try:
138
				f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
139
				pickle.dump(self._cache_data, f, protocol=2)
140
				f.close()
141
				portage.util.apply_secpass_permissions(
142
					self._cache_filename, gid=portage.portage_gid, mode=0644)
143
			except (IOError, OSError), e:
144
				pass
145
			self._modified.clear()
146
147
	def __setitem__(self, cpv, blocker_data):
148
		"""
149
		Update the cache and mark it as modified for a future call to
150
		self.flush().
151
152
		@param cpv: Package for which to cache blockers.
153
		@type cpv: String
154
		@param blocker_data: An object with counter and atoms attributes.
155
		@type blocker_data: BlockerData
156
		"""
157
		self._cache_data["blockers"][cpv] = \
158
			(blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
159
		self._modified.add(cpv)
160
161
	def __iter__(self):
162
		if self._cache_data is None:
163
			# triggered by python-trace
164
			return iter([])
165
		return iter(self._cache_data["blockers"])
166
167
	def __delitem__(self, cpv):
168
		del self._cache_data["blockers"][cpv]
169
170
	def __getitem__(self, cpv):
171
		"""
172
		@rtype: BlockerData
173
		@returns: An object with counter and atoms attributes.
174
		"""
175
		return self.BlockerData(*self._cache_data["blockers"][cpv])
176
(-)BinpkgVerifier.py (+67 lines)
Line 0 Link Here
1
from _emerge.AsynchronousTask import AsynchronousTask
2
from portage.util import writemsg
3
import sys
4
try:
5
	import portage
6
except ImportError:
7
	from os import path as osp
8
	import sys
9
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
10
	import portage
11
import os
12
class BinpkgVerifier(AsynchronousTask):
13
	__slots__ = ("logfile", "pkg",)
14
15
	def _start(self):
16
		"""
17
		Note: Unlike a normal AsynchronousTask.start() method,
18
		this one does all work is synchronously. The returncode
19
		attribute will be set before it returns.
20
		"""
21
22
		pkg = self.pkg
23
		root_config = pkg.root_config
24
		bintree = root_config.trees["bintree"]
25
		rval = os.EX_OK
26
		stdout_orig = sys.stdout
27
		stderr_orig = sys.stderr
28
		log_file = None
29
		if self.background and self.logfile is not None:
30
			log_file = open(self.logfile, 'a')
31
		try:
32
			if log_file is not None:
33
				sys.stdout = log_file
34
				sys.stderr = log_file
35
			try:
36
				bintree.digestCheck(pkg)
37
			except portage.exception.FileNotFound:
38
				writemsg("!!! Fetching Binary failed " + \
39
					"for '%s'\n" % pkg.cpv, noiselevel=-1)
40
				rval = 1
41
			except portage.exception.DigestException, e:
42
				writemsg("\n!!! Digest verification failed:\n",
43
					noiselevel=-1)
44
				writemsg("!!! %s\n" % e.value[0],
45
					noiselevel=-1)
46
				writemsg("!!! Reason: %s\n" % e.value[1],
47
					noiselevel=-1)
48
				writemsg("!!! Got: %s\n" % e.value[2],
49
					noiselevel=-1)
50
				writemsg("!!! Expected: %s\n" % e.value[3],
51
					noiselevel=-1)
52
				rval = 1
53
			if rval != os.EX_OK:
54
				pkg_path = bintree.getname(pkg.cpv)
55
				head, tail = os.path.split(pkg_path)
56
				temp_filename = portage._checksum_failure_temp_file(head, tail)
57
				writemsg("File renamed to '%s'\n" % (temp_filename,),
58
					noiselevel=-1)
59
		finally:
60
			sys.stdout = stdout_orig
61
			sys.stderr = stderr_orig
62
			if log_file is not None:
63
				log_file.close()
64
65
		self.returncode = rval
66
		self.wait()
67
(-)PollSelectAdapter.py (+70 lines)
Line 0 Link Here
1
from _emerge.PollConstants import PollConstants
2
import select
3
class PollSelectAdapter(PollConstants):

	"""
	Emulate the poll() object interface on top of select(), for
	platforms where poll() is unavailable.
	"""

	def __init__(self):
		# fd -> eventmask for every registered descriptor
		self._registered = {}
		self._select_args = [[], [], []]

	def register(self, fd, *args):
		"""
		Only POLLIN is currently supported!
		"""
		if len(args) > 1:
			raise TypeError(
				"register expected at most 2 arguments, got " + \
				repr(1 + len(args)))

		eventmask = PollConstants.POLLIN | \
			PollConstants.POLLPRI | PollConstants.POLLOUT
		if args:
			eventmask = args[0]

		self._registered[fd] = eventmask
		# Invalidate the cached select() argument list.
		self._select_args = None

	def unregister(self, fd):
		self._select_args = None
		del self._registered[fd]

	def poll(self, *args):
		if len(args) > 1:
			raise TypeError(
				"poll expected at most 2 arguments, got " + \
				repr(1 + len(args)))

		timeout = None
		if args:
			timeout = args[0]

		select_args = self._select_args
		if select_args is None:
			select_args = [self._registered.keys(), [], []]

		if timeout is not None:
			select_args = select_args[:]
			# Translate poll() timeout semantics to select():
			# poll() takes milliseconds and blocks indefinitely when
			# the timeout is omitted, negative, or None; select()
			# takes seconds and blocks indefinitely when the timeout
			# is omitted.
			if timeout < 0:
				timeout = None
			if timeout is not None:
				select_args.append(timeout / 1000)

		readable = select.select(*select_args)[0]
		# Only POLLIN events are reported (see register()).
		return [(fd, PollConstants.POLLIN) for fd in readable]
70
(-)BinpkgFetcher.py (+151 lines)
Line 0 Link Here
1
from _emerge.SpawnProcess import SpawnProcess
2
import urlparse
3
import sys
4
import shlex
5
try:
6
	import portage
7
except ImportError:
8
	from os import path as osp
9
	import sys
10
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
11
	import portage
12
import os
13
class BinpkgFetcher(SpawnProcess):
	"""
	Fetches a binary package (.tbz2) from PORTAGE_BINHOST, with
	optional distlocks locking and resume support.
	"""

	__slots__ = ("pkg", "pretend",
		"locked", "pkg_path", "_lock_obj")

	def __init__(self, **kwargs):
		SpawnProcess.__init__(self, **kwargs)
		pkg = self.pkg
		self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)

	def _start(self):

		if self.cancelled:
			return

		pkg = self.pkg
		pretend = self.pretend
		bintree = pkg.root_config.trees["bintree"]
		settings = bintree.settings
		use_locks = "distlocks" in settings.features
		pkg_path = self.pkg_path

		if not pretend:
			portage.util.ensure_dirs(os.path.dirname(pkg_path))
			if use_locks:
				self.lock()
		exists = os.path.exists(pkg_path)
		resume = exists and os.path.basename(pkg_path) in bintree.invalids
		if not (pretend or resume):
			# Remove existing file or broken symlink.
			try:
				os.unlink(pkg_path)
			except OSError:
				pass

		# urljoin doesn't work correctly with
		# unrecognized protocols like sftp
		if bintree._remote_has_index:
			rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
			if not rel_uri:
				rel_uri = pkg.cpv + ".tbz2"
			uri = bintree._remote_base_uri.rstrip("/") + \
				"/" + rel_uri.lstrip("/")
		else:
			uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
				"/" + pkg.pf + ".tbz2"

		if pretend:
			# Just report the URI that would be fetched.
			portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
			self.returncode = os.EX_OK
			self.wait()
			return

		# Pick FETCHCOMMAND/RESUMECOMMAND, preferring the
		# protocol-specific variant (e.g. FETCHCOMMAND_HTTP).
		scheme = urlparse.urlparse(uri)[0]
		cmd_var = "FETCHCOMMAND"
		if resume:
			cmd_var = "RESUMECOMMAND"
		fetch_cmd = settings.get(cmd_var + "_" + scheme.upper())
		if not fetch_cmd:
			fetch_cmd = settings.get(cmd_var)

		expand_map = {
			"DISTDIR" : os.path.dirname(pkg_path),
			"URI"     : uri,
			"FILE"    : os.path.basename(pkg_path)
		}

		fetch_env = dict(settings.iteritems())
		fetch_args = [portage.util.varexpand(x, mydict=expand_map) \
			for x in shlex.split(fetch_cmd)]

		if self.fd_pipes is None:
			self.fd_pipes = {}
		fd_pipes = self.fd_pipes

		# Redirect all output to stdout since some fetchers like
		# wget pollute stderr (if portage detects a problem then it
		# can send its own message to stderr).
		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stdout.fileno())

		self.args = fetch_args
		self.env = fetch_env
		SpawnProcess._start(self)

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		if self.returncode == os.EX_OK:
			# If possible, update the mtime to match the remote package
			# if the fetcher didn't already do it automatically.
			self._fixup_mtime()

		if self.locked:
			self.unlock()

	def _fixup_mtime(self):
		# Best-effort: any parse or stat failure leaves the mtime alone.
		bintree = self.pkg.root_config.trees["bintree"]
		if not bintree._remote_has_index:
			return
		remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
		if remote_mtime is None:
			return
		try:
			remote_mtime = long(remote_mtime)
		except ValueError:
			return
		try:
			local_mtime = long(os.stat(self.pkg_path).st_mtime)
		except OSError:
			return
		if remote_mtime != local_mtime:
			try:
				os.utime(self.pkg_path,
					(remote_mtime, remote_mtime))
			except OSError:
				pass

	def lock(self):
		"""
		This raises an AlreadyLocked exception if lock() is called
		while a lock is already held. In order to avoid this, call
		unlock() or check whether the "locked" attribute is True
		or False before calling lock().
		"""
		if self._lock_obj is not None:
			raise self.AlreadyLocked((self._lock_obj,))

		self._lock_obj = portage.locks.lockfile(
			self.pkg_path, wantnewlockfile=1)
		self.locked = True

	class AlreadyLocked(portage.exception.PortageException):
		pass

	def unlock(self):
		if self._lock_obj is None:
			return
		portage.locks.unlockfile(self._lock_obj)
		self._lock_obj = None
		self.locked = False
151
(-)PackageMerge.py (+42 lines)
Line 0 Link Here
1
from _emerge.AsynchronousTask import AsynchronousTask
2
from portage.output import colorize
3
class PackageMerge(AsynchronousTask):
	"""
	Runs a (currently synchronous) package merge or uninstall,
	emitting a status line beforehand unless the build options
	suppress actual merging.

	TODO: Implement asynchronous merge so that the scheduler can
	run while a merge is executing.
	"""

	__slots__ = ("merge",)

	def _start(self):
		pkg = self.merge.pkg
		pkg_count = self.merge.pkg_count

		if pkg.installed:
			# Removing an installed package: no progress counter.
			action_desc = "Uninstalling"
			preposition = "from"
			counter_str = ""
		else:
			action_desc = "Installing"
			preposition = "to"
			cur = colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval))
			top = colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval))
			counter_str = "(%s of %s) " % (cur, top)

		msg = "%s %s%s" % (
			action_desc, counter_str, colorize("GOOD", pkg.cpv))

		# Mention the target root only when it isn't the default "/".
		if pkg.root != "/":
			msg += " %s %s" % (preposition, pkg.root)

		build_opts = self.merge.build_opts
		if not (build_opts.fetchonly or
			build_opts.pretend or
			build_opts.buildpkgonly):
			self.merge.statusMessage(msg)

		self.returncode = self.merge.merge()
		self.wait()
42
(-)UseFlagDisplay.py (+44 lines)
Line 0 Link Here
1
from portage.output import red
2
from portage.util import cmp_sort_key
3
from portage.output import blue
4
class UseFlagDisplay(object):

	"""
	Renders a single USE flag for display: enabled flags in red,
	disabled flags in blue with a '-' prefix, and forced flags
	wrapped in parentheses. Also provides sort keys for ordering
	collections of flags.
	"""

	__slots__ = ('name', 'enabled', 'forced')

	def __init__(self, name, enabled, forced):
		# name: the USE flag name; enabled/forced: truthy flag states.
		self.name = name
		self.enabled = enabled
		self.forced = forced

	def __str__(self):
		s = self.name
		if self.enabled:
			s = red(s)
		else:
			s = '-' + s
			s = blue(s)
		if self.forced:
			s = '(%s)' % s
		return s

	def _cmp_combined(a, b):
		"""
		Sort by name, combining enabled and disabled flags.
		"""
		# Boolean subtraction yields the classic -1/0/1 cmp() result.
		return (a.name > b.name) - (a.name < b.name)

	# Expose only sorted()-style key functions; the raw comparison
	# helpers are deleted below so they never leak as methods.
	sort_combined = cmp_sort_key(_cmp_combined)
	del _cmp_combined

	def _cmp_separated(a, b):
		"""
		Sort by name, separating enabled flags from disabled flags.
		"""
		# True/False subtraction: enabled flags sort before disabled.
		enabled_diff = b.enabled - a.enabled
		if enabled_diff:
			return enabled_diff
		return (a.name > b.name) - (a.name < b.name)

	sort_separated = cmp_sort_key(_cmp_separated)
	del _cmp_separated
44
(-)PipeReader.py (+98 lines)
Line 0 Link Here
1
from _emerge.AbstractPollTask import AbstractPollTask
2
from _emerge.PollConstants import PollConstants
3
import sys
4
import os
5
import fcntl
6
import array
7
class PipeReader(AbstractPollTask):

	"""
	Reads output from one or more files and saves it in memory,
	for retrieval via the getvalue() method. This is driven by
	the scheduler's poll() loop, so it runs entirely within the
	current process.
	"""

	__slots__ = ("input_files",) + \
		("_read_data", "_reg_ids")

	def _start(self):
		# Register each input file with the scheduler in non-blocking
		# mode so reads in _output_handler() never stall the loop.
		self._reg_ids = set()
		self._read_data = []
		for k, f in self.input_files.iteritems():
			fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
				fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
			self._reg_ids.add(self.scheduler.register(f.fileno(),
				self._registered_events, self._output_handler))
		self._registered = True

	def isAlive(self):
		# Alive for as long as at least one registration is active.
		return self._registered

	def cancel(self):
		# Mark cancelled with a nonzero returncode, then let wait()
		# drain the scheduler and unregister.
		if self.returncode is None:
			self.returncode = 1
			self.cancelled = True
		self.wait()

	def _wait(self):
		# Block in the scheduler until every input reaches EOF; this
		# task always reports success unless it was cancelled first.
		if self.returncode is not None:
			return self.returncode

		if self._registered:
			self.scheduler.schedule(self._reg_ids)
			self._unregister()

		self.returncode = os.EX_OK
		return self.returncode

	def getvalue(self):
		"""Retrieve the entire contents"""
		# On Python 3 the chunks are bytes; join accordingly.
		if sys.hexversion >= 0x3000000:
			return bytes().join(self._read_data)
		return "".join(self._read_data)

	def close(self):
		"""Free the memory buffer."""
		self._read_data = None

	def _output_handler(self, fd, event):
		# Scheduler callback: on POLLIN, read whatever is available
		# from the file whose descriptor fired; EOF (empty read)
		# triggers unregistration and completion.
		if event & PollConstants.POLLIN:

			# Locate the file object matching this descriptor.
			for f in self.input_files.itervalues():
				if fd == f.fileno():
					break

			# _bufsize is presumably provided by AbstractPollTask —
			# TODO confirm, not visible here.
			buf = array.array('B')
			try:
				buf.fromfile(f, self._bufsize)
			except EOFError:
				pass

			if buf:
				self._read_data.append(buf.tostring())
			else:
				# Zero bytes read means EOF: stop polling and finish.
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered

	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""

		self._registered = False

		if self._reg_ids is not None:
			for reg_id in self._reg_ids:
				self.scheduler.unregister(reg_id)
			self._reg_ids = None

		if self.input_files is not None:
			for f in self.input_files.itervalues():
				f.close()
			self.input_files = None
98
(-)Blocker.py (+24 lines)
Line 0 Link Here
1
from _emerge.Task import Task
2
try:
3
	import portage
4
except ImportError:
5
	from os import path as osp
6
	import sys
7
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
8
	import portage
9
class Blocker(Task):

	# A blocking-dependency ("!atom") entry; hashable via the key
	# tuple built in _get_hash_key().

	__hash__ = Task.__hash__
	__slots__ = ("root", "atom", "cp", "eapi", "satisfied")

	def __init__(self, **kwargs):
		Task.__init__(self, **kwargs)
		# Cache the category/package key derived from the blocker atom.
		self.cp = portage.dep_getkey(self.atom)

	def _get_hash_key(self):
		# Identity is ("blocks", root, atom, eapi), computed lazily
		# and cached on the instance.
		hash_key = getattr(self, "_hash_key", None)
		if hash_key is None:
			self._hash_key = \
				("blocks", self.root, self.atom, self.eapi)
		return self._hash_key
24
(-)EbuildMerge.py (+50 lines)
Line 0 Link Here
1
from _emerge.SlotObject import SlotObject
2
try:
3
	import portage
4
except ImportError:
5
	from os import path as osp
6
	import sys
7
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
8
	import portage
9
import os
10
class EbuildMerge(SlotObject):

	# Performs the final merge of a built ebuild image into the
	# filesystem via portage.merge(), then updates the world file
	# and logs completion on success.

	__slots__ = ("find_blockers", "logger", "ldpath_mtimes",
		"pkg", "pkg_count", "pkg_path", "pretend",
		"scheduler", "settings", "tree", "world_atom")

	def execute(self):
		"""
		Merge the prepared image (${D}) into root_config.root.
		Returns the portage.merge() return value; on os.EX_OK the
		package is also registered via world_atom() and logged.
		"""
		root_config = self.pkg.root_config
		settings = self.settings
		retval = portage.merge(settings["CATEGORY"],
			settings["PF"], settings["D"],
			os.path.join(settings["PORTAGE_BUILDDIR"],
			"build-info"), root_config.root, settings,
			myebuild=settings["EBUILD"],
			mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
			vartree=root_config.trees["vartree"],
			prev_mtimes=self.ldpath_mtimes,
			scheduler=self.scheduler,
			blockers=self.find_blockers)

		if retval == os.EX_OK:
			self.world_atom(self.pkg)
			self._log_success()

		return retval

	def _log_success(self):
		# Write completion (and, unless FEATURES=noclean, post-build
		# cleaning) messages to the emerge logger.
		pkg = self.pkg
		pkg_count = self.pkg_count
		pkg_path = self.pkg_path
		logger = self.logger
		if "noclean" not in self.settings.features:
			short_msg = "emerge: (%s of %s) %s Clean Post" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
			logger.log((" === (%s of %s) " + \
				"Post-Build Cleaning (%s::%s)") % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
				short_msg=short_msg)
		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
50
(-)EbuildPhase.py (+72 lines)
Line 0 Link Here
1
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
2
from _emerge.EbuildProcess import EbuildProcess
3
from _emerge.CompositeTask import CompositeTask
4
from portage.util import writemsg
5
try:
6
	import portage
7
except ImportError:
8
	from os import path as osp
9
	import sys
10
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
11
	import portage
12
import os
13
class EbuildPhase(CompositeTask):

	# Runs a single ebuild phase as an EbuildProcess, then any
	# registered post-phase commands via MiscFunctionsProcess.

	__slots__ = ("background", "pkg", "phase",
		"scheduler", "settings", "tree")

	# Mapping of phase name -> post-phase command list, shared with
	# the portage core.
	_post_phase_cmds = portage._post_phase_cmds

	def _start(self):

		ebuild_process = EbuildProcess(background=self.background,
			pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
			settings=self.settings, tree=self.tree)

		self._start_task(ebuild_process, self._ebuild_exit)

	def _ebuild_exit(self, ebuild_process):
		# Exit hook for the main phase process: run install-phase
		# checks/fixups, then either chain post-phase commands or
		# finish with the process's returncode.

		if self.phase == "install":
			# Scan the build log for QA issues; route output to the
			# log file when running in the background.
			out = None
			log_path = self.settings.get("PORTAGE_LOG_FILE")
			log_file = None
			if self.background and log_path is not None:
				log_file = open(log_path, 'a')
				out = log_file
			try:
				portage._check_build_log(self.settings, out=out)
			finally:
				if log_file is not None:
					log_file.close()

		if self._default_exit(ebuild_process) != os.EX_OK:
			self.wait()
			return

		settings = self.settings

		if self.phase == "install":
			portage._post_src_install_chost_fix(settings)
			portage._post_src_install_uid_fix(settings)

		post_phase_cmds = self._post_phase_cmds.get(self.phase)
		if post_phase_cmds is not None:
			post_phase = MiscFunctionsProcess(background=self.background,
				commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
				scheduler=self.scheduler, settings=settings)
			self._start_task(post_phase, self._post_phase_exit)
			return

		self.returncode = ebuild_process.returncode
		self._current_task = None
		self.wait()

	def _post_phase_exit(self, post_phase):
		# Exit hook for the post-phase commands; failure is reported
		# but the composite task finishes either way.
		if self._final_exit(post_phase) != os.EX_OK:
			writemsg("!!! post %s failed; exiting.\n" % self.phase,
				noiselevel=-1)
		self._current_task = None
		self.wait()
		return
72
(-)EbuildMetadataPhase.py (+132 lines)
Line 0 Link Here
1
from _emerge.SubProcess import SubProcess
2
from _emerge.PollConstants import PollConstants
3
import sys
4
from portage.cache.mappings import slot_dict_class
5
try:
6
	import portage
7
except ImportError:
8
	from os import path as osp
9
	import sys
10
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
11
	import portage
12
import os
13
from itertools import izip
14
import fcntl
15
import codecs
16
class EbuildMetadataPhase(SubProcess):

	"""
	Asynchronous interface for the ebuild "depend" phase which is
	used to extract metadata from the ebuild.
	"""

	__slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
		"ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
		("_raw_metadata",)

	_file_names = ("ebuild",)
	_files_dict = slot_dict_class(_file_names, prefix="")
	# Fixed descriptor on which the child writes its metadata output.
	_metadata_fd = 9

	def _start(self):
		settings = self.settings
		settings.setcpv(self.cpv)
		ebuild_path = self.ebuild_path

		# Try to determine EAPI up-front (GLEP 55 file name, then the
		# ebuild header) so unsupported EAPIs can be short-circuited
		# without spawning bash.
		eapi = None
		if 'parse-eapi-glep-55' in settings.features:
			pf, eapi = portage._split_ebuild_name_glep55(
				os.path.basename(ebuild_path))
		if eapi is None and \
			'parse-eapi-ebuild-head' in settings.features:
			eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
				mode='r', encoding='utf_8', errors='replace'))

		if eapi is not None:
			if not portage.eapi_is_supported(eapi):
				# Record only the EAPI and finish successfully; the
				# cache entry marks the ebuild as unsupported.
				self.metadata_callback(self.cpv, self.ebuild_path,
					self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
				self.returncode = os.EX_OK
				self.wait()
				return

			settings.configdict['pkg']['EAPI'] = eapi

		debug = settings.get("PORTAGE_DEBUG") == "1"
		master_fd = None
		slave_fd = None
		fd_pipes = None
		if self.fd_pipes is not None:
			fd_pipes = self.fd_pipes.copy()
		else:
			fd_pipes = {}

		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.itervalues():
			if fd == sys.stdout.fileno():
				sys.stdout.flush()
			if fd == sys.stderr.fileno():
				sys.stderr.flush()

		fd_pipes_orig = fd_pipes.copy()
		self._files = self._files_dict()
		files = self._files

		# Non-blocking pipe: the child writes metadata into slave_fd
		# (exposed as _metadata_fd) and we poll master_fd.
		master_fd, slave_fd = os.pipe()
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		fd_pipes[self._metadata_fd] = slave_fd

		self._raw_metadata = []
		files.ebuild = os.fdopen(master_fd, 'r')
		self._reg_id = self.scheduler.register(files.ebuild.fileno(),
			self._registered_events, self._output_handler)
		self._registered = True

		retval = portage.doebuild(ebuild_path, "depend",
			settings["ROOT"], settings, debug,
			mydbapi=self.portdb, tree="porttree",
			fd_pipes=fd_pipes, returnpid=True)

		# The child owns its copy of the write end now.
		os.close(slave_fd)

		if isinstance(retval, int):
			# doebuild failed before spawning
			self._unregister()
			self.returncode = retval
			self.wait()
			return

		self.pid = retval[0]
		# This class reaps the pid itself; remove it from portage's
		# global tracking list so it isn't reaped twice.
		portage.process.spawned_pids.remove(self.pid)

	def _output_handler(self, fd, event):
		# Scheduler callback: accumulate metadata output; an empty
		# read means the child closed its end (EOF).
		if event & PollConstants.POLLIN:
			self._raw_metadata.append(self._files.ebuild.read())
			if not self._raw_metadata[-1]:
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered

	def _set_returncode(self, wait_retval):
		SubProcess._set_returncode(self, wait_retval)
		if self.returncode == os.EX_OK:
			metadata_lines = "".join(self._raw_metadata).splitlines()
			if len(portage.auxdbkeys) != len(metadata_lines):
				# Don't trust bash's returncode if the
				# number of lines is incorrect.
				self.returncode = 1
			else:
				# One output line per auxdb key, in key order.
				metadata = izip(portage.auxdbkeys, metadata_lines)
				self.metadata = self.metadata_callback(self.cpv,
					self.ebuild_path, self.repo_path, metadata,
					self.ebuild_mtime)
132
(-)SubProcess.py (+104 lines)
Line 0 Link Here
1
from _emerge.AbstractPollTask import AbstractPollTask
2
import signal
3
import os
4
import errno
5
class SubProcess(AbstractPollTask):

	# Base class for tasks backed by a forked child process whose
	# output (or a dummy pipe) is monitored by the scheduler.

	__slots__ = ("pid",) + \
		("_files", "_reg_id")

	# A file descriptor is required for the scheduler to monitor changes from
	# inside a poll() loop. When logging is not enabled, create a pipe just to
	# serve this purpose alone.
	_dummy_pipe_fd = 9

	def _poll(self):
		# Non-blocking status check: reap the child with WNOHANG and
		# translate the wait status into self.returncode if it exited.
		if self.returncode is not None:
			return self.returncode
		if self.pid is None:
			return self.returncode
		if self._registered:
			# The scheduler is still watching our pipe; let the
			# event-driven path collect the exit status instead.
			return self.returncode

		try:
			retval = os.waitpid(self.pid, os.WNOHANG)
		except OSError, e:
			if e.errno != errno.ECHILD:
				raise
			del e
			# Child was already reaped elsewhere; record a failure.
			retval = (self.pid, 1)

		if retval == (0, 0):
			# Still running.
			return None
		self._set_returncode(retval)
		return self.returncode

	def cancel(self):
		# Terminate the child with SIGTERM (if alive), then wait so
		# the exit status is collected.
		if self.isAlive():
			try:
				os.kill(self.pid, signal.SIGTERM)
			except OSError, e:
				if e.errno != errno.ESRCH:
					raise
				del e

		self.cancelled = True
		if self.pid is not None:
			self.wait()
		return self.returncode

	def isAlive(self):
		# Alive = spawned but exit status not yet collected.
		return self.pid is not None and \
			self.returncode is None

	def _wait(self):
		# Blocking wait: drain scheduler events first, then reap the
		# child with a blocking waitpid().
		if self.returncode is not None:
			return self.returncode

		if self._registered:
			self.scheduler.schedule(self._reg_id)
			self._unregister()
			if self.returncode is not None:
				return self.returncode

		try:
			wait_retval = os.waitpid(self.pid, 0)
		except OSError, e:
			if e.errno != errno.ECHILD:
				raise
			del e
			# Already reaped elsewhere; record a failure.
			self._set_returncode((self.pid, 1))
		else:
			self._set_returncode(wait_retval)

		return self.returncode

	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""

		self._registered = False

		if self._reg_id is not None:
			self.scheduler.unregister(self._reg_id)
			self._reg_id = None

		if self._files is not None:
			for f in self._files.itervalues():
				f.close()
			self._files = None

	def _set_returncode(self, wait_retval):
		# Normalize an os.waitpid() status word into self.returncode:
		# a nonzero low byte means the child died from a signal, so
		# shift it up to keep the code nonzero; otherwise the real
		# exit status lives in the high byte.

		retval = wait_retval[1]

		if retval != os.EX_OK:
			if retval & 0xff:
				retval = (retval & 0xff) << 8
			else:
				retval = retval >> 8

		self.returncode = retval
104
(-)help.py (-2 lines)
Lines 2-9 Link Here
2
# Distributed under the terms of the GNU General Public License v2
2
# Distributed under the terms of the GNU General Public License v2
3
# $Id$
3
# $Id$
4
4
5
6
import os,sys
7
from portage.output import bold, turquoise, green
5
from portage.output import bold, turquoise, green
8
6
9
def shorthelp():
7
def shorthelp():
(-)EbuildBinpkg.py (+40 lines)
Line 0 Link Here
1
from _emerge.EbuildProcess import EbuildProcess
2
import os
3
class EbuildBinpkg(EbuildProcess):
	"""
	This assumes that src_install() has successfully completed.
	"""
	# Runs the "package" phase to build a binary package (.tbz2)
	# into a temporary file, injecting it into the binary tree on
	# success.
	__slots__ = ("_binpkg_tmpfile",)

	def _start(self):
		self.phase = "package"
		self.tree = "porttree"
		pkg = self.pkg
		root_config = pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		bintree = root_config.trees["bintree"]
		# NOTE(review): ebuild_path and debug below appear unused in
		# this method — confirm before removing (findname() may matter).
		ebuild_path = portdb.findname(self.pkg.cpv)
		settings = self.settings
		debug = settings.get("PORTAGE_DEBUG") == "1"

		bintree.prevent_collision(pkg.cpv)
		# Build into a pid-suffixed temp file so a partially written
		# package is never visible under its final name.
		binpkg_tmpfile = os.path.join(bintree.pkgdir,
			pkg.cpv + ".tbz2." + str(os.getpid()))
		self._binpkg_tmpfile = binpkg_tmpfile
		settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
		settings.backup_changes("PORTAGE_BINPKG_TMPFILE")

		try:
			EbuildProcess._start(self)
		finally:
			# Keep the temp path out of the persistent config.
			settings.pop("PORTAGE_BINPKG_TMPFILE", None)

	def _set_returncode(self, wait_retval):
		EbuildProcess._set_returncode(self, wait_retval)

		pkg = self.pkg
		bintree = pkg.root_config.trees["bintree"]
		binpkg_tmpfile = self._binpkg_tmpfile
		if self.returncode == os.EX_OK:
			# Register the finished archive with the binary tree.
			bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
40
(-)DepPriorityNormalRange.py (+44 lines)
Line 0 Link Here
1
from _emerge.DepPriority import DepPriority
2
class DepPriorityNormalRange(object):
	"""
	DepPriority properties              Index      Category

	buildtime                                      HARD
	runtime                                3       MEDIUM
	runtime_post                           2       MEDIUM_SOFT
	optional                               1       SOFT
	(none of the above)                    0       NONE
	"""
	# Numeric softness levels; these index into ignore_priority below.
	MEDIUM      = 3
	MEDIUM_SOFT = 2
	SOFT        = 1
	NONE        = 0

	@classmethod
	def _ignore_optional(cls, priority):
		# Exact-type check: DepPriority subclasses are never ignored.
		if priority.__class__ is not DepPriority:
			return False
		return bool(priority.optional)

	@classmethod
	def _ignore_runtime_post(cls, priority):
		# Ignore optional and runtime_post dependencies.
		if priority.__class__ is not DepPriority:
			return False
		return bool(priority.optional or priority.runtime_post)

	@classmethod
	def _ignore_runtime(cls, priority):
		# Ignore everything except buildtime dependencies.
		if priority.__class__ is not DepPriority:
			return False
		return not priority.buildtime

	ignore_medium      = _ignore_runtime
	ignore_medium_soft = _ignore_runtime_post
	ignore_soft        = _ignore_optional

# ignore_priority[i] is the predicate that ignores everything softer
# than level i; index 0 (None) ignores nothing.
DepPriorityNormalRange.ignore_priority = (
	None,
	DepPriorityNormalRange._ignore_optional,
	DepPriorityNormalRange._ignore_runtime_post,
	DepPriorityNormalRange._ignore_runtime
)
(-)ProgressHandler.py (+19 lines)
Line 0 Link Here
1
import time
2
class ProgressHandler(object):
	"""
	Base class for rate-limited progress reporting: onProgress()
	records the latest counts and calls display() at most once per
	min_latency seconds. Subclasses implement display().
	"""

	def __init__(self):
		self.curval = 0
		self.maxval = 0
		self._last_update = 0
		# Minimum seconds between display() refreshes.
		self.min_latency = 0.2

	def onProgress(self, maxval, curval):
		"""Record new progress and refresh if enough time has passed."""
		self.maxval = maxval
		self.curval = curval
		now = time.time()
		due = (now - self._last_update) >= self.min_latency
		if due:
			self._last_update = now
			self.display()

	def display(self):
		"""Render the current progress; must be overridden."""
		raise NotImplementedError(self)
19
(-)PackageVirtualDbapi.py (+140 lines)
Line 0 Link Here
1
try:
2
	import portage
3
except ImportError:
4
	from os import path as osp
5
	import sys
6
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
7
	import portage
8
class PackageVirtualDbapi(portage.dbapi):
	"""
	A dbapi-like interface class that represents the state of the installed
	package database as new packages are installed, replacing any packages
	that previously existed in the same slot. The main difference between
	this class and fakedbapi is that this one uses Package instances
	internally (passed in via cpv_inject() and cpv_remove() calls).
	"""
	def __init__(self, settings):
		portage.dbapi.__init__(self)
		self.settings = settings
		self._match_cache = {}  # memoized match()/cp_list() results
		self._cp_map = {}       # cp -> list of Package instances
		self._cpv_map = {}      # cpv -> Package instance

	def clear(self):
		"""
		Remove all packages.
		"""
		if self._cpv_map:
			self._clear_cache()
			self._cp_map.clear()
			self._cpv_map.clear()

	def copy(self):
		# Shallow copy, but the per-cp package lists are duplicated so
		# the copy can be mutated independently.
		obj = PackageVirtualDbapi(self.settings)
		obj._match_cache = self._match_cache.copy()
		obj._cp_map = self._cp_map.copy()
		for k, v in obj._cp_map.iteritems():
			obj._cp_map[k] = v[:]
		obj._cpv_map = self._cpv_map.copy()
		return obj

	def __iter__(self):
		return self._cpv_map.itervalues()

	def __contains__(self, item):
		# Requires the exact same package (equality compares the full
		# hash key), not merely a matching cpv.
		existing = self._cpv_map.get(item.cpv)
		if existing is not None and \
			existing == item:
			return True
		return False

	def get(self, item, default=None):
		# Accepts either a Package or a 4-tuple hash key of the form
		# (type_name, root, cpv, operation).
		cpv = getattr(item, "cpv", None)
		if cpv is None:
			if len(item) != 4:
				return default
			type_name, root, cpv, operation = item

		existing = self._cpv_map.get(cpv)
		if existing is not None and \
			existing == item:
			return existing
		return default

	def match_pkgs(self, atom):
		# Like match(), but returns Package instances instead of cpvs.
		return [self._cpv_map[cpv] for cpv in self.match(atom)]

	def _clear_cache(self):
		# _categories is presumably maintained by the dbapi base
		# class — TODO confirm; reset it along with our match cache.
		if self._categories is not None:
			self._categories = None
		if self._match_cache:
			self._match_cache = {}

	def match(self, origdep, use_cache=1):
		# Memoized wrapper around dbapi.match(); always return a copy
		# so callers cannot mutate the cached list.
		result = self._match_cache.get(origdep)
		if result is not None:
			return result[:]
		result = portage.dbapi.match(self, origdep, use_cache=use_cache)
		self._match_cache[origdep] = result
		return result[:]

	def cpv_exists(self, cpv):
		return cpv in self._cpv_map

	def cp_list(self, mycp, use_cache=1):
		cachelist = self._match_cache.get(mycp)
		# cp_list() doesn't expand old-style virtuals
		if cachelist and cachelist[0].startswith(mycp):
			return cachelist[:]
		cpv_list = self._cp_map.get(mycp)
		if cpv_list is None:
			cpv_list = []
		else:
			cpv_list = [pkg.cpv for pkg in cpv_list]
		self._cpv_sort_ascending(cpv_list)
		# Don't cache empty results for old-style virtuals, since a
		# later match() could expand them differently.
		if not (not cpv_list and mycp.startswith("virtual/")):
			self._match_cache[mycp] = cpv_list
		return cpv_list[:]

	def cp_all(self):
		return list(self._cp_map)

	def cpv_all(self):
		return list(self._cpv_map)

	def cpv_inject(self, pkg):
		# Insert a Package, displacing any existing package with the
		# same cpv or occupying the same slot.
		cp_list = self._cp_map.get(pkg.cp)
		if cp_list is None:
			cp_list = []
			self._cp_map[pkg.cp] = cp_list
		e_pkg = self._cpv_map.get(pkg.cpv)
		if e_pkg is not None:
			if e_pkg == pkg:
				return
			self.cpv_remove(e_pkg)
		for e_pkg in cp_list:
			if e_pkg.slot_atom == pkg.slot_atom:
				if e_pkg == pkg:
					return
				self.cpv_remove(e_pkg)
				break
		cp_list.append(pkg)
		self._cpv_map[pkg.cpv] = pkg
		self._clear_cache()

	def cpv_remove(self, pkg):
		# Remove exactly this package; KeyError if a different package
		# (or none) is registered under its cpv.
		old_pkg = self._cpv_map.get(pkg.cpv)
		if old_pkg != pkg:
			raise KeyError(pkg)
		self._cp_map[pkg.cp].remove(pkg)
		del self._cpv_map[pkg.cpv]
		self._clear_cache()

	def aux_get(self, cpv, wants):
		# Missing metadata keys yield empty strings.
		metadata = self._cpv_map[cpv].metadata
		return [metadata.get(x, "") for x in wants]

	def aux_update(self, cpv, values):
		self._cpv_map[cpv].metadata.update(values)
		self._clear_cache()
140
(-)Task.py (+37 lines)
Line 0 Link Here
1
from _emerge.SlotObject import SlotObject
2
class Task(SlotObject):
	"""
	Hashable task base class. A task's identity is the tuple returned
	by _get_hash_key(); equality, hashing, length, indexing, iteration,
	membership, and str() all delegate to that tuple.
	"""
	__slots__ = ("_hash_key", "_hash_value")

	def _get_hash_key(self):
		# Subclasses are responsible for populating _hash_key.
		key = getattr(self, "_hash_key", None)
		if key is None:
			raise NotImplementedError(self)
		return key

	def __eq__(self, other):
		return self._get_hash_key() == other

	def __ne__(self, other):
		return self._get_hash_key() != other

	def __hash__(self):
		# Cache the hash, since the key tuple never changes once set.
		cached = getattr(self, "_hash_value", None)
		if cached is None:
			cached = hash(self._get_hash_key())
			self._hash_value = cached
		return cached

	def __len__(self):
		return len(self._get_hash_key())

	def __getitem__(self, key):
		return self._get_hash_key()[key]

	def __iter__(self):
		return iter(self._get_hash_key())

	def __contains__(self, key):
		return key in self._get_hash_key()

	def __str__(self):
		return str(self._get_hash_key())
37
(-)PollConstants.py (+15 lines)
Line 0 Link Here
1
import select
2
class PollConstants(object):

	"""
	Provides POLL* constants that are equivalent to those from the
	select module, for use by PollSelectAdapter.
	"""

	names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
	# Mirror each select.POLL* constant; when the platform's select
	# module lacks one, substitute a unique power-of-two bit value.
	_scope = locals()
	_fallback = 1
	for _name in names:
		_scope[_name] = getattr(select, _name, _fallback)
		_fallback *= 2
	del _scope, _name, _fallback
15
(-)EbuildProcess.py (+55 lines)
Line 0 Link Here
1
from _emerge.SpawnProcess import SpawnProcess
2
try:
3
	import portage
4
except ImportError:
5
	from os import path as osp
6
	import sys
7
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
8
	import portage
9
import os
10
class EbuildProcess(SpawnProcess):

	# Spawns a single ebuild phase via portage.doebuild(), with its
	# output attached to a pty when available.

	__slots__ = ("phase", "pkg", "settings", "tree")

	def _start(self):
		# Don't open the log file during the clean phase since the
		# open file can result in an nfs lock on $T/build.log which
		# prevents the clean phase from removing $T.
		if self.phase not in ("clean", "cleanrm"):
			self.logfile = self.settings.get("PORTAGE_LOG_FILE")
		SpawnProcess._start(self)

	def _pipe(self, fd_pipes):
		# Prefer a pty (falling back to a plain pipe) so the child's
		# output behaves as if attached to a terminal.
		stdout_pipe = fd_pipes.get(1)
		got_pty, master_fd, slave_fd = \
			portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
		return (master_fd, slave_fd)

	def _spawn(self, args, **kwargs):
		# Delegate the actual phase execution to portage.doebuild().

		root_config = self.pkg.root_config
		tree = self.tree
		mydbapi = root_config.trees[tree].dbapi
		settings = self.settings
		ebuild_path = settings["EBUILD"]
		debug = settings.get("PORTAGE_DEBUG") == "1"

		rval = portage.doebuild(ebuild_path, self.phase,
			root_config.root, settings, debug,
			mydbapi=mydbapi, tree=tree, **kwargs)

		return rval

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)

		if self.phase not in ("clean", "cleanrm"):
			# Reconcile the phase's exit-status file with the process
			# exit code, logging any discrepancy.
			self.returncode = portage._doebuild_exit_status_check_and_log(
				self.settings, self.phase, self.returncode)

		if self.phase == "test" and self.returncode != os.EX_OK and \
			"test-fail-continue" in self.settings.features:
			# FEATURES=test-fail-continue: treat test failure as success.
			self.returncode = os.EX_OK

		portage._post_phase_userpriv_perms(self.settings)
55
(-)EbuildFetcher.py (+109 lines)
Line 0 Link Here
1
from _emerge.SpawnProcess import SpawnProcess
2
from _emerge.EbuildBuildDir import EbuildBuildDir
3
import sys
4
try:
5
	import portage
6
except ImportError:
7
	from os import path as osp
8
	import sys
9
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
10
	import portage
11
import os
12
from portage.elog.messages import eerror
13
class EbuildFetcher(SpawnProcess):

	# Fetches an ebuild's distfiles by spawning the "ebuild" helper
	# with the fetch/fetchall phase in a child process.

	__slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
		("_build_dir",)

	def _start(self):
		# Prepare the environment and argv for the "ebuild ... fetch"
		# child process, locking and preparing the build dir unless
		# this is a background prefetch.

		root_config = self.pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(self.pkg.cpv)
		settings = self.config_pool.allocate()
		settings.setcpv(self.pkg)

		# In prefetch mode, logging goes to emerge-fetch.log and the builddir
		# should not be touched since otherwise it could interfere with
		# another instance of the same cpv concurrently being built for a
		# different $ROOT (currently, builds only cooperate with prefetchers
		# that are spawned for the same $ROOT).
		if not self.prefetch:
			self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
			self._build_dir.lock()
			self._build_dir.clean_log()
			portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
			if self.logfile is None:
				self.logfile = settings.get("PORTAGE_LOG_FILE")

		phase = "fetch"
		if self.fetchall:
			phase = "fetchall"

		# If any incremental variables have been overridden
		# via the environment, those values need to be passed
		# along here so that they are correctly considered by
		# the config instance in the subproccess.
		fetch_env = os.environ.copy()

		nocolor = settings.get("NOCOLOR")
		if nocolor is not None:
			fetch_env["NOCOLOR"] = nocolor

		fetch_env["PORTAGE_NICENESS"] = "0"
		if self.prefetch:
			fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

		ebuild_binary = os.path.join(
			settings["PORTAGE_BIN_PATH"], "ebuild")

		fetch_args = [ebuild_binary, ebuild_path, phase]
		debug = settings.get("PORTAGE_DEBUG") == "1"
		if debug:
			fetch_args.append("--debug")

		self.args = fetch_args
		self.env = fetch_env
		SpawnProcess._start(self)

	def _pipe(self, fd_pipes):
		"""When appropriate, use a pty so that fetcher progress bars,
		like wget has, will work properly."""
		if self.background or not sys.stdout.isatty():
			# When the output only goes to a log file,
			# there's no point in creating a pty.
			return os.pipe()
		stdout_pipe = fd_pipes.get(1)
		got_pty, master_fd, slave_fd = \
			portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
		return (master_fd, slave_fd)

	def _set_returncode(self, wait_retval):
		# Post-exit cleanup: report failures via elog, flush elog
		# messages, clean the log on success, and release the build
		# dir and the pooled config instance.
		SpawnProcess._set_returncode(self, wait_retval)
		# Collect elog messages that might have been
		# created by the pkg_nofetch phase.
		if self._build_dir is not None:
			# Skip elog messages for prefetch, in order to avoid duplicates.
			if not self.prefetch and self.returncode != os.EX_OK:
				elog_out = None
				if self.logfile is not None:
					if self.background:
						elog_out = open(self.logfile, 'a')
				msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
				if self.logfile is not None:
					msg += ", Log file:"
				eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
				if self.logfile is not None:
					eerror(" '%s'" % (self.logfile,),
						phase="unpack", key=self.pkg.cpv, out=elog_out)
				if elog_out is not None:
					elog_out.close()
			if not self.prefetch:
				portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
			# NOTE: a dead "features = self._build_dir.settings.features"
			# assignment was removed here; the value was never used.
			if self.returncode == os.EX_OK:
				self._build_dir.clean_log()
			self._build_dir.unlock()
			self.config_pool.deallocate(self._build_dir.settings)
			self._build_dir = None
109
(-)SlotObject.py (+39 lines)
Line 0 Link Here
1
class SlotObject(object):
	"""
	Base class whose constructor assigns every slot declared by the
	class and all of its bases (below SlotObject itself) from keyword
	arguments, defaulting each missing slot to None.
	"""
	__slots__ = ("__weakref__",)

	def __init__(self, **kwargs):
		for attr_name in self._slot_names():
			setattr(self, attr_name, kwargs.get(attr_name, None))

	def _slot_names(self):
		# Depth-first walk over __bases__, yielding every slot name
		# declared by classes between type(self) and SlotObject.
		pending = [self.__class__]
		while pending:
			cls = pending.pop()
			if cls is SlotObject:
				continue
			pending.extend(cls.__bases__)
			for attr_name in getattr(cls, "__slots__", None) or ():
				yield attr_name

	def copy(self):
		"""
		Create a new instance and copy all attributes
		defined from __slots__ (including those from
		inherited classes).
		"""
		clone = self.__class__()
		for attr_name in self._slot_names():
			setattr(clone, attr_name, getattr(self, attr_name))
		return clone
39
(-)DependencyArg.py (+8 lines)
Line 0 Link Here
1
class DependencyArg(object):
	"""
	Base class for dependency arguments: pairs a raw argument value
	with the root configuration it applies to.
	"""

	def __init__(self, arg=None, root_config=None):
		# arg: the raw argument value
		# root_config: configuration of the root this argument targets
		self.arg = arg
		self.root_config = root_config

	def __str__(self):
		# Display the raw argument value.
		return str(self.arg)
8
(-)EbuildExecuter.py (+99 lines)
Line 0 Link Here
1
from _emerge.EbuildPhase import EbuildPhase
2
from _emerge.TaskSequence import TaskSequence
3
from _emerge.CompositeTask import CompositeTask
4
try:
5
	import portage
6
except ImportError:
7
	from os import path as osp
8
	import sys
9
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
10
	import portage
11
import os
12
class EbuildExecuter(CompositeTask):
	"""
	Run the build phases of an ebuild as a chain of asynchronous
	EbuildPhase tasks: clean -> setup -> unpack -> (prepare,
	configure, compile, test, install).
	"""

	__slots__ = ("pkg", "scheduler", "settings") + ("_tree",)

	# Phases executed (in order) after unpack. For EAPI 0/1 the
	# first two entries are skipped in _unpack_exit().
	_phases = ("prepare", "configure", "compile", "test", "install")

	# Eclasses whose unpack phases are serialized via
	# scheduler.scheduleUnpack() — see _setup_exit().
	_live_eclasses = frozenset([
		"bzr",
		"cvs",
		"darcs",
		"git",
		"mercurial",
		"subversion"
	])

	def _start(self):
		# Always begin with a clean phase to discard stale state.
		self._tree = "porttree"
		pkg = self.pkg
		phase = "clean"
		clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
			scheduler=self.scheduler, settings=self.settings, tree=self._tree)
		self._start_task(clean_phase, self._clean_phase_exit)

	def _clean_phase_exit(self, clean_phase):

		if self._default_exit(clean_phase) != os.EX_OK:
			self.wait()
			return

		pkg = self.pkg
		scheduler = self.scheduler
		settings = self.settings
		cleanup = 1

		# This initializes PORTAGE_LOG_FILE.
		portage.prepare_build_dirs(pkg.root, settings, cleanup)

		setup_phase = EbuildPhase(background=self.background,
			pkg=pkg, phase="setup", scheduler=scheduler,
			settings=settings, tree=self._tree)

		# The setup phase goes through the scheduler (rather than
		# _start_task) so it can be globally throttled.
		setup_phase.addExitListener(self._setup_exit)
		self._current_task = setup_phase
		self.scheduler.scheduleSetup(setup_phase)

	def _setup_exit(self, setup_phase):

		if self._default_exit(setup_phase) != os.EX_OK:
			self.wait()
			return

		unpack_phase = EbuildPhase(background=self.background,
			pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
			settings=self.settings, tree=self._tree)

		if self._live_eclasses.intersection(self.pkg.inherited):
			# Serialize $DISTDIR access for live ebuilds since
			# otherwise they can interfere with eachother.

			unpack_phase.addExitListener(self._unpack_exit)
			self._current_task = unpack_phase
			self.scheduler.scheduleUnpack(unpack_phase)

		else:
			self._start_task(unpack_phase, self._unpack_exit)

	def _unpack_exit(self, unpack_phase):

		if self._default_exit(unpack_phase) != os.EX_OK:
			self.wait()
			return

		# Queue the remaining phases to run one after another.
		ebuild_phases = TaskSequence(scheduler=self.scheduler)

		pkg = self.pkg
		phases = self._phases
		eapi = pkg.metadata["EAPI"]
		if eapi in ("0", "1"):
			# skip src_prepare and src_configure
			phases = phases[2:]

		for phase in phases:
			ebuild_phases.add(EbuildPhase(background=self.background,
				pkg=self.pkg, phase=phase, scheduler=self.scheduler,
				settings=self.settings, tree=self._tree))

		self._start_task(ebuild_phases, self._default_final_exit)
99
(-)Binpkg.py (+301 lines)
Line 0 Link Here
1
from _emerge.EbuildPhase import EbuildPhase
2
from _emerge.BinpkgFetcher import BinpkgFetcher
3
from _emerge.BinpkgExtractorAsync import BinpkgExtractorAsync
4
from _emerge.CompositeTask import CompositeTask
5
from _emerge.BinpkgVerifier import BinpkgVerifier
6
from _emerge.EbuildMerge import EbuildMerge
7
from _emerge.EbuildBuildDir import EbuildBuildDir
8
from portage.util import writemsg
9
try:
10
	import portage
11
except ImportError:
12
	from os import path as osp
13
	import sys
14
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
15
	import portage
16
import os
17
from portage.output import colorize
18
class Binpkg(CompositeTask):
	"""
	Install a binary package: optionally wait for a prefetcher, fetch
	and verify the package file, inject it into the bintree, then run
	the clean/setup phases, extract the image, and finally merge it
	via install().
	"""

	__slots__ = ("find_blockers",
		"ldpath_mtimes", "logger", "opts",
		"pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
		("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
		"_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")

	def _writemsg_level(self, msg, level=0, noiselevel=0):
		# Write to the terminal (unless running in the background) and
		# always append to PORTAGE_LOG_FILE when one is configured.

		if not self.background:
			portage.util.writemsg_level(msg,
				level=level, noiselevel=noiselevel)

		log_path = self.settings.get("PORTAGE_LOG_FILE")
		if  log_path is not None:
			f = open(log_path, 'a')
			try:
				f.write(msg)
			finally:
				f.close()

	def _start(self):

		pkg = self.pkg
		settings = self.settings
		settings.setcpv(pkg)
		self._tree = "bintree"
		self._bintree = self.pkg.root_config.trees[self._tree]
		# Checksum verification is skipped in pretend mode.
		self._verify = not self.opts.pretend

		dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
			"portage", pkg.category, pkg.pf)
		self._build_dir = EbuildBuildDir(dir_path=dir_path,
			pkg=pkg, settings=settings)
		self._image_dir = os.path.join(dir_path, "image")
		self._infloc = os.path.join(dir_path, "build-info")
		self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
		settings["EBUILD"] = self._ebuild_path
		debug = settings.get("PORTAGE_DEBUG") == "1"
		portage.doebuild_environment(self._ebuild_path, "setup",
			settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
		settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name

		# The prefetcher has already completed or it
		# could be running now. If it's running now,
		# wait for it to complete since it holds
		# a lock on the file being fetched. The
		# portage.locks functions are only designed
		# to work between separate processes. Since
		# the lock is held by the current process,
		# use the scheduler and fetcher methods to
		# synchronize with the fetcher.
		prefetcher = self.prefetcher
		if prefetcher is None:
			pass
		elif not prefetcher.isAlive():
			prefetcher.cancel()
		elif prefetcher.poll() is None:

			waiting_msg = ("Fetching '%s' " + \
				"in the background. " + \
				"To view fetch progress, run `tail -f " + \
				"/var/log/emerge-fetch.log` in another " + \
				"terminal.") % prefetcher.pkg_path
			msg_prefix = colorize("GOOD", " * ")
			from textwrap import wrap
			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
				for line in wrap(waiting_msg, 65))
			if not self.background:
				writemsg(waiting_msg, noiselevel=-1)

			self._current_task = prefetcher
			prefetcher.addExitListener(self._prefetch_exit)
			return

		self._prefetch_exit(prefetcher)

	def _prefetch_exit(self, prefetcher):

		pkg = self.pkg
		pkg_count = self.pkg_count
		if not (self.opts.pretend or self.opts.fetchonly):
			self._build_dir.lock()
			# If necessary, discard old log so that we don't
			# append to it.
			self._build_dir.clean_log()
			# Initialize PORTAGE_LOG_FILE.
			portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
		fetcher = BinpkgFetcher(background=self.background,
			logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
			pretend=self.opts.pretend, scheduler=self.scheduler)
		pkg_path = fetcher.pkg_path
		self._pkg_path = pkg_path

		if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):

			msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
			short_msg = "emerge: (%s of %s) %s Fetch" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
			self.logger.log(msg, short_msg=short_msg)
			self._start_task(fetcher, self._fetcher_exit)
			return

		# No remote fetch was needed; proceed as if it had exited.
		self._fetcher_exit(fetcher)

	def _fetcher_exit(self, fetcher):

		# The fetcher only has a returncode when
		# --getbinpkg is enabled.
		if fetcher.returncode is not None:
			# Remember that the file was freshly downloaded so that
			# _verifier_exit() injects it into the bintree.
			self._fetched_pkg = True
			if self._default_exit(fetcher) != os.EX_OK:
				self._unlock_builddir()
				self.wait()
				return

		if self.opts.pretend:
			self._current_task = None
			self.returncode = os.EX_OK
			self.wait()
			return

		verifier = None
		if self._verify:
			logfile = None
			if self.background:
				logfile = self.settings.get("PORTAGE_LOG_FILE")
			verifier = BinpkgVerifier(background=self.background,
				logfile=logfile, pkg=self.pkg)
			self._start_task(verifier, self._verifier_exit)
			return

		self._verifier_exit(verifier)

	def _verifier_exit(self, verifier):
		# verifier is None when verification was skipped.
		if verifier is not None and \
			self._default_exit(verifier) != os.EX_OK:
			self._unlock_builddir()
			self.wait()
			return

		logger = self.logger
		pkg = self.pkg
		pkg_count = self.pkg_count
		pkg_path = self._pkg_path

		if self._fetched_pkg:
			self._bintree.inject(pkg.cpv, filename=pkg_path)

		if self.opts.fetchonly:
			self._current_task = None
			self.returncode = os.EX_OK
			self.wait()
			return

		msg = " === (%s of %s) Merging Binary (%s::%s)" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
		short_msg = "emerge: (%s of %s) %s Merge Binary" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
		logger.log(msg, short_msg=short_msg)

		phase = "clean"
		settings = self.settings
		ebuild_phase = EbuildPhase(background=self.background,
			pkg=pkg, phase=phase, scheduler=self.scheduler,
			settings=settings, tree=self._tree)

		self._start_task(ebuild_phase, self._clean_exit)

	def _clean_exit(self, clean_phase):
		if self._default_exit(clean_phase) != os.EX_OK:
			self._unlock_builddir()
			self.wait()
			return

		dir_path = self._build_dir.dir_path

		infloc = self._infloc
		pkg = self.pkg
		pkg_path = self._pkg_path

		dir_mode = 0755
		for mydir in (dir_path, self._image_dir, infloc):
			portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
				gid=portage.data.portage_gid, mode=dir_mode)

		# This initializes PORTAGE_LOG_FILE.
		portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
		self._writemsg_level(">>> Extracting info\n")

		# Unpack the xpak metadata into build-info, filling in
		# CATEGORY/PF from the package object if absent from the xpak.
		pkg_xpak = portage.xpak.tbz2(self._pkg_path)
		check_missing_metadata = ("CATEGORY", "PF")
		missing_metadata = set()
		for k in check_missing_metadata:
			v = pkg_xpak.getfile(k)
			if not v:
				missing_metadata.add(k)

		pkg_xpak.unpackinfo(infloc)
		for k in missing_metadata:
			if k == "CATEGORY":
				v = pkg.category
			elif k == "PF":
				v = pkg.pf
			else:
				continue

			f = open(os.path.join(infloc, k), 'wb')
			try:
				f.write(v + "\n")
			finally:
				f.close()

		# Store the md5sum in the vdb.
		f = open(os.path.join(infloc, "BINPKGMD5"), "w")
		try:
			f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
		finally:
			f.close()

		# This gives bashrc users an opportunity to do various things
		# such as remove binary packages after they're installed.
		settings = self.settings
		settings.setcpv(self.pkg)
		settings["PORTAGE_BINPKG_FILE"] = pkg_path
		settings.backup_changes("PORTAGE_BINPKG_FILE")

		phase = "setup"
		setup_phase = EbuildPhase(background=self.background,
			pkg=self.pkg, phase=phase, scheduler=self.scheduler,
			settings=settings, tree=self._tree)

		# Setup phases go through the scheduler for throttling.
		setup_phase.addExitListener(self._setup_exit)
		self._current_task = setup_phase
		self.scheduler.scheduleSetup(setup_phase)

	def _setup_exit(self, setup_phase):
		if self._default_exit(setup_phase) != os.EX_OK:
			self._unlock_builddir()
			self.wait()
			return

		extractor = BinpkgExtractorAsync(background=self.background,
			image_dir=self._image_dir,
			pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
		self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
		self._start_task(extractor, self._extractor_exit)

	def _extractor_exit(self, extractor):
		if self._final_exit(extractor) != os.EX_OK:
			self._unlock_builddir()
			writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
				noiselevel=-1)
		self.wait()

	def _unlock_builddir(self):
		# No lock was taken for pretend/fetchonly (see _prefetch_exit).
		if self.opts.pretend or self.opts.fetchonly:
			return
		portage.elog.elog_process(self.pkg.cpv, self.settings)
		self._build_dir.unlock()

	def install(self):
		"""
		Merge the extracted image into the live filesystem. Called by
		the scheduler after this task has completed successfully.
		"""

		# This gives bashrc users an opportunity to do various things
		# such as remove binary packages after they're installed.
		settings = self.settings
		settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
		settings.backup_changes("PORTAGE_BINPKG_FILE")

		merge = EbuildMerge(find_blockers=self.find_blockers,
			ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
			pkg=self.pkg, pkg_count=self.pkg_count,
			pkg_path=self._pkg_path, scheduler=self.scheduler,
			settings=settings, tree=self._tree, world_atom=self.world_atom)

		try:
			retval = merge.execute()
		finally:
			settings.pop("PORTAGE_BINPKG_FILE", None)
			self._unlock_builddir()
		return retval
301
(-)DepPriority.py (+44 lines)
Line 0 Link Here
1
from _emerge.AbstractDepPriority import AbstractDepPriority
2
class DepPriority(AbstractDepPriority):

	__slots__ = ("satisfied", "optional", "rebuild")

	def __int__(self):
		"""
		Note: These priorities are only used for measuring hardness
		in the circular dependency display via digraph.debug_print(),
		and nothing more. For actual merge order calculations, the
		measures defined by the DepPriorityNormalRange and
		DepPrioritySatisfiedRange classes are used.

		Attributes                            Hardness

		buildtime                               0
		runtime                                -1
		runtime_post                           -2
		optional                               -3
		(none of the above)                    -4

		"""
		# Return the hardness of the first flag that is set.
		for hardness, flag in (
			(0, self.buildtime),
			(-1, self.runtime),
			(-2, self.runtime_post),
			(-3, self.optional)):
			if flag:
				return hardness
		return -4

	def __str__(self):
		# The optional flag takes precedence over the hardness flags
		# in the textual representation.
		for label, flag in (
			("optional", self.optional),
			("buildtime", self.buildtime),
			("runtime", self.runtime),
			("runtime_post", self.runtime_post)):
			if flag:
				return label
		return "soft"
44
(-)BlockerDepPriority.py (+10 lines)
Line 0 Link Here
1
from _emerge.DepPriority import DepPriority
2
class BlockerDepPriority(DepPriority):
	"""Priority used for blocker dependencies."""
	__slots__ = ()

	def __int__(self):
		# Blockers carry no hardness of their own.
		return 0

	def __str__(self):
		return 'blocker'

# Shared singleton; blocker priorities carry no per-instance state.
BlockerDepPriority.instance = BlockerDepPriority()
(-)AbstractPollTask.py (+24 lines)
Line 0 Link Here
1
from _emerge.AsynchronousTask import AsynchronousTask
2
from _emerge.PollConstants import PollConstants
3
class AbstractPollTask(AsynchronousTask):
	"""
	Base class for tasks whose progress is driven by poll events
	delivered to _unregister_if_appropriate().
	"""

	__slots__ = ("scheduler",) + \
		("_registered",)

	# Default read chunk size for subclasses.
	_bufsize = 4096
	# Events indicating an error condition on the descriptor.
	_exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
	# Full set of events subclasses register interest in.
	_registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
		_exceptional_events

	def _unregister(self):
		# Subclasses must implement deregistration from polling.
		raise NotImplementedError(self)

	def _unregister_if_appropriate(self, event):
		if not self._registered:
			return
		if event & self._exceptional_events:
			# Error event: stop polling and cancel the task.
			self._unregister()
			self.cancel()
		elif event & PollConstants.POLLHUP:
			# Hangup: stop polling and finish normally.
			self._unregister()
			self.wait()
24
(-)AsynchronousTask.py (+112 lines)
Line 0 Link Here
1
from _emerge.SlotObject import SlotObject
2
class AsynchronousTask(SlotObject):
	"""
	Subclasses override _wait() and _poll() so that calls
	to public methods can be wrapped for implementing
	hooks such as exit listener notification.

	Sublasses should call self.wait() to notify exit listeners after
	the task is complete and self.returncode has been set.
	"""

	# returncode is None while the task is running; once it is set,
	# isAlive() returns False and _wait_hook() fires exit listeners.
	__slots__ = ("background", "cancelled", "returncode") + \
		("_exit_listeners", "_exit_listener_stack", "_start_listeners")

	def start(self):
		"""
		Start an asynchronous task and then return as soon as possible.
		"""
		self._start_hook()
		self._start()

	def _start(self):
		# Subclasses implement the actual startup logic.
		raise NotImplementedError(self)

	def isAlive(self):
		# Alive until a returncode has been recorded.
		return self.returncode is None

	def poll(self):
		# Fire exit listeners if the returncode is already available,
		# then delegate to the subclass poll implementation.
		self._wait_hook()
		return self._poll()

	def _poll(self):
		# Non-blocking status check; subclasses may override.
		return self.returncode

	def wait(self):
		# Block (via the subclass _wait) until the returncode is set,
		# then notify exit listeners.
		if self.returncode is None:
			self._wait()
		self._wait_hook()
		return self.returncode

	def _wait(self):
		# Blocking wait; subclasses may override.
		return self.returncode

	def cancel(self):
		# Mark cancelled and call wait() so exit listeners still fire.
		self.cancelled = True
		self.wait()

	def addStartListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._start_listeners is None:
			self._start_listeners = []
		self._start_listeners.append(f)

	def removeStartListener(self, f):
		if self._start_listeners is None:
			return
		self._start_listeners.remove(f)

	def _start_hook(self):
		# Fire start listeners once; the list is cleared first so a
		# listener that re-enters cannot trigger them again.
		if self._start_listeners is not None:
			start_listeners = self._start_listeners
			self._start_listeners = None

			for f in start_listeners:
				f(self)

	def addExitListener(self, f):
		"""
		The function will be called with one argument, a reference to self.
		"""
		if self._exit_listeners is None:
			self._exit_listeners = []
		self._exit_listeners.append(f)

	def removeExitListener(self, f):
		# While _wait_hook() is running, pending listeners live on
		# _exit_listener_stack; remove from there in that case.
		if self._exit_listeners is None:
			if self._exit_listener_stack is not None:
				self._exit_listener_stack.remove(f)
			return
		self._exit_listeners.remove(f)

	def _wait_hook(self):
		"""
		Call this method after the task completes, just before returning
		the returncode from wait() or poll(). This hook is
		used to trigger exit listeners when the returncode first
		becomes available.
		"""
		if self.returncode is not None and \
			self._exit_listeners is not None:

			# This prevents recursion, in case one of the
			# exit handlers triggers this method again by
			# calling wait(). Use a stack that gives
			# removeExitListener() an opportunity to consume
			# listeners from the stack, before they can get
			# called below. This is necessary because a call
			# to one exit listener may result in a call to
			# removeExitListener() for another listener on
			# the stack. That listener needs to be removed
			# from the stack since it would be inconsistent
			# to call it after it has been been passed into
			# removeExitListener().
			self._exit_listener_stack = self._exit_listeners
			self._exit_listeners = None

			self._exit_listener_stack.reverse()
			while self._exit_listener_stack:
				self._exit_listener_stack.pop()(self)
112
(-)Dependency.py (+12 lines)
Line 0 Link Here
1
from _emerge.DepPriority import DepPriority
2
from _emerge.SlotObject import SlotObject
3
class Dependency(SlotObject):
	"""
	A single dependency edge: an atom (or blocker) plus the context
	it was encountered in (parent, root, depth, priority).
	"""
	__slots__ = ("atom", "blocker", "depth",
		"parent", "onlydeps", "priority", "root")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		# Substitute defaults for attributes that must not be None.
		if self.priority is None:
			self.priority = DepPriority()
		if self.depth is None:
			self.depth = 0
12
(-)EbuildFetchonly.py (+81 lines)
Line 0 Link Here
1
from _emerge.SlotObject import SlotObject
2
import shutil
3
try:
4
	import portage
5
except ImportError:
6
	from os import path as osp
7
	import sys
8
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
9
	import portage
10
import os
11
from portage.elog.messages import eerror
12
class EbuildFetchonly(SlotObject):
	"""
	Synchronously run the "fetch" phase of an ebuild for --fetchonly
	operation. RESTRICT=fetch packages are handled in a private build
	directory so that pkg_nofetch can be spawned.
	"""

	__slots__ = ("fetch_all", "pkg", "pretend", "settings")

	def execute(self):
		# Returns the doebuild exit status (os.EX_OK on success).
		settings = self.settings
		pkg = self.pkg
		portdb = pkg.root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(pkg.cpv)
		settings.setcpv(pkg)
		debug = settings.get("PORTAGE_DEBUG") == "1"
		restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()

		if restrict_fetch:
			# RESTRICT=fetch requires a build dir (see
			# _execute_with_builddir below).
			rval = self._execute_with_builddir()
		else:
			rval = portage.doebuild(ebuild_path, "fetch",
				settings["ROOT"], settings, debug=debug,
				listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
				mydbapi=portdb, tree="porttree")

			if rval != os.EX_OK:
				msg = "Fetch failed for '%s'" % (pkg.cpv,)
				eerror(msg, phase="unpack", key=pkg.cpv)

		return rval

	def _execute_with_builddir(self):
		# To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
		# ensuring sane $PWD (bug #239560) and storing elog
		# messages. Use a private temp directory, in order
		# to avoid locking the main one.
		settings = self.settings
		global_tmpdir = settings["PORTAGE_TMPDIR"]
		from tempfile import mkdtemp
		try:
			private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
		except OSError, e:
			if e.errno != portage.exception.PermissionDenied.errno:
				raise
			raise portage.exception.PermissionDenied(global_tmpdir)
		# Temporarily point PORTAGE_TMPDIR at the private directory,
		# restoring the original value (and removing the private
		# directory) even if _execute() raises.
		settings["PORTAGE_TMPDIR"] = private_tmpdir
		settings.backup_changes("PORTAGE_TMPDIR")
		try:
			retval = self._execute()
		finally:
			settings["PORTAGE_TMPDIR"] = global_tmpdir
			settings.backup_changes("PORTAGE_TMPDIR")
			shutil.rmtree(private_tmpdir)
		return retval

	def _execute(self):
		# Fetch phase body used by _execute_with_builddir(); also
		# processes elog messages collected in the build dir.
		settings = self.settings
		pkg = self.pkg
		root_config = pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(pkg.cpv)
		debug = settings.get("PORTAGE_DEBUG") == "1"
		retval = portage.doebuild(ebuild_path, "fetch",
			self.settings["ROOT"], self.settings, debug=debug,
			listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
			mydbapi=portdb, tree="porttree")

		if retval != os.EX_OK:
			msg = "Fetch failed for '%s'" % (pkg.cpv,)
			eerror(msg, phase="unpack", key=pkg.cpv)

		portage.elog.elog_process(self.pkg.cpv, self.settings)
		return retval
81
(-)BinpkgPrefetcher.py (+38 lines)
Line 0 Link Here
1
from _emerge.BinpkgFetcher import BinpkgFetcher
2
from _emerge.CompositeTask import CompositeTask
3
from _emerge.BinpkgVerifier import BinpkgVerifier
4
import os
5
class BinpkgPrefetcher(CompositeTask):
	"""
	Fetch a binary package in the background, verify it, and inject
	the verified file into the local binary package tree.
	"""

	__slots__ = ("pkg",) + \
		("pkg_path", "_bintree",)

	def _start(self):
		self._bintree = self.pkg.root_config.trees["bintree"]
		fetch_task = BinpkgFetcher(background=self.background,
			logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
			scheduler=self.scheduler)
		self.pkg_path = fetch_task.pkg_path
		self._start_task(fetch_task, self._fetcher_exit)

	def _fetcher_exit(self, fetcher):
		# Stop here if the download failed.
		if self._default_exit(fetcher) != os.EX_OK:
			self.wait()
			return

		verify_task = BinpkgVerifier(background=self.background,
			logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
		self._start_task(verify_task, self._verifier_exit)

	def _verifier_exit(self, verifier):
		# Stop here if verification failed.
		if self._default_exit(verifier) != os.EX_OK:
			self.wait()
			return

		# The file checked out; make it available in the bintree.
		self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)

		self._current_task = None
		self.returncode = os.EX_OK
		self.wait()
38
(-)EbuildBuildDir.py (+96 lines)
Line 0 Link Here
1
from _emerge.SlotObject import SlotObject
2
try:
3
	import portage
4
except ImportError:
5
	from os import path as osp
6
	import sys
7
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
8
	import portage
9
import os
10
import errno
11
class EbuildBuildDir(SlotObject):
	"""
	Manage locking of a package build directory (and its parent
	category directory) via portage.locks.
	"""

	__slots__ = ("dir_path", "pkg", "settings",
		"locked", "_catdir", "_lock_obj")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self.locked = False

	def lock(self):
		"""
		This raises an AlreadyLocked exception if lock() is called
		while a lock is already held. In order to avoid this, call
		unlock() or check whether the "locked" attribute is True
		or False before calling lock().
		"""
		if self._lock_obj is not None:
			raise self.AlreadyLocked((self._lock_obj,))

		dir_path = self.dir_path
		if dir_path is None:
			# No explicit dir_path given: derive PORTAGE_BUILDDIR by
			# running doebuild_environment for the setup phase.
			root_config = self.pkg.root_config
			portdb = root_config.trees["porttree"].dbapi
			ebuild_path = portdb.findname(self.pkg.cpv)
			settings = self.settings
			settings.setcpv(self.pkg)
			debug = settings.get("PORTAGE_DEBUG") == "1"
			use_cache = 1 # always true
			portage.doebuild_environment(ebuild_path, "setup", root_config.root,
				self.settings, debug, use_cache, portdb)
			dir_path = self.settings["PORTAGE_BUILDDIR"]

		catdir = os.path.dirname(dir_path)
		self._catdir = catdir

		portage.util.ensure_dirs(os.path.dirname(catdir),
			gid=portage.portage_gid,
			mode=070, mask=0)
		# Hold the category dir lock only while creating the category
		# dir and acquiring the build dir lock.
		catdir_lock = None
		try:
			catdir_lock = portage.locks.lockdir(catdir)
			portage.util.ensure_dirs(catdir,
				gid=portage.portage_gid,
				mode=070, mask=0)
			self._lock_obj = portage.locks.lockdir(dir_path)
		finally:
			self.locked = self._lock_obj is not None
			if catdir_lock is not None:
				portage.locks.unlockdir(catdir_lock)

	def clean_log(self):
		"""Discard existing log."""
		settings = self.settings

		for x in ('.logid', 'temp/build.log'):
			try:
				os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
			except OSError:
				pass

	def unlock(self):
		# No-op when not locked.
		if self._lock_obj is None:
			return

		portage.locks.unlockdir(self._lock_obj)
		self._lock_obj = None
		self.locked = False

		# Opportunistically remove the category dir if it is now
		# empty; rmdir failures for a non-empty dir are expected.
		catdir = self._catdir
		catdir_lock = None
		try:
			catdir_lock = portage.locks.lockdir(catdir)
		finally:
			if catdir_lock:
				try:
					os.rmdir(catdir)
				except OSError, e:
					if e.errno not in (errno.ENOENT,
						errno.ENOTEMPTY, errno.EEXIST):
						raise
					del e
				portage.locks.unlockdir(catdir_lock)

	class AlreadyLocked(portage.exception.PortageException):
		# Raised by lock() when a lock is already held.
		pass
96
(-)MiscFunctionsProcess.py (+42 lines)
Line 0 Link Here
1
from _emerge.SpawnProcess import SpawnProcess
2
try:
3
	import portage
4
except ImportError:
5
	from os import path as osp
6
	import sys
7
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
8
	import portage
9
import os
10
class MiscFunctionsProcess(SpawnProcess):
	"""
	Spawns misc-functions.sh with an existing ebuild environment.
	"""

	__slots__ = ("commands", "phase", "pkg", "settings")

	def _start(self):
		settings = self.settings
		# EBUILD_PHASE is cleared so misc-functions.sh runs outside of
		# any specific phase.
		settings.pop("EBUILD_PHASE", None)
		portage_bin_path = settings["PORTAGE_BIN_PATH"]
		misc_sh_binary = os.path.join(portage_bin_path,
			os.path.basename(portage.const.MISC_SH_BINARY))

		# The command line is the misc-functions script followed by
		# the shell function names to invoke.
		self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
		self.logfile = settings.get("PORTAGE_LOG_FILE")

		# Remove any stale exit status file before spawning.
		portage._doebuild_exit_status_unlink(
			settings.get("EBUILD_EXIT_STATUS_FILE"))

		SpawnProcess._start(self)

	def _spawn(self, args, **kwargs):
		settings = self.settings
		debug = settings.get("PORTAGE_DEBUG") == "1"
		return portage.spawn(" ".join(args), settings,
			debug=debug, **kwargs)

	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		# Reconcile the process exit status with the exit status file
		# written by the script.
		self.returncode = portage._doebuild_exit_status_check_and_log(
			self.settings, self.phase, self.returncode)
42
(-)DepPrioritySatisfiedRange.py (+96 lines)
Line 0 Link Here
1
from _emerge.DepPriority import DepPriority
2
class DepPrioritySatisfiedRange(object):
	"""
	DepPriority                         Index      Category

	not satisfied and buildtime                    HARD
	not satisfied and runtime              7       MEDIUM
	not satisfied and runtime_post         6       MEDIUM_SOFT
	satisfied and buildtime and rebuild    5       SOFT
	satisfied and buildtime                4       SOFT
	satisfied and runtime                  3       SOFT
	satisfied and runtime_post             2       SOFT
	optional                               1       SOFT
	(none of the above)                    0       NONE
	"""
	MEDIUM      = 7
	MEDIUM_SOFT = 6
	SOFT        = 5
	NONE        = 0

	@classmethod
	def _ignore_optional(cls, priority):
		# Only plain DepPriority instances are ever ignorable.
		return priority.__class__ is DepPriority and \
			bool(priority.optional)

	@classmethod
	def _ignore_satisfied_runtime_post(cls, priority):
		if priority.__class__ is not DepPriority:
			return False
		return bool(priority.optional or
			(priority.satisfied and priority.runtime_post))

	@classmethod
	def _ignore_satisfied_runtime(cls, priority):
		if priority.__class__ is not DepPriority:
			return False
		if priority.optional:
			return True
		# Satisfied deps are ignorable unless they are buildtime deps.
		return bool(priority.satisfied) and not priority.buildtime

	@classmethod
	def _ignore_satisfied_buildtime(cls, priority):
		if priority.__class__ is not DepPriority:
			return False
		if priority.optional:
			return True
		if not priority.satisfied:
			return False
		# Satisfied deps are ignorable except buildtime deps that are
		# flagged for rebuild.
		return not (priority.buildtime and priority.rebuild)

	@classmethod
	def _ignore_satisfied_buildtime_rebuild(cls, priority):
		return priority.__class__ is DepPriority and \
			bool(priority.optional or priority.satisfied)

	@classmethod
	def _ignore_runtime_post(cls, priority):
		return priority.__class__ is DepPriority and \
			bool(priority.optional or
				priority.satisfied or
				priority.runtime_post)

	@classmethod
	def _ignore_runtime(cls, priority):
		return priority.__class__ is DepPriority and \
			bool(priority.satisfied or
				not priority.buildtime)

	ignore_medium      = _ignore_runtime
	ignore_medium_soft = _ignore_runtime_post
	ignore_soft        = _ignore_satisfied_buildtime_rebuild


# Predicates indexed by ignore threshold, from NONE (0, where None
# means "ignore nothing") up to MEDIUM (7).
DepPrioritySatisfiedRange.ignore_priority = (
	None,
	DepPrioritySatisfiedRange._ignore_optional,
	DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
	DepPrioritySatisfiedRange._ignore_satisfied_runtime,
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
	DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
	DepPrioritySatisfiedRange._ignore_runtime_post,
	DepPrioritySatisfiedRange._ignore_runtime
)
(-)EbuildBuild.py (+271 lines)
Line 0 Link Here
1
from _emerge.EbuildExecuter import EbuildExecuter
2
from _emerge.EbuildPhase import EbuildPhase
3
from _emerge.EbuildBinpkg import EbuildBinpkg
4
from _emerge.EbuildFetcher import EbuildFetcher
5
from _emerge.CompositeTask import CompositeTask
6
from _emerge.EbuildMerge import EbuildMerge
7
from _emerge.EbuildFetchonly import EbuildFetchonly
8
from _emerge.EbuildBuildDir import EbuildBuildDir
9
from portage.util import writemsg
10
try:
11
	import portage
12
except ImportError:
13
	from os import path as osp
14
	import sys
15
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
16
	import portage
17
import os
18
from portage.output import colorize
19
class EbuildBuild(CompositeTask):
	"""
	Build (and optionally binary-package) a single ebuild. The work runs
	as a chain of sub-tasks: optional prefetch wait -> fetch ->
	clean/compile (EbuildExecuter) -> optional EbuildBinpkg packaging.
	On success, install() merges the result and releases the build dir
	lock.
	"""

	__slots__ = ("args_set", "config_pool", "find_blockers",
		"ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
		"prefetcher", "settings", "world_atom") + \
		("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")

	def _start(self):
		"""Prepare settings for this package and wait for any running
		prefetcher before starting the foreground fetch."""

		pkg = self.pkg
		settings = self.settings
		root_config = pkg.root_config
		tree = "porttree"
		self._tree = tree
		portdb = root_config.trees[tree].dbapi
		settings.setcpv(pkg)
		settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
		ebuild_path = portdb.findname(self.pkg.cpv)
		self._ebuild_path = ebuild_path

		prefetcher = self.prefetcher
		if prefetcher is None:
			pass
		elif not prefetcher.isAlive():
			prefetcher.cancel()
		elif prefetcher.poll() is None:
			# Prefetch is still running; become its exit listener and
			# continue in _prefetch_exit when it finishes.
			waiting_msg = "Fetching files " + \
				"in the background. " + \
				"To view fetch progress, run `tail -f " + \
				"/var/log/emerge-fetch.log` in another " + \
				"terminal."
			msg_prefix = colorize("GOOD", " * ")
			from textwrap import wrap
			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
				for line in wrap(waiting_msg, 65))
			if not self.background:
				writemsg(waiting_msg, noiselevel=-1)

			self._current_task = prefetcher
			prefetcher.addExitListener(self._prefetch_exit)
			return

		self._prefetch_exit(prefetcher)

	def _prefetch_exit(self, prefetcher):
		"""Start the foreground fetch (or run fetchonly synchronously)
		once any prefetcher has finished."""

		opts = self.opts
		pkg = self.pkg
		settings = self.settings

		if opts.fetchonly:
			# In fetchonly mode the synchronous EbuildFetchonly task is
			# the whole job; its result is this task's returncode.
			fetcher = EbuildFetchonly(
				fetch_all=opts.fetch_all_uri,
				pkg=pkg, pretend=opts.pretend,
				settings=settings)
			retval = fetcher.execute()
			self.returncode = retval
			self.wait()
			return

		fetcher = EbuildFetcher(config_pool=self.config_pool,
			fetchall=opts.fetch_all_uri,
			fetchonly=opts.fetchonly,
			background=self.background,
			pkg=pkg, scheduler=self.scheduler)

		self._start_task(fetcher, self._fetch_exit)

	def _fetch_exit(self, fetcher):
		"""Handle fetch completion: on failure preserve the fetch log
		and stop; on success lock the build dir and start compiling."""
		opts = self.opts
		pkg = self.pkg

		if opts.fetchonly:
			fetch_failed = self._final_exit(fetcher) != os.EX_OK
		else:
			fetch_failed = self._default_exit(fetcher) != os.EX_OK

		if fetch_failed and fetcher.logfile is not None and \
			os.path.exists(fetcher.logfile):
			# Keep the fetch log around so the failure can be reported.
			self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile

		if not fetch_failed and fetcher.logfile is not None:
			# Fetch was successful, so remove the fetch log.
			try:
				os.unlink(fetcher.logfile)
			except OSError:
				pass

		if fetch_failed or opts.fetchonly:
			self.wait()
			return

		logger = self.logger
		pkg_count = self.pkg_count
		scheduler = self.scheduler
		settings = self.settings
		features = settings.features
		ebuild_path = self._ebuild_path
		system_set = pkg.root_config.sets["system"]

		self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
		self._build_dir.lock()

		# Cleaning is triggered before the setup
		# phase, in portage.doebuild().
		msg = " === (%s of %s) Cleaning (%s::%s)" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
		short_msg = "emerge: (%s of %s) %s Clean" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
		logger.log(msg, short_msg=short_msg)

		#buildsyspkg: Check if we need to _force_ binary package creation
		self._issyspkg = "buildsyspkg" in features and \
				system_set.findAtomForPackage(pkg) and \
				not opts.buildpkg

		if opts.buildpkg or self._issyspkg:

			self._buildpkg = True

			msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
			short_msg = "emerge: (%s of %s) %s Compile" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
			logger.log(msg, short_msg=short_msg)

		else:
			msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
			short_msg = "emerge: (%s of %s) %s Compile" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
			logger.log(msg, short_msg=short_msg)

		build = EbuildExecuter(background=self.background, pkg=pkg,
			scheduler=scheduler, settings=settings)
		self._start_task(build, self._build_exit)

	def _unlock_builddir(self):
		# Flush pending elog messages before dropping the lock.
		portage.elog.elog_process(self.pkg.cpv, self.settings)
		self._build_dir.unlock()

	def _build_exit(self, build):
		"""After compile: either finish the task, or go on to binary
		packaging when buildpkg/buildsyspkg is in effect."""
		if self._default_exit(build) != os.EX_OK:
			self._unlock_builddir()
			self.wait()
			return

		if not self._buildpkg:
			self._final_exit(build)
			self.wait()
			return

		if self._issyspkg:
			msg = ">>> This is a system package, " + \
				"let's pack a rescue tarball.\n"

			log_path = self.settings.get("PORTAGE_LOG_FILE")
			if log_path is not None:
				log_file = open(log_path, 'a')
				try:
					log_file.write(msg)
				finally:
					log_file.close()

			if not self.background:
				portage.writemsg_stdout(msg, noiselevel=-1)

		packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
			scheduler=self.scheduler, settings=self.settings)

		self._start_task(packager, self._buildpkg_exit)

	def _buildpkg_exit(self, packager):
		"""
		Release the build dir lock when there is a failure or
		when in buildpkgonly mode. Otherwise, the lock will
		be released when merge() is called.
		"""

		if self._default_exit(packager) != os.EX_OK:
			self._unlock_builddir()
			self.wait()
			return

		if self.opts.buildpkgonly:
			# Need to call "clean" phase for buildpkgonly mode
			portage.elog.elog_process(self.pkg.cpv, self.settings)
			phase = "clean"
			clean_phase = EbuildPhase(background=self.background,
				pkg=self.pkg, phase=phase,
				scheduler=self.scheduler, settings=self.settings,
				tree=self._tree)
			self._start_task(clean_phase, self._clean_exit)
			return

		# Continue holding the builddir lock until
		# after the package has been installed.
		self._current_task = None
		self.returncode = packager.returncode
		self.wait()

	def _clean_exit(self, clean_phase):
		# Final sub-task of buildpkgonly mode; always release the lock
		# on failure or when buildpkgonly is set.
		if self._final_exit(clean_phase) != os.EX_OK or \
			self.opts.buildpkgonly:
			self._unlock_builddir()
		self.wait()

	def install(self):
		"""
		Install the package and then clean up and release locks.
		Only call this after the build has completed successfully
		and neither fetchonly nor buildpkgonly mode are enabled.

		@returns: the EbuildMerge execute() result
		"""

		pkg = self.pkg
		pkg_count = self.pkg_count
		logger = self.logger
		ebuild_path = self._ebuild_path

		merge = EbuildMerge(find_blockers=self.find_blockers,
			ldpath_mtimes=self.ldpath_mtimes, logger=logger, pkg=pkg,
			pkg_count=pkg_count, pkg_path=ebuild_path,
			scheduler=self.scheduler,
			settings=self.settings, tree=self._tree,
			world_atom=self.world_atom)

		msg = " === (%s of %s) Merging (%s::%s)" % \
			(pkg_count.curval, pkg_count.maxval,
			pkg.cpv, ebuild_path)
		short_msg = "emerge: (%s of %s) %s Merge" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
		logger.log(msg, short_msg=short_msg)

		# Always release the build dir lock, even if merging raises.
		try:
			rval = merge.execute()
		finally:
			self._unlock_builddir()

		return rval
271
(-)CompositeTask.py (+115 lines)
Line 0 Link Here
1
from _emerge.AsynchronousTask import AsynchronousTask
2
import os
3
class CompositeTask(AsynchronousTask):
	"""
	An asynchronous task composed of a chain of sub-tasks. At most one
	sub-task (self._current_task) runs at a time; sub-task exit
	listeners drive the chain forward until the final returncode is set.
	"""

	__slots__ = ("scheduler",) + ("_current_task",)

	def isAlive(self):
		# Alive for as long as some sub-task is still current.
		return self._current_task is not None

	def cancel(self):
		# Mark this task cancelled and propagate to the running sub-task.
		self.cancelled = True
		if self._current_task is not None:
			self._current_task.cancel()

	def _poll(self):
		"""
		This does a loop calling self._current_task.poll()
		repeatedly as long as the value of self._current_task
		keeps changing. It calls poll() a maximum of one time
		for a given self._current_task instance. This is useful
		since calling poll() on a task can trigger advance to
		the next task, which could eventually lead to the
		returncode being set in cases when polling only a single
		task would not have the same effect.
		"""

		prev = None
		while True:
			task = self._current_task
			if task is None or task is prev:
				# don't poll the same task more than once
				break
			task.poll()
			prev = task

		return self.returncode

	def _wait(self):
		# Wait for each sub-task in turn. Exit listeners are expected
		# to replace self._current_task before task.wait() returns.

		prev = None
		while True:
			task = self._current_task
			if task is None:
				# all sub-tasks complete
				break
			if task is prev:
				# Before the task.wait() method returned, an exit
				# listener should have set self._current_task to either
				# a different task or None. Something is wrong.
				raise AssertionError("self._current_task has not " + \
					"changed since calling wait", self, task)
			task.wait()
			prev = task

		return self.returncode

	def _assert_current(self, task):
		"""
		Raises an AssertionError if the given task is not the
		same one as self._current_task. This can be useful
		for detecting bugs.
		"""
		if task is not self._current_task:
			raise AssertionError("Unrecognized task: %s" % (task,))

	def _default_exit(self, task):
		"""
		Calls _assert_current() on the given task and then sets the
		composite returncode attribute if task.returncode != os.EX_OK.
		If the task failed then self._current_task will be set to None.
		Subclasses can use this as a generic task exit callback.

		@rtype: int
		@returns: The task.returncode attribute.
		"""
		self._assert_current(task)
		if task.returncode != os.EX_OK:
			self.returncode = task.returncode
			self._current_task = None
		return task.returncode

	def _final_exit(self, task):
		"""
		Assumes that task is the final task of this composite task.
		Calls _default_exit() and sets self.returncode to the task's
		returncode and sets self._current_task to None.
		"""
		self._default_exit(task)
		self._current_task = None
		self.returncode = task.returncode
		return self.returncode

	def _default_final_exit(self, task):
		"""
		This calls _final_exit() and then wait().

		Subclasses can use this as a generic final task exit callback.

		@returns: the result of wait() (the final returncode)
		"""
		self._final_exit(task)
		return self.wait()

	def _start_task(self, task, exit_handler):
		"""
		Register exit handler for the given task, set it
		as self._current_task, and call task.start().

		Subclasses can use this as a generic way to start
		a task.
		"""
		task.addExitListener(exit_handler)
		self._current_task = task
		task.start()
115
(-)PackageArg.py (+15 lines)
Line 0 Link Here
1
from _emerge.DependencyArg import DependencyArg
2
try:
3
	import portage
4
except ImportError:
5
	from os import path as osp
6
	import sys
7
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
8
	import portage
9
class PackageArg(DependencyArg):
	"""A dependency argument wrapping an already-matched Package object."""

	def __init__(self, package=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.package = package
		# Pin the exact version with an "=" atom built from the cpv.
		exact_atom = portage.dep.Atom("=" + package.cpv)
		self.atom = exact_atom
		self.set = (exact_atom, )
15
(-)SetArg.py (+8 lines)
Line 0 Link Here
1
from _emerge.DependencyArg import DependencyArg
2
from portage.sets import SETPREFIX
3
class SetArg(DependencyArg):
	"""A dependency argument naming a package set (arg begins with SETPREFIX)."""

	def __init__(self, set=None, **kwargs):
		DependencyArg.__init__(self, **kwargs)
		self.set = set
		# The set name is the argument string minus the set prefix.
		prefix_len = len(SETPREFIX)
		self.name = self.arg[prefix_len:]
8
(-)SpawnProcess.py (+219 lines)
Line 0 Link Here
1
from _emerge.SubProcess import SubProcess
2
from _emerge.PollConstants import PollConstants
3
import sys
4
from portage.cache.mappings import slot_dict_class
5
try:
6
	import portage
7
except ImportError:
8
	from os import path as osp
9
	import sys
10
	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
11
	import portage
12
import os
13
import fcntl
14
import errno
15
import array
16
class SpawnProcess(SubProcess):

	"""
	Constructor keyword args are passed into portage.process.spawn().
	The required "args" keyword argument will be passed as the first
	spawn() argument.
	"""

	# Attribute names that are forwarded as keyword args to
	# portage.process.spawn() when set.
	_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
		"uid", "gid", "groups", "umask", "logfile",
		"path_lookup", "pre_exec")

	__slots__ = ("args",) + \
		_spawn_kwarg_names

	# Files owned by this task: "log" (the open logfile), "process"
	# (read end of the output pipe), "stdout" (dup of the caller's
	# stdout used to tee output when not in the background).
	_file_names = ("log", "process", "stdout")
	_files_dict = slot_dict_class(_file_names, prefix="")

	def _start(self):
		"""Spawn the child process and register an output handler with
		the scheduler so child output is consumed from a poll() loop."""

		if self.cancelled:
			return

		if self.fd_pipes is None:
			self.fd_pipes = {}
		fd_pipes = self.fd_pipes
		# Default the child's standard streams to our own.
		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.itervalues():
			if fd == sys.stdout.fileno():
				sys.stdout.flush()
			if fd == sys.stderr.fileno():
				sys.stderr.flush()

		logfile = self.logfile
		self._files = self._files_dict()
		files = self._files

		# Make the read end non-blocking so the poll-driven handlers
		# below never stall on a partial read.
		master_fd, slave_fd = self._pipe(fd_pipes)
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		null_input = None
		fd_pipes_orig = fd_pipes.copy()
		if self.background:
			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = open('/dev/null', 'rb')
			fd_pipes[0] = null_input.fileno()
		else:
			fd_pipes[0] = fd_pipes_orig[0]

		files.process = os.fdopen(master_fd, 'rb')
		if logfile is not None:
			# Tee mode: the child writes stdout/stderr into the pipe
			# and _output_handler copies it to the log (and to our
			# stdout when not in the background).
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

			files.log = open(logfile, mode='ab')
			portage.util.apply_secpass_permissions(logfile,
				uid=portage.portage_uid, gid=portage.portage_gid,
				mode=0660)

			if not self.background:
				files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')

			output_handler = self._output_handler

		else:

			# Create a dummy pipe so the scheduler can monitor
			# the process from inside a poll() loop.
			# NOTE(review): _dummy_pipe_fd presumably comes from
			# SubProcess — confirm.
			fd_pipes[self._dummy_pipe_fd] = slave_fd
			if self.background:
				fd_pipes[1] = slave_fd
				fd_pipes[2] = slave_fd
			output_handler = self._dummy_handler

		# Collect the spawn() keyword args from our attributes.
		kwargs = {}
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
			if v is not None:
				kwargs[k] = v

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		# The logfile is handled by this class, not by spawn().
		kwargs.pop("logfile", None)

		self._reg_id = self.scheduler.register(files.process.fileno(),
			self._registered_events, output_handler)
		self._registered = True

		retval = self._spawn(self.args, **kwargs)

		os.close(slave_fd)
		if null_input is not None:
			null_input.close()

		if isinstance(retval, int):
			# spawn failed
			self._unregister()
			self.returncode = retval
			self.wait()
			return

		self.pid = retval[0]
		# Presumably the pid is reaped by this task's own wait
		# machinery, so it is removed from the global spawned_pids
		# list here — TODO confirm against portage.process.
		portage.process.spawned_pids.remove(self.pid)

	def _pipe(self, fd_pipes):
		"""
		Create the (master, slave) fd pair used to capture child output.

		@type fd_pipes: dict
		@param fd_pipes: pipes from which to copy terminal size if desired.
		"""
		return os.pipe()

	def _spawn(self, args, **kwargs):
		# Hook point: subclasses may override how the child is spawned.
		return portage.process.spawn(args, **kwargs)

	def _output_handler(self, fd, event):
		"""Scheduler callback: copy available child output to the log
		file and, unless in the background, to stdout. An empty read
		(EOF) unregisters this handler and completes the task."""

		if event & PollConstants.POLLIN:

			files = self._files
			buf = array.array('B')
			try:
				buf.fromfile(files.process, self._bufsize)
			except EOFError:
				pass

			if buf:
				if not self.background:
					write_successful = False
					failures = 0
					while True:
						try:
							if not write_successful:
								buf.tofile(files.stdout)
								write_successful = True
							files.stdout.flush()
							break
						except IOError, e:
							if e.errno != errno.EAGAIN:
								raise
							del e
							failures += 1
							if failures > 50:
								# Avoid a potentially infinite loop. In
								# most cases, the failure count is zero
								# and it's unlikely to exceed 1.
								raise

							# This means that a subprocess has put an inherited
							# stdio file descriptor (typically stdin) into
							# O_NONBLOCK mode. This is not acceptable (see bug
							# #264435), so revert it. We need to use a loop
							# here since there's a race condition due to
							# parallel processes being able to change the
							# flags on the inherited file descriptor.
							# TODO: When possible, avoid having child processes
							# inherit stdio file descriptors from portage
							# (maybe it can't be avoided with
							# PROPERTIES=interactive).
							fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
								fcntl.fcntl(files.stdout.fileno(),
								fcntl.F_GETFL) ^ os.O_NONBLOCK)

				buf.tofile(files.log)
				files.log.flush()
			else:
				# Empty read means EOF: the child closed its end.
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered

	def _dummy_handler(self, fd, event):
		"""
		This method is mainly interested in detecting EOF, since
		the only purpose of the pipe is to allow the scheduler to
		monitor the process from inside a poll() loop.
		"""

		if event & PollConstants.POLLIN:

			buf = array.array('B')
			try:
				buf.fromfile(self._files.process, self._bufsize)
			except EOFError:
				pass

			if buf:
				# Data is discarded; only EOF matters here.
				pass
			else:
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered
219

Return to bug 275047