Lines 3-22
Link Here
|
3 |
# Distributed under the terms of the GNU General Public License v2 |
3 |
# Distributed under the terms of the GNU General Public License v2 |
4 |
# $Id$ |
4 |
# $Id$ |
5 |
|
5 |
|
6 |
import array |
|
|
7 |
import codecs |
8 |
from collections import deque |
9 |
import fcntl |
10 |
import formatter |
6 |
import formatter |
11 |
import logging |
7 |
import logging |
12 |
import pwd |
8 |
import pwd |
13 |
import select |
9 |
import select |
14 |
import shlex |
10 |
import shlex |
15 |
import shutil |
|
|
16 |
import signal |
11 |
import signal |
17 |
import sys |
12 |
import sys |
18 |
import textwrap |
13 |
import textwrap |
19 |
import urlparse |
|
|
20 |
import weakref |
14 |
import weakref |
21 |
import gc |
15 |
import gc |
22 |
import os, stat |
16 |
import os, stat |
Lines 34-40
Link Here
|
34 |
|
28 |
|
35 |
import _emerge.help |
29 |
import _emerge.help |
36 |
import portage.xpak, commands, errno, re, socket, time |
30 |
import portage.xpak, commands, errno, re, socket, time |
37 |
from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \ |
31 |
from portage.output import blue, bold, colorize, darkblue, darkgreen, green, \ |
38 |
nc_len, red, teal, turquoise, xtermTitle, \ |
32 |
nc_len, red, teal, turquoise, xtermTitle, \ |
39 |
xtermTitleReset, yellow |
33 |
xtermTitleReset, yellow |
40 |
from portage.output import create_color_func |
34 |
from portage.output import create_color_func |
Lines 59-68
Link Here
|
59 |
|
53 |
|
60 |
from itertools import chain, izip |
54 |
from itertools import chain, izip |
61 |
|
55 |
|
62 |
try: |
56 |
from _emerge.SlotObject import SlotObject |
63 |
import cPickle as pickle |
57 |
from _emerge.DepPriority import DepPriority |
64 |
except ImportError: |
58 |
from _emerge.BlockerDepPriority import BlockerDepPriority |
65 |
import pickle |
59 |
from _emerge.UnmergeDepPriority import UnmergeDepPriority |
|
|
60 |
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange |
61 |
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange |
62 |
from _emerge.Task import Task |
63 |
from _emerge.Blocker import Blocker |
64 |
from _emerge.PollConstants import PollConstants |
65 |
from _emerge.AsynchronousTask import AsynchronousTask |
66 |
from _emerge.CompositeTask import CompositeTask |
67 |
from _emerge.EbuildFetcher import EbuildFetcher |
68 |
from _emerge.EbuildBuild import EbuildBuild |
69 |
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase |
70 |
from _emerge.EbuildPhase import EbuildPhase |
71 |
from _emerge.Binpkg import Binpkg |
72 |
from _emerge.BinpkgPrefetcher import BinpkgPrefetcher |
73 |
from _emerge.PackageMerge import PackageMerge |
74 |
from _emerge.DependencyArg import DependencyArg |
75 |
from _emerge.AtomArg import AtomArg |
76 |
from _emerge.PackageArg import PackageArg |
77 |
from _emerge.SetArg import SetArg |
78 |
from _emerge.Dependency import Dependency |
79 |
from _emerge.BlockerCache import BlockerCache |
80 |
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi |
81 |
from _emerge.RepoDisplay import RepoDisplay |
82 |
from _emerge.UseFlagDisplay import UseFlagDisplay |
83 |
from _emerge.PollSelectAdapter import PollSelectAdapter |
84 |
from _emerge.SequentialTaskQueue import SequentialTaskQueue |
85 |
from _emerge.ProgressHandler import ProgressHandler |
66 |
|
86 |
|
67 |
try: |
87 |
try: |
68 |
from cStringIO import StringIO |
88 |
from cStringIO import StringIO |
Lines 869-1160
Link Here
|
869 |
else: |
889 |
else: |
870 |
yield flag |
890 |
yield flag |
871 |
|
891 |
|
872 |
class SlotObject(object):
    """
    Base class that initializes, from keyword arguments, every slot
    declared anywhere in the subclass hierarchy. Slots that are not
    supplied as keywords default to None.
    """

    __slots__ = ("__weakref__",)

    def __init__(self, **kwargs):
        # Walk the full inheritance tree so that slots declared on any
        # ancestor class are initialized too. SlotObject itself is
        # skipped so __weakref__ is never touched.
        pending = [self.__class__]
        while pending:
            cls = pending.pop()
            if cls is SlotObject:
                continue
            pending.extend(cls.__bases__)
            declared = getattr(cls, "__slots__", None)
            if not declared:
                continue
            for name in declared:
                setattr(self, name, kwargs.get(name, None))

    def copy(self):
        """
        Create a new instance and copy all attributes
        defined from __slots__ (including those from
        inherited classes).
        """
        clone = self.__class__()
        pending = [self.__class__]
        while pending:
            cls = pending.pop()
            if cls is SlotObject:
                continue
            pending.extend(cls.__bases__)
            declared = getattr(cls, "__slots__", None)
            if not declared:
                continue
            for name in declared:
                setattr(clone, name, getattr(self, name))
        return clone
910 |
|
911 |
class AbstractDepPriority(SlotObject):
    """
    Comparable dependency-priority base class. All rich comparisons
    delegate to the numeric hardness value produced by a subclass's
    __int__() implementation, compared directly against `other`.
    """

    __slots__ = ("buildtime", "runtime", "runtime_post")

    def __lt__(self, other):
        return self.__int__() < other

    def __le__(self, other):
        return self.__int__() <= other

    def __eq__(self, other):
        return self.__int__() == other

    def __ne__(self, other):
        return self.__int__() != other

    def __gt__(self, other):
        return self.__int__() > other

    def __ge__(self, other):
        return self.__int__() >= other

    def copy(self):
        # A shallow copy is sufficient: all slots hold immutable flags.
        import copy as _copy
        return _copy.copy(self)
935 |
|
936 |
class DepPriority(AbstractDepPriority):

    __slots__ = ("satisfied", "optional", "rebuild")

    def __int__(self):
        """
        Note: These priorities are only used for measuring hardness
        in the circular dependency display via digraph.debug_print(),
        and nothing more. For actual merge order calculations, the
        measures defined by the DepPriorityNormalRange and
        DepPrioritySatisfiedRange classes are used.

        Attributes                            Hardness

        buildtime                                0
        runtime                                 -1
        runtime_post                            -2
        optional                                -3
        (none of the above)                     -4
        """
        # First flag that is set wins; its position determines hardness.
        for hardness, flag in enumerate((self.buildtime, self.runtime,
                self.runtime_post, self.optional)):
            if flag:
                return -hardness
        return -4

    def __str__(self):
        # "optional" takes display precedence over every other flag.
        if self.optional:
            return "optional"
        for label in ("buildtime", "runtime", "runtime_post"):
            if getattr(self, label):
                return label
        return "soft"
978 |
|
979 |
class BlockerDepPriority(DepPriority):
    """
    Priority for blocker dependencies: hardness is always 0 and the
    display name is 'blocker'. A shared singleton is available as
    BlockerDepPriority.instance.
    """

    __slots__ = ()

    def __int__(self):
        return 0

    def __str__(self):
        return 'blocker'

BlockerDepPriority.instance = BlockerDepPriority()
988 |
|
989 |
class UnmergeDepPriority(AbstractDepPriority):
    __slots__ = ("optional", "satisfied",)
    """
    Combination of properties           Priority  Category

    runtime                                0       HARD
    runtime_post                          -1       HARD
    buildtime                             -2       SOFT
    (none of the above)                   -2       SOFT
    """

    MAX = 0
    SOFT = -2
    MIN = -2

    def __int__(self):
        if self.runtime:
            return 0
        if self.runtime_post:
            return -1
        # buildtime and "none of the above" share the softest value.
        return -2

    def __str__(self):
        return "hard" if self.__int__() > self.SOFT else "soft"
1018 |
|
1019 |
class DepPriorityNormalRange(object):
    """
    DepPriority properties              Index      Category

    buildtime                                      HARD
    runtime                                3       MEDIUM
    runtime_post                           2       MEDIUM_SOFT
    optional                               1       SOFT
    (none of the above)                    0       NONE
    """
    MEDIUM = 3
    MEDIUM_SOFT = 2
    SOFT = 1
    NONE = 0

    # Each predicate answers: may this edge be ignored at the given
    # severity level? Only plain DepPriority instances are ignorable.

    @classmethod
    def _ignore_optional(cls, priority):
        return priority.__class__ is DepPriority and \
            bool(priority.optional)

    @classmethod
    def _ignore_runtime_post(cls, priority):
        return priority.__class__ is DepPriority and \
            bool(priority.optional or priority.runtime_post)

    @classmethod
    def _ignore_runtime(cls, priority):
        return priority.__class__ is DepPriority and \
            not priority.buildtime

    ignore_medium = _ignore_runtime
    ignore_medium_soft = _ignore_runtime_post
    ignore_soft = _ignore_optional

DepPriorityNormalRange.ignore_priority = (
    None,
    DepPriorityNormalRange._ignore_optional,
    DepPriorityNormalRange._ignore_runtime_post,
    DepPriorityNormalRange._ignore_runtime
)
1062 |
|
1063 |
class DepPrioritySatisfiedRange(object):
    """
    DepPriority                         Index      Category

    not satisfied and buildtime                    HARD
    not satisfied and runtime              7       MEDIUM
    not satisfied and runtime_post         6       MEDIUM_SOFT
    satisfied and buildtime and rebuild    5       SOFT
    satisfied and buildtime                4       SOFT
    satisfied and runtime                  3       SOFT
    satisfied and runtime_post             2       SOFT
    optional                               1       SOFT
    (none of the above)                    0       NONE
    """
    MEDIUM = 7
    MEDIUM_SOFT = 6
    SOFT = 5
    NONE = 0

    # Each predicate answers: may this edge be ignored at the given
    # severity level? Only plain DepPriority instances are ignorable.

    @classmethod
    def _ignore_optional(cls, priority):
        return priority.__class__ is DepPriority and \
            bool(priority.optional)

    @classmethod
    def _ignore_satisfied_runtime_post(cls, priority):
        if priority.__class__ is not DepPriority:
            return False
        if priority.optional:
            return True
        return bool(priority.satisfied and priority.runtime_post)

    @classmethod
    def _ignore_satisfied_runtime(cls, priority):
        if priority.__class__ is not DepPriority:
            return False
        if priority.optional:
            return True
        return bool(priority.satisfied and not priority.buildtime)

    @classmethod
    def _ignore_satisfied_buildtime(cls, priority):
        if priority.__class__ is not DepPriority:
            return False
        if priority.optional:
            return True
        if not priority.satisfied:
            return False
        # Satisfied buildtime deps are only ignorable without a rebuild.
        if priority.buildtime:
            return not priority.rebuild
        return True

    @classmethod
    def _ignore_satisfied_buildtime_rebuild(cls, priority):
        if priority.__class__ is not DepPriority:
            return False
        return bool(priority.optional or priority.satisfied)

    @classmethod
    def _ignore_runtime_post(cls, priority):
        return priority.__class__ is DepPriority and bool(
            priority.optional or
            priority.satisfied or
            priority.runtime_post)

    @classmethod
    def _ignore_runtime(cls, priority):
        return priority.__class__ is DepPriority and bool(
            priority.satisfied or not priority.buildtime)

    ignore_medium = _ignore_runtime
    ignore_medium_soft = _ignore_runtime_post
    ignore_soft = _ignore_satisfied_buildtime_rebuild

DepPrioritySatisfiedRange.ignore_priority = (
    None,
    DepPrioritySatisfiedRange._ignore_optional,
    DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
    DepPrioritySatisfiedRange._ignore_satisfied_runtime,
    DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
    DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
    DepPrioritySatisfiedRange._ignore_runtime_post,
    DepPrioritySatisfiedRange._ignore_runtime
)
1157 |
|
1158 |
def _find_deep_system_runtime_deps(graph): |
892 |
def _find_deep_system_runtime_deps(graph): |
1159 |
deep_system_deps = set() |
893 |
deep_system_deps = set() |
1160 |
node_stack = [] |
894 |
node_stack = [] |
Lines 1533-1590
Link Here
|
1533 |
shown_licenses.add(l) |
1267 |
shown_licenses.add(l) |
1534 |
return have_eapi_mask |
1268 |
return have_eapi_mask |
1535 |
|
1269 |
|
1536 |
class Task(SlotObject):
    """
    A hashable object identified by a key tuple that subclasses supply
    via _get_hash_key(). Equality, hashing, length, indexing, iteration,
    membership and str() all delegate to that key.
    """

    __slots__ = ("_hash_key", "_hash_value")

    def _get_hash_key(self):
        key = getattr(self, "_hash_key", None)
        if key is None:
            # Subclasses are responsible for populating _hash_key.
            raise NotImplementedError(self)
        return key

    def __eq__(self, other):
        return self._get_hash_key() == other

    def __ne__(self, other):
        return self._get_hash_key() != other

    def __hash__(self):
        # Cache the hash, since the key never changes once set.
        if getattr(self, "_hash_value", None) is None:
            self._hash_value = hash(self._get_hash_key())
        return self._hash_value

    def __len__(self):
        return len(self._get_hash_key())

    def __getitem__(self, key):
        return self._get_hash_key()[key]

    def __iter__(self):
        return iter(self._get_hash_key())

    def __contains__(self, key):
        return key in self._get_hash_key()

    def __str__(self):
        return str(self._get_hash_key())
1571 |
|
1572 |
class Blocker(Task):
    """Task representing a blocker atom for a particular root."""

    __hash__ = Task.__hash__
    __slots__ = ("root", "atom", "cp", "eapi", "satisfied")

    def __init__(self, **kwargs):
        Task.__init__(self, **kwargs)
        # Derive the category/package name from the blocker atom.
        self.cp = portage.dep_getkey(self.atom)

    def _get_hash_key(self):
        if getattr(self, "_hash_key", None) is None:
            self._hash_key = \
                ("blocks", self.root, self.atom, self.eapi)
        return self._hash_key
1587 |
|
1588 |
class Package(Task): |
1270 |
class Package(Task): |
1589 |
|
1271 |
|
1590 |
__hash__ = Task.__hash__ |
1272 |
__hash__ = Task.__hash__ |
Lines 1757-3383
Link Here
|
1757 |
v = 0 |
1439 |
v = 0 |
1758 |
self._pkg.mtime = v |
1440 |
self._pkg.mtime = v |
1759 |
|
1441 |
|
1760 |
class EbuildFetchonly(SlotObject):
    """
    Fetch (or pretend-fetch) the distfiles for a single ebuild without
    building it, honoring RESTRICT=fetch by spawning with a build dir.
    """

    __slots__ = ("fetch_all", "pkg", "pretend", "settings")

    def execute(self):
        settings = self.settings
        pkg = self.pkg
        portdb = pkg.root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(pkg.cpv)
        settings.setcpv(pkg)
        debug = settings.get("PORTAGE_DEBUG") == "1"
        restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()

        if restrict_fetch:
            # pkg_nofetch may need to run, which requires a build dir.
            rval = self._execute_with_builddir()
        else:
            rval = portage.doebuild(ebuild_path, "fetch",
                settings["ROOT"], settings, debug=debug,
                listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
                mydbapi=portdb, tree="porttree")

        if rval != os.EX_OK:
            msg = "Fetch failed for '%s'" % (pkg.cpv,)
            eerror(msg, phase="unpack", key=pkg.cpv)

        return rval

    def _execute_with_builddir(self):
        # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
        # ensuring sane $PWD (bug #239560) and storing elog
        # messages. Use a private temp directory, in order
        # to avoid locking the main one.
        settings = self.settings
        global_tmpdir = settings["PORTAGE_TMPDIR"]
        from tempfile import mkdtemp
        try:
            private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
        except OSError as e:
            if e.errno != portage.exception.PermissionDenied.errno:
                raise
            raise portage.exception.PermissionDenied(global_tmpdir)
        settings["PORTAGE_TMPDIR"] = private_tmpdir
        settings.backup_changes("PORTAGE_TMPDIR")
        try:
            retval = self._execute()
        finally:
            # Always restore the shared tmpdir and discard ours.
            settings["PORTAGE_TMPDIR"] = global_tmpdir
            settings.backup_changes("PORTAGE_TMPDIR")
            shutil.rmtree(private_tmpdir)
        return retval

    def _execute(self):
        settings = self.settings
        pkg = self.pkg
        root_config = pkg.root_config
        portdb = root_config.trees["porttree"].dbapi
        ebuild_path = portdb.findname(pkg.cpv)
        debug = settings.get("PORTAGE_DEBUG") == "1"
        retval = portage.doebuild(ebuild_path, "fetch",
            self.settings["ROOT"], self.settings, debug=debug,
            listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
            mydbapi=portdb, tree="porttree")

        if retval != os.EX_OK:
            msg = "Fetch failed for '%s'" % (pkg.cpv,)
            eerror(msg, phase="unpack", key=pkg.cpv)

        # Flush elog messages recorded during the fetch attempt.
        portage.elog.elog_process(self.pkg.cpv, self.settings)
        return retval
1829 |
|
1830 |
class PollConstants(object):

    """
    Provides POLL* constants that are equivalent to those from the
    select module, for use by PollSelectAdapter.
    """

    # Mirror the select module's constants; on platforms where select
    # lacks one, fall back to a unique power-of-two bit value.
    names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
    _fallback = 1
    for _name in names:
        locals()[_name] = getattr(select, _name, _fallback)
        _fallback *= 2
    del _name, _fallback
1843 |
|
1844 |
class AsynchronousTask(SlotObject):
    """
    Subclasses override _wait() and _poll() so that calls
    to public methods can be wrapped for implementing
    hooks such as exit listener notification.

    Subclasses should call self.wait() to notify exit listeners after
    the task is complete and self.returncode has been set.
    """

    __slots__ = ("background", "cancelled", "returncode") + \
        ("_exit_listeners", "_exit_listener_stack", "_start_listeners")

    def start(self):
        """
        Start an asynchronous task and then return as soon as possible.
        """
        self._start_hook()
        self._start()

    def _start(self):
        raise NotImplementedError(self)

    def isAlive(self):
        return self.returncode is None

    def poll(self):
        # Fix: run _poll() before _wait_hook(). Previously the hook ran
        # first, so a returncode set during this very _poll() call was
        # not seen by the hook and exit listeners missed the poll that
        # first observed task exit.
        self._poll()
        self._wait_hook()
        return self.returncode

    def _poll(self):
        return self.returncode

    def wait(self):
        if self.returncode is None:
            self._wait()
        self._wait_hook()
        return self.returncode

    def _wait(self):
        return self.returncode

    def cancel(self):
        self.cancelled = True
        self.wait()

    def addStartListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._start_listeners is None:
            self._start_listeners = []
        self._start_listeners.append(f)

    def removeStartListener(self, f):
        if self._start_listeners is None:
            return
        self._start_listeners.remove(f)

    def _start_hook(self):
        if self._start_listeners is not None:
            start_listeners = self._start_listeners
            self._start_listeners = None

            for f in start_listeners:
                f(self)

    def addExitListener(self, f):
        """
        The function will be called with one argument, a reference to self.
        """
        if self._exit_listeners is None:
            self._exit_listeners = []
        self._exit_listeners.append(f)

    def removeExitListener(self, f):
        if self._exit_listeners is None:
            if self._exit_listener_stack is not None:
                self._exit_listener_stack.remove(f)
            return
        self._exit_listeners.remove(f)

    def _wait_hook(self):
        """
        Call this method after the task completes, just before returning
        the returncode from wait() or poll(). This hook is
        used to trigger exit listeners when the returncode first
        becomes available.
        """
        if self.returncode is not None and \
            self._exit_listeners is not None:

            # This prevents recursion, in case one of the
            # exit handlers triggers this method again by
            # calling wait(). Use a stack that gives
            # removeExitListener() an opportunity to consume
            # listeners from the stack, before they can get
            # called below. This is necessary because a call
            # to one exit listener may result in a call to
            # removeExitListener() for another listener on
            # the stack. That listener needs to be removed
            # from the stack since it would be inconsistent
            # to call it after it has been passed into
            # removeExitListener().
            self._exit_listener_stack = self._exit_listeners
            self._exit_listeners = None

            self._exit_listener_stack.reverse()
            while self._exit_listener_stack:
                self._exit_listener_stack.pop()(self)
1954 |
|
1955 |
class AbstractPollTask(AsynchronousTask):
    """Base for tasks driven by the scheduler's poll() loop."""

    __slots__ = ("scheduler",) + \
        ("_registered",)

    _bufsize = 4096
    _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
    _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
        _exceptional_events

    def _unregister(self):
        raise NotImplementedError(self)

    def _unregister_if_appropriate(self, event):
        # Error events cancel the task; a plain hangup just finishes it.
        if not self._registered:
            return
        if event & self._exceptional_events:
            self._unregister()
            self.cancel()
        elif event & PollConstants.POLLHUP:
            self._unregister()
            self.wait()
1976 |
|
1977 |
class PipeReader(AbstractPollTask):

    """
    Reads output from one or more files and saves it in memory,
    for retrieval via the getvalue() method. This is driven by
    the scheduler's poll() loop, so it runs entirely within the
    current process.
    """

    __slots__ = ("input_files",) + \
        ("_read_data", "_reg_ids")

    def _start(self):
        self._reg_ids = set()
        self._read_data = []
        for k, f in self.input_files.items():
            # Reads must never block inside the poll loop.
            fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
                fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
            self._reg_ids.add(self.scheduler.register(f.fileno(),
                self._registered_events, self._output_handler))
        self._registered = True

    def isAlive(self):
        return self._registered

    def cancel(self):
        if self.returncode is None:
            self.returncode = 1
        self.cancelled = True
        self.wait()

    def _wait(self):
        if self.returncode is not None:
            return self.returncode
        if self._registered:
            # Drive the scheduler until all registered fds are done.
            self.scheduler.schedule(self._reg_ids)
            self._unregister()
        self.returncode = os.EX_OK
        return self.returncode

    def getvalue(self):
        """Retrieve the entire contents"""
        if sys.hexversion >= 0x3000000:
            return bytes().join(self._read_data)
        return "".join(self._read_data)

    def close(self):
        """Free the memory buffer."""
        self._read_data = None

    def _output_handler(self, fd, event):
        if event & PollConstants.POLLIN:
            # Locate the file object that owns this descriptor.
            for f in self.input_files.values():
                if fd == f.fileno():
                    break
            buf = array.array('B')
            try:
                buf.fromfile(f, self._bufsize)
            except EOFError:
                pass
            if buf:
                self._read_data.append(buf.tostring())
            else:
                # A zero-length read means EOF on this pipe.
                self._unregister()
                self.wait()
        self._unregister_if_appropriate(event)
        return self._registered

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """
        self._registered = False
        if self._reg_ids is not None:
            for reg_id in self._reg_ids:
                self.scheduler.unregister(reg_id)
            self._reg_ids = None
        if self.input_files is not None:
            for f in self.input_files.values():
                f.close()
            self.input_files = None
2068 |
|
2069 |
class CompositeTask(AsynchronousTask):
    """
    A task implemented as a chain of sub-tasks; _current_task holds
    whichever sub-task is active at the moment.
    """

    __slots__ = ("scheduler",) + ("_current_task",)

    def isAlive(self):
        return self._current_task is not None

    def cancel(self):
        self.cancelled = True
        if self._current_task is not None:
            self._current_task.cancel()

    def _poll(self):
        """
        This does a loop calling self._current_task.poll()
        repeatedly as long as the value of self._current_task
        keeps changing. It calls poll() a maximum of one time
        for a given self._current_task instance. This is useful
        since calling poll() on a task can trigger advance to
        the next task could eventually lead to the returncode
        being set in cases when polling only a single task would
        not have the same effect.
        """
        last_polled = None
        while True:
            task = self._current_task
            # don't poll the same task more than once
            if task is None or task is last_polled:
                break
            task.poll()
            last_polled = task
        return self.returncode

    def _wait(self):
        last_waited = None
        while True:
            task = self._current_task
            if task is None:
                # don't wait for the same task more than once
                break
            if task is last_waited:
                # Before the task.wait() method returned, an exit
                # listener should have set self._current_task to either
                # a different task or None. Something is wrong.
                raise AssertionError("self._current_task has not " + \
                    "changed since calling wait", self, task)
            task.wait()
            last_waited = task
        return self.returncode

    def _assert_current(self, task):
        """
        Raises an AssertionError if the given task is not the
        same one as self._current_task. This can be useful
        for detecting bugs.
        """
        if task is not self._current_task:
            raise AssertionError("Unrecognized task: %s" % (task,))

    def _default_exit(self, task):
        """
        Calls _assert_current() on the given task and then sets the
        composite returncode attribute if task.returncode != os.EX_OK.
        If the task failed then self._current_task will be set to None.
        Subclasses can use this as a generic task exit callback.

        @rtype: int
        @returns: The task.returncode attribute.
        """
        self._assert_current(task)
        if task.returncode != os.EX_OK:
            self.returncode = task.returncode
            self._current_task = None
        return task.returncode

    def _final_exit(self, task):
        """
        Assumes that task is the final task of this composite task.
        Calls _default_exit() and sets self.returncode to the task's
        returncode and sets self._current_task to None.
        """
        self._default_exit(task)
        self._current_task = None
        self.returncode = task.returncode
        return self.returncode

    def _default_final_exit(self, task):
        """
        This calls _final_exit() and then wait().

        Subclasses can use this as a generic final task exit callback.

        """
        self._final_exit(task)
        return self.wait()

    def _start_task(self, task, exit_handler):
        """
        Register exit handler for the given task, set it
        as self._current_task, and call task.start().

        Subclasses can use this as a generic way to start
        a task.

        """
        task.addExitListener(exit_handler)
        self._current_task = task
        task.start()
2181 |
|
2182 |
class TaskSequence(CompositeTask):
    """
    A collection of tasks that executes sequentially. Each task
    must have a addExitListener() method that can be used as
    a means to trigger movement from one task to the next.
    """

    __slots__ = ("_task_queue",)

    def __init__(self, **kwargs):
        AsynchronousTask.__init__(self, **kwargs)
        self._task_queue = deque()

    def add(self, task):
        self._task_queue.append(task)

    def _start(self):
        self._start_next_task()

    def cancel(self):
        # Drop the queued tasks so nothing new starts after cancellation.
        self._task_queue.clear()
        CompositeTask.cancel(self)

    def _start_next_task(self):
        self._start_task(self._task_queue.popleft(),
            self._task_exit_handler)

    def _task_exit_handler(self, task):
        if self._default_exit(task) != os.EX_OK:
            # One failure aborts the remainder of the sequence.
            self.wait()
        elif self._task_queue:
            self._start_next_task()
        else:
            self._final_exit(task)
            self.wait()
2217 |
|
2218 |
class SubProcess(AbstractPollTask):
    """Poll-loop task that wraps a forked child process."""

    __slots__ = ("pid",) + \
        ("_files", "_reg_id")

    # A file descriptor is required for the scheduler to monitor changes from
    # inside a poll() loop. When logging is not enabled, create a pipe just to
    # serve this purpose alone.
    _dummy_pipe_fd = 9

    def _poll(self):
        if self.returncode is not None or self.pid is None:
            return self.returncode
        if self._registered:
            # The scheduler is still watching us; nothing to reap yet.
            return self.returncode

        try:
            wait_retval = os.waitpid(self.pid, os.WNOHANG)
        except OSError as e:
            if e.errno != errno.ECHILD:
                raise
            del e
            # Child was already reaped elsewhere; record a failure status.
            wait_retval = (self.pid, 1)

        if wait_retval == (0, 0):
            # Still running.
            return None
        self._set_returncode(wait_retval)
        return self.returncode

    def cancel(self):
        if self.isAlive():
            try:
                os.kill(self.pid, signal.SIGTERM)
            except OSError as e:
                # Ignore "no such process"; it already exited.
                if e.errno != errno.ESRCH:
                    raise
                del e

        self.cancelled = True
        if self.pid is not None:
            self.wait()
        return self.returncode

    def isAlive(self):
        return self.pid is not None and \
            self.returncode is None

    def _wait(self):
        if self.returncode is not None:
            return self.returncode

        if self._registered:
            # Let the scheduler drain our pipe before reaping.
            self.scheduler.schedule(self._reg_id)
            self._unregister()
            if self.returncode is not None:
                return self.returncode

        try:
            wait_retval = os.waitpid(self.pid, 0)
        except OSError as e:
            if e.errno != errno.ECHILD:
                raise
            del e
            self._set_returncode((self.pid, 1))
        else:
            self._set_returncode(wait_retval)

        return self.returncode

    def _unregister(self):
        """
        Unregister from the scheduler and close open files.
        """
        self._registered = False
        if self._reg_id is not None:
            self.scheduler.unregister(self._reg_id)
            self._reg_id = None
        if self._files is not None:
            for f in self._files.values():
                f.close()
            self._files = None

    def _set_returncode(self, wait_retval):
        # Normalize the raw os.waitpid() status word: a nonzero low
        # byte means the child died from a signal, so shift it up to
        # keep the result nonzero; otherwise extract the exit status
        # from the high byte.
        retval = wait_retval[1]
        if retval != os.EX_OK:
            if retval & 0xff:
                retval = (retval & 0xff) << 8
            else:
                retval = retval >> 8
        self.returncode = retval
2317 |
|
2318 |
class SpawnProcess(SubProcess):

	"""
	Constructor keyword args are passed into portage.process.spawn().
	The required "args" keyword argument will be passed as the first
	spawn() argument.
	"""

	# Attribute names forwarded verbatim as spawn() keyword arguments
	# when they are not None.
	_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
		"uid", "gid", "groups", "umask", "logfile",
		"path_lookup", "pre_exec")

	__slots__ = ("args",) + \
		_spawn_kwarg_names

	_file_names = ("log", "process", "stdout")
	_files_dict = slot_dict_class(_file_names, prefix="")

	def _start(self):
		"""
		Set up stdio plumbing (pipe or pty, optional log file), register
		the read end with the scheduler, then spawn the child process.
		"""

		if self.cancelled:
			return

		if self.fd_pipes is None:
			self.fd_pipes = {}
		fd_pipes = self.fd_pipes
		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.itervalues():
			if fd == sys.stdout.fileno():
				sys.stdout.flush()
			if fd == sys.stderr.fileno():
				sys.stderr.flush()

		logfile = self.logfile
		self._files = self._files_dict()
		files = self._files

		# _pipe() may return a plain pipe or a pty pair (subclasses
		# override it); make our read end non-blocking for the poll loop.
		master_fd, slave_fd = self._pipe(fd_pipes)
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		null_input = None
		fd_pipes_orig = fd_pipes.copy()
		if self.background:
			# TODO: Use job control functions like tcsetpgrp() to control
			# access to stdin. Until then, use /dev/null so that any
			# attempts to read from stdin will immediately return EOF
			# instead of blocking indefinitely.
			null_input = open('/dev/null', 'rb')
			fd_pipes[0] = null_input.fileno()
		else:
			fd_pipes[0] = fd_pipes_orig[0]

		files.process = os.fdopen(master_fd, 'rb')
		if logfile is not None:

			# Tee the child's stdout/stderr through our pipe into the
			# log file (and onto the real stdout when not background).
			fd_pipes[1] = slave_fd
			fd_pipes[2] = slave_fd

			files.log = open(logfile, mode='ab')
			portage.util.apply_secpass_permissions(logfile,
				uid=portage.portage_uid, gid=portage.portage_gid,
				mode=0660)

			if not self.background:
				files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')

			output_handler = self._output_handler

		else:

			# Create a dummy pipe so the scheduler can monitor
			# the process from inside a poll() loop.
			fd_pipes[self._dummy_pipe_fd] = slave_fd
			if self.background:
				fd_pipes[1] = slave_fd
				fd_pipes[2] = slave_fd
			output_handler = self._dummy_handler

		kwargs = {}
		for k in self._spawn_kwarg_names:
			v = getattr(self, k)
			if v is not None:
				kwargs[k] = v

		kwargs["fd_pipes"] = fd_pipes
		kwargs["returnpid"] = True
		# "logfile" is handled locally via the tee above, not by spawn().
		kwargs.pop("logfile", None)

		self._reg_id = self.scheduler.register(files.process.fileno(),
			self._registered_events, output_handler)
		self._registered = True

		retval = self._spawn(self.args, **kwargs)

		os.close(slave_fd)
		if null_input is not None:
			null_input.close()

		if isinstance(retval, int):
			# spawn failed
			self._unregister()
			self.returncode = retval
			self.wait()
			return

		self.pid = retval[0]
		# This class reaps the child itself, so remove it from the
		# global list that portage would otherwise clean up.
		portage.process.spawned_pids.remove(self.pid)

	def _pipe(self, fd_pipes):
		"""
		@type fd_pipes: dict
		@param fd_pipes: pipes from which to copy terminal size if desired.
		"""
		return os.pipe()

	def _spawn(self, args, **kwargs):
		"""Spawn the child; subclasses override to use doebuild etc."""
		return portage.process.spawn(args, **kwargs)

	def _output_handler(self, fd, event):
		"""
		Scheduler callback: copy child output to the log file and,
		when not in background mode, to stdout as well.  Returns
		True while the registration should stay active.
		"""

		if event & PollConstants.POLLIN:

			files = self._files
			buf = array.array('B')
			try:
				buf.fromfile(files.process, self._bufsize)
			except EOFError:
				pass

			if buf:
				if not self.background:
					write_successful = False
					failures = 0
					while True:
						try:
							if not write_successful:
								buf.tofile(files.stdout)
								write_successful = True
							files.stdout.flush()
							break
						except IOError, e:
							if e.errno != errno.EAGAIN:
								raise
							del e
							failures += 1
							if failures > 50:
								# Avoid a potentially infinite loop. In
								# most cases, the failure count is zero
								# and it's unlikely to exceed 1.
								raise

							# This means that a subprocess has put an inherited
							# stdio file descriptor (typically stdin) into
							# O_NONBLOCK mode. This is not acceptable (see bug
							# #264435), so revert it. We need to use a loop
							# here since there's a race condition due to
							# parallel processes being able to change the
							# flags on the inherited file descriptor.
							# TODO: When possible, avoid having child processes
							# inherit stdio file descriptors from portage
							# (maybe it can't be avoided with
							# PROPERTIES=interactive).
							fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
								fcntl.fcntl(files.stdout.fileno(),
								fcntl.F_GETFL) ^ os.O_NONBLOCK)

				buf.tofile(files.log)
				files.log.flush()
			else:
				# Empty read means EOF: the child closed its end.
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered

	def _dummy_handler(self, fd, event):
		"""
		This method is mainly interested in detecting EOF, since
		the only purpose of the pipe is to allow the scheduler to
		monitor the process from inside a poll() loop.
		"""

		if event & PollConstants.POLLIN:

			buf = array.array('B')
			try:
				buf.fromfile(self._files.process, self._bufsize)
			except EOFError:
				pass

			if buf:
				# Data is discarded; only EOF matters here.
				pass
			else:
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered
2521 |
|
2522 |
class MiscFunctionsProcess(SpawnProcess):
	"""
	Spawns misc-functions.sh with an existing ebuild environment.
	"""

	__slots__ = ("commands", "phase", "pkg", "settings")

	def _start(self):
		"""Build the misc-functions.sh command line and kick off spawn."""
		settings = self.settings
		# EBUILD_PHASE must not leak into misc-functions.sh.
		settings.pop("EBUILD_PHASE", None)
		portage_bin_path = settings["PORTAGE_BIN_PATH"]
		misc_sh_binary = os.path.join(portage_bin_path,
			os.path.basename(portage.const.MISC_SH_BINARY))

		self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
		self.logfile = settings.get("PORTAGE_LOG_FILE")

		# Remove any stale exit-status file before running.
		portage._doebuild_exit_status_unlink(
			settings.get("EBUILD_EXIT_STATUS_FILE"))

		SpawnProcess._start(self)

	def _spawn(self, args, **kwargs):
		"""Run the command through portage.spawn() with the ebuild env."""
		settings = self.settings
		debug = settings.get("PORTAGE_DEBUG") == "1"
		return portage.spawn(" ".join(args), settings,
			debug=debug, **kwargs)

	def _set_returncode(self, wait_retval):
		# Cross-check the exit status against the status file written
		# by the shell side, logging any mismatch.
		SpawnProcess._set_returncode(self, wait_retval)
		self.returncode = portage._doebuild_exit_status_check_and_log(
			self.settings, self.phase, self.returncode)
2554 |
|
2555 |
class EbuildFetcher(SpawnProcess):
	"""
	Fetch an ebuild's SRC_URI files by spawning `ebuild <file> fetch`
	(or fetchall) as a child process.
	"""

	__slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
		("_build_dir",)

	def _start(self):
		"""Prepare the build dir (unless prefetching) and spawn the fetch."""

		root_config = self.pkg.root_config
		portdb = root_config.trees["porttree"].dbapi
		ebuild_path = portdb.findname(self.pkg.cpv)
		settings = self.config_pool.allocate()
		settings.setcpv(self.pkg)

		# In prefetch mode, logging goes to emerge-fetch.log and the builddir
		# should not be touched since otherwise it could interfere with
		# another instance of the same cpv concurrently being built for a
		# different $ROOT (currently, builds only cooperate with prefetchers
		# that are spawned for the same $ROOT).
		if not self.prefetch:
			self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
			self._build_dir.lock()
			self._build_dir.clean_log()
			portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
			if self.logfile is None:
				self.logfile = settings.get("PORTAGE_LOG_FILE")

		phase = "fetch"
		if self.fetchall:
			phase = "fetchall"

		# If any incremental variables have been overridden
		# via the environment, those values need to be passed
		# along here so that they are correctly considered by
		# the config instance in the subproccess.
		fetch_env = os.environ.copy()

		nocolor = settings.get("NOCOLOR")
		if nocolor is not None:
			fetch_env["NOCOLOR"] = nocolor

		fetch_env["PORTAGE_NICENESS"] = "0"
		if self.prefetch:
			fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"

		ebuild_binary = os.path.join(
			settings["PORTAGE_BIN_PATH"], "ebuild")

		fetch_args = [ebuild_binary, ebuild_path, phase]
		debug = settings.get("PORTAGE_DEBUG") == "1"
		if debug:
			fetch_args.append("--debug")

		self.args = fetch_args
		self.env = fetch_env
		SpawnProcess._start(self)

	def _pipe(self, fd_pipes):
		"""When appropriate, use a pty so that fetcher progress bars,
		like wget has, will work properly."""
		if self.background or not sys.stdout.isatty():
			# When the output only goes to a log file,
			# there's no point in creating a pty.
			return os.pipe()
		stdout_pipe = fd_pipes.get(1)
		got_pty, master_fd, slave_fd = \
			portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
		return (master_fd, slave_fd)

	def _set_returncode(self, wait_retval):
		"""Record the exit status, report failures via elog, then
		release the build dir lock and the borrowed config."""
		SpawnProcess._set_returncode(self, wait_retval)
		# Collect elog messages that might have been
		# created by the pkg_nofetch phase.
		if self._build_dir is not None:
			# Skip elog messages for prefetch, in order to avoid duplicates.
			if not self.prefetch and self.returncode != os.EX_OK:
				elog_out = None
				if self.logfile is not None:
					if self.background:
						elog_out = open(self.logfile, 'a')
				msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
				if self.logfile is not None:
					msg += ", Log file:"
				eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
				if self.logfile is not None:
					eerror(" '%s'" % (self.logfile,),
						phase="unpack", key=self.pkg.cpv, out=elog_out)
				if elog_out is not None:
					elog_out.close()
			if not self.prefetch:
				portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
			features = self._build_dir.settings.features
			if self.returncode == os.EX_OK:
				# Successful fetch: the log carries no useful info.
				self._build_dir.clean_log()
			self._build_dir.unlock()
			self.config_pool.deallocate(self._build_dir.settings)
			self._build_dir = None
2651 |
|
2652 |
class EbuildBuildDir(SlotObject):
	"""
	Manages the lock on a package's PORTAGE_BUILDDIR, including the
	transient lock on the parent category directory that is needed
	while creating or removing it.
	"""

	__slots__ = ("dir_path", "pkg", "settings",
		"locked", "_catdir", "_lock_obj")

	def __init__(self, **kwargs):
		SlotObject.__init__(self, **kwargs)
		self.locked = False

	def lock(self):
		"""
		This raises an AlreadyLocked exception if lock() is called
		while a lock is already held. In order to avoid this, call
		unlock() or check whether the "locked" attribute is True
		or False before calling lock().
		"""
		if self._lock_obj is not None:
			raise self.AlreadyLocked((self._lock_obj,))

		dir_path = self.dir_path
		if dir_path is None:
			# Derive PORTAGE_BUILDDIR by running the doebuild
			# environment setup for this package.
			root_config = self.pkg.root_config
			portdb = root_config.trees["porttree"].dbapi
			ebuild_path = portdb.findname(self.pkg.cpv)
			settings = self.settings
			settings.setcpv(self.pkg)
			debug = settings.get("PORTAGE_DEBUG") == "1"
			use_cache = 1 # always true
			portage.doebuild_environment(ebuild_path, "setup", root_config.root,
				self.settings, debug, use_cache, portdb)
			dir_path = self.settings["PORTAGE_BUILDDIR"]

		catdir = os.path.dirname(dir_path)
		self._catdir = catdir

		portage.util.ensure_dirs(os.path.dirname(catdir),
			gid=portage.portage_gid,
			mode=070, mask=0)
		catdir_lock = None
		try:
			# Hold the category dir lock only while creating the
			# category dir and acquiring the builddir lock.
			catdir_lock = portage.locks.lockdir(catdir)
			portage.util.ensure_dirs(catdir,
				gid=portage.portage_gid,
				mode=070, mask=0)
			self._lock_obj = portage.locks.lockdir(dir_path)
		finally:
			self.locked = self._lock_obj is not None
			if catdir_lock is not None:
				portage.locks.unlockdir(catdir_lock)

	def clean_log(self):
		"""Discard existing log."""
		settings = self.settings

		for x in ('.logid', 'temp/build.log'):
			try:
				os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
			except OSError:
				pass

	def unlock(self):
		"""Release the builddir lock and try to remove the category
		dir if it has become empty.  No-op when not locked."""
		if self._lock_obj is None:
			return

		portage.locks.unlockdir(self._lock_obj)
		self._lock_obj = None
		self.locked = False

		catdir = self._catdir
		catdir_lock = None
		try:
			catdir_lock = portage.locks.lockdir(catdir)
		finally:
			if catdir_lock:
				try:
					os.rmdir(catdir)
				except OSError, e:
					# The category dir may legitimately still be in
					# use by other builds.
					if e.errno not in (errno.ENOENT,
						errno.ENOTEMPTY, errno.EEXIST):
						raise
					del e
				portage.locks.unlockdir(catdir_lock)

	class AlreadyLocked(portage.exception.PortageException):
		pass
2737 |
|
2738 |
class EbuildBuild(CompositeTask):
	"""
	Composite task that drives a full ebuild build: wait for any
	prefetcher, fetch, build, optionally create a binary package, and
	finally (via install()) merge the result.
	"""

	__slots__ = ("args_set", "config_pool", "find_blockers",
		"ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
		"prefetcher", "settings", "world_atom") + \
		("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")

	def _start(self):
		"""Resolve the ebuild path and either wait on a running
		prefetcher or proceed straight to the fetch stage."""

		logger = self.logger
		opts = self.opts
		pkg = self.pkg
		settings = self.settings
		world_atom = self.world_atom
		root_config = pkg.root_config
		tree = "porttree"
		self._tree = tree
		portdb = root_config.trees[tree].dbapi
		settings.setcpv(pkg)
		settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
		ebuild_path = portdb.findname(self.pkg.cpv)
		self._ebuild_path = ebuild_path

		prefetcher = self.prefetcher
		if prefetcher is None:
			pass
		elif not prefetcher.isAlive():
			# Never started or already finished; cancel() reaps it.
			prefetcher.cancel()
		elif prefetcher.poll() is None:

			waiting_msg = "Fetching files " + \
				"in the background. " + \
				"To view fetch progress, run `tail -f " + \
				"/var/log/emerge-fetch.log` in another " + \
				"terminal."
			msg_prefix = colorize("GOOD", " * ")
			from textwrap import wrap
			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
				for line in wrap(waiting_msg, 65))
			if not self.background:
				writemsg(waiting_msg, noiselevel=-1)

			# Resume in _prefetch_exit once the prefetcher is done.
			self._current_task = prefetcher
			prefetcher.addExitListener(self._prefetch_exit)
			return

		self._prefetch_exit(prefetcher)

	def _prefetch_exit(self, prefetcher):
		"""After any prefetch, run the real fetch (synchronously for
		--fetchonly, otherwise as an async EbuildFetcher subtask)."""

		opts = self.opts
		pkg = self.pkg
		settings = self.settings

		if opts.fetchonly:
			fetcher = EbuildFetchonly(
				fetch_all=opts.fetch_all_uri,
				pkg=pkg, pretend=opts.pretend,
				settings=settings)
			retval = fetcher.execute()
			self.returncode = retval
			self.wait()
			return

		fetcher = EbuildFetcher(config_pool=self.config_pool,
			fetchall=opts.fetch_all_uri,
			fetchonly=opts.fetchonly,
			background=self.background,
			pkg=pkg, scheduler=self.scheduler)

		self._start_task(fetcher, self._fetch_exit)

	def _fetch_exit(self, fetcher):
		"""Handle fetch completion: propagate failure, otherwise lock
		the build dir and start the build phases."""
		opts = self.opts
		pkg = self.pkg

		fetch_failed = False
		if opts.fetchonly:
			fetch_failed = self._final_exit(fetcher) != os.EX_OK
		else:
			fetch_failed = self._default_exit(fetcher) != os.EX_OK

		if fetch_failed and fetcher.logfile is not None and \
			os.path.exists(fetcher.logfile):
			self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile

		if not fetch_failed and fetcher.logfile is not None:
			# Fetch was successful, so remove the fetch log.
			try:
				os.unlink(fetcher.logfile)
			except OSError:
				pass

		if fetch_failed or opts.fetchonly:
			self.wait()
			return

		logger = self.logger
		opts = self.opts
		pkg_count = self.pkg_count
		scheduler = self.scheduler
		settings = self.settings
		features = settings.features
		ebuild_path = self._ebuild_path
		system_set = pkg.root_config.sets["system"]

		self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
		self._build_dir.lock()

		# Cleaning is triggered before the setup
		# phase, in portage.doebuild().
		msg = " === (%s of %s) Cleaning (%s::%s)" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
		short_msg = "emerge: (%s of %s) %s Clean" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
		logger.log(msg, short_msg=short_msg)

		#buildsyspkg: Check if we need to _force_ binary package creation
		self._issyspkg = "buildsyspkg" in features and \
			system_set.findAtomForPackage(pkg) and \
			not opts.buildpkg

		if opts.buildpkg or self._issyspkg:

			self._buildpkg = True

			msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
			short_msg = "emerge: (%s of %s) %s Compile" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
			logger.log(msg, short_msg=short_msg)

		else:
			msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
			short_msg = "emerge: (%s of %s) %s Compile" % \
				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
			logger.log(msg, short_msg=short_msg)

		build = EbuildExecuter(background=self.background, pkg=pkg,
			scheduler=scheduler, settings=settings)
		self._start_task(build, self._build_exit)

	def _unlock_builddir(self):
		"""Flush elog messages and release the build dir lock."""
		portage.elog.elog_process(self.pkg.cpv, self.settings)
		self._build_dir.unlock()

	def _build_exit(self, build):
		"""After the build phases: either finish, or go on to create
		a binary package when requested (or forced for system pkgs)."""
		if self._default_exit(build) != os.EX_OK:
			self._unlock_builddir()
			self.wait()
			return

		opts = self.opts
		buildpkg = self._buildpkg

		if not buildpkg:
			self._final_exit(build)
			self.wait()
			return

		if self._issyspkg:
			msg = ">>> This is a system package, " + \
				"let's pack a rescue tarball.\n"

			log_path = self.settings.get("PORTAGE_LOG_FILE")
			if log_path is not None:
				log_file = open(log_path, 'a')
				try:
					log_file.write(msg)
				finally:
					log_file.close()

			if not self.background:
				portage.writemsg_stdout(msg, noiselevel=-1)

		packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
			scheduler=self.scheduler, settings=self.settings)

		self._start_task(packager, self._buildpkg_exit)

	def _buildpkg_exit(self, packager):
		"""
		Released build dir lock when there is a failure or
		when in buildpkgonly mode. Otherwise, the lock will
		be released when merge() is called.
		"""

		if self._default_exit(packager) != os.EX_OK:
			self._unlock_builddir()
			self.wait()
			return

		if self.opts.buildpkgonly:
			# Need to call "clean" phase for buildpkgonly mode
			portage.elog.elog_process(self.pkg.cpv, self.settings)
			phase = "clean"
			clean_phase = EbuildPhase(background=self.background,
				pkg=self.pkg, phase=phase,
				scheduler=self.scheduler, settings=self.settings,
				tree=self._tree)
			self._start_task(clean_phase, self._clean_exit)
			return

		# Continue holding the builddir lock until
		# after the package has been installed.
		self._current_task = None
		self.returncode = packager.returncode
		self.wait()

	def _clean_exit(self, clean_phase):
		if self._final_exit(clean_phase) != os.EX_OK or \
			self.opts.buildpkgonly:
			self._unlock_builddir()
		self.wait()

	def install(self):
		"""
		Install the package and then clean up and release locks.
		Only call this after the build has completed successfully
		and neither fetchonly nor buildpkgonly mode are enabled.
		"""

		find_blockers = self.find_blockers
		ldpath_mtimes = self.ldpath_mtimes
		logger = self.logger
		pkg = self.pkg
		pkg_count = self.pkg_count
		settings = self.settings
		world_atom = self.world_atom
		ebuild_path = self._ebuild_path
		tree = self._tree

		merge = EbuildMerge(find_blockers=self.find_blockers,
			ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
			pkg_count=pkg_count, pkg_path=ebuild_path,
			scheduler=self.scheduler,
			settings=settings, tree=tree, world_atom=world_atom)

		msg = " === (%s of %s) Merging (%s::%s)" % \
			(pkg_count.curval, pkg_count.maxval,
			pkg.cpv, ebuild_path)
		short_msg = "emerge: (%s of %s) %s Merge" % \
			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
		logger.log(msg, short_msg=short_msg)

		try:
			rval = merge.execute()
		finally:
			# Always release the lock held since _fetch_exit().
			self._unlock_builddir()

		return rval
2990 |
|
2991 |
class EbuildExecuter(CompositeTask):
	"""
	Runs the ebuild phases in order (clean, setup, unpack, then the
	src_* phases), delegating setup and unpack scheduling to the
	scheduler so they can be serialized globally.
	"""

	__slots__ = ("pkg", "scheduler", "settings") + ("_tree",)

	# Phases run after unpack; prepare/configure are skipped for
	# EAPI 0/1 (see _unpack_exit).
	_phases = ("prepare", "configure", "compile", "test", "install")

	# Eclasses whose ebuilds fetch into $DISTDIR at unpack time and
	# therefore need serialized unpack scheduling.
	_live_eclasses = frozenset([
		"bzr",
		"cvs",
		"darcs",
		"git",
		"mercurial",
		"subversion"
	])

	def _start(self):
		"""Begin with the clean phase."""
		self._tree = "porttree"
		pkg = self.pkg
		phase = "clean"
		clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
			scheduler=self.scheduler, settings=self.settings, tree=self._tree)
		self._start_task(clean_phase, self._clean_phase_exit)

	def _clean_phase_exit(self, clean_phase):
		"""After clean: prepare build dirs and queue the setup phase
		through the scheduler's serialized setup slot."""

		if self._default_exit(clean_phase) != os.EX_OK:
			self.wait()
			return

		pkg = self.pkg
		scheduler = self.scheduler
		settings = self.settings
		cleanup = 1

		# This initializes PORTAGE_LOG_FILE.
		portage.prepare_build_dirs(pkg.root, settings, cleanup)

		setup_phase = EbuildPhase(background=self.background,
			pkg=pkg, phase="setup", scheduler=scheduler,
			settings=settings, tree=self._tree)

		setup_phase.addExitListener(self._setup_exit)
		self._current_task = setup_phase
		self.scheduler.scheduleSetup(setup_phase)

	def _setup_exit(self, setup_phase):
		"""After setup: run unpack, serialized for live ebuilds."""

		if self._default_exit(setup_phase) != os.EX_OK:
			self.wait()
			return

		unpack_phase = EbuildPhase(background=self.background,
			pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
			settings=self.settings, tree=self._tree)

		if self._live_eclasses.intersection(self.pkg.inherited):
			# Serialize $DISTDIR access for live ebuilds since
			# otherwise they can interfere with eachother.

			unpack_phase.addExitListener(self._unpack_exit)
			self._current_task = unpack_phase
			self.scheduler.scheduleUnpack(unpack_phase)

		else:
			self._start_task(unpack_phase, self._unpack_exit)

	def _unpack_exit(self, unpack_phase):
		"""After unpack: queue the remaining src_* phases as one
		sequential TaskSequence."""

		if self._default_exit(unpack_phase) != os.EX_OK:
			self.wait()
			return

		ebuild_phases = TaskSequence(scheduler=self.scheduler)

		pkg = self.pkg
		phases = self._phases
		eapi = pkg.metadata["EAPI"]
		if eapi in ("0", "1"):
			# skip src_prepare and src_configure
			phases = phases[2:]

		for phase in phases:
			ebuild_phases.add(EbuildPhase(background=self.background,
				pkg=self.pkg, phase=phase, scheduler=self.scheduler,
				settings=self.settings, tree=self._tree))

		self._start_task(ebuild_phases, self._default_final_exit)
3078 |
|
3079 |
class EbuildMetadataPhase(SubProcess):

	"""
	Asynchronous interface for the ebuild "depend" phase which is
	used to extract metadata from the ebuild.
	"""

	__slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
		"ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
		("_raw_metadata",)

	_file_names = ("ebuild",)
	_files_dict = slot_dict_class(_file_names, prefix="")
	# Fixed fd on which ebuild.sh writes the metadata lines.
	_metadata_fd = 9

	def _start(self):
		"""Optionally pre-parse EAPI, wire up the metadata pipe on fd 9,
		and spawn the "depend" phase via portage.doebuild()."""
		settings = self.settings
		settings.setcpv(self.cpv)
		ebuild_path = self.ebuild_path

		eapi = None
		if 'parse-eapi-glep-55' in settings.features:
			# GLEP 55: EAPI may be encoded in the file name suffix.
			pf, eapi = portage._split_ebuild_name_glep55(
				os.path.basename(ebuild_path))
		if eapi is None and \
			'parse-eapi-ebuild-head' in settings.features:
			eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
				mode='r', encoding='utf_8', errors='replace'))

		if eapi is not None:
			if not portage.eapi_is_supported(eapi):
				# Unsupported EAPI: report it via the callback without
				# running bash at all.
				self.metadata_callback(self.cpv, self.ebuild_path,
					self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
				self.returncode = os.EX_OK
				self.wait()
				return

			settings.configdict['pkg']['EAPI'] = eapi

		debug = settings.get("PORTAGE_DEBUG") == "1"
		master_fd = None
		slave_fd = None
		fd_pipes = None
		if self.fd_pipes is not None:
			fd_pipes = self.fd_pipes.copy()
		else:
			fd_pipes = {}

		fd_pipes.setdefault(0, sys.stdin.fileno())
		fd_pipes.setdefault(1, sys.stdout.fileno())
		fd_pipes.setdefault(2, sys.stderr.fileno())

		# flush any pending output
		for fd in fd_pipes.itervalues():
			if fd == sys.stdout.fileno():
				sys.stdout.flush()
			if fd == sys.stderr.fileno():
				sys.stderr.flush()

		fd_pipes_orig = fd_pipes.copy()
		self._files = self._files_dict()
		files = self._files

		master_fd, slave_fd = os.pipe()
		fcntl.fcntl(master_fd, fcntl.F_SETFL,
			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)

		fd_pipes[self._metadata_fd] = slave_fd

		self._raw_metadata = []
		files.ebuild = os.fdopen(master_fd, 'r')
		self._reg_id = self.scheduler.register(files.ebuild.fileno(),
			self._registered_events, self._output_handler)
		self._registered = True

		retval = portage.doebuild(ebuild_path, "depend",
			settings["ROOT"], settings, debug,
			mydbapi=self.portdb, tree="porttree",
			fd_pipes=fd_pipes, returnpid=True)

		os.close(slave_fd)

		if isinstance(retval, int):
			# doebuild failed before spawning
			self._unregister()
			self.returncode = retval
			self.wait()
			return

		self.pid = retval[0]
		# This class reaps the child itself.
		portage.process.spawned_pids.remove(self.pid)

	def _output_handler(self, fd, event):
		"""Accumulate metadata output; an empty read means EOF."""

		if event & PollConstants.POLLIN:
			self._raw_metadata.append(self._files.ebuild.read())
			if not self._raw_metadata[-1]:
				self._unregister()
				self.wait()

		self._unregister_if_appropriate(event)
		return self._registered

	def _set_returncode(self, wait_retval):
		"""On success, pair the output lines with auxdbkeys and hand
		them to metadata_callback; reject mismatched line counts."""
		SubProcess._set_returncode(self, wait_retval)
		if self.returncode == os.EX_OK:
			metadata_lines = "".join(self._raw_metadata).splitlines()
			if len(portage.auxdbkeys) != len(metadata_lines):
				# Don't trust bash's returncode if the
				# number of lines is incorrect.
				self.returncode = 1
			else:
				metadata = izip(portage.auxdbkeys, metadata_lines)
				self.metadata = self.metadata_callback(self.cpv,
					self.ebuild_path, self.repo_path, metadata,
					self.ebuild_mtime)
3195 |
|
3196 |
class EbuildProcess(SpawnProcess):
	"""
	Runs a single ebuild phase by delegating to portage.doebuild(),
	using a pty when possible so build output renders correctly.
	"""

	__slots__ = ("phase", "pkg", "settings", "tree")

	def _start(self):
		# Don't open the log file during the clean phase since the
		# open file can result in an nfs lock on $T/build.log which
		# prevents the clean phase from removing $T.
		if self.phase not in ("clean", "cleanrm"):
			self.logfile = self.settings.get("PORTAGE_LOG_FILE")
		SpawnProcess._start(self)

	def _pipe(self, fd_pipes):
		"""Prefer a pty (falling back to a pipe) for phase output."""
		stdout_pipe = fd_pipes.get(1)
		got_pty, master_fd, slave_fd = \
			portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
		return (master_fd, slave_fd)

	def _spawn(self, args, **kwargs):
		"""Spawn the phase through portage.doebuild() instead of a
		plain command; args is unused here."""

		root_config = self.pkg.root_config
		tree = self.tree
		mydbapi = root_config.trees[tree].dbapi
		settings = self.settings
		ebuild_path = settings["EBUILD"]
		debug = settings.get("PORTAGE_DEBUG") == "1"

		rval = portage.doebuild(ebuild_path, self.phase,
			root_config.root, settings, debug,
			mydbapi=mydbapi, tree=tree, **kwargs)

		return rval

	def _set_returncode(self, wait_retval):
		"""Validate the exit status against the shell-side status file,
		honor FEATURES=test-fail-continue, and fix up permissions."""
		SpawnProcess._set_returncode(self, wait_retval)

		if self.phase not in ("clean", "cleanrm"):
			self.returncode = portage._doebuild_exit_status_check_and_log(
				self.settings, self.phase, self.returncode)

		if self.phase == "test" and self.returncode != os.EX_OK and \
			"test-fail-continue" in self.settings.features:
			# Test failures are tolerated with this feature enabled.
			self.returncode = os.EX_OK

		portage._post_phase_userpriv_perms(self.settings)
3241 |
|
3242 |
class EbuildPhase(CompositeTask): |
3243 |
|
3244 |
__slots__ = ("background", "pkg", "phase", |
3245 |
"scheduler", "settings", "tree") |
3246 |
|
3247 |
_post_phase_cmds = portage._post_phase_cmds |
3248 |
|
3249 |
def _start(self): |
3250 |
|
3251 |
ebuild_process = EbuildProcess(background=self.background, |
3252 |
pkg=self.pkg, phase=self.phase, scheduler=self.scheduler, |
3253 |
settings=self.settings, tree=self.tree) |
3254 |
|
3255 |
self._start_task(ebuild_process, self._ebuild_exit) |
3256 |
|
3257 |
def _ebuild_exit(self, ebuild_process): |
3258 |
|
3259 |
if self.phase == "install": |
3260 |
out = None |
3261 |
log_path = self.settings.get("PORTAGE_LOG_FILE") |
3262 |
log_file = None |
3263 |
if self.background and log_path is not None: |
3264 |
log_file = open(log_path, 'a') |
3265 |
out = log_file |
3266 |
try: |
3267 |
portage._check_build_log(self.settings, out=out) |
3268 |
finally: |
3269 |
if log_file is not None: |
3270 |
log_file.close() |
3271 |
|
3272 |
if self._default_exit(ebuild_process) != os.EX_OK: |
3273 |
self.wait() |
3274 |
return |
3275 |
|
3276 |
settings = self.settings |
3277 |
|
3278 |
if self.phase == "install": |
3279 |
portage._post_src_install_chost_fix(settings) |
3280 |
portage._post_src_install_uid_fix(settings) |
3281 |
|
3282 |
post_phase_cmds = self._post_phase_cmds.get(self.phase) |
3283 |
if post_phase_cmds is not None: |
3284 |
post_phase = MiscFunctionsProcess(background=self.background, |
3285 |
commands=post_phase_cmds, phase=self.phase, pkg=self.pkg, |
3286 |
scheduler=self.scheduler, settings=settings) |
3287 |
self._start_task(post_phase, self._post_phase_exit) |
3288 |
return |
3289 |
|
3290 |
self.returncode = ebuild_process.returncode |
3291 |
self._current_task = None |
3292 |
self.wait() |
3293 |
|
3294 |
def _post_phase_exit(self, post_phase): |
3295 |
if self._final_exit(post_phase) != os.EX_OK: |
3296 |
writemsg("!!! post %s failed; exiting.\n" % self.phase, |
3297 |
noiselevel=-1) |
3298 |
self._current_task = None |
3299 |
self.wait() |
3300 |
return |
3301 |
|
3302 |
class EbuildBinpkg(EbuildProcess): |
3303 |
""" |
3304 |
This assumes that src_install() has successfully completed. |
3305 |
""" |
3306 |
__slots__ = ("_binpkg_tmpfile",) |
3307 |
|
3308 |
def _start(self): |
3309 |
self.phase = "package" |
3310 |
self.tree = "porttree" |
3311 |
pkg = self.pkg |
3312 |
root_config = pkg.root_config |
3313 |
portdb = root_config.trees["porttree"].dbapi |
3314 |
bintree = root_config.trees["bintree"] |
3315 |
ebuild_path = portdb.findname(self.pkg.cpv) |
3316 |
settings = self.settings |
3317 |
debug = settings.get("PORTAGE_DEBUG") == "1" |
3318 |
|
3319 |
bintree.prevent_collision(pkg.cpv) |
3320 |
binpkg_tmpfile = os.path.join(bintree.pkgdir, |
3321 |
pkg.cpv + ".tbz2." + str(os.getpid())) |
3322 |
self._binpkg_tmpfile = binpkg_tmpfile |
3323 |
settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile |
3324 |
settings.backup_changes("PORTAGE_BINPKG_TMPFILE") |
3325 |
|
3326 |
try: |
3327 |
EbuildProcess._start(self) |
3328 |
finally: |
3329 |
settings.pop("PORTAGE_BINPKG_TMPFILE", None) |
3330 |
|
3331 |
def _set_returncode(self, wait_retval): |
3332 |
EbuildProcess._set_returncode(self, wait_retval) |
3333 |
|
3334 |
pkg = self.pkg |
3335 |
bintree = pkg.root_config.trees["bintree"] |
3336 |
binpkg_tmpfile = self._binpkg_tmpfile |
3337 |
if self.returncode == os.EX_OK: |
3338 |
bintree.inject(pkg.cpv, filename=binpkg_tmpfile) |
3339 |
|
3340 |
class EbuildMerge(SlotObject): |
3341 |
|
3342 |
__slots__ = ("find_blockers", "logger", "ldpath_mtimes", |
3343 |
"pkg", "pkg_count", "pkg_path", "pretend", |
3344 |
"scheduler", "settings", "tree", "world_atom") |
3345 |
|
3346 |
def execute(self): |
3347 |
root_config = self.pkg.root_config |
3348 |
settings = self.settings |
3349 |
retval = portage.merge(settings["CATEGORY"], |
3350 |
settings["PF"], settings["D"], |
3351 |
os.path.join(settings["PORTAGE_BUILDDIR"], |
3352 |
"build-info"), root_config.root, settings, |
3353 |
myebuild=settings["EBUILD"], |
3354 |
mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi, |
3355 |
vartree=root_config.trees["vartree"], |
3356 |
prev_mtimes=self.ldpath_mtimes, |
3357 |
scheduler=self.scheduler, |
3358 |
blockers=self.find_blockers) |
3359 |
|
3360 |
if retval == os.EX_OK: |
3361 |
self.world_atom(self.pkg) |
3362 |
self._log_success() |
3363 |
|
3364 |
return retval |
3365 |
|
3366 |
def _log_success(self): |
3367 |
pkg = self.pkg |
3368 |
pkg_count = self.pkg_count |
3369 |
pkg_path = self.pkg_path |
3370 |
logger = self.logger |
3371 |
if "noclean" not in self.settings.features: |
3372 |
short_msg = "emerge: (%s of %s) %s Clean Post" % \ |
3373 |
(pkg_count.curval, pkg_count.maxval, pkg.cpv) |
3374 |
logger.log((" === (%s of %s) " + \ |
3375 |
"Post-Build Cleaning (%s::%s)") % \ |
3376 |
(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path), |
3377 |
short_msg=short_msg) |
3378 |
logger.log(" ::: completed emerge (%s of %s) %s to %s" % \ |
3379 |
(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root)) |
3380 |
|
3381 |
class PackageUninstall(AsynchronousTask): |
1442 |
class PackageUninstall(AsynchronousTask): |
3382 |
|
1443 |
|
3383 |
__slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings") |
1444 |
__slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings") |
Lines 3414-3947
Link Here
|
3414 |
finally: |
1475 |
finally: |
3415 |
f.close() |
1476 |
f.close() |
3416 |
|
1477 |
|
3417 |
class Binpkg(CompositeTask): |
|
|
3418 |
|
3419 |
__slots__ = ("find_blockers", |
3420 |
"ldpath_mtimes", "logger", "opts", |
3421 |
"pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \ |
3422 |
("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg", |
3423 |
"_image_dir", "_infloc", "_pkg_path", "_tree", "_verify") |
3424 |
|
3425 |
def _writemsg_level(self, msg, level=0, noiselevel=0): |
3426 |
|
3427 |
if not self.background: |
3428 |
portage.util.writemsg_level(msg, |
3429 |
level=level, noiselevel=noiselevel) |
3430 |
|
3431 |
log_path = self.settings.get("PORTAGE_LOG_FILE") |
3432 |
if log_path is not None: |
3433 |
f = open(log_path, 'a') |
3434 |
try: |
3435 |
f.write(msg) |
3436 |
finally: |
3437 |
f.close() |
3438 |
|
3439 |
def _start(self): |
3440 |
|
3441 |
pkg = self.pkg |
3442 |
settings = self.settings |
3443 |
settings.setcpv(pkg) |
3444 |
self._tree = "bintree" |
3445 |
self._bintree = self.pkg.root_config.trees[self._tree] |
3446 |
self._verify = not self.opts.pretend |
3447 |
|
3448 |
dir_path = os.path.join(settings["PORTAGE_TMPDIR"], |
3449 |
"portage", pkg.category, pkg.pf) |
3450 |
self._build_dir = EbuildBuildDir(dir_path=dir_path, |
3451 |
pkg=pkg, settings=settings) |
3452 |
self._image_dir = os.path.join(dir_path, "image") |
3453 |
self._infloc = os.path.join(dir_path, "build-info") |
3454 |
self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild") |
3455 |
settings["EBUILD"] = self._ebuild_path |
3456 |
debug = settings.get("PORTAGE_DEBUG") == "1" |
3457 |
portage.doebuild_environment(self._ebuild_path, "setup", |
3458 |
settings["ROOT"], settings, debug, 1, self._bintree.dbapi) |
3459 |
settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name |
3460 |
|
3461 |
# The prefetcher has already completed or it |
3462 |
# could be running now. If it's running now, |
3463 |
# wait for it to complete since it holds |
3464 |
# a lock on the file being fetched. The |
3465 |
# portage.locks functions are only designed |
3466 |
# to work between separate processes. Since |
3467 |
# the lock is held by the current process, |
3468 |
# use the scheduler and fetcher methods to |
3469 |
# synchronize with the fetcher. |
3470 |
prefetcher = self.prefetcher |
3471 |
if prefetcher is None: |
3472 |
pass |
3473 |
elif not prefetcher.isAlive(): |
3474 |
prefetcher.cancel() |
3475 |
elif prefetcher.poll() is None: |
3476 |
|
3477 |
waiting_msg = ("Fetching '%s' " + \ |
3478 |
"in the background. " + \ |
3479 |
"To view fetch progress, run `tail -f " + \ |
3480 |
"/var/log/emerge-fetch.log` in another " + \ |
3481 |
"terminal.") % prefetcher.pkg_path |
3482 |
msg_prefix = colorize("GOOD", " * ") |
3483 |
from textwrap import wrap |
3484 |
waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \ |
3485 |
for line in wrap(waiting_msg, 65)) |
3486 |
if not self.background: |
3487 |
writemsg(waiting_msg, noiselevel=-1) |
3488 |
|
3489 |
self._current_task = prefetcher |
3490 |
prefetcher.addExitListener(self._prefetch_exit) |
3491 |
return |
3492 |
|
3493 |
self._prefetch_exit(prefetcher) |
3494 |
|
3495 |
def _prefetch_exit(self, prefetcher): |
3496 |
|
3497 |
pkg = self.pkg |
3498 |
pkg_count = self.pkg_count |
3499 |
if not (self.opts.pretend or self.opts.fetchonly): |
3500 |
self._build_dir.lock() |
3501 |
# If necessary, discard old log so that we don't |
3502 |
# append to it. |
3503 |
self._build_dir.clean_log() |
3504 |
# Initialze PORTAGE_LOG_FILE. |
3505 |
portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1) |
3506 |
fetcher = BinpkgFetcher(background=self.background, |
3507 |
logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg, |
3508 |
pretend=self.opts.pretend, scheduler=self.scheduler) |
3509 |
pkg_path = fetcher.pkg_path |
3510 |
self._pkg_path = pkg_path |
3511 |
|
3512 |
if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv): |
3513 |
|
3514 |
msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\ |
3515 |
(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path) |
3516 |
short_msg = "emerge: (%s of %s) %s Fetch" % \ |
3517 |
(pkg_count.curval, pkg_count.maxval, pkg.cpv) |
3518 |
self.logger.log(msg, short_msg=short_msg) |
3519 |
self._start_task(fetcher, self._fetcher_exit) |
3520 |
return |
3521 |
|
3522 |
self._fetcher_exit(fetcher) |
3523 |
|
3524 |
def _fetcher_exit(self, fetcher): |
3525 |
|
3526 |
# The fetcher only has a returncode when |
3527 |
# --getbinpkg is enabled. |
3528 |
if fetcher.returncode is not None: |
3529 |
self._fetched_pkg = True |
3530 |
if self._default_exit(fetcher) != os.EX_OK: |
3531 |
self._unlock_builddir() |
3532 |
self.wait() |
3533 |
return |
3534 |
|
3535 |
if self.opts.pretend: |
3536 |
self._current_task = None |
3537 |
self.returncode = os.EX_OK |
3538 |
self.wait() |
3539 |
return |
3540 |
|
3541 |
verifier = None |
3542 |
if self._verify: |
3543 |
logfile = None |
3544 |
if self.background: |
3545 |
logfile = self.settings.get("PORTAGE_LOG_FILE") |
3546 |
verifier = BinpkgVerifier(background=self.background, |
3547 |
logfile=logfile, pkg=self.pkg) |
3548 |
self._start_task(verifier, self._verifier_exit) |
3549 |
return |
3550 |
|
3551 |
self._verifier_exit(verifier) |
3552 |
|
3553 |
def _verifier_exit(self, verifier): |
3554 |
if verifier is not None and \ |
3555 |
self._default_exit(verifier) != os.EX_OK: |
3556 |
self._unlock_builddir() |
3557 |
self.wait() |
3558 |
return |
3559 |
|
3560 |
logger = self.logger |
3561 |
pkg = self.pkg |
3562 |
pkg_count = self.pkg_count |
3563 |
pkg_path = self._pkg_path |
3564 |
|
3565 |
if self._fetched_pkg: |
3566 |
self._bintree.inject(pkg.cpv, filename=pkg_path) |
3567 |
|
3568 |
if self.opts.fetchonly: |
3569 |
self._current_task = None |
3570 |
self.returncode = os.EX_OK |
3571 |
self.wait() |
3572 |
return |
3573 |
|
3574 |
msg = " === (%s of %s) Merging Binary (%s::%s)" % \ |
3575 |
(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path) |
3576 |
short_msg = "emerge: (%s of %s) %s Merge Binary" % \ |
3577 |
(pkg_count.curval, pkg_count.maxval, pkg.cpv) |
3578 |
logger.log(msg, short_msg=short_msg) |
3579 |
|
3580 |
phase = "clean" |
3581 |
settings = self.settings |
3582 |
ebuild_phase = EbuildPhase(background=self.background, |
3583 |
pkg=pkg, phase=phase, scheduler=self.scheduler, |
3584 |
settings=settings, tree=self._tree) |
3585 |
|
3586 |
self._start_task(ebuild_phase, self._clean_exit) |
3587 |
|
3588 |
def _clean_exit(self, clean_phase): |
3589 |
if self._default_exit(clean_phase) != os.EX_OK: |
3590 |
self._unlock_builddir() |
3591 |
self.wait() |
3592 |
return |
3593 |
|
3594 |
dir_path = self._build_dir.dir_path |
3595 |
|
3596 |
infloc = self._infloc |
3597 |
pkg = self.pkg |
3598 |
pkg_path = self._pkg_path |
3599 |
|
3600 |
dir_mode = 0755 |
3601 |
for mydir in (dir_path, self._image_dir, infloc): |
3602 |
portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid, |
3603 |
gid=portage.data.portage_gid, mode=dir_mode) |
3604 |
|
3605 |
# This initializes PORTAGE_LOG_FILE. |
3606 |
portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1) |
3607 |
self._writemsg_level(">>> Extracting info\n") |
3608 |
|
3609 |
pkg_xpak = portage.xpak.tbz2(self._pkg_path) |
3610 |
check_missing_metadata = ("CATEGORY", "PF") |
3611 |
missing_metadata = set() |
3612 |
for k in check_missing_metadata: |
3613 |
v = pkg_xpak.getfile(k) |
3614 |
if not v: |
3615 |
missing_metadata.add(k) |
3616 |
|
3617 |
pkg_xpak.unpackinfo(infloc) |
3618 |
for k in missing_metadata: |
3619 |
if k == "CATEGORY": |
3620 |
v = pkg.category |
3621 |
elif k == "PF": |
3622 |
v = pkg.pf |
3623 |
else: |
3624 |
continue |
3625 |
|
3626 |
f = open(os.path.join(infloc, k), 'wb') |
3627 |
try: |
3628 |
f.write(v + "\n") |
3629 |
finally: |
3630 |
f.close() |
3631 |
|
3632 |
# Store the md5sum in the vdb. |
3633 |
f = open(os.path.join(infloc, "BINPKGMD5"), "w") |
3634 |
try: |
3635 |
f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n") |
3636 |
finally: |
3637 |
f.close() |
3638 |
|
3639 |
# This gives bashrc users an opportunity to do various things |
3640 |
# such as remove binary packages after they're installed. |
3641 |
settings = self.settings |
3642 |
settings.setcpv(self.pkg) |
3643 |
settings["PORTAGE_BINPKG_FILE"] = pkg_path |
3644 |
settings.backup_changes("PORTAGE_BINPKG_FILE") |
3645 |
|
3646 |
phase = "setup" |
3647 |
setup_phase = EbuildPhase(background=self.background, |
3648 |
pkg=self.pkg, phase=phase, scheduler=self.scheduler, |
3649 |
settings=settings, tree=self._tree) |
3650 |
|
3651 |
setup_phase.addExitListener(self._setup_exit) |
3652 |
self._current_task = setup_phase |
3653 |
self.scheduler.scheduleSetup(setup_phase) |
3654 |
|
3655 |
def _setup_exit(self, setup_phase): |
3656 |
if self._default_exit(setup_phase) != os.EX_OK: |
3657 |
self._unlock_builddir() |
3658 |
self.wait() |
3659 |
return |
3660 |
|
3661 |
extractor = BinpkgExtractorAsync(background=self.background, |
3662 |
image_dir=self._image_dir, |
3663 |
pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler) |
3664 |
self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv) |
3665 |
self._start_task(extractor, self._extractor_exit) |
3666 |
|
3667 |
def _extractor_exit(self, extractor): |
3668 |
if self._final_exit(extractor) != os.EX_OK: |
3669 |
self._unlock_builddir() |
3670 |
writemsg("!!! Error Extracting '%s'\n" % self._pkg_path, |
3671 |
noiselevel=-1) |
3672 |
self.wait() |
3673 |
|
3674 |
def _unlock_builddir(self): |
3675 |
if self.opts.pretend or self.opts.fetchonly: |
3676 |
return |
3677 |
portage.elog.elog_process(self.pkg.cpv, self.settings) |
3678 |
self._build_dir.unlock() |
3679 |
|
3680 |
def install(self): |
3681 |
|
3682 |
# This gives bashrc users an opportunity to do various things |
3683 |
# such as remove binary packages after they're installed. |
3684 |
settings = self.settings |
3685 |
settings["PORTAGE_BINPKG_FILE"] = self._pkg_path |
3686 |
settings.backup_changes("PORTAGE_BINPKG_FILE") |
3687 |
|
3688 |
merge = EbuildMerge(find_blockers=self.find_blockers, |
3689 |
ldpath_mtimes=self.ldpath_mtimes, logger=self.logger, |
3690 |
pkg=self.pkg, pkg_count=self.pkg_count, |
3691 |
pkg_path=self._pkg_path, scheduler=self.scheduler, |
3692 |
settings=settings, tree=self._tree, world_atom=self.world_atom) |
3693 |
|
3694 |
try: |
3695 |
retval = merge.execute() |
3696 |
finally: |
3697 |
settings.pop("PORTAGE_BINPKG_FILE", None) |
3698 |
self._unlock_builddir() |
3699 |
return retval |
3700 |
|
3701 |
class BinpkgFetcher(SpawnProcess): |
3702 |
|
3703 |
__slots__ = ("pkg", "pretend", |
3704 |
"locked", "pkg_path", "_lock_obj") |
3705 |
|
3706 |
def __init__(self, **kwargs): |
3707 |
SpawnProcess.__init__(self, **kwargs) |
3708 |
pkg = self.pkg |
3709 |
self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv) |
3710 |
|
3711 |
def _start(self): |
3712 |
|
3713 |
if self.cancelled: |
3714 |
return |
3715 |
|
3716 |
pkg = self.pkg |
3717 |
pretend = self.pretend |
3718 |
bintree = pkg.root_config.trees["bintree"] |
3719 |
settings = bintree.settings |
3720 |
use_locks = "distlocks" in settings.features |
3721 |
pkg_path = self.pkg_path |
3722 |
|
3723 |
if not pretend: |
3724 |
portage.util.ensure_dirs(os.path.dirname(pkg_path)) |
3725 |
if use_locks: |
3726 |
self.lock() |
3727 |
exists = os.path.exists(pkg_path) |
3728 |
resume = exists and os.path.basename(pkg_path) in bintree.invalids |
3729 |
if not (pretend or resume): |
3730 |
# Remove existing file or broken symlink. |
3731 |
try: |
3732 |
os.unlink(pkg_path) |
3733 |
except OSError: |
3734 |
pass |
3735 |
|
3736 |
# urljoin doesn't work correctly with |
3737 |
# unrecognized protocols like sftp |
3738 |
if bintree._remote_has_index: |
3739 |
rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH") |
3740 |
if not rel_uri: |
3741 |
rel_uri = pkg.cpv + ".tbz2" |
3742 |
uri = bintree._remote_base_uri.rstrip("/") + \ |
3743 |
"/" + rel_uri.lstrip("/") |
3744 |
else: |
3745 |
uri = settings["PORTAGE_BINHOST"].rstrip("/") + \ |
3746 |
"/" + pkg.pf + ".tbz2" |
3747 |
|
3748 |
if pretend: |
3749 |
portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1) |
3750 |
self.returncode = os.EX_OK |
3751 |
self.wait() |
3752 |
return |
3753 |
|
3754 |
protocol = urlparse.urlparse(uri)[0] |
3755 |
fcmd_prefix = "FETCHCOMMAND" |
3756 |
if resume: |
3757 |
fcmd_prefix = "RESUMECOMMAND" |
3758 |
fcmd = settings.get(fcmd_prefix + "_" + protocol.upper()) |
3759 |
if not fcmd: |
3760 |
fcmd = settings.get(fcmd_prefix) |
3761 |
|
3762 |
fcmd_vars = { |
3763 |
"DISTDIR" : os.path.dirname(pkg_path), |
3764 |
"URI" : uri, |
3765 |
"FILE" : os.path.basename(pkg_path) |
3766 |
} |
3767 |
|
3768 |
fetch_env = dict(settings.iteritems()) |
3769 |
fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \ |
3770 |
for x in shlex.split(fcmd)] |
3771 |
|
3772 |
if self.fd_pipes is None: |
3773 |
self.fd_pipes = {} |
3774 |
fd_pipes = self.fd_pipes |
3775 |
|
3776 |
# Redirect all output to stdout since some fetchers like |
3777 |
# wget pollute stderr (if portage detects a problem then it |
3778 |
# can send it's own message to stderr). |
3779 |
fd_pipes.setdefault(0, sys.stdin.fileno()) |
3780 |
fd_pipes.setdefault(1, sys.stdout.fileno()) |
3781 |
fd_pipes.setdefault(2, sys.stdout.fileno()) |
3782 |
|
3783 |
self.args = fetch_args |
3784 |
self.env = fetch_env |
3785 |
SpawnProcess._start(self) |
3786 |
|
3787 |
def _set_returncode(self, wait_retval): |
3788 |
SpawnProcess._set_returncode(self, wait_retval) |
3789 |
if self.returncode == os.EX_OK: |
3790 |
# If possible, update the mtime to match the remote package if |
3791 |
# the fetcher didn't already do it automatically. |
3792 |
bintree = self.pkg.root_config.trees["bintree"] |
3793 |
if bintree._remote_has_index: |
3794 |
remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME") |
3795 |
if remote_mtime is not None: |
3796 |
try: |
3797 |
remote_mtime = long(remote_mtime) |
3798 |
except ValueError: |
3799 |
pass |
3800 |
else: |
3801 |
try: |
3802 |
local_mtime = long(os.stat(self.pkg_path).st_mtime) |
3803 |
except OSError: |
3804 |
pass |
3805 |
else: |
3806 |
if remote_mtime != local_mtime: |
3807 |
try: |
3808 |
os.utime(self.pkg_path, |
3809 |
(remote_mtime, remote_mtime)) |
3810 |
except OSError: |
3811 |
pass |
3812 |
|
3813 |
if self.locked: |
3814 |
self.unlock() |
3815 |
|
3816 |
def lock(self): |
3817 |
""" |
3818 |
This raises an AlreadyLocked exception if lock() is called |
3819 |
while a lock is already held. In order to avoid this, call |
3820 |
unlock() or check whether the "locked" attribute is True |
3821 |
or False before calling lock(). |
3822 |
""" |
3823 |
if self._lock_obj is not None: |
3824 |
raise self.AlreadyLocked((self._lock_obj,)) |
3825 |
|
3826 |
self._lock_obj = portage.locks.lockfile( |
3827 |
self.pkg_path, wantnewlockfile=1) |
3828 |
self.locked = True |
3829 |
|
3830 |
class AlreadyLocked(portage.exception.PortageException): |
3831 |
pass |
3832 |
|
3833 |
def unlock(self): |
3834 |
if self._lock_obj is None: |
3835 |
return |
3836 |
portage.locks.unlockfile(self._lock_obj) |
3837 |
self._lock_obj = None |
3838 |
self.locked = False |
3839 |
|
3840 |
class BinpkgVerifier(AsynchronousTask): |
3841 |
__slots__ = ("logfile", "pkg",) |
3842 |
|
3843 |
def _start(self): |
3844 |
""" |
3845 |
Note: Unlike a normal AsynchronousTask.start() method, |
3846 |
this one does all work is synchronously. The returncode |
3847 |
attribute will be set before it returns. |
3848 |
""" |
3849 |
|
3850 |
pkg = self.pkg |
3851 |
root_config = pkg.root_config |
3852 |
bintree = root_config.trees["bintree"] |
3853 |
rval = os.EX_OK |
3854 |
stdout_orig = sys.stdout |
3855 |
stderr_orig = sys.stderr |
3856 |
log_file = None |
3857 |
if self.background and self.logfile is not None: |
3858 |
log_file = open(self.logfile, 'a') |
3859 |
try: |
3860 |
if log_file is not None: |
3861 |
sys.stdout = log_file |
3862 |
sys.stderr = log_file |
3863 |
try: |
3864 |
bintree.digestCheck(pkg) |
3865 |
except portage.exception.FileNotFound: |
3866 |
writemsg("!!! Fetching Binary failed " + \ |
3867 |
"for '%s'\n" % pkg.cpv, noiselevel=-1) |
3868 |
rval = 1 |
3869 |
except portage.exception.DigestException, e: |
3870 |
writemsg("\n!!! Digest verification failed:\n", |
3871 |
noiselevel=-1) |
3872 |
writemsg("!!! %s\n" % e.value[0], |
3873 |
noiselevel=-1) |
3874 |
writemsg("!!! Reason: %s\n" % e.value[1], |
3875 |
noiselevel=-1) |
3876 |
writemsg("!!! Got: %s\n" % e.value[2], |
3877 |
noiselevel=-1) |
3878 |
writemsg("!!! Expected: %s\n" % e.value[3], |
3879 |
noiselevel=-1) |
3880 |
rval = 1 |
3881 |
if rval != os.EX_OK: |
3882 |
pkg_path = bintree.getname(pkg.cpv) |
3883 |
head, tail = os.path.split(pkg_path) |
3884 |
temp_filename = portage._checksum_failure_temp_file(head, tail) |
3885 |
writemsg("File renamed to '%s'\n" % (temp_filename,), |
3886 |
noiselevel=-1) |
3887 |
finally: |
3888 |
sys.stdout = stdout_orig |
3889 |
sys.stderr = stderr_orig |
3890 |
if log_file is not None: |
3891 |
log_file.close() |
3892 |
|
3893 |
self.returncode = rval |
3894 |
self.wait() |
3895 |
|
3896 |
class BinpkgPrefetcher(CompositeTask): |
3897 |
|
3898 |
__slots__ = ("pkg",) + \ |
3899 |
("pkg_path", "_bintree",) |
3900 |
|
3901 |
def _start(self): |
3902 |
self._bintree = self.pkg.root_config.trees["bintree"] |
3903 |
fetcher = BinpkgFetcher(background=self.background, |
3904 |
logfile=self.scheduler.fetch.log_file, pkg=self.pkg, |
3905 |
scheduler=self.scheduler) |
3906 |
self.pkg_path = fetcher.pkg_path |
3907 |
self._start_task(fetcher, self._fetcher_exit) |
3908 |
|
3909 |
def _fetcher_exit(self, fetcher): |
3910 |
|
3911 |
if self._default_exit(fetcher) != os.EX_OK: |
3912 |
self.wait() |
3913 |
return |
3914 |
|
3915 |
verifier = BinpkgVerifier(background=self.background, |
3916 |
logfile=self.scheduler.fetch.log_file, pkg=self.pkg) |
3917 |
self._start_task(verifier, self._verifier_exit) |
3918 |
|
3919 |
def _verifier_exit(self, verifier): |
3920 |
if self._default_exit(verifier) != os.EX_OK: |
3921 |
self.wait() |
3922 |
return |
3923 |
|
3924 |
self._bintree.inject(self.pkg.cpv, filename=self.pkg_path) |
3925 |
|
3926 |
self._current_task = None |
3927 |
self.returncode = os.EX_OK |
3928 |
self.wait() |
3929 |
|
3930 |
class BinpkgExtractorAsync(SpawnProcess): |
3931 |
|
3932 |
__slots__ = ("image_dir", "pkg", "pkg_path") |
3933 |
|
3934 |
_shell_binary = portage.const.BASH_BINARY |
3935 |
|
3936 |
def _start(self): |
3937 |
self.args = [self._shell_binary, "-c", |
3938 |
"bzip2 -dqc -- %s | tar -xp -C %s -f -" % \ |
3939 |
(portage._shell_quote(self.pkg_path), |
3940 |
portage._shell_quote(self.image_dir))] |
3941 |
|
3942 |
self.env = self.pkg.root_config.settings.environ() |
3943 |
SpawnProcess._start(self) |
3944 |
|
3945 |
class MergeListItem(CompositeTask): |
1478 |
class MergeListItem(CompositeTask): |
3946 |
|
1479 |
|
3947 |
""" |
1480 |
""" |
Lines 4079-4325
Link Here
|
4079 |
retval = self._install_task.install() |
1612 |
retval = self._install_task.install() |
4080 |
return retval |
1613 |
return retval |
4081 |
|
1614 |
|
4082 |
class PackageMerge(AsynchronousTask): |
|
|
4083 |
""" |
4084 |
TODO: Implement asynchronous merge so that the scheduler can |
4085 |
run while a merge is executing. |
4086 |
""" |
4087 |
|
4088 |
__slots__ = ("merge",) |
4089 |
|
4090 |
def _start(self): |
4091 |
|
4092 |
pkg = self.merge.pkg |
4093 |
pkg_count = self.merge.pkg_count |
4094 |
|
4095 |
if pkg.installed: |
4096 |
action_desc = "Uninstalling" |
4097 |
preposition = "from" |
4098 |
counter_str = "" |
4099 |
else: |
4100 |
action_desc = "Installing" |
4101 |
preposition = "to" |
4102 |
counter_str = "(%s of %s) " % \ |
4103 |
(colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)), |
4104 |
colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval))) |
4105 |
|
4106 |
msg = "%s %s%s" % \ |
4107 |
(action_desc, |
4108 |
counter_str, |
4109 |
colorize("GOOD", pkg.cpv)) |
4110 |
|
4111 |
if pkg.root != "/": |
4112 |
msg += " %s %s" % (preposition, pkg.root) |
4113 |
|
4114 |
if not self.merge.build_opts.fetchonly and \ |
4115 |
not self.merge.build_opts.pretend and \ |
4116 |
not self.merge.build_opts.buildpkgonly: |
4117 |
self.merge.statusMessage(msg) |
4118 |
|
4119 |
self.returncode = self.merge.merge() |
4120 |
self.wait() |
4121 |
|
4122 |
class DependencyArg(object): |
4123 |
def __init__(self, arg=None, root_config=None): |
4124 |
self.arg = arg |
4125 |
self.root_config = root_config |
4126 |
|
4127 |
def __str__(self): |
4128 |
return str(self.arg) |
4129 |
|
4130 |
class AtomArg(DependencyArg): |
4131 |
def __init__(self, atom=None, **kwargs): |
4132 |
DependencyArg.__init__(self, **kwargs) |
4133 |
self.atom = atom |
4134 |
if not isinstance(self.atom, portage.dep.Atom): |
4135 |
self.atom = portage.dep.Atom(self.atom) |
4136 |
self.set = (self.atom, ) |
4137 |
|
4138 |
class PackageArg(DependencyArg): |
4139 |
def __init__(self, package=None, **kwargs): |
4140 |
DependencyArg.__init__(self, **kwargs) |
4141 |
self.package = package |
4142 |
self.atom = portage.dep.Atom("=" + package.cpv) |
4143 |
self.set = (self.atom, ) |
4144 |
|
4145 |
class SetArg(DependencyArg): |
4146 |
def __init__(self, set=None, **kwargs): |
4147 |
DependencyArg.__init__(self, **kwargs) |
4148 |
self.set = set |
4149 |
self.name = self.arg[len(SETPREFIX):] |
4150 |
|
4151 |
class Dependency(SlotObject): |
4152 |
__slots__ = ("atom", "blocker", "depth", |
4153 |
"parent", "onlydeps", "priority", "root") |
4154 |
def __init__(self, **kwargs): |
4155 |
SlotObject.__init__(self, **kwargs) |
4156 |
if self.priority is None: |
4157 |
self.priority = DepPriority() |
4158 |
if self.depth is None: |
4159 |
self.depth = 0 |
4160 |
|
4161 |
class BlockerCache(portage.cache.mappings.MutableMapping): |
4162 |
"""This caches blockers of installed packages so that dep_check does not |
4163 |
have to be done for every single installed package on every invocation of |
4164 |
emerge. The cache is invalidated whenever it is detected that something |
4165 |
has changed that might alter the results of dep_check() calls: |
4166 |
1) the set of installed packages (including COUNTER) has changed |
4167 |
2) the old-style virtuals have changed |
4168 |
""" |
4169 |
|
4170 |
# Number of uncached packages to trigger cache update, since |
4171 |
# it's wasteful to update it for every vdb change. |
4172 |
_cache_threshold = 5 |
4173 |
|
4174 |
class BlockerData(object): |
4175 |
|
4176 |
__slots__ = ("__weakref__", "atoms", "counter") |
4177 |
|
4178 |
def __init__(self, counter, atoms): |
4179 |
self.counter = counter |
4180 |
self.atoms = atoms |
4181 |
|
4182 |
def __init__(self, myroot, vardb): |
4183 |
self._vardb = vardb |
4184 |
self._virtuals = vardb.settings.getvirtuals() |
4185 |
self._cache_filename = os.path.join(myroot, |
4186 |
portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle") |
4187 |
self._cache_version = "1" |
4188 |
self._cache_data = None |
4189 |
self._modified = set() |
4190 |
self._load() |
4191 |
|
4192 |
def _load(self): |
4193 |
try: |
4194 |
f = open(self._cache_filename, mode='rb') |
4195 |
mypickle = pickle.Unpickler(f) |
4196 |
try: |
4197 |
mypickle.find_global = None |
4198 |
except AttributeError: |
4199 |
# TODO: If py3k, override Unpickler.find_class(). |
4200 |
pass |
4201 |
self._cache_data = mypickle.load() |
4202 |
f.close() |
4203 |
del f |
4204 |
except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e: |
4205 |
if isinstance(e, pickle.UnpicklingError): |
4206 |
writemsg("!!! Error loading '%s': %s\n" % \ |
4207 |
(self._cache_filename, str(e)), noiselevel=-1) |
4208 |
del e |
4209 |
|
4210 |
cache_valid = self._cache_data and \ |
4211 |
isinstance(self._cache_data, dict) and \ |
4212 |
self._cache_data.get("version") == self._cache_version and \ |
4213 |
isinstance(self._cache_data.get("blockers"), dict) |
4214 |
if cache_valid: |
4215 |
# Validate all the atoms and counters so that |
4216 |
# corruption is detected as soon as possible. |
4217 |
invalid_items = set() |
4218 |
for k, v in self._cache_data["blockers"].iteritems(): |
4219 |
if not isinstance(k, basestring): |
4220 |
invalid_items.add(k) |
4221 |
continue |
4222 |
try: |
4223 |
if portage.catpkgsplit(k) is None: |
4224 |
invalid_items.add(k) |
4225 |
continue |
4226 |
except portage.exception.InvalidData: |
4227 |
invalid_items.add(k) |
4228 |
continue |
4229 |
if not isinstance(v, tuple) or \ |
4230 |
len(v) != 2: |
4231 |
invalid_items.add(k) |
4232 |
continue |
4233 |
counter, atoms = v |
4234 |
if not isinstance(counter, (int, long)): |
4235 |
invalid_items.add(k) |
4236 |
continue |
4237 |
if not isinstance(atoms, (list, tuple)): |
4238 |
invalid_items.add(k) |
4239 |
continue |
4240 |
invalid_atom = False |
4241 |
for atom in atoms: |
4242 |
if not isinstance(atom, basestring): |
4243 |
invalid_atom = True |
4244 |
break |
4245 |
if atom[:1] != "!" or \ |
4246 |
not portage.isvalidatom( |
4247 |
atom, allow_blockers=True): |
4248 |
invalid_atom = True |
4249 |
break |
4250 |
if invalid_atom: |
4251 |
invalid_items.add(k) |
4252 |
continue |
4253 |
|
4254 |
for k in invalid_items: |
4255 |
del self._cache_data["blockers"][k] |
4256 |
if not self._cache_data["blockers"]: |
4257 |
cache_valid = False |
4258 |
|
4259 |
if not cache_valid: |
4260 |
self._cache_data = {"version":self._cache_version} |
4261 |
self._cache_data["blockers"] = {} |
4262 |
self._cache_data["virtuals"] = self._virtuals |
4263 |
self._modified.clear() |
4264 |
|
4265 |
def flush(self): |
4266 |
"""If the current user has permission and the internal blocker cache |
4267 |
been updated, save it to disk and mark it unmodified. This is called |
4268 |
by emerge after it has proccessed blockers for all installed packages. |
4269 |
Currently, the cache is only written if the user has superuser |
4270 |
privileges (since that's required to obtain a lock), but all users |
4271 |
have read access and benefit from faster blocker lookups (as long as |
4272 |
the entire cache is still valid). The cache is stored as a pickled |
4273 |
dict object with the following format: |
4274 |
|
4275 |
{ |
4276 |
version : "1", |
4277 |
"blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...}, |
4278 |
"virtuals" : vardb.settings.getvirtuals() |
4279 |
} |
4280 |
""" |
4281 |
if len(self._modified) >= self._cache_threshold and \ |
4282 |
secpass >= 2: |
4283 |
try: |
4284 |
f = portage.util.atomic_ofstream(self._cache_filename, mode='wb') |
4285 |
pickle.dump(self._cache_data, f, protocol=2) |
4286 |
f.close() |
4287 |
portage.util.apply_secpass_permissions( |
4288 |
self._cache_filename, gid=portage.portage_gid, mode=0644) |
4289 |
except (IOError, OSError), e: |
4290 |
pass |
4291 |
self._modified.clear() |
4292 |
|
4293 |
def __setitem__(self, cpv, blocker_data): |
4294 |
""" |
4295 |
Update the cache and mark it as modified for a future call to |
4296 |
self.flush(). |
4297 |
|
4298 |
@param cpv: Package for which to cache blockers. |
4299 |
@type cpv: String |
4300 |
@param blocker_data: An object with counter and atoms attributes. |
4301 |
@type blocker_data: BlockerData |
4302 |
""" |
4303 |
self._cache_data["blockers"][cpv] = \ |
4304 |
(blocker_data.counter, tuple(str(x) for x in blocker_data.atoms)) |
4305 |
self._modified.add(cpv) |
4306 |
|
4307 |
def __iter__(self): |
4308 |
if self._cache_data is None: |
4309 |
# triggered by python-trace |
4310 |
return iter([]) |
4311 |
return iter(self._cache_data["blockers"]) |
4312 |
|
4313 |
def __delitem__(self, cpv): |
4314 |
del self._cache_data["blockers"][cpv] |
4315 |
|
4316 |
def __getitem__(self, cpv): |
4317 |
""" |
4318 |
@rtype: BlockerData |
4319 |
@returns: An object with counter and atoms attributes. |
4320 |
""" |
4321 |
return self.BlockerData(*self._cache_data["blockers"][cpv]) |
4322 |
|
4323 |
class BlockerDB(object): |
1615 |
class BlockerDB(object): |
4324 |
|
1616 |
|
4325 |
def __init__(self, root_config): |
1617 |
def __init__(self, root_config): |
Lines 4455-4593
Link Here
|
4455 |
msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72)) |
1747 |
msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72)) |
4456 |
writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1) |
1748 |
writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1) |
4457 |
|
1749 |
|
4458 |
class PackageVirtualDbapi(portage.dbapi): |
|
|
4459 |
""" |
4460 |
A dbapi-like interface class that represents the state of the installed |
4461 |
package database as new packages are installed, replacing any packages |
4462 |
that previously existed in the same slot. The main difference between |
4463 |
this class and fakedbapi is that this one uses Package instances |
4464 |
internally (passed in via cpv_inject() and cpv_remove() calls). |
4465 |
""" |
4466 |
def __init__(self, settings): |
4467 |
portage.dbapi.__init__(self) |
4468 |
self.settings = settings |
4469 |
self._match_cache = {} |
4470 |
self._cp_map = {} |
4471 |
self._cpv_map = {} |
4472 |
|
4473 |
def clear(self): |
4474 |
""" |
4475 |
Remove all packages. |
4476 |
""" |
4477 |
if self._cpv_map: |
4478 |
self._clear_cache() |
4479 |
self._cp_map.clear() |
4480 |
self._cpv_map.clear() |
4481 |
|
4482 |
def copy(self): |
4483 |
obj = PackageVirtualDbapi(self.settings) |
4484 |
obj._match_cache = self._match_cache.copy() |
4485 |
obj._cp_map = self._cp_map.copy() |
4486 |
for k, v in obj._cp_map.iteritems(): |
4487 |
obj._cp_map[k] = v[:] |
4488 |
obj._cpv_map = self._cpv_map.copy() |
4489 |
return obj |
4490 |
|
4491 |
def __iter__(self): |
4492 |
return self._cpv_map.itervalues() |
4493 |
|
4494 |
def __contains__(self, item): |
4495 |
existing = self._cpv_map.get(item.cpv) |
4496 |
if existing is not None and \ |
4497 |
existing == item: |
4498 |
return True |
4499 |
return False |
4500 |
|
4501 |
def get(self, item, default=None): |
4502 |
cpv = getattr(item, "cpv", None) |
4503 |
if cpv is None: |
4504 |
if len(item) != 4: |
4505 |
return default |
4506 |
type_name, root, cpv, operation = item |
4507 |
|
4508 |
existing = self._cpv_map.get(cpv) |
4509 |
if existing is not None and \ |
4510 |
existing == item: |
4511 |
return existing |
4512 |
return default |
4513 |
|
4514 |
def match_pkgs(self, atom): |
4515 |
return [self._cpv_map[cpv] for cpv in self.match(atom)] |
4516 |
|
4517 |
def _clear_cache(self): |
4518 |
if self._categories is not None: |
4519 |
self._categories = None |
4520 |
if self._match_cache: |
4521 |
self._match_cache = {} |
4522 |
|
4523 |
def match(self, origdep, use_cache=1): |
4524 |
result = self._match_cache.get(origdep) |
4525 |
if result is not None: |
4526 |
return result[:] |
4527 |
result = portage.dbapi.match(self, origdep, use_cache=use_cache) |
4528 |
self._match_cache[origdep] = result |
4529 |
return result[:] |
4530 |
|
4531 |
def cpv_exists(self, cpv): |
4532 |
return cpv in self._cpv_map |
4533 |
|
4534 |
def cp_list(self, mycp, use_cache=1): |
4535 |
cachelist = self._match_cache.get(mycp) |
4536 |
# cp_list() doesn't expand old-style virtuals |
4537 |
if cachelist and cachelist[0].startswith(mycp): |
4538 |
return cachelist[:] |
4539 |
cpv_list = self._cp_map.get(mycp) |
4540 |
if cpv_list is None: |
4541 |
cpv_list = [] |
4542 |
else: |
4543 |
cpv_list = [pkg.cpv for pkg in cpv_list] |
4544 |
self._cpv_sort_ascending(cpv_list) |
4545 |
if not (not cpv_list and mycp.startswith("virtual/")): |
4546 |
self._match_cache[mycp] = cpv_list |
4547 |
return cpv_list[:] |
4548 |
|
4549 |
def cp_all(self): |
4550 |
return list(self._cp_map) |
4551 |
|
4552 |
def cpv_all(self): |
4553 |
return list(self._cpv_map) |
4554 |
|
4555 |
def cpv_inject(self, pkg): |
4556 |
cp_list = self._cp_map.get(pkg.cp) |
4557 |
if cp_list is None: |
4558 |
cp_list = [] |
4559 |
self._cp_map[pkg.cp] = cp_list |
4560 |
e_pkg = self._cpv_map.get(pkg.cpv) |
4561 |
if e_pkg is not None: |
4562 |
if e_pkg == pkg: |
4563 |
return |
4564 |
self.cpv_remove(e_pkg) |
4565 |
for e_pkg in cp_list: |
4566 |
if e_pkg.slot_atom == pkg.slot_atom: |
4567 |
if e_pkg == pkg: |
4568 |
return |
4569 |
self.cpv_remove(e_pkg) |
4570 |
break |
4571 |
cp_list.append(pkg) |
4572 |
self._cpv_map[pkg.cpv] = pkg |
4573 |
self._clear_cache() |
4574 |
|
4575 |
def cpv_remove(self, pkg): |
4576 |
old_pkg = self._cpv_map.get(pkg.cpv) |
4577 |
if old_pkg != pkg: |
4578 |
raise KeyError(pkg) |
4579 |
self._cp_map[pkg.cp].remove(pkg) |
4580 |
del self._cpv_map[pkg.cpv] |
4581 |
self._clear_cache() |
4582 |
|
4583 |
def aux_get(self, cpv, wants): |
4584 |
metadata = self._cpv_map[cpv].metadata |
4585 |
return [metadata.get(x, "") for x in wants] |
4586 |
|
4587 |
def aux_update(self, cpv, values): |
4588 |
self._cpv_map[cpv].metadata.update(values) |
4589 |
self._clear_cache() |
4590 |
|
4591 |
class depgraph(object): |
1750 |
class depgraph(object): |
4592 |
|
1751 |
|
4593 |
pkg_tree_map = RootConfig.pkg_tree_map |
1752 |
pkg_tree_map = RootConfig.pkg_tree_map |
Lines 9290-9354
Link Here
|
9290 |
metadata = self._cpv_pkg_map[cpv].metadata |
6449 |
metadata = self._cpv_pkg_map[cpv].metadata |
9291 |
return [metadata.get(x, "") for x in wants] |
6450 |
return [metadata.get(x, "") for x in wants] |
9292 |
|
6451 |
|
9293 |
class RepoDisplay(object): |
|
|
9294 |
def __init__(self, roots): |
9295 |
self._shown_repos = {} |
9296 |
self._unknown_repo = False |
9297 |
repo_paths = set() |
9298 |
for root_config in roots.itervalues(): |
9299 |
portdir = root_config.settings.get("PORTDIR") |
9300 |
if portdir: |
9301 |
repo_paths.add(portdir) |
9302 |
overlays = root_config.settings.get("PORTDIR_OVERLAY") |
9303 |
if overlays: |
9304 |
repo_paths.update(overlays.split()) |
9305 |
repo_paths = list(repo_paths) |
9306 |
self._repo_paths = repo_paths |
9307 |
self._repo_paths_real = [ os.path.realpath(repo_path) \ |
9308 |
for repo_path in repo_paths ] |
9309 |
|
9310 |
# pre-allocate index for PORTDIR so that it always has index 0. |
9311 |
for root_config in roots.itervalues(): |
9312 |
portdb = root_config.trees["porttree"].dbapi |
9313 |
portdir = portdb.porttree_root |
9314 |
if portdir: |
9315 |
self.repoStr(portdir) |
9316 |
|
9317 |
def repoStr(self, repo_path_real): |
9318 |
real_index = -1 |
9319 |
if repo_path_real: |
9320 |
real_index = self._repo_paths_real.index(repo_path_real) |
9321 |
if real_index == -1: |
9322 |
s = "?" |
9323 |
self._unknown_repo = True |
9324 |
else: |
9325 |
shown_repos = self._shown_repos |
9326 |
repo_paths = self._repo_paths |
9327 |
repo_path = repo_paths[real_index] |
9328 |
index = shown_repos.get(repo_path) |
9329 |
if index is None: |
9330 |
index = len(shown_repos) |
9331 |
shown_repos[repo_path] = index |
9332 |
s = str(index) |
9333 |
return s |
9334 |
|
9335 |
def __str__(self): |
9336 |
output = [] |
9337 |
shown_repos = self._shown_repos |
9338 |
unknown_repo = self._unknown_repo |
9339 |
if shown_repos or self._unknown_repo: |
9340 |
output.append("Portage tree and overlays:\n") |
9341 |
show_repo_paths = list(shown_repos) |
9342 |
for repo_path, repo_index in shown_repos.iteritems(): |
9343 |
show_repo_paths[repo_index] = repo_path |
9344 |
if show_repo_paths: |
9345 |
for index, repo_path in enumerate(show_repo_paths): |
9346 |
output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path) |
9347 |
if unknown_repo: |
9348 |
output.append(" "+teal("[?]") + \ |
9349 |
" indicates that the source repository could not be determined\n") |
9350 |
return "".join(output) |
9351 |
|
9352 |
class PackageCounters(object): |
6452 |
class PackageCounters(object): |
9353 |
|
6453 |
|
9354 |
def __init__(self): |
6454 |
def __init__(self): |
Lines 9421-9616
Link Here
|
9421 |
(self.blocks - self.blocks_satisfied)) |
6521 |
(self.blocks - self.blocks_satisfied)) |
9422 |
return "".join(myoutput) |
6522 |
return "".join(myoutput) |
9423 |
|
6523 |
|
9424 |
class UseFlagDisplay(object): |
|
|
9425 |
|
6524 |
|
9426 |
__slots__ = ('name', 'enabled', 'forced') |
|
|
9427 |
|
9428 |
def __init__(self, name, enabled, forced): |
9429 |
self.name = name |
9430 |
self.enabled = enabled |
9431 |
self.forced = forced |
9432 |
|
9433 |
def __str__(self): |
9434 |
s = self.name |
9435 |
if self.enabled: |
9436 |
s = red(s) |
9437 |
else: |
9438 |
s = '-' + s |
9439 |
s = blue(s) |
9440 |
if self.forced: |
9441 |
s = '(%s)' % s |
9442 |
return s |
9443 |
|
9444 |
def _cmp_combined(a, b): |
9445 |
""" |
9446 |
Sort by name, combining enabled and disabled flags. |
9447 |
""" |
9448 |
return (a.name > b.name) - (a.name < b.name) |
9449 |
|
9450 |
sort_combined = cmp_sort_key(_cmp_combined) |
9451 |
del _cmp_combined |
9452 |
|
9453 |
def _cmp_separated(a, b): |
9454 |
""" |
9455 |
Sort by name, separating enabled flags from disabled flags. |
9456 |
""" |
9457 |
enabled_diff = b.enabled - a.enabled |
9458 |
if enabled_diff: |
9459 |
return enabled_diff |
9460 |
return (a.name > b.name) - (a.name < b.name) |
9461 |
|
9462 |
sort_separated = cmp_sort_key(_cmp_separated) |
9463 |
del _cmp_separated |
9464 |
|
9465 |
class PollSelectAdapter(PollConstants): |
9466 |
|
9467 |
""" |
9468 |
Use select to emulate a poll object, for |
9469 |
systems that don't support poll(). |
9470 |
""" |
9471 |
|
9472 |
def __init__(self): |
9473 |
self._registered = {} |
9474 |
self._select_args = [[], [], []] |
9475 |
|
9476 |
def register(self, fd, *args): |
9477 |
""" |
9478 |
Only POLLIN is currently supported! |
9479 |
""" |
9480 |
if len(args) > 1: |
9481 |
raise TypeError( |
9482 |
"register expected at most 2 arguments, got " + \ |
9483 |
repr(1 + len(args))) |
9484 |
|
9485 |
eventmask = PollConstants.POLLIN | \ |
9486 |
PollConstants.POLLPRI | PollConstants.POLLOUT |
9487 |
if args: |
9488 |
eventmask = args[0] |
9489 |
|
9490 |
self._registered[fd] = eventmask |
9491 |
self._select_args = None |
9492 |
|
9493 |
def unregister(self, fd): |
9494 |
self._select_args = None |
9495 |
del self._registered[fd] |
9496 |
|
9497 |
def poll(self, *args): |
9498 |
if len(args) > 1: |
9499 |
raise TypeError( |
9500 |
"poll expected at most 2 arguments, got " + \ |
9501 |
repr(1 + len(args))) |
9502 |
|
9503 |
timeout = None |
9504 |
if args: |
9505 |
timeout = args[0] |
9506 |
|
9507 |
select_args = self._select_args |
9508 |
if select_args is None: |
9509 |
select_args = [self._registered.keys(), [], []] |
9510 |
|
9511 |
if timeout is not None: |
9512 |
select_args = select_args[:] |
9513 |
# Translate poll() timeout args to select() timeout args: |
9514 |
# |
9515 |
# | units | value(s) for indefinite block |
9516 |
# ---------|--------------|------------------------------ |
9517 |
# poll | milliseconds | omitted, negative, or None |
9518 |
# ---------|--------------|------------------------------ |
9519 |
# select | seconds | omitted |
9520 |
# ---------|--------------|------------------------------ |
9521 |
|
9522 |
if timeout is not None and timeout < 0: |
9523 |
timeout = None |
9524 |
if timeout is not None: |
9525 |
select_args.append(timeout / 1000) |
9526 |
|
9527 |
select_events = select.select(*select_args) |
9528 |
poll_events = [] |
9529 |
for fd in select_events[0]: |
9530 |
poll_events.append((fd, PollConstants.POLLIN)) |
9531 |
return poll_events |
9532 |
|
9533 |
class SequentialTaskQueue(SlotObject): |
9534 |
|
9535 |
__slots__ = ("max_jobs", "running_tasks") + \ |
9536 |
("_dirty", "_scheduling", "_task_queue") |
9537 |
|
9538 |
def __init__(self, **kwargs): |
9539 |
SlotObject.__init__(self, **kwargs) |
9540 |
self._task_queue = deque() |
9541 |
self.running_tasks = set() |
9542 |
if self.max_jobs is None: |
9543 |
self.max_jobs = 1 |
9544 |
self._dirty = True |
9545 |
|
9546 |
def add(self, task): |
9547 |
self._task_queue.append(task) |
9548 |
self._dirty = True |
9549 |
|
9550 |
def addFront(self, task): |
9551 |
self._task_queue.appendleft(task) |
9552 |
self._dirty = True |
9553 |
|
9554 |
def schedule(self): |
9555 |
|
9556 |
if not self._dirty: |
9557 |
return False |
9558 |
|
9559 |
if not self: |
9560 |
return False |
9561 |
|
9562 |
if self._scheduling: |
9563 |
# Ignore any recursive schedule() calls triggered via |
9564 |
# self._task_exit(). |
9565 |
return False |
9566 |
|
9567 |
self._scheduling = True |
9568 |
|
9569 |
task_queue = self._task_queue |
9570 |
running_tasks = self.running_tasks |
9571 |
max_jobs = self.max_jobs |
9572 |
state_changed = False |
9573 |
|
9574 |
while task_queue and \ |
9575 |
(max_jobs is True or len(running_tasks) < max_jobs): |
9576 |
task = task_queue.popleft() |
9577 |
cancelled = getattr(task, "cancelled", None) |
9578 |
if not cancelled: |
9579 |
running_tasks.add(task) |
9580 |
task.addExitListener(self._task_exit) |
9581 |
task.start() |
9582 |
state_changed = True |
9583 |
|
9584 |
self._dirty = False |
9585 |
self._scheduling = False |
9586 |
|
9587 |
return state_changed |
9588 |
|
9589 |
def _task_exit(self, task): |
9590 |
""" |
9591 |
Since we can always rely on exit listeners being called, the set of |
9592 |
running tasks is always pruned automatically and there is never any need |
9593 |
to actively prune it. |
9594 |
""" |
9595 |
self.running_tasks.remove(task) |
9596 |
if self._task_queue: |
9597 |
self._dirty = True |
9598 |
|
9599 |
def clear(self): |
9600 |
self._task_queue.clear() |
9601 |
running_tasks = self.running_tasks |
9602 |
while running_tasks: |
9603 |
task = running_tasks.pop() |
9604 |
task.removeExitListener(self._task_exit) |
9605 |
task.cancel() |
9606 |
self._dirty = False |
9607 |
|
9608 |
def __nonzero__(self): |
9609 |
return bool(self._task_queue or self.running_tasks) |
9610 |
|
9611 |
def __len__(self): |
9612 |
return len(self._task_queue) + len(self.running_tasks) |
9613 |
|
9614 |
_can_poll_device = None |
6525 |
_can_poll_device = None |
9615 |
|
6526 |
|
9616 |
def can_poll_device(): |
6527 |
def can_poll_device(): |
Lines 10215-10238
Link Here
|
10215 |
if self.xterm_titles: |
7126 |
if self.xterm_titles: |
10216 |
xtermTitle(" ".join(plain_output.split())) |
7127 |
xtermTitle(" ".join(plain_output.split())) |
10217 |
|
7128 |
|
10218 |
class ProgressHandler(object): |
|
|
10219 |
def __init__(self): |
10220 |
self.curval = 0 |
10221 |
self.maxval = 0 |
10222 |
self._last_update = 0 |
10223 |
self.min_latency = 0.2 |
10224 |
|
10225 |
def onProgress(self, maxval, curval): |
10226 |
self.maxval = maxval |
10227 |
self.curval = curval |
10228 |
cur_time = time.time() |
10229 |
if cur_time - self._last_update >= self.min_latency: |
10230 |
self._last_update = cur_time |
10231 |
self.display() |
10232 |
|
10233 |
def display(self): |
10234 |
raise NotImplementedError(self) |
10235 |
|
10236 |
class Scheduler(PollScheduler): |
7129 |
class Scheduler(PollScheduler): |
10237 |
|
7130 |
|
10238 |
_opts_ignore_blockers = \ |
7131 |
_opts_ignore_blockers = \ |