* [gentoo-commits] portage r9957 - in main/branches/2.1.2: bin doc/dependency_resolution man pym
From: Zac Medico (zmedico) @ 2008-04-24 3:34 UTC
To: gentoo-commits
Author: zmedico
Date: 2008-04-24 03:34:07 +0000 (Thu, 24 Apr 2008)
New Revision: 9957
Modified:
main/branches/2.1.2/bin/emerge
main/branches/2.1.2/doc/dependency_resolution/task_scheduling.docbook
main/branches/2.1.2/man/color.map.5
main/branches/2.1.2/pym/output.py
Log:
Bug #172812 - Automatically uninstall packages to avoid blocker conflicts.
(trunk r9944:9956)
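
The core idea, as a minimal standalone sketch (this is not the emerge code itself; the real change below uses the digraph, the new Package/Uninstall classes and BlockerDepPriority in bin/emerge, and the cpvs here are made-up example data): when a blocker can only be resolved by removing a currently installed package, an explicit uninstall task keyed as ("installed", root, cpv, "uninstall") is scheduled ahead of the merge that triggered the conflict.

# Standalone sketch only. The tuple layout (type_name, root, cpv, operation)
# matches the hash keys used by the new Package/Uninstall classes; the
# schedule_uninstall() helper and the package names are illustrative.

def schedule_uninstall(task_list, installed_cpv, blocking_cpv, root="/"):
	"""Insert an uninstall task for installed_cpv immediately before the
	merge task of the package that blocks it, enforcing merge order."""
	uninst_task = ("installed", root, installed_cpv, "uninstall")
	merge_task = ("ebuild", root, blocking_cpv, "merge")
	task_list.insert(task_list.index(merge_task), uninst_task)
	return task_list

tasks = [("ebuild", "/", "app-misc/bar-2.0", "merge")]
# Hypothetical blocker: app-misc/bar-2.0 blocks the installed app-misc/foo-1.0.
print schedule_uninstall(tasks, "app-misc/foo-1.0", "app-misc/bar-2.0")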
Modified: main/branches/2.1.2/bin/emerge
===================================================================
--- main/branches/2.1.2/bin/emerge 2008-04-24 03:17:26 UTC (rev 9956)
+++ main/branches/2.1.2/bin/emerge 2008-04-24 03:34:07 UTC (rev 9957)
@@ -743,7 +743,7 @@
return mynewlines
-def clean_world(vardb, cpv):
+def world_clean_package(vardb, cpv):
"""Remove a package from the world file when unmerged."""
world_set = WorldSet(vardb.settings)
world_set.lock()
@@ -1088,7 +1088,13 @@
return "medium-soft"
return "soft"
+class BlockerDepPriority(DepPriority):
+ __slots__ = ()
+ def __int__(self):
+ return 0
+
class UnmergeDepPriority(AbstractDepPriority):
+ __slots__ = ()
"""
Combination of properties Priority Category
@@ -1340,10 +1346,9 @@
shown_comments.add(comment)
return have_eapi_mask
-class Package(object):
- __slots__ = ("__weakref__", "built", "cpv", "depth",
- "installed", "metadata", "root", "onlydeps", "type_name",
- "cp", "cpv_slot", "slot_atom", "_digraph_node")
+class Task(object):
+ __slots__ = ("__weakref__", "_hash_key",)
+
def __init__(self, **kwargs):
for myattr in self.__slots__:
if myattr == "__weakref__":
@@ -1351,14 +1356,56 @@
myvalue = kwargs.get(myattr, None)
setattr(self, myattr, myvalue)
+ def _get_hash_key(self):
+ try:
+ return self._hash_key
+ except AttributeError:
+ raise NotImplementedError(self)
+
+ def __eq__(self, other):
+ return self._get_hash_key() == other
+
+ def __ne__(self, other):
+ return self._get_hash_key() != other
+
+ def __hash__(self):
+ return hash(self._get_hash_key())
+
+ def __len__(self):
+ return len(self._get_hash_key())
+
+ def __getitem__(self, key):
+ return self._get_hash_key()[key]
+
+ def __iter__(self):
+ return iter(self._get_hash_key())
+
+ def __contains__(self, key):
+ return key in self._get_hash_key()
+
+ def __str__(self):
+ return str(self._get_hash_key())
+
+class Package(Task):
+ __slots__ = ("built", "cpv", "depth",
+ "installed", "metadata", "root", "onlydeps", "type_name",
+ "cp", "cpv_slot", "slot_atom")
+ def __init__(self, **kwargs):
+ Task.__init__(self, **kwargs)
self.cp = portage.cpv_getkey(self.cpv)
self.slot_atom = "%s:%s" % (self.cp, self.metadata["SLOT"])
self.cpv_slot = "%s:%s" % (self.cpv, self.metadata["SLOT"])
- status = "merge"
- if self.onlydeps or self.installed:
- status = "nomerge"
- self._digraph_node = (self.type_name, self.root, self.cpv, status)
+ def _get_hash_key(self):
+ try:
+ return self._hash_key
+ except AttributeError:
+ operation = "merge"
+ if self.onlydeps or self.installed:
+ operation = "nomerge"
+ self._hash_key = \
+ (self.type_name, self.root, self.cpv, operation)
+ return self._hash_key
def __lt__(self, other):
other_split = portage.catpkgsplit(other.cpv)
@@ -1378,22 +1425,14 @@
return True
return False
- def __eq__(self, other):
- return self._digraph_node == other
- def __ne__(self, other):
- return self._digraph_node != other
- def __hash__(self):
- return hash(self._digraph_node)
- def __len__(self):
- return len(self._digraph_node)
- def __getitem__(self, key):
- return self._digraph_node[key]
- def __iter__(self):
- return iter(self._digraph_node)
- def __contains__(self, key):
- return key in self._digraph_node
- def __str__(self):
- return str(self._digraph_node)
+class Uninstall(Package):
+ def _get_hash_key(self):
+ try:
+ return self._hash_key
+ except AttributeError:
+ self._hash_key = \
+ (self.type_name, self.root, self.cpv, "uninstall")
+ return self._hash_key
class DependencyArg(object):
def __init__(self, arg=None, root_config=None):
@@ -1594,6 +1633,13 @@
self._cp_map = {}
self._cpv_map = {}
+ def __contains__(self, item):
+ existing = self._cpv_map.get(item.cpv)
+ if existing is not None and \
+ existing == item:
+ return True
+ return False
+
def _clear_cache(self):
if self._categories is not None:
self._categories = None
@@ -3264,20 +3310,19 @@
# require that the old version be uninstalled at build
# time.
continue
- if parent_static and \
- slot_atom not in modified_slots[myroot]:
- # This blocker will be handled the next time that a
- # merge of either package is triggered.
+ if parent.installed:
+ # Two currently installed packages conflict with
+ # each other. Ignore this case since the damage
+ # is already done and this would be likely to
+ # confuse users if displayed like a normal blocker.
continue
- if pstatus == "merge" and \
- slot_atom in modified_slots[myroot]:
- replacement = self._slot_pkg_map[myroot][slot_atom]
- if not portage.match_from_list(
- mydep, [replacement.cpv_slot]):
- # Apparently a replacement may be able to
- # invalidate this block.
- depends_on_order.add((replacement, parent))
- continue
+ if pstatus == "merge":
+ # Maybe the blocked package can be replaced or simply
+ # unmerged to resolve this block.
+ inst_pkg = self._pkg_cache[
+ ("installed", myroot, cpv, "nomerge")]
+ depends_on_order.add((inst_pkg, parent))
+ continue
# None of the above blocker resolutions techniques apply,
# so apparently this one is unresolvable.
unresolved_blocks = True
@@ -3291,30 +3336,47 @@
# This blocker will be handled the next time that a
# merge of either package is triggered.
continue
- if not parent_static and pstatus == "nomerge" and \
- slot_atom in modified_slots[myroot]:
- replacement = self._slot_pkg_map[myroot][pslot_atom]
- if replacement not in \
- self.blocker_parents[blocker]:
- # Apparently a replacement may be able to
- # invalidate this block.
- blocked_node = \
- self._slot_pkg_map[myroot][slot_atom]
- depends_on_order.add(
- (replacement, blocked_node))
- continue
+
+ # Maybe the blocking package can be
+ # unmerged to resolve this block.
+ try:
+ blocked_pkg = self._slot_pkg_map[myroot][slot_atom]
+ except KeyError:
+ blocked_pkg = self._pkg_cache[
+ ("installed", myroot, cpv, "nomerge")]
+ if pstatus == "merge" and blocked_pkg.installed:
+ depends_on_order.add((blocked_pkg, parent))
+ continue
+ elif pstatus == "nomerge":
+ depends_on_order.add((parent, blocked_pkg))
+ continue
# None of the above blocker resolutions techniques apply,
# so apparently this one is unresolvable.
unresolved_blocks = True
+
+ # Make sure we don't unmerge any packages that have been pulled
+ # into the graph.
if not unresolved_blocks and depends_on_order:
- for node, pnode in depends_on_order:
+ for inst_pkg, inst_task in depends_on_order:
+ if self.digraph.contains(inst_pkg) and \
+ self.digraph.parent_nodes(inst_pkg):
+ unresolved_blocks = True
+ break
+
+ if not unresolved_blocks and depends_on_order:
+ for inst_pkg, inst_task in depends_on_order:
+ uninst_task = Uninstall(built=inst_pkg.built,
+ cpv=inst_pkg.cpv, installed=inst_pkg.installed,
+ metadata=inst_pkg.metadata, root=inst_pkg.root,
+ type_name=inst_pkg.type_name)
+ self._pkg_cache[uninst_task] = uninst_task
# Enforce correct merge order with a hard dep.
- self.digraph.addnode(node, pnode,
- priority=DepPriority(buildtime=True))
+ self.digraph.addnode(uninst_task, inst_task,
+ priority=BlockerDepPriority())
# Count references to this blocker so that it can be
# invalidated after nodes referencing it have been
# merged.
- self.blocker_digraph.addnode(node, blocker)
+ self.blocker_digraph.addnode(uninst_task, blocker)
if not unresolved_blocks and not depends_on_order:
self.blocker_parents[blocker].remove(parent)
if unresolved_blocks:
@@ -3395,14 +3457,22 @@
return -1
myblockers = self.blocker_digraph.copy()
retlist=[]
- circular_blocks = False
+ # Contains any Uninstall tasks that have been ignored
+ # in order to avoid the circular deps code path. These
+ # correspond to blocker conflicts that could not be
+ # resolved.
+ ignored_uninstall_tasks = set()
blocker_deps = None
asap_nodes = []
portage_node = None
- if reversed:
- get_nodes = mygraph.root_nodes
- else:
- get_nodes = mygraph.leaf_nodes
+ def get_nodes(**kwargs):
+ """
+ Returns leaf nodes excluding Uninstall instances
+ since those should be executed as late as possible.
+ """
+ return [node for node in mygraph.leaf_nodes(**kwargs) \
+ if not isinstance(node, Uninstall)]
+ if True:
for node in mygraph.order:
if node.root == "/" and \
"sys-apps/portage" == portage.cpv_getkey(node.cpv):
@@ -3564,27 +3634,102 @@
selected_nodes = list(selected_nodes)
selected_nodes.sort(cmp_circular_bias)
- if not selected_nodes:
- if not myblockers.is_empty():
- """A blocker couldn't be circumnavigated while keeping all
- dependencies satisfied. The user will have to resolve this
- manually. This is a panic condition and thus the order
- doesn't really matter, so just pop a random node in order
- to avoid a circular dependency panic if possible."""
- if not circular_blocks:
- circular_blocks = True
- blocker_deps = myblockers.leaf_nodes()
- while blocker_deps:
- # Some of these nodes might have already been selected
- # by the normal node selection process after the
- # circular_blocks flag has been set. Therefore, we
- # have to verify that they're still in the graph so
- # that they're not selected more than once.
- node = blocker_deps.pop()
- if mygraph.contains(node):
- selected_nodes = [node]
+ if not selected_nodes and not myblockers.is_empty():
+ # An Uninstall task needs to be executed in order to
+ # avoid conflict if possible.
+
+ min_parent_deps = None
+ uninst_task = None
+ for task in myblockers.leaf_nodes():
+ # Do some sanity checks so that system or world packages
+ # don't get uninstalled inappropriately here (only really
+ # necessary when --complete-graph has not been enabled).
+
+ if task in ignored_uninstall_tasks:
+ continue
+
+ root_config = self.roots[task.root]
+ inst_pkg = self._pkg_cache[
+ ("installed", task.root, task.cpv, "nomerge")]
+
+ # For packages in the system set, don't take
+ # any chances. If the conflict can't be resolved
+ # by a normal upgrade operation then require
+ # user intervention.
+ skip = False
+ try:
+ for atom in root_config.sets[
+ "system"].iterAtomsForPackage(task):
+ skip = True
break
+ except portage_exception.InvalidDependString:
+ skip = True
+ if skip:
+ continue
+ # For packages in the world set, go ahead and uninstall
+ # when necessary, as long as the atom will be satisfied
+ # in the final state.
+ graph_db = self.mydbapi[task.root]
+ try:
+ for atom in root_config.sets[
+ "world"].iterAtomsForPackage(task):
+ satisfied = False
+ for cpv in graph_db.match(atom):
+ if cpv == inst_pkg.cpv and \
+ inst_pkg in graph_db:
+ continue
+ satisfied = True
+ break
+ if not satisfied:
+ skip = True
+ break
+ except portage_exception.InvalidDependString:
+ skip = True
+ if skip:
+ continue
+
+ # Check the deps of parent nodes to ensure that
+ # the chosen task produces a leaf node. Maybe
+ # this can be optimized some more to make the
+ # best possible choice, but the current algorithm
+ # is simple and should be near optimal for most
+ # common cases.
+ parent_deps = set()
+ for parent in mygraph.parent_nodes(task):
+ parent_deps.update(mygraph.child_nodes(parent,
+ ignore_priority=DepPriority.MEDIUM_SOFT))
+ parent_deps.remove(task)
+ if min_parent_deps is None or \
+ len(parent_deps) < min_parent_deps:
+ min_parent_deps = len(parent_deps)
+ uninst_task = task
+
+ if uninst_task is not None:
+ selected_nodes = [uninst_task]
+ else:
+ # None of the Uninstall tasks are acceptable, so
+ # the corresponding blockers are unresolvable.
+ # We need to drop an Uninstall task here in order
+ # to avoid the circular deps code path, but the
+ # blocker will still be counted as an unresolved
+ # conflict.
+ for node in myblockers.leaf_nodes():
+ try:
+ mygraph.remove(node)
+ except KeyError:
+ pass
+ else:
+ ignored_uninstall_tasks.add(node)
+ break
+
+ # After dropping an Uninstall task, reset
+ # the state variables for leaf node selection and
+ # continue trying to select leaf nodes.
+ prefer_asap = True
+ accept_root_node = False
+ continue
+
if not selected_nodes:
# No leaf nodes are available, so we have a circular
# dependency panic situation. Reduce the noise level to a
@@ -3630,21 +3775,43 @@
accept_root_node = False
for node in selected_nodes:
+
+ # Handle interactions between blockers
+ # and uninstallation tasks.
+ uninst_task = None
+ if isinstance(node, Uninstall):
+ uninst_task = node
+ else:
+ vardb = self.trees[node.root]["vartree"].dbapi
+ previous_cpv = vardb.match(node.slot_atom)
+ if previous_cpv:
+ # The package will be replaced by this one, so remove
+ # the corresponding Uninstall task if necessary.
+ previous_cpv = previous_cpv[0]
+ uninst_task = \
+ ("installed", node.root, previous_cpv, "uninstall")
+ try:
+ mygraph.remove(uninst_task)
+ except KeyError:
+ pass
+ if uninst_task is not None and \
+ uninst_task not in ignored_uninstall_tasks and \
+ myblockers.contains(uninst_task):
+ myblockers.remove(uninst_task)
+ for blocker in myblockers.root_nodes():
+ if myblockers.child_nodes(blocker):
+ continue
+ myblockers.remove(blocker)
+ unresolved = \
+ self._unresolved_blocker_parents.get(blocker)
+ if unresolved:
+ self.blocker_parents[blocker] = unresolved
+ else:
+ del self.blocker_parents[blocker]
+
if node[-1] != "nomerge":
retlist.append(list(node))
mygraph.remove(node)
- if not reversed and not circular_blocks and myblockers.contains(node):
- """This node may have invalidated one or more blockers."""
- myblockers.remove(node)
- for blocker in myblockers.root_nodes():
- if not myblockers.child_nodes(blocker):
- myblockers.remove(blocker)
- unresolved = \
- self._unresolved_blocker_parents.get(blocker)
- if unresolved:
- self.blocker_parents[blocker] = unresolved
- else:
- del self.blocker_parents[blocker]
if not reversed:
"""Blocker validation does not work with reverse mode,
@@ -3873,8 +4040,12 @@
pkg = self._pkg_cache[tuple(x)]
metadata = pkg.metadata
pkg_status = x[3]
- pkg_merge = ordered and pkg_status != "nomerge"
- if pkg in self._slot_collision_nodes or pkg.onlydeps:
+ pkg_merge = ordered and pkg_status == "merge"
+ if not pkg_merge and pkg_status == "merge":
+ pkg_status = "nomerge"
+ if pkg_status == "uninstall":
+ mydbapi = vardb
+ elif pkg in self._slot_collision_nodes or pkg.onlydeps:
# The metadata isn't cached due to a slot collision or
# --onlydeps.
mydbapi = self.trees[myroot][self.pkg_tree_map[pkg_type]].dbapi
@@ -3898,7 +4069,7 @@
mydbapi.aux_get(pkg_key, ["RESTRICT"])[0]),
uselist=pkg_use))
except portage_exception.InvalidDependString, e:
- if pkg_status != "nomerge":
+ if not pkg.installed:
restrict = mydbapi.aux_get(pkg_key, ["RESTRICT"])[0]
show_invalid_depstring_notice(x, restrict, str(e))
del e
@@ -3921,9 +4092,10 @@
installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
if vardb.cpv_exists(pkg_key):
addl=" "+yellow("R")+fetch+" "
- if x[3] != "nomerge":
- if ordered:
- counters.reinst += 1
+ if pkg_merge:
+ counters.reinst += 1
+ elif pkg_status == "uninstall":
+ counters.uninst += 1
# filter out old-style virtual matches
elif installed_versions and \
portage.cpv_getkey(installed_versions[0]) == \
@@ -4112,7 +4284,7 @@
# now use the data to generate output
repoadd = None
- if pkg_status == "nomerge" or not has_previous:
+ if pkg.installed or not has_previous:
repoadd = repo_display.repoStr(repo_path_real)
else:
repo_path_prev = None
@@ -4197,6 +4369,8 @@
return colorize("PKG_MERGE_WORLD", pkg_str)
else:
return colorize("PKG_MERGE", pkg_str)
+ elif pkg_status == "uninstall":
+ return colorize("PKG_UNINSTALL", pkg_str)
else:
if pkg_system:
return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
@@ -4215,7 +4389,14 @@
myprint=myprint+myoldbest
myprint=myprint+darkgreen("to "+x[1])
else:
- myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_cp)
+ if not pkg_merge:
+ myprint = "[%s] %s%s" % \
+ (pkgprint(pkg_status.ljust(13)),
+ indent, pkgprint(pkg.cp))
+ else:
+ myprint = "[%s %s] %s%s" % \
+ (pkgprint(pkg.type_name), addl,
+ indent, pkgprint(pkg.cp))
if (newlp-nc_len(myprint)) > 0:
myprint=myprint+(" "*(newlp-nc_len(myprint)))
myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
@@ -4225,7 +4406,7 @@
myprint=myprint+darkgreen("to "+x[1])+" "+verboseadd
else:
if not pkg_merge:
- myprint = "[%s ] " % pkgprint("nomerge")
+ myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
else:
myprint = "[" + pkg_type + " " + addl + "] "
myprint += indent + pkgprint(pkg_key) + " " + \
@@ -4238,7 +4419,14 @@
myprint=myprint+" "+green(xs[1]+xs[2])+" "
myprint=myprint+myoldbest
else:
- myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_cp)
+ if not pkg_merge:
+ myprint = "[%s] %s%s" % \
+ (pkgprint(pkg_status.ljust(13)),
+ indent, pkgprint(pkg.cp))
+ else:
+ myprint = "[%s %s] %s%s" % \
+ (pkgprint(pkg.type_name), addl,
+ indent, pkgprint(pkg.cp))
if (newlp-nc_len(myprint)) > 0:
myprint=myprint+(" "*(newlp-nc_len(myprint)))
myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
@@ -4247,7 +4435,10 @@
myprint=myprint+myoldbest+" "+verboseadd
else:
if not pkg_merge:
- myprint="["+pkgprint("nomerge")+" ] "+indent+pkgprint(pkg_key)+" "+myoldbest+" "+verboseadd
+ myprint = "[%s] %s%s %s %s" % \
+ (pkgprint(pkg_status.ljust(13)),
+ indent, pkgprint(pkg.cpv),
+ myoldbest, verboseadd)
else:
myprint="["+pkgprint(pkg_type)+" "+addl+"] "+indent+pkgprint(pkg_key)+" "+myoldbest+" "+verboseadd
p.append(myprint)
@@ -4501,7 +4692,7 @@
pkg_type, myroot, pkg_key, action = x
if pkg_type not in self.pkg_tree_map:
continue
- if action != "merge":
+ if action not in ("merge", "uninstall"):
continue
mydb = trees[myroot][self.pkg_tree_map[pkg_type]].dbapi
try:
@@ -4509,15 +4700,22 @@
mydb.aux_get(pkg_key, self._mydbapi_keys)))
except KeyError:
# It does no exist or it is corrupt.
+ if action == "uninstall":
+ continue
raise portage_exception.PackageNotFound(pkg_key)
if pkg_type == "ebuild":
pkgsettings = self.pkgsettings[myroot]
pkgsettings.setcpv(pkg_key, mydb=metadata)
metadata["USE"] = pkgsettings["PORTAGE_USE"]
- installed = False
+ installed = action == "uninstall"
built = pkg_type != "ebuild"
- pkg = Package(built=built, cpv=pkg_key, installed=installed,
- metadata=metadata, root=myroot, type_name=pkg_type)
+ if installed:
+ pkg_constructor = Uninstall
+ else:
+ pkg_constructor = Package
+ pkg = pkg_constructor(built=built, cpv=pkg_key,
+ installed=installed, metadata=metadata,
+ root=myroot, type_name=pkg_type)
self._pkg_cache[pkg] = pkg
fakedb[myroot].cpv_inject(pkg)
self.spinner.update()
@@ -4713,6 +4911,7 @@
self.new = 0
self.newslot = 0
self.reinst = 0
+ self.uninst = 0
self.blocks = 0
self.totalsize = 0
self.restrict_fetch = 0
@@ -4745,6 +4944,10 @@
details.append("%s reinstall" % self.reinst)
if self.reinst > 1:
details[-1] += "s"
+ if self.uninst > 0:
+ details.append("%s uninstall" % self.uninst)
+ if self.uninst > 1:
+ details[-1] += "s"
if self.blocks > 0:
details.append("%s block" % self.blocks)
if self.blocks > 1:
@@ -4780,6 +4983,7 @@
portage.config(clone=trees["/"]["vartree"].settings)
self.curval = 0
self._spawned_pids = []
+ self._uninstall_queue = []
def merge(self, mylist, favorites, mtimedb):
try:
@@ -4808,8 +5012,21 @@
pass
spawned_pids.remove(pid)
+ def _dequeue_uninstall_tasks(self, mtimedb):
+ if not self._uninstall_queue:
+ return
+ for uninst_task in self._uninstall_queue:
+ root_config = self.trees[uninst_task.root]["root_config"]
+ unmerge(root_config.settings, self.myopts,
+ root_config.trees["vartree"], "unmerge",
+ [uninst_task.cpv], mtimedb["ldpath"], clean_world=0)
+ del mtimedb["resume"]["mergelist"][0]
+ mtimedb.commit()
+ del self._uninstall_queue[:]
+
def _merge(self, mylist, favorites, mtimedb):
failed_fetches = []
+ buildpkgonly = "--buildpkgonly" in self.myopts
fetchonly = "--fetchonly" in self.myopts or \
"--fetch-all-uri" in self.myopts
pretend = "--pretend" in self.myopts
@@ -4913,18 +5130,23 @@
metadata_keys = [k for k in portage.auxdbkeys \
if not k.startswith("UNUSED_")] + ["USE"]
+ task_list = mymergelist
+ # Filter mymergelist so that all the len(mymergelist) calls
+ # below (for display) do not count Uninstall instances.
+ mymergelist = [x for x in mymergelist if x[-1] == "merge"]
mergecount=0
- for x in mymergelist:
+ for x in task_list:
pkg_type = x[0]
if pkg_type == "blocks":
continue
- mergecount+=1
myroot=x[1]
pkg_key = x[2]
pkgindex=2
portdb = self.trees[myroot]["porttree"].dbapi
bindb = self.trees[myroot]["bintree"].dbapi
vartree = self.trees[myroot]["vartree"]
+ vardb = vartree.dbapi
+ root_config = self.trees[myroot]["root_config"]
pkgsettings = self.pkgsettings[myroot]
metadata = {}
if pkg_type == "blocks":
@@ -4938,15 +5160,27 @@
else:
if pkg_type == "binary":
mydbapi = bindb
+ elif pkg_type == "installed":
+ mydbapi = vardb
else:
raise AssertionError("Package type: '%s'" % pkg_type)
metadata.update(izip(metadata_keys,
mydbapi.aux_get(pkg_key, metadata_keys)))
built = pkg_type != "ebuild"
installed = pkg_type == "installed"
- pkg = Package(type_name=pkg_type, root=myroot,
+ if installed:
+ pkg_constructor = Uninstall
+ else:
+ pkg_constructor = Package
+ mergecount += 1
+ pkg = pkg_constructor(type_name=pkg_type, root=myroot,
cpv=pkg_key, built=built, installed=installed,
metadata=metadata)
+ if pkg.installed:
+ if not (buildpkgonly or fetchonly or pretend):
+ self._uninstall_queue.append(pkg)
+ continue
+
if x[0]=="blocks":
pkgindex=3
y = portdb.findname(pkg_key)
@@ -5041,6 +5275,7 @@
bintree = self.trees[myroot]["bintree"]
if bintree.populated:
bintree.inject(pkg_key)
+ self._dequeue_uninstall_tasks(mtimedb)
if "--buildpkgonly" not in self.myopts:
msg = " === (%s of %s) Merging (%s::%s)" % \
(mergecount, len(mymergelist), pkg_key, y)
@@ -5066,12 +5301,22 @@
short_msg = "emerge: (%s of %s) %s Compile" % \
(mergecount, len(mymergelist), pkg_key)
emergelog(xterm_titles, msg, short_msg=short_msg)
- retval = portage.doebuild(y, "merge", myroot,
+ retval = portage.doebuild(y, "install", myroot,
pkgsettings, self.edebug, vartree=vartree,
mydbapi=portdb, tree="porttree",
prev_mtimes=ldpath_mtimes)
if retval != os.EX_OK:
return retval
+ self._dequeue_uninstall_tasks(mtimedb)
+ retval = portage.merge(pkgsettings["CATEGORY"],
+ pkgsettings["PF"], pkgsettings["D"],
+ os.path.join(pkgsettings["PORTAGE_BUILDDIR"],
+ "build-info"), myroot, pkgsettings,
+ myebuild=pkgsettings["EBUILD"],
+ mytree="porttree", mydbapi=portdb,
+ vartree=vartree, prev_mtimes=ldpath_mtimes)
+ if retval != os.EX_OK:
+ return retval
finally:
if builddir_lock:
portage_locks.unlockdir(builddir_lock)
@@ -5091,6 +5336,7 @@
portage_locks.unlockdir(catdir_lock)
elif x[0]=="binary":
+ self._dequeue_uninstall_tasks(mtimedb)
#merge the tbz2
mytbz2 = self.trees[myroot]["bintree"].getname(pkg_key)
if "--getbinpkg" in self.myopts:
@@ -5252,7 +5498,7 @@
return os.EX_OK
def unmerge(settings, myopts, vartree, unmerge_action, unmerge_files,
- ldpath_mtimes, autoclean=0):
+ ldpath_mtimes, autoclean=0, clean_world=1):
candidate_catpkgs=[]
global_unmerge=0
xterm_titles = "notitles" not in settings.features
@@ -5566,7 +5812,8 @@
emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
sys.exit(retval)
else:
- clean_world(vartree.dbapi, y)
+ if clean_world:
+ world_clean_package(vartree.dbapi, y)
emergelog(xterm_titles, " >>> unmerge success: "+y)
return 1
Modified: main/branches/2.1.2/doc/dependency_resolution/task_scheduling.docbook
===================================================================
--- main/branches/2.1.2/doc/dependency_resolution/task_scheduling.docbook 2008-04-24 03:17:26 UTC (rev 9956)
+++ main/branches/2.1.2/doc/dependency_resolution/task_scheduling.docbook 2008-04-24 03:34:07 UTC (rev 9957)
@@ -12,11 +12,38 @@
<sect1 id='dependency-resolution-task-scheduling-conflict-avoidance'>
<title>Conflict Avoidance</title>
<para>
- In some cases it is possible to adjust package installation order
- to avoid having two conflicting packages installed simultaneously.
+ Sometimes a package installation order exists such that it is
+ possible to avoid having two conflicting packages installed
+ simultaneously. If a currently installed package conflicts with a
+ new package that is planned to be installed, it may be possible to
+ solve the conflict by replacing the installed package with a
+ different package that occupies the same slot.
</para>
<para>
- TODO: Automatically uninstall packages when necessary to avoid conflicts.
+ In order to avoid a conflict, a package may need to be uninstalled
+ in advance, rather than through replacement. The following constraints
+ protect inappropriate packages from being chosen for automatic
+ uninstallation:
+ <itemizedlist>
+ <listitem>
+ Installed packages that have been pulled into the current dependency
+ graph will not be uninstalled. Due to
+ <link linkend='dependency-resolution-package-modeling-dependency-neglection'>
+ dependency neglection</link>, other checks may be necessary in order
+ to protect inappropriate packages from being uninstalled.
+ </listitem>
+ <listitem>
+ An installed package that is matched by a dependency atom from the
+ "system" set will not be uninstalled in advance since it might not
+ be safe. Such a package will be uninstalled through replacement.
+ </listitem>
+ <listitem>
+ An installed package that is matched by a dependency atom from the
+ "world" set will not be uninstalled if the dependency graph does not
+ contain a replacement package that is matched by the same dependency
+ atom.
+ </listitem>
+ </itemizedlist>
</para>
</sect1>
<sect1 id='dependency-resolution-task-scheduling-circular-dependencies'>
Modified: main/branches/2.1.2/man/color.map.5
===================================================================
--- main/branches/2.1.2/man/color.map.5 2008-04-24 03:17:26 UTC (rev 9956)
+++ main/branches/2.1.2/man/color.map.5 2008-04-24 03:34:07 UTC (rev 9957)
@@ -48,6 +48,10 @@
\fBPKG_NOMERGE_WORLD\fR = \fI"blue"\fR
Defines color used for world packages not planned to be merged.
.TP
+\fBPKG_UNINSTALL\fR = \fI"red"\fR
+Defines color used for packages planned to be uninstalled in order
+to resolve conflicts.
+.TP
\fBPROMPT_CHOICE_DEFAULT\fR = \fI"green"\fR
Defines color used for the default choice at a prompt.
.TP
Modified: main/branches/2.1.2/pym/output.py
===================================================================
--- main/branches/2.1.2/pym/output.py 2008-04-24 03:17:26 UTC (rev 9956)
+++ main/branches/2.1.2/pym/output.py 2008-04-24 03:34:07 UTC (rev 9957)
@@ -145,6 +145,7 @@
codes["PKG_MERGE"] = codes["darkgreen"]
codes["PKG_MERGE_SYSTEM"] = codes["darkgreen"]
codes["PKG_MERGE_WORLD"] = codes["green"]
+codes["PKG_UNINSTALL"] = codes["red"]
codes["PKG_NOMERGE"] = codes["darkblue"]
codes["PKG_NOMERGE_SYSTEM"] = codes["darkblue"]
codes["PKG_NOMERGE_WORLD"] = codes["blue"]
--
gentoo-commits@lists.gentoo.org mailing list