public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] portage r13708 - main/branches/prefix/pym/_emerge
@ 2009-06-27 13:35 Fabian Groffen (grobian)
From: Fabian Groffen (grobian) @ 2009-06-27 13:35 UTC
  To: gentoo-commits

Author: grobian
Date: 2009-06-27 13:35:38 +0000 (Sat, 27 Jun 2009)
New Revision: 13708

Added:
   main/branches/prefix/pym/_emerge/BlockerDB.py
   main/branches/prefix/pym/_emerge/FakeVartree.py
   main/branches/prefix/pym/_emerge/MergeListItem.py
   main/branches/prefix/pym/_emerge/MetadataRegen.py
   main/branches/prefix/pym/_emerge/Package.py
   main/branches/prefix/pym/_emerge/PackageCounters.py
   main/branches/prefix/pym/_emerge/PackageUninstall.py
   main/branches/prefix/pym/_emerge/RootConfig.py
   main/branches/prefix/pym/_emerge/countdown.py
   main/branches/prefix/pym/_emerge/emergelog.py
   main/branches/prefix/pym/_emerge/format_size.py
   main/branches/prefix/pym/_emerge/search.py
   main/branches/prefix/pym/_emerge/show_invalid_depstring_notice.py
   main/branches/prefix/pym/_emerge/unmerge.py
   main/branches/prefix/pym/_emerge/userquery.py
   main/branches/prefix/pym/_emerge/visible.py
Modified:
   main/branches/prefix/pym/_emerge/__init__.py
Log:
   Merged from trunk -r13668:13669

   | 13669   | Bug #275047 - Split _emerge/__init__.py into smaller pieces  |
   | zmedico | (part 4). Thanks to Sebastian Mingramm (few)                 |
   |         | <s.mingramm@gmx.de> for this patch.                          |


Copied: main/branches/prefix/pym/_emerge/BlockerDB.py (from rev 13669, main/trunk/pym/_emerge/BlockerDB.py)
===================================================================
--- main/branches/prefix/pym/_emerge/BlockerDB.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/BlockerDB.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,126 @@
+# for an explanation on this logic, see pym/_emerge/__init__.py
+import os
+import sys
+if os.environ.__contains__("PORTAGE_PYTHONPATH"):
+	sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
+else:
+	sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
+import portage
+
+from portage import digraph
+from portage.sets.base import InternalPackageSet
+
+from _emerge.BlockerCache import BlockerCache
+from _emerge.FakeVartree import FakeVartree
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+
+class BlockerDB(object):
+
+	def __init__(self, root_config):
+		self._root_config = root_config
+		self._vartree = root_config.trees["vartree"]
+		self._portdb = root_config.trees["porttree"].dbapi
+
+		self._dep_check_trees = None
+		self._fake_vartree = None
+
+	def _get_fake_vartree(self, acquire_lock=0):
+		fake_vartree = self._fake_vartree
+		if fake_vartree is None:
+			fake_vartree = FakeVartree(self._root_config,
+				acquire_lock=acquire_lock)
+			self._fake_vartree = fake_vartree
+			self._dep_check_trees = { self._vartree.root : {
+				"porttree"    :  fake_vartree,
+				"vartree"     :  fake_vartree,
+			}}
+		else:
+			fake_vartree.sync(acquire_lock=acquire_lock)
+		return fake_vartree
+
+	def findInstalledBlockers(self, new_pkg, acquire_lock=0):
+		blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
+		dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
+		settings = self._vartree.settings
+		stale_cache = set(blocker_cache)
+		fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
+		dep_check_trees = self._dep_check_trees
+		vardb = fake_vartree.dbapi
+		installed_pkgs = list(vardb)
+
+		for inst_pkg in installed_pkgs:
+			stale_cache.discard(inst_pkg.cpv)
+			cached_blockers = blocker_cache.get(inst_pkg.cpv)
+			if cached_blockers is not None and \
+				cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
+				cached_blockers = None
+			if cached_blockers is not None:
+				blocker_atoms = cached_blockers.atoms
+			else:
+				# Use aux_get() to trigger FakeVartree global
+				# updates on *DEPEND when appropriate.
+				depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
+				try:
+					portage.dep._dep_check_strict = False
+					success, atoms = portage.dep_check(depstr,
+						vardb, settings, myuse=inst_pkg.use.enabled,
+						trees=dep_check_trees, myroot=inst_pkg.root)
+				finally:
+					portage.dep._dep_check_strict = True
+				if not success:
+					pkg_location = os.path.join(inst_pkg.root,
+						portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
+					portage.writemsg("!!! %s/*DEPEND: %s\n" % \
+						(pkg_location, atoms), noiselevel=-1)
+					continue
+
+				blocker_atoms = [atom for atom in atoms \
+					if atom.startswith("!")]
+				blocker_atoms.sort()
+				counter = long(inst_pkg.metadata["COUNTER"])
+				blocker_cache[inst_pkg.cpv] = \
+					blocker_cache.BlockerData(counter, blocker_atoms)
+		for cpv in stale_cache:
+			del blocker_cache[cpv]
+		blocker_cache.flush()
+
+		blocker_parents = digraph()
+		blocker_atoms = []
+		for pkg in installed_pkgs:
+			for blocker_atom in blocker_cache[pkg.cpv].atoms:
+				blocker_atom = blocker_atom.lstrip("!")
+				blocker_atoms.append(blocker_atom)
+				blocker_parents.add(blocker_atom, pkg)
+
+		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+		blocking_pkgs = set()
+		for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
+			blocking_pkgs.update(blocker_parents.parent_nodes(atom))
+
+		# Check for blockers in the other direction.
+		depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
+		try:
+			portage.dep._dep_check_strict = False
+			success, atoms = portage.dep_check(depstr,
+				vardb, settings, myuse=new_pkg.use.enabled,
+				trees=dep_check_trees, myroot=new_pkg.root)
+		finally:
+			portage.dep._dep_check_strict = True
+		if not success:
+			# We should never get this far with invalid deps.
+			show_invalid_depstring_notice(new_pkg, depstr, atoms)
+			assert False
+
+		blocker_atoms = [atom.lstrip("!") for atom in atoms \
+			if atom[:1] == "!"]
+		if blocker_atoms:
+			blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+			for inst_pkg in installed_pkgs:
+				try:
+					blocker_atoms.iterAtomsForPackage(inst_pkg).next()
+				except (portage.exception.InvalidDependString, StopIteration):
+					continue
+				blocking_pkgs.add(inst_pkg)
+
+		return blocking_pkgs
+

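A minimal usage sketch for the relocated class (not part of this commit): it assumes a configured Portage installation with the _emerge modules importable, and the RootConfig/Package setup below is illustrative only, mirroring how FakeVartree wraps installed-package metadata.

from itertools import izip

import portage
from _emerge.RootConfig import RootConfig
from _emerge.Package import Package
from _emerge.BlockerDB import BlockerDB

# Standard Portage bootstrap (assumed, not part of this commit).
trees = portage.create_trees()
root = portage.settings["ROOT"]
root_config = RootConfig(trees[root]["vartree"].settings, trees[root], None)

# Wrap an installed cpv as a Package instance, the same way FakeVartree does.
vardb = root_config.trees["vartree"].dbapi
cpv = vardb.cpv_all()[0]
db_keys = list(vardb._aux_cache_keys)
metadata = dict(izip(db_keys, vardb.aux_get(cpv, db_keys)))
pkg = Package(built=True, installed=True, cpv=cpv, metadata=metadata,
	root_config=root_config, type_name="installed")

# Returns the set of installed packages that block, or are blocked by, pkg.
# BlockerCache wants the same privileges that emerge itself would have.
blocker_db = BlockerDB(root_config)
for blocking_pkg in blocker_db.findInstalledBlockers(pkg):
	print blocking_pkg.cpv
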
Copied: main/branches/prefix/pym/_emerge/FakeVartree.py (from rev 13669, main/trunk/pym/_emerge/FakeVartree.py)
===================================================================
--- main/branches/prefix/pym/_emerge/FakeVartree.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/FakeVartree.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,235 @@
+import os
+from itertools import izip
+
+# for an explanation on this logic, see pym/_emerge/__init__.py
+import sys
+if os.environ.__contains__("PORTAGE_PYTHONPATH"):
+	sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
+else:
+	sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
+import portage
+
+from _emerge.Package import Package
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+
+class FakeVartree(portage.vartree):
+	"""This is implements an in-memory copy of a vartree instance that provides
+	all the interfaces required for use by the depgraph.  The vardb is locked
+	during the constructor call just long enough to read a copy of the
+	installed package information.  This allows the depgraph to do it's
+	dependency calculations without holding a lock on the vardb.  It also
+	allows things like vardb global updates to be done in memory so that the
+	user doesn't necessarily need write access to the vardb in cases where
+	global updates are necessary (updates are performed when necessary if there
+	is not a matching ebuild in the tree)."""
+	def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
+		self._root_config = root_config
+		if pkg_cache is None:
+			pkg_cache = {}
+		real_vartree = root_config.trees["vartree"]
+		portdb = root_config.trees["porttree"].dbapi
+		self.root = real_vartree.root
+		self.settings = real_vartree.settings
+		mykeys = list(real_vartree.dbapi._aux_cache_keys)
+		if "_mtime_" not in mykeys:
+			mykeys.append("_mtime_")
+		self._db_keys = mykeys
+		self._pkg_cache = pkg_cache
+		self.dbapi = PackageVirtualDbapi(real_vartree.settings)
+		vdb_path = os.path.join(self.root, portage.VDB_PATH)
+		try:
+			# At least the parent needs to exist for the lock file.
+			portage.util.ensure_dirs(vdb_path)
+		except portage.exception.PortageException:
+			pass
+		vdb_lock = None
+		try:
+			if acquire_lock and os.access(vdb_path, os.W_OK):
+				vdb_lock = portage.locks.lockdir(vdb_path)
+			real_dbapi = real_vartree.dbapi
+			slot_counters = {}
+			for cpv in real_dbapi.cpv_all():
+				cache_key = ("installed", self.root, cpv, "nomerge")
+				pkg = self._pkg_cache.get(cache_key)
+				if pkg is not None:
+					metadata = pkg.metadata
+				else:
+					metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
+				myslot = metadata["SLOT"]
+				mycp = portage.dep_getkey(cpv)
+				myslot_atom = "%s:%s" % (mycp, myslot)
+				try:
+					mycounter = long(metadata["COUNTER"])
+				except ValueError:
+					mycounter = 0
+					metadata["COUNTER"] = str(mycounter)
+				other_counter = slot_counters.get(myslot_atom, None)
+				if other_counter is not None:
+					if other_counter > mycounter:
+						continue
+				slot_counters[myslot_atom] = mycounter
+				if pkg is None:
+					pkg = Package(built=True, cpv=cpv,
+						installed=True, metadata=metadata,
+						root_config=root_config, type_name="installed")
+				self._pkg_cache[pkg] = pkg
+				self.dbapi.cpv_inject(pkg)
+			real_dbapi.flush_cache()
+		finally:
+			if vdb_lock:
+				portage.locks.unlockdir(vdb_lock)
+		# Populate the old-style virtuals using the cached values.
+		if not self.settings.treeVirtuals:
+			self.settings.treeVirtuals = portage.util.map_dictlist_vals(
+				portage.getCPFromCPV, self.get_all_provides())
+
+		# Initialize variables needed for lazy cache pulls of the live ebuild
+		# metadata.  This ensures that the vardb lock is released ASAP, without
+		# being delayed in case cache generation is triggered.
+		self._aux_get = self.dbapi.aux_get
+		self.dbapi.aux_get = self._aux_get_wrapper
+		self._match = self.dbapi.match
+		self.dbapi.match = self._match_wrapper
+		self._aux_get_history = set()
+		self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
+		self._portdb = portdb
+		self._global_updates = None
+
+	def _match_wrapper(self, cpv, use_cache=1):
+		"""
+		Make sure the metadata in Package instances gets updated for any
+		cpv that is returned from a match() call, since the metadata can
+		be accessed directly from the Package instance instead of via
+		aux_get().
+		"""
+		matches = self._match(cpv, use_cache=use_cache)
+		for cpv in matches:
+			if cpv in self._aux_get_history:
+				continue
+			self._aux_get_wrapper(cpv, [])
+		return matches
+
+	def _aux_get_wrapper(self, pkg, wants):
+		if pkg in self._aux_get_history:
+			return self._aux_get(pkg, wants)
+		self._aux_get_history.add(pkg)
+		try:
+			# Use the live ebuild metadata if possible.
+			live_metadata = dict(izip(self._portdb_keys,
+				self._portdb.aux_get(pkg, self._portdb_keys)))
+			if not portage.eapi_is_supported(live_metadata["EAPI"]):
+				raise KeyError(pkg)
+			self.dbapi.aux_update(pkg, live_metadata)
+		except (KeyError, portage.exception.PortageException):
+			if self._global_updates is None:
+				self._global_updates = \
+					grab_global_updates(self._portdb.porttree_root)
+			perform_global_updates(
+				pkg, self.dbapi, self._global_updates)
+		return self._aux_get(pkg, wants)
+
+	def sync(self, acquire_lock=1):
+		"""
+		Call this method to synchronize state with the real vardb
+		after one or more packages may have been installed or
+		uninstalled.
+		"""
+		vdb_path = os.path.join(self.root, portage.VDB_PATH)
+		try:
+			# At least the parent needs to exist for the lock file.
+			portage.util.ensure_dirs(vdb_path)
+		except portage.exception.PortageException:
+			pass
+		vdb_lock = None
+		try:
+			if acquire_lock and os.access(vdb_path, os.W_OK):
+				vdb_lock = portage.locks.lockdir(vdb_path)
+			self._sync()
+		finally:
+			if vdb_lock:
+				portage.locks.unlockdir(vdb_lock)
+
+	def _sync(self):
+
+		real_vardb = self._root_config.trees["vartree"].dbapi
+		current_cpv_set = frozenset(real_vardb.cpv_all())
+		pkg_vardb = self.dbapi
+		aux_get_history = self._aux_get_history
+
+		# Remove any packages that have been uninstalled.
+		for pkg in list(pkg_vardb):
+			if pkg.cpv not in current_cpv_set:
+				pkg_vardb.cpv_remove(pkg)
+				aux_get_history.discard(pkg.cpv)
+
+		# Validate counters and timestamps.
+		slot_counters = {}
+		root = self.root
+		validation_keys = ["COUNTER", "_mtime_"]
+		for cpv in current_cpv_set:
+
+			pkg_hash_key = ("installed", root, cpv, "nomerge")
+			pkg = pkg_vardb.get(pkg_hash_key)
+			if pkg is not None:
+				counter, mtime = real_vardb.aux_get(cpv, validation_keys)
+				try:
+					counter = long(counter)
+				except ValueError:
+					counter = 0
+
+				if counter != pkg.counter or \
+					mtime != pkg.mtime:
+					pkg_vardb.cpv_remove(pkg)
+					aux_get_history.discard(pkg.cpv)
+					pkg = None
+
+			if pkg is None:
+				pkg = self._pkg(cpv)
+
+			other_counter = slot_counters.get(pkg.slot_atom)
+			if other_counter is not None:
+				if other_counter > pkg.counter:
+					continue
+
+			slot_counters[pkg.slot_atom] = pkg.counter
+			pkg_vardb.cpv_inject(pkg)
+
+		real_vardb.flush_cache()
+
+	def _pkg(self, cpv):
+		root_config = self._root_config
+		real_vardb = root_config.trees["vartree"].dbapi
+		pkg = Package(cpv=cpv, installed=True,
+			metadata=izip(self._db_keys,
+			real_vardb.aux_get(cpv, self._db_keys)),
+			root_config=root_config,
+			type_name="installed")
+
+		try:
+			mycounter = long(pkg.metadata["COUNTER"])
+		except ValueError:
+			mycounter = 0
+			pkg.metadata["COUNTER"] = str(mycounter)
+
+		return pkg
+
+def grab_global_updates(portdir):
+	from portage.update import grab_updates, parse_updates
+	updpath = os.path.join(portdir, "profiles", "updates")
+	try:
+		rawupdates = grab_updates(updpath)
+	except portage.exception.DirectoryNotFound:
+		rawupdates = []
+	upd_commands = []
+	for mykey, mystat, mycontent in rawupdates:
+		commands, errors = parse_updates(mycontent)
+		upd_commands.extend(commands)
+	return upd_commands
+
+def perform_global_updates(mycpv, mydb, mycommands):
+	from portage.update import update_dbentries
+	aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
+	aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
+	updates = update_dbentries(mycommands, aux_dict)
+	if updates:
+		mydb.aux_update(mycpv, updates)

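A short sketch of how the depgraph side drives this class (assumptions: a configured Portage installation; the RootConfig construction is illustrative only):

import portage
from _emerge.RootConfig import RootConfig
from _emerge.FakeVartree import FakeVartree

# Standard Portage bootstrap (assumed, not part of this commit).
trees = portage.create_trees()
root = portage.settings["ROOT"]
root_config = RootConfig(trees[root]["vartree"].settings, trees[root], None)

# Snapshot the installed-package metadata; acquire_lock=0 skips vdb locking.
fake_vartree = FakeVartree(root_config, acquire_lock=0)

# match() goes through _match_wrapper(), so matched packages get their
# metadata refreshed from the live ebuild where one is available.
print fake_vartree.dbapi.match("sys-apps/portage")

# After packages have been merged or unmerged elsewhere, resynchronize
# the in-memory copy with the real vardb.
fake_vartree.sync(acquire_lock=0)
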
Copied: main/branches/prefix/pym/_emerge/MergeListItem.py (from rev 13669, main/trunk/pym/_emerge/MergeListItem.py)
===================================================================
--- main/branches/prefix/pym/_emerge/MergeListItem.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/MergeListItem.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,146 @@
+import os
+
+from portage.output import colorize
+
+from _emerge.Binpkg import Binpkg
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildBuild import EbuildBuild
+from _emerge.PackageUninstall import PackageUninstall
+
+class MergeListItem(CompositeTask):
+
+	"""
+	TODO: For parallel scheduling, everything here needs asynchronous
+	execution support (start, poll, and wait methods).
+	"""
+
+	__slots__ = ("args_set",
+		"binpkg_opts", "build_opts", "config_pool", "emerge_opts",
+		"find_blockers", "logger", "mtimedb", "pkg",
+		"pkg_count", "pkg_to_replace", "prefetcher",
+		"settings", "statusMessage", "world_atom") + \
+		("_install_task",)
+
+	def _start(self):
+
+		pkg = self.pkg
+		build_opts = self.build_opts
+
+		if pkg.installed:
+			# uninstall,  executed by self.merge()
+			self.returncode = os.EX_OK
+			self.wait()
+			return
+
+		args_set = self.args_set
+		find_blockers = self.find_blockers
+		logger = self.logger
+		mtimedb = self.mtimedb
+		pkg_count = self.pkg_count
+		scheduler = self.scheduler
+		settings = self.settings
+		world_atom = self.world_atom
+		ldpath_mtimes = mtimedb["ldpath"]
+
+		action_desc = "Emerging"
+		preposition = "for"
+		if pkg.type_name == "binary":
+			action_desc += " binary"
+
+		if build_opts.fetchonly:
+			action_desc = "Fetching"
+
+		msg = "%s (%s of %s) %s" % \
+			(action_desc,
+			colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
+			colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
+			colorize("GOOD", pkg.cpv))
+
+		portdb = pkg.root_config.trees["porttree"].dbapi
+		portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
+		if portdir_repo_name:
+			pkg_repo_name = pkg.metadata.get("repository")
+			if pkg_repo_name != portdir_repo_name:
+				if not pkg_repo_name:
+					pkg_repo_name = "unknown repo"
+				msg += " from %s" % pkg_repo_name
+
+		if pkg.root != "/":
+			msg += " %s %s" % (preposition, pkg.root)
+
+		if not build_opts.pretend:
+			self.statusMessage(msg)
+			logger.log(" >>> emerge (%s of %s) %s to %s" % \
+				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+		if pkg.type_name == "ebuild":
+
+			build = EbuildBuild(args_set=args_set,
+				background=self.background,
+				config_pool=self.config_pool,
+				find_blockers=find_blockers,
+				ldpath_mtimes=ldpath_mtimes, logger=logger,
+				opts=build_opts, pkg=pkg, pkg_count=pkg_count,
+				prefetcher=self.prefetcher, scheduler=scheduler,
+				settings=settings, world_atom=world_atom)
+
+			self._install_task = build
+			self._start_task(build, self._default_final_exit)
+			return
+
+		elif pkg.type_name == "binary":
+
+			binpkg = Binpkg(background=self.background,
+				find_blockers=find_blockers,
+				ldpath_mtimes=ldpath_mtimes, logger=logger,
+				opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
+				prefetcher=self.prefetcher, settings=settings,
+				scheduler=scheduler, world_atom=world_atom)
+
+			self._install_task = binpkg
+			self._start_task(binpkg, self._default_final_exit)
+			return
+
+	def _poll(self):
+		self._install_task.poll()
+		return self.returncode
+
+	def _wait(self):
+		self._install_task.wait()
+		return self.returncode
+
+	def merge(self):
+
+		pkg = self.pkg
+		build_opts = self.build_opts
+		find_blockers = self.find_blockers
+		logger = self.logger
+		mtimedb = self.mtimedb
+		pkg_count = self.pkg_count
+		prefetcher = self.prefetcher
+		scheduler = self.scheduler
+		settings = self.settings
+		world_atom = self.world_atom
+		ldpath_mtimes = mtimedb["ldpath"]
+
+		if pkg.installed:
+			if not (build_opts.buildpkgonly or \
+				build_opts.fetchonly or build_opts.pretend):
+
+				uninstall = PackageUninstall(background=self.background,
+					ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
+					pkg=pkg, scheduler=scheduler, settings=settings)
+
+				uninstall.start()
+				retval = uninstall.wait()
+				if retval != os.EX_OK:
+					return retval
+			return os.EX_OK
+
+		if build_opts.fetchonly or \
+			build_opts.buildpkgonly:
+			return self.returncode
+
+		retval = self._install_task.install()
+		return retval
+

Copied: main/branches/prefix/pym/_emerge/MetadataRegen.py (from rev 13669, main/trunk/pym/_emerge/MetadataRegen.py)
===================================================================
--- main/branches/prefix/pym/_emerge/MetadataRegen.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/MetadataRegen.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,169 @@
+# for an explanation on this logic, see pym/_emerge/__init__.py
+import os
+import sys
+if os.environ.__contains__("PORTAGE_PYTHONPATH"):
+	sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
+else:
+	sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
+import portage
+
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+from _emerge.PollScheduler import PollScheduler
+
+class MetadataRegen(PollScheduler):
+
+	def __init__(self, portdb, cp_iter=None, consumer=None,
+		max_jobs=None, max_load=None):
+		PollScheduler.__init__(self)
+		self._portdb = portdb
+		self._global_cleanse = False
+		if cp_iter is None:
+			cp_iter = self._iter_every_cp()
+			# We can globally cleanse stale cache only if we
+			# iterate over every single cp.
+			self._global_cleanse = True
+		self._cp_iter = cp_iter
+		self._consumer = consumer
+
+		if max_jobs is None:
+			max_jobs = 1
+
+		self._max_jobs = max_jobs
+		self._max_load = max_load
+		self._sched_iface = self._sched_iface_class(
+			register=self._register,
+			schedule=self._schedule_wait,
+			unregister=self._unregister)
+
+		self._valid_pkgs = set()
+		self._cp_set = set()
+		self._process_iter = self._iter_metadata_processes()
+		self.returncode = os.EX_OK
+		self._error_count = 0
+
+	def _iter_every_cp(self):
+		every_cp = self._portdb.cp_all()
+		every_cp.sort(reverse=True)
+		try:
+			while True:
+				yield every_cp.pop()
+		except IndexError:
+			pass
+
+	def _iter_metadata_processes(self):
+		portdb = self._portdb
+		valid_pkgs = self._valid_pkgs
+		cp_set = self._cp_set
+		consumer = self._consumer
+
+		for cp in self._cp_iter:
+			cp_set.add(cp)
+			portage.writemsg_stdout("Processing %s\n" % cp)
+			cpv_list = portdb.cp_list(cp)
+			for cpv in cpv_list:
+				valid_pkgs.add(cpv)
+				ebuild_path, repo_path = portdb.findname2(cpv)
+				metadata, st, emtime = portdb._pull_valid_cache(
+					cpv, ebuild_path, repo_path)
+				if metadata is not None:
+					if consumer is not None:
+						consumer(cpv, ebuild_path,
+							repo_path, metadata)
+					continue
+
+				yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
+					ebuild_mtime=emtime,
+					metadata_callback=portdb._metadata_callback,
+					portdb=portdb, repo_path=repo_path,
+					settings=portdb.doebuild_settings)
+
+	def run(self):
+
+		portdb = self._portdb
+		from portage.cache.cache_errors import CacheError
+		dead_nodes = {}
+
+		while self._schedule():
+			self._poll_loop()
+
+		while self._jobs:
+			self._poll_loop()
+
+		if self._global_cleanse:
+			for mytree in portdb.porttrees:
+				try:
+					dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
+				except CacheError, e:
+					portage.writemsg("Error listing cache entries for " + \
+						"'%s': %s, continuing...\n" % (mytree, e),
+						noiselevel=-1)
+					del e
+					dead_nodes = None
+					break
+		else:
+			cp_set = self._cp_set
+			cpv_getkey = portage.cpv_getkey
+			for mytree in portdb.porttrees:
+				try:
+					dead_nodes[mytree] = set(cpv for cpv in \
+						portdb.auxdb[mytree].iterkeys() \
+						if cpv_getkey(cpv) in cp_set)
+				except CacheError, e:
+					portage.writemsg("Error listing cache entries for " + \
+						"'%s': %s, continuing...\n" % (mytree, e),
+						noiselevel=-1)
+					del e
+					dead_nodes = None
+					break
+
+		if dead_nodes:
+			for y in self._valid_pkgs:
+				for mytree in portdb.porttrees:
+					if portdb.findname2(y, mytree=mytree)[0]:
+						dead_nodes[mytree].discard(y)
+
+			for mytree, nodes in dead_nodes.iteritems():
+				auxdb = portdb.auxdb[mytree]
+				for y in nodes:
+					try:
+						del auxdb[y]
+					except (KeyError, CacheError):
+						pass
+
+	def _schedule_tasks(self):
+		"""
+		@rtype: bool
+		@returns: True if there may be remaining tasks to schedule,
+			False otherwise.
+		"""
+		while self._can_add_job():
+			try:
+				metadata_process = self._process_iter.next()
+			except StopIteration:
+				return False
+
+			self._jobs += 1
+			metadata_process.scheduler = self._sched_iface
+			metadata_process.addExitListener(self._metadata_exit)
+			metadata_process.start()
+		return True
+
+	def _metadata_exit(self, metadata_process):
+		self._jobs -= 1
+		if metadata_process.returncode != os.EX_OK:
+			self.returncode = 1
+			self._error_count += 1
+			self._valid_pkgs.discard(metadata_process.cpv)
+			portage.writemsg("Error processing %s, continuing...\n" % \
+				(metadata_process.cpv,), noiselevel=-1)
+
+		if self._consumer is not None:
+			# On failure, still notify the consumer (in this case the metadata
+			# argument is None).
+			self._consumer(metadata_process.cpv,
+				metadata_process.ebuild_path,
+				metadata_process.repo_path,
+				metadata_process.metadata)
+
+		self._schedule()
+

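This is roughly the machinery behind `emerge --regen`. A minimal sketch (assumptions: a configured Portage installation, the legacy portage.portdb global, and write access to the metadata cache):

import portage
from _emerge.MetadataRegen import MetadataRegen

def consumer(cpv, ebuild_path, repo_path, metadata):
	# metadata is None when processing of an ebuild failed.
	if metadata is None:
		print "failed:", cpv

# Regenerate cache entries for every category/package in the tree,
# running up to two EbuildMetadataPhase jobs in parallel.
regen = MetadataRegen(portage.portdb, max_jobs=2, consumer=consumer)
regen.run()
print "exit status:", regen.returncode
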
Copied: main/branches/prefix/pym/_emerge/Package.py (from rev 13669, main/trunk/pym/_emerge/Package.py)
===================================================================
--- main/branches/prefix/pym/_emerge/Package.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/Package.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,187 @@
+import re
+from itertools import chain
+
+# for an explanation on this logic, see pym/_emerge/__init__.py
+import os
+import sys
+if os.environ.__contains__("PORTAGE_PYTHONPATH"):
+	sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
+else:
+	sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
+import portage
+
+from portage.cache.mappings import slot_dict_class
+
+from _emerge.Task import Task
+
+class Package(Task):
+
+	__hash__ = Task.__hash__
+	__slots__ = ("built", "cpv", "depth",
+		"installed", "metadata", "onlydeps", "operation",
+		"root_config", "type_name",
+		"category", "counter", "cp", "cpv_split",
+		"inherited", "iuse", "mtime",
+		"pf", "pv_split", "root", "slot", "slot_atom", "use")
+
+	metadata_keys = [
+		"CHOST", "COUNTER", "DEPEND", "EAPI",
+		"INHERITED", "IUSE", "KEYWORDS",
+		"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
+		"repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_",
+		"EPREFIX" ]
+
+	def __init__(self, **kwargs):
+		Task.__init__(self, **kwargs)
+		self.root = self.root_config.root
+		self.metadata = _PackageMetadataWrapper(self, self.metadata)
+		self.cp = portage.cpv_getkey(self.cpv)
+		slot = self.slot
+		if not slot:
+			# Avoid an InvalidAtom exception when creating slot_atom.
+			# This package instance will be masked due to empty SLOT.
+			slot = '0'
+		self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
+		self.category, self.pf = portage.catsplit(self.cpv)
+		self.cpv_split = portage.catpkgsplit(self.cpv)
+		self.pv_split = self.cpv_split[1:]
+
+	class _use(object):
+
+		__slots__ = ("__weakref__", "enabled")
+
+		def __init__(self, use):
+			self.enabled = frozenset(use)
+
+	class _iuse(object):
+
+		__slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
+
+		def __init__(self, tokens, iuse_implicit):
+			self.tokens = tuple(tokens)
+			self.iuse_implicit = iuse_implicit
+			enabled = []
+			disabled = []
+			other = []
+			for x in tokens:
+				prefix = x[:1]
+				if prefix == "+":
+					enabled.append(x[1:])
+				elif prefix == "-":
+					disabled.append(x[1:])
+				else:
+					other.append(x)
+			self.enabled = frozenset(enabled)
+			self.disabled = frozenset(disabled)
+			self.all = frozenset(chain(enabled, disabled, other))
+
+		def __getattribute__(self, name):
+			if name == "regex":
+				try:
+					return object.__getattribute__(self, "regex")
+				except AttributeError:
+					all = object.__getattribute__(self, "all")
+					iuse_implicit = object.__getattribute__(self, "iuse_implicit")
+					# Escape anything except ".*" which is supposed
+					# to pass through from _get_implicit_iuse()
+					regex = (re.escape(x) for x in chain(all, iuse_implicit))
+					regex = "^(%s)$" % "|".join(regex)
+					regex = regex.replace("\\.\\*", ".*")
+					self.regex = re.compile(regex)
+			return object.__getattribute__(self, name)
+
+	def _get_hash_key(self):
+		hash_key = getattr(self, "_hash_key", None)
+		if hash_key is None:
+			if self.operation is None:
+				self.operation = "merge"
+				if self.onlydeps or self.installed:
+					self.operation = "nomerge"
+			self._hash_key = \
+				(self.type_name, self.root, self.cpv, self.operation)
+		return self._hash_key
+
+	def __lt__(self, other):
+		if other.cp != self.cp:
+			return False
+		if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
+			return True
+		return False
+
+	def __le__(self, other):
+		if other.cp != self.cp:
+			return False
+		if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
+			return True
+		return False
+
+	def __gt__(self, other):
+		if other.cp != self.cp:
+			return False
+		if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
+			return True
+		return False
+
+	def __ge__(self, other):
+		if other.cp != self.cp:
+			return False
+		if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
+			return True
+		return False
+
+_all_metadata_keys = set(x for x in portage.auxdbkeys \
+	if not x.startswith("UNUSED_"))
+_all_metadata_keys.discard("CDEPEND")
+_all_metadata_keys.update(Package.metadata_keys)
+
+_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
+
+class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
+	"""
+	Detect metadata updates and synchronize Package attributes.
+	"""
+
+	__slots__ = ("_pkg",)
+	_wrapped_keys = frozenset(
+		["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
+
+	def __init__(self, pkg, metadata):
+		_PackageMetadataWrapperBase.__init__(self)
+		self._pkg = pkg
+		self.update(metadata)
+
+	def __setitem__(self, k, v):
+		_PackageMetadataWrapperBase.__setitem__(self, k, v)
+		if k in self._wrapped_keys:
+			getattr(self, "_set_" + k.lower())(k, v)
+
+	def _set_inherited(self, k, v):
+		if isinstance(v, basestring):
+			v = frozenset(v.split())
+		self._pkg.inherited = v
+
+	def _set_iuse(self, k, v):
+		self._pkg.iuse = self._pkg._iuse(
+			v.split(), self._pkg.root_config.iuse_implicit)
+
+	def _set_slot(self, k, v):
+		self._pkg.slot = v
+
+	def _set_use(self, k, v):
+		self._pkg.use = self._pkg._use(v.split())
+
+	def _set_counter(self, k, v):
+		if isinstance(v, basestring):
+			try:
+				v = long(v.strip())
+			except ValueError:
+				v = 0
+		self._pkg.counter = v
+
+	def _set__mtime_(self, k, v):
+		if isinstance(v, basestring):
+			try:
+				v = long(v.strip())
+			except ValueError:
+				v = 0
+		self._pkg.mtime = v

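A small sketch of building Package instances and using the attributes that _PackageMetadataWrapper keeps in sync (assumptions: a configured Portage installation; the RootConfig construction is illustrative; aux_get() with Package.metadata_keys mirrors the search._xmatch() code later in this diff):

from itertools import izip

import portage
from _emerge.RootConfig import RootConfig
from _emerge.Package import Package

# Standard Portage bootstrap (assumed, not part of this commit).
trees = portage.create_trees()
root = portage.settings["ROOT"]
root_config = RootConfig(trees[root]["vartree"].settings, trees[root], None)
vardb = trees[root]["vartree"].dbapi

def installed_pkg(cpv):
	metadata = dict(izip(Package.metadata_keys,
		vardb.aux_get(cpv, Package.metadata_keys)))
	return Package(built=True, installed=True, cpv=cpv,
		metadata=metadata, root_config=root_config, type_name="installed")

for pkg in sorted(installed_pkg(cpv) for cpv in vardb.match("sys-apps/portage")):
	# slot_atom, counter and use are derived from SLOT/COUNTER/USE by the
	# metadata wrapper; sorted() works because of the rich comparisons above.
	print pkg.slot_atom, pkg.counter, len(pkg.use.enabled)
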
Copied: main/branches/prefix/pym/_emerge/PackageCounters.py (from rev 13669, main/trunk/pym/_emerge/PackageCounters.py)
===================================================================
--- main/branches/prefix/pym/_emerge/PackageCounters.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/PackageCounters.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,77 @@
+from portage.output import colorize, create_color_func
+bad = create_color_func("BAD")
+
+from _emerge.format_size import format_size
+
+class PackageCounters(object):
+
+	def __init__(self):
+		self.upgrades   = 0
+		self.downgrades = 0
+		self.new        = 0
+		self.newslot    = 0
+		self.reinst     = 0
+		self.uninst     = 0
+		self.blocks     = 0
+		self.blocks_satisfied         = 0
+		self.totalsize  = 0
+		self.restrict_fetch           = 0
+		self.restrict_fetch_satisfied = 0
+		self.interactive              = 0
+
+	def __str__(self):
+		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
+		myoutput = []
+		details = []
+		myoutput.append("Total: %s package" % total_installs)
+		if total_installs != 1:
+			myoutput.append("s")
+		if total_installs != 0:
+			myoutput.append(" (")
+		if self.upgrades > 0:
+			details.append("%s upgrade" % self.upgrades)
+			if self.upgrades > 1:
+				details[-1] += "s"
+		if self.downgrades > 0:
+			details.append("%s downgrade" % self.downgrades)
+			if self.downgrades > 1:
+				details[-1] += "s"
+		if self.new > 0:
+			details.append("%s new" % self.new)
+		if self.newslot > 0:
+			details.append("%s in new slot" % self.newslot)
+			if self.newslot > 1:
+				details[-1] += "s"
+		if self.reinst > 0:
+			details.append("%s reinstall" % self.reinst)
+			if self.reinst > 1:
+				details[-1] += "s"
+		if self.uninst > 0:
+			details.append("%s uninstall" % self.uninst)
+			if self.uninst > 1:
+				details[-1] += "s"
+		if self.interactive > 0:
+			details.append("%s %s" % (self.interactive,
+				colorize("WARN", "interactive")))
+		myoutput.append(", ".join(details))
+		if total_installs != 0:
+			myoutput.append(")")
+		myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
+		if self.restrict_fetch:
+			myoutput.append("\nFetch Restriction: %s package" % \
+				self.restrict_fetch)
+			if self.restrict_fetch > 1:
+				myoutput.append("s")
+		if self.restrict_fetch_satisfied < self.restrict_fetch:
+			myoutput.append(bad(" (%s unsatisfied)") % \
+				(self.restrict_fetch - self.restrict_fetch_satisfied))
+		if self.blocks > 0:
+			myoutput.append("\nConflict: %s block" % \
+				self.blocks)
+			if self.blocks > 1:
+				myoutput.append("s")
+			if self.blocks_satisfied < self.blocks:
+				myoutput.append(bad(" (%s unsatisfied)") % \
+					(self.blocks - self.blocks_satisfied))
+		return "".join(myoutput)
+

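PackageCounters is a plain accumulator: the depgraph display code increments the fields while walking the merge list and prints str(counters) as the summary line. A self-contained sketch:

from _emerge.PackageCounters import PackageCounters

counters = PackageCounters()
counters.upgrades += 2
counters.new += 1
counters.reinst += 1
counters.blocks = 1
counters.blocks_satisfied = 1
counters.totalsize = 3 * 1024 * 1024  # bytes of distfiles still to fetch

# Prints:
# Total: 4 packages (2 upgrades, 1 new, 1 reinstall), Size of downloads: 3,072 kB
# Conflict: 1 block
print counters
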
Copied: main/branches/prefix/pym/_emerge/PackageUninstall.py (from rev 13669, main/trunk/pym/_emerge/PackageUninstall.py)
===================================================================
--- main/branches/prefix/pym/_emerge/PackageUninstall.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/PackageUninstall.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,50 @@
+import logging
+# for an explanation on this logic, see pym/_emerge/__init__.py
+import os
+import sys
+if os.environ.__contains__("PORTAGE_PYTHONPATH"):
+	sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
+else:
+	sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
+import portage
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.unmerge import unmerge
+from _emerge.UninstallFailure import UninstallFailure
+
+class PackageUninstall(AsynchronousTask):
+
+	__slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
+
+	def _start(self):
+		try:
+			unmerge(self.pkg.root_config, self.opts, "unmerge",
+				[self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
+				clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
+				writemsg_level=self._writemsg_level)
+		except UninstallFailure, e:
+			self.returncode = e.status
+		else:
+			self.returncode = os.EX_OK
+		self.wait()
+
+	def _writemsg_level(self, msg, level=0, noiselevel=0):
+
+		log_path = self.settings.get("PORTAGE_LOG_FILE")
+		background = self.background
+
+		if log_path is None:
+			if not (background and level < logging.WARNING):
+				portage.util.writemsg_level(msg,
+					level=level, noiselevel=noiselevel)
+		else:
+			if not background:
+				portage.util.writemsg_level(msg,
+					level=level, noiselevel=noiselevel)
+
+			f = open(log_path, 'a')
+			try:
+				f.write(msg)
+			finally:
+				f.close()
+

Copied: main/branches/prefix/pym/_emerge/RootConfig.py (from rev 13669, main/trunk/pym/_emerge/RootConfig.py)
===================================================================
--- main/branches/prefix/pym/_emerge/RootConfig.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/RootConfig.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,28 @@
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+
+class RootConfig(object):
+	"""This is used internally by depgraph to track information about a
+	particular $ROOT."""
+
+	pkg_tree_map = {
+		"ebuild"    : "porttree",
+		"binary"    : "bintree",
+		"installed" : "vartree"
+	}
+
+	tree_pkg_map = {}
+	for k, v in pkg_tree_map.iteritems():
+		tree_pkg_map[v] = k
+
+	def __init__(self, settings, trees, setconfig):
+		self.trees = trees
+		self.settings = settings
+		self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
+		self.root = self.settings["ROOT"]
+		self.setconfig = setconfig
+		if setconfig is None:
+			self.sets = {}
+		else:
+			self.sets = self.setconfig.getSets()
+		self.visible_pkgs = PackageVirtualDbapi(self.settings)
+

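A minimal construction sketch (assumptions: a configured Portage installation and the standard create_trees()/portage.settings bootstrap; passing setconfig=None is allowed and simply leaves root_config.sets empty):

import portage
from _emerge.RootConfig import RootConfig

trees = portage.create_trees()
root = portage.settings["ROOT"]
root_config = RootConfig(trees[root]["vartree"].settings, trees[root], None)

print root_config.root                    # the configured $ROOT
print RootConfig.pkg_tree_map["ebuild"]   # -> "porttree"
print RootConfig.tree_pkg_map["vartree"]  # -> "installed"
print sorted(root_config.sets)            # [] without a SetConfig
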
Modified: main/branches/prefix/pym/_emerge/__init__.py
===================================================================
--- main/branches/prefix/pym/_emerge/__init__.py	2009-06-27 13:19:35 UTC (rev 13707)
+++ main/branches/prefix/pym/_emerge/__init__.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -35,17 +35,16 @@
 
 from portage import digraph
 from portage.const import NEWS_LIB_PATH
+from portage.cache.mappings import slot_dict_class
 
 import _emerge.help
 import portage.xpak, commands, errno, re, socket, time
 from portage.output import blue, bold, colorize, darkblue, darkgreen, green, \
-	nc_len, red, teal, turquoise, xtermTitle, \
+	nc_len, red, teal, turquoise, \
 	xtermTitleReset, yellow
 from portage.output import create_color_func
 good = create_color_func("GOOD")
 bad = create_color_func("BAD")
-# white looks bad on terminals with white background
-from portage.output import bold as white
 
 import portage.elog
 import portage.dep
@@ -70,15 +69,11 @@
 from _emerge.UnmergeDepPriority import UnmergeDepPriority
 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
-from _emerge.Task import Task
+from _emerge.Package import Package
 from _emerge.Blocker import Blocker
-from _emerge.AsynchronousTask import AsynchronousTask
-from _emerge.CompositeTask import CompositeTask
+from _emerge.BlockerDB import BlockerDB
 from _emerge.EbuildFetcher import EbuildFetcher
-from _emerge.EbuildBuild import EbuildBuild
-from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
 from _emerge.EbuildPhase import EbuildPhase
-from _emerge.Binpkg import Binpkg
 from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
 from _emerge.PackageMerge import PackageMerge
 from _emerge.DependencyArg import DependencyArg
@@ -93,50 +88,22 @@
 from _emerge.SequentialTaskQueue import SequentialTaskQueue
 from _emerge.ProgressHandler import ProgressHandler
 from _emerge.stdout_spinner import stdout_spinner
-from _emerge.UninstallFailure import UninstallFailure
 from _emerge.JobStatusDisplay import JobStatusDisplay
 from _emerge.PollScheduler import PollScheduler
+from _emerge.search import search
+from _emerge.visible import visible
+from _emerge.emergelog import emergelog, _emerge_log_dir
+from _emerge.userquery import userquery
+from _emerge.countdown import countdown
+from _emerge.unmerge import unmerge
+from _emerge.MergeListItem import MergeListItem
+from _emerge.MetadataRegen import MetadataRegen
+from _emerge.RootConfig import RootConfig
+from _emerge.format_size import format_size
+from _emerge.PackageCounters import PackageCounters
+from _emerge.FakeVartree import FakeVartree
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
 
-def userquery(prompt, responses=None, colours=None):
-	"""Displays a prompt and a set of responses, then waits for a response
-	which is checked against the responses and the first to match is
-	returned.  An empty response will match the first value in responses.  The
-	input buffer is *not* cleared prior to the prompt!
-
-	prompt: a String.
-	responses: a List of Strings.
-	colours: a List of Functions taking and returning a String, used to
-	process the responses for display. Typically these will be functions
-	like red() but could be e.g. lambda x: "DisplayString".
-	If responses is omitted, defaults to ["Yes", "No"], [green, red].
-	If only colours is omitted, defaults to [bold, ...].
-
-	Returns a member of the List responses. (If called without optional
-	arguments, returns "Yes" or "No".)
-	KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
-	printed."""
-	if responses is None:
-		responses = ["Yes", "No"]
-		colours = [
-			create_color_func("PROMPT_CHOICE_DEFAULT"),
-			create_color_func("PROMPT_CHOICE_OTHER")
-		]
-	elif colours is None:
-		colours=[bold]
-	colours=(colours*len(responses))[:len(responses)]
-	print bold(prompt),
-	try:
-		while True:
-			response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
-			for key in responses:
-				# An empty response will match the first value in responses.
-				if response.upper()==key[:len(response)].upper():
-					return key
-			print "Sorry, response '%s' not understood." % response,
-	except (EOFError, KeyboardInterrupt):
-		print "Interrupted."
-		sys.exit(1)
-
 actions = frozenset([
 "clean", "config", "depclean",
 "info", "list-sets", "metadata",
@@ -191,63 +158,6 @@
 "v":"--verbose",   "V":"--version"
 }
 
-_emerge_log_dir = '/var/log'
-
-def emergelog(xterm_titles, mystr, short_msg=None):
-	if xterm_titles and short_msg:
-		if "HOSTNAME" in os.environ:
-			short_msg = os.environ["HOSTNAME"]+": "+short_msg
-		xtermTitle(short_msg)
-	try:
-		file_path = os.path.join(_emerge_log_dir, 'emerge.log')
-		mylogfile = open(file_path, "a")
-		portage.util.apply_secpass_permissions(file_path,
-			uid=portage.portage_uid, gid=portage.portage_gid,
-			mode=0660)
-		mylock = None
-		try:
-			mylock = portage.locks.lockfile(mylogfile)
-			# seek because we may have gotten held up by the lock.
-			# if so, we may not be positioned at the end of the file.
-			mylogfile.seek(0, 2)
-			mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
-			mylogfile.flush()
-		finally:
-			if mylock:
-				portage.locks.unlockfile(mylock)
-			mylogfile.close()
-	except (IOError,OSError,portage.exception.PortageException), e:
-		if secpass >= 1:
-			print >> sys.stderr, "emergelog():",e
-
-def countdown(secs=5, doing="Starting"):
-	if secs:
-		print ">>> Waiting",secs,"seconds before starting..."
-		print ">>> (Control-C to abort)...\n"+doing+" in: ",
-		ticks=range(secs)
-		ticks.reverse()
-		for sec in ticks:
-			sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
-			sys.stdout.flush()
-			time.sleep(1)
-		print
-
-# formats a size given in bytes nicely
-def format_size(mysize):
-	if isinstance(mysize, basestring):
-		return mysize
-	if 0 != mysize % 1024:
-		# Always round up to the next kB so that it doesn't show 0 kB when
-		# some small file still needs to be fetched.
-		mysize += 1024 - mysize % 1024
-	mystr=str(mysize/1024)
-	mycount=len(mystr)
-	while (mycount > 3):
-		mycount-=3
-		mystr=mystr[:mycount]+","+mystr[mycount:]
-	return mystr+" kB"
-
-
 def getgccversion(chost):
 	"""
 	rtype: C{str}
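
The emergelog(), countdown() and format_size() helpers removed above now live in their own _emerge modules (imported near the top of this diff). A quick sketch of the unchanged format_size() behaviour: partial kilobytes are rounded up and a thousands separator is inserted.

from _emerge.format_size import format_size

print format_size(0)        # "0 kB"
print format_size(1)        # "1 kB" (rounded up to the next kB)
print format_size(1500000)  # "1,465 kB"
print format_size("3 MiB")  # strings are passed through unchanged
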
@@ -344,394 +254,6 @@
 		myparams.add("complete")
 	return myparams
 
-# search functionality
-class search(object):
-
-	#
-	# class constants
-	#
-	VERSION_SHORT=1
-	VERSION_RELEASE=2
-
-	#
-	# public interface
-	#
-	def __init__(self, root_config, spinner, searchdesc,
-		verbose, usepkg, usepkgonly):
-		"""Searches the available and installed packages for the supplied search key.
-		The list of available and installed packages is created at object instantiation.
-		This makes successive searches faster."""
-		self.settings = root_config.settings
-		self.vartree = root_config.trees["vartree"]
-		self.spinner = spinner
-		self.verbose = verbose
-		self.searchdesc = searchdesc
-		self.root_config = root_config
-		self.setconfig = root_config.setconfig
-		self.matches = {"pkg" : []}
-		self.mlen = 0
-
-		def fake_portdb():
-			pass
-		self.portdb = fake_portdb
-		for attrib in ("aux_get", "cp_all",
-			"xmatch", "findname", "getFetchMap"):
-			setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
-
-		self._dbs = []
-
-		portdb = root_config.trees["porttree"].dbapi
-		bindb = root_config.trees["bintree"].dbapi
-		vardb = root_config.trees["vartree"].dbapi
-
-		if not usepkgonly and portdb._have_root_eclass_dir:
-			self._dbs.append(portdb)
-
-		if (usepkg or usepkgonly) and bindb.cp_all():
-			self._dbs.append(bindb)
-
-		self._dbs.append(vardb)
-		self._portdb = portdb
-
-	def _cp_all(self):
-		cp_all = set()
-		for db in self._dbs:
-			cp_all.update(db.cp_all())
-		return list(sorted(cp_all))
-
-	def _aux_get(self, *args, **kwargs):
-		for db in self._dbs:
-			try:
-				return db.aux_get(*args, **kwargs)
-			except KeyError:
-				pass
-		raise
-
-	def _findname(self, *args, **kwargs):
-		for db in self._dbs:
-			if db is not self._portdb:
-				# We don't want findname to return anything
-				# unless it's an ebuild in a portage tree.
-				# Otherwise, it's already built and we don't
-				# care about it.
-				continue
-			func = getattr(db, "findname", None)
-			if func:
-				value = func(*args, **kwargs)
-				if value:
-					return value
-		return None
-
-	def _getFetchMap(self, *args, **kwargs):
-		for db in self._dbs:
-			func = getattr(db, "getFetchMap", None)
-			if func:
-				value = func(*args, **kwargs)
-				if value:
-					return value
-		return {}
-
-	def _visible(self, db, cpv, metadata):
-		installed = db is self.vartree.dbapi
-		built = installed or db is not self._portdb
-		pkg_type = "ebuild"
-		if installed:
-			pkg_type = "installed"
-		elif built:
-			pkg_type = "binary"
-		return visible(self.settings,
-			Package(type_name=pkg_type, root_config=self.root_config,
-			cpv=cpv, built=built, installed=installed, metadata=metadata))
-
-	def _xmatch(self, level, atom):
-		"""
-		This method does not expand old-style virtuals because it
-		is restricted to returning matches for a single ${CATEGORY}/${PN}
-		and old-style virual matches unreliable for that when querying
-		multiple package databases. If necessary, old-style virtuals
-		can be performed on atoms prior to calling this method.
-		"""
-		cp = portage.dep_getkey(atom)
-		if level == "match-all":
-			matches = set()
-			for db in self._dbs:
-				if hasattr(db, "xmatch"):
-					matches.update(db.xmatch(level, atom))
-				else:
-					matches.update(db.match(atom))
-			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
-			db._cpv_sort_ascending(result)
-		elif level == "match-visible":
-			matches = set()
-			for db in self._dbs:
-				if hasattr(db, "xmatch"):
-					matches.update(db.xmatch(level, atom))
-				else:
-					db_keys = list(db._aux_cache_keys)
-					for cpv in db.match(atom):
-						metadata = izip(db_keys,
-							db.aux_get(cpv, db_keys))
-						if not self._visible(db, cpv, metadata):
-							continue
-						matches.add(cpv)
-			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
-			db._cpv_sort_ascending(result)
-		elif level == "bestmatch-visible":
-			result = None
-			for db in self._dbs:
-				if hasattr(db, "xmatch"):
-					cpv = db.xmatch("bestmatch-visible", atom)
-					if not cpv or portage.cpv_getkey(cpv) != cp:
-						continue
-					if not result or cpv == portage.best([cpv, result]):
-						result = cpv
-				else:
-					db_keys = Package.metadata_keys
-					# break out of this loop with highest visible
-					# match, checked in descending order
-					for cpv in reversed(db.match(atom)):
-						if portage.cpv_getkey(cpv) != cp:
-							continue
-						metadata = izip(db_keys,
-							db.aux_get(cpv, db_keys))
-						if not self._visible(db, cpv, metadata):
-							continue
-						if not result or cpv == portage.best([cpv, result]):
-							result = cpv
-						break
-		else:
-			raise NotImplementedError(level)
-		return result
-
-	def execute(self,searchkey):
-		"""Performs the search for the supplied search key"""
-		match_category = 0
-		self.searchkey=searchkey
-		self.packagematches = []
-		if self.searchdesc:
-			self.searchdesc=1
-			self.matches = {"pkg":[], "desc":[], "set":[]}
-		else:
-			self.searchdesc=0
-			self.matches = {"pkg":[], "set":[]}
-		print "Searching...   ",
-
-		regexsearch = False
-		if self.searchkey.startswith('%'):
-			regexsearch = True
-			self.searchkey = self.searchkey[1:]
-		if self.searchkey.startswith('@'):
-			match_category = 1
-			self.searchkey = self.searchkey[1:]
-		if regexsearch:
-			self.searchre=re.compile(self.searchkey,re.I)
-		else:
-			self.searchre=re.compile(re.escape(self.searchkey), re.I)
-		for package in self.portdb.cp_all():
-			self.spinner.update()
-
-			if match_category:
-				match_string  = package[:]
-			else:
-				match_string  = package.split("/")[-1]
-
-			masked=0
-			if self.searchre.search(match_string):
-				if not self.portdb.xmatch("match-visible", package):
-					masked=1
-				self.matches["pkg"].append([package,masked])
-			elif self.searchdesc: # DESCRIPTION searching
-				full_package = self.portdb.xmatch("bestmatch-visible", package)
-				if not full_package:
-					#no match found; we don't want to query description
-					full_package = portage.best(
-						self.portdb.xmatch("match-all", package))
-					if not full_package:
-						continue
-					else:
-						masked=1
-				try:
-					full_desc = self.portdb.aux_get(
-						full_package, ["DESCRIPTION"])[0]
-				except KeyError:
-					print "emerge: search: aux_get() failed, skipping"
-					continue
-				if self.searchre.search(full_desc):
-					self.matches["desc"].append([full_package,masked])
-
-		self.sdict = self.setconfig.getSets()
-		for setname in self.sdict:
-			self.spinner.update()
-			if match_category:
-				match_string = setname
-			else:
-				match_string = setname.split("/")[-1]
-			
-			if self.searchre.search(match_string):
-				self.matches["set"].append([setname, False])
-			elif self.searchdesc:
-				if self.searchre.search(
-					self.sdict[setname].getMetadata("DESCRIPTION")):
-					self.matches["set"].append([setname, False])
-			
-		self.mlen=0
-		for mtype in self.matches:
-			self.matches[mtype].sort()
-			self.mlen += len(self.matches[mtype])
-
-	def addCP(self, cp):
-		if not self.portdb.xmatch("match-all", cp):
-			return
-		masked = 0
-		if not self.portdb.xmatch("bestmatch-visible", cp):
-			masked = 1
-		self.matches["pkg"].append([cp, masked])
-		self.mlen += 1
-
-	def output(self):
-		"""Outputs the results of the search."""
-		print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
-		print "[ Applications found : "+white(str(self.mlen))+" ]"
-		print " "
-		vardb = self.vartree.dbapi
-		for mtype in self.matches:
-			for match,masked in self.matches[mtype]:
-				full_package = None
-				if mtype == "pkg":
-					catpack = match
-					full_package = self.portdb.xmatch(
-						"bestmatch-visible", match)
-					if not full_package:
-						#no match found; we don't want to query description
-						masked=1
-						full_package = portage.best(
-							self.portdb.xmatch("match-all",match))
-				elif mtype == "desc":
-					full_package = match
-					match        = portage.cpv_getkey(match)
-				elif mtype == "set":
-					print green("*")+"  "+white(match)
-					print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
-					print
-				if full_package:
-					try:
-						desc, homepage, license = self.portdb.aux_get(
-							full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
-					except KeyError:
-						print "emerge: search: aux_get() failed, skipping"
-						continue
-					if masked:
-						print green("*")+"  "+white(match)+" "+red("[ Masked ]")
-					else:
-						print green("*")+"  "+white(match)
-					myversion = self.getVersion(full_package, search.VERSION_RELEASE)
-
-					mysum = [0,0]
-					file_size_str = None
-					mycat = match.split("/")[0]
-					mypkg = match.split("/")[1]
-					mycpv = match + "-" + myversion
-					myebuild = self.portdb.findname(mycpv)
-					if myebuild:
-						pkgdir = os.path.dirname(myebuild)
-						from portage import manifest
-						mf = manifest.Manifest(
-							pkgdir, self.settings["DISTDIR"])
-						try:
-							uri_map = self.portdb.getFetchMap(mycpv)
-						except portage.exception.InvalidDependString, e:
-							file_size_str = "Unknown (%s)" % (e,)
-							del e
-						else:
-							try:
-								mysum[0] = mf.getDistfilesSize(uri_map)
-							except KeyError, e:
-								file_size_str = "Unknown (missing " + \
-									"digest for %s)" % (e,)
-								del e
-
-					available = False
-					for db in self._dbs:
-						if db is not vardb and \
-							db.cpv_exists(mycpv):
-							available = True
-							if not myebuild and hasattr(db, "bintree"):
-								myebuild = db.bintree.getname(mycpv)
-								try:
-									mysum[0] = os.stat(myebuild).st_size
-								except OSError:
-									myebuild = None
-							break
-
-					if myebuild and file_size_str is None:
-						mystr = str(mysum[0] / 1024)
-						mycount = len(mystr)
-						while (mycount > 3):
-							mycount -= 3
-							mystr = mystr[:mycount] + "," + mystr[mycount:]
-						file_size_str = mystr + " kB"
-
-					if self.verbose:
-						if available:
-							print "     ", darkgreen("Latest version available:"),myversion
-						print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
-						if myebuild:
-							print "      %s %s" % \
-								(darkgreen("Size of files:"), file_size_str)
-						print "     ", darkgreen("Homepage:")+"     ",homepage
-						print "     ", darkgreen("Description:")+"  ",desc
-						print "     ", darkgreen("License:")+"      ",license
-						print
-	#
-	# private interface
-	#
-	def getInstallationStatus(self,package):
-		installed_package = self.vartree.dep_bestmatch(package)
-		result = ""
-		version = self.getVersion(installed_package,search.VERSION_RELEASE)
-		if len(version) > 0:
-			result = darkgreen("Latest version installed:")+" "+version
-		else:
-			result = darkgreen("Latest version installed:")+" [ Not Installed ]"
-		return result
-
-	def getVersion(self,full_package,detail):
-		if len(full_package) > 1:
-			package_parts = portage.catpkgsplit(full_package)
-			if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
-				result = package_parts[2]+ "-" + package_parts[3]
-			else:
-				result = package_parts[2]
-		else:
-			result = ""
-		return result
-
-class RootConfig(object):
-	"""This is used internally by depgraph to track information about a
-	particular $ROOT."""
-
-	pkg_tree_map = {
-		"ebuild"    : "porttree",
-		"binary"    : "bintree",
-		"installed" : "vartree"
-	}
-
-	tree_pkg_map = {}
-	for k, v in pkg_tree_map.iteritems():
-		tree_pkg_map[v] = k
-
-	def __init__(self, settings, trees, setconfig):
-		self.trees = trees
-		self.settings = settings
-		self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
-		self.root = self.settings["ROOT"]
-		self.setconfig = setconfig
-		if setconfig is None:
-			self.sets = {}
-		else:
-			self.sets = self.setconfig.getSets()
-		self.visible_pkgs = PackageVirtualDbapi(self.settings)
-
 def create_world_atom(pkg, args_set, root_config):
 	"""Create a new atom for the world file if one does not exist.  If the
 	argument atom is precise enough to identify a specific slot then a slot
@@ -854,266 +376,6 @@
 
 	return deep_system_deps
 
-class FakeVartree(portage.vartree):
-	"""This is implements an in-memory copy of a vartree instance that provides
-	all the interfaces required for use by the depgraph.  The vardb is locked
-	during the constructor call just long enough to read a copy of the
-	installed package information.  This allows the depgraph to do it's
-	dependency calculations without holding a lock on the vardb.  It also
-	allows things like vardb global updates to be done in memory so that the
-	user doesn't necessarily need write access to the vardb in cases where
-	global updates are necessary (updates are performed when necessary if there
-	is not a matching ebuild in the tree)."""
-	def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
-		self._root_config = root_config
-		if pkg_cache is None:
-			pkg_cache = {}
-		real_vartree = root_config.trees["vartree"]
-		portdb = root_config.trees["porttree"].dbapi
-		self.root = real_vartree.root
-		self.settings = real_vartree.settings
-		mykeys = list(real_vartree.dbapi._aux_cache_keys)
-		if "_mtime_" not in mykeys:
-			mykeys.append("_mtime_")
-		self._db_keys = mykeys
-		self._pkg_cache = pkg_cache
-		self.dbapi = PackageVirtualDbapi(real_vartree.settings)
-		vdb_path = os.path.join(self.root, portage.VDB_PATH)
-		try:
-			# At least the parent needs to exist for the lock file.
-			portage.util.ensure_dirs(vdb_path)
-		except portage.exception.PortageException:
-			pass
-		vdb_lock = None
-		try:
-			if acquire_lock and os.access(vdb_path, os.W_OK):
-				vdb_lock = portage.locks.lockdir(vdb_path)
-			real_dbapi = real_vartree.dbapi
-			slot_counters = {}
-			for cpv in real_dbapi.cpv_all():
-				cache_key = ("installed", self.root, cpv, "nomerge")
-				pkg = self._pkg_cache.get(cache_key)
-				if pkg is not None:
-					metadata = pkg.metadata
-				else:
-					metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
-				myslot = metadata["SLOT"]
-				mycp = portage.dep_getkey(cpv)
-				myslot_atom = "%s:%s" % (mycp, myslot)
-				try:
-					mycounter = long(metadata["COUNTER"])
-				except ValueError:
-					mycounter = 0
-					metadata["COUNTER"] = str(mycounter)
-				other_counter = slot_counters.get(myslot_atom, None)
-				if other_counter is not None:
-					if other_counter > mycounter:
-						continue
-				slot_counters[myslot_atom] = mycounter
-				if pkg is None:
-					pkg = Package(built=True, cpv=cpv,
-						installed=True, metadata=metadata,
-						root_config=root_config, type_name="installed")
-				self._pkg_cache[pkg] = pkg
-				self.dbapi.cpv_inject(pkg)
-			real_dbapi.flush_cache()
-		finally:
-			if vdb_lock:
-				portage.locks.unlockdir(vdb_lock)
-		# Populate the old-style virtuals using the cached values.
-		if not self.settings.treeVirtuals:
-			self.settings.treeVirtuals = portage.util.map_dictlist_vals(
-				portage.getCPFromCPV, self.get_all_provides())
-
-		# Initialize variables needed for lazy cache pulls of the live ebuild
-		# metadata.  This ensures that the vardb lock is released ASAP, without
-		# being delayed in case cache generation is triggered.
-		self._aux_get = self.dbapi.aux_get
-		self.dbapi.aux_get = self._aux_get_wrapper
-		self._match = self.dbapi.match
-		self.dbapi.match = self._match_wrapper
-		self._aux_get_history = set()
-		self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
-		self._portdb = portdb
-		self._global_updates = None
-
-	def _match_wrapper(self, cpv, use_cache=1):
-		"""
-		Make sure the metadata in Package instances gets updated for any
-		cpv that is returned from a match() call, since the metadata can
-		be accessed directly from the Package instance instead of via
-		aux_get().
-		"""
-		matches = self._match(cpv, use_cache=use_cache)
-		for cpv in matches:
-			if cpv in self._aux_get_history:
-				continue
-			self._aux_get_wrapper(cpv, [])
-		return matches
-
-	def _aux_get_wrapper(self, pkg, wants):
-		if pkg in self._aux_get_history:
-			return self._aux_get(pkg, wants)
-		self._aux_get_history.add(pkg)
-		try:
-			# Use the live ebuild metadata if possible.
-			live_metadata = dict(izip(self._portdb_keys,
-				self._portdb.aux_get(pkg, self._portdb_keys)))
-			if not portage.eapi_is_supported(live_metadata["EAPI"]):
-				raise KeyError(pkg)
-			self.dbapi.aux_update(pkg, live_metadata)
-		except (KeyError, portage.exception.PortageException):
-			if self._global_updates is None:
-				self._global_updates = \
-					grab_global_updates(self._portdb.porttree_root)
-			perform_global_updates(
-				pkg, self.dbapi, self._global_updates)
-		return self._aux_get(pkg, wants)
-
-	def sync(self, acquire_lock=1):
-		"""
-		Call this method to synchronize state with the real vardb
-		after one or more packages may have been installed or
-		uninstalled.
-		"""
-		vdb_path = os.path.join(self.root, portage.VDB_PATH)
-		try:
-			# At least the parent needs to exist for the lock file.
-			portage.util.ensure_dirs(vdb_path)
-		except portage.exception.PortageException:
-			pass
-		vdb_lock = None
-		try:
-			if acquire_lock and os.access(vdb_path, os.W_OK):
-				vdb_lock = portage.locks.lockdir(vdb_path)
-			self._sync()
-		finally:
-			if vdb_lock:
-				portage.locks.unlockdir(vdb_lock)
-
-	def _sync(self):
-
-		real_vardb = self._root_config.trees["vartree"].dbapi
-		current_cpv_set = frozenset(real_vardb.cpv_all())
-		pkg_vardb = self.dbapi
-		aux_get_history = self._aux_get_history
-
-		# Remove any packages that have been uninstalled.
-		for pkg in list(pkg_vardb):
-			if pkg.cpv not in current_cpv_set:
-				pkg_vardb.cpv_remove(pkg)
-				aux_get_history.discard(pkg.cpv)
-
-		# Validate counters and timestamps.
-		slot_counters = {}
-		root = self.root
-		validation_keys = ["COUNTER", "_mtime_"]
-		for cpv in current_cpv_set:
-
-			pkg_hash_key = ("installed", root, cpv, "nomerge")
-			pkg = pkg_vardb.get(pkg_hash_key)
-			if pkg is not None:
-				counter, mtime = real_vardb.aux_get(cpv, validation_keys)
-				try:
-					counter = long(counter)
-				except ValueError:
-					counter = 0
-
-				if counter != pkg.counter or \
-					mtime != pkg.mtime:
-					pkg_vardb.cpv_remove(pkg)
-					aux_get_history.discard(pkg.cpv)
-					pkg = None
-
-			if pkg is None:
-				pkg = self._pkg(cpv)
-
-			other_counter = slot_counters.get(pkg.slot_atom)
-			if other_counter is not None:
-				if other_counter > pkg.counter:
-					continue
-
-			slot_counters[pkg.slot_atom] = pkg.counter
-			pkg_vardb.cpv_inject(pkg)
-
-		real_vardb.flush_cache()
-
-	def _pkg(self, cpv):
-		root_config = self._root_config
-		real_vardb = root_config.trees["vartree"].dbapi
-		pkg = Package(cpv=cpv, installed=True,
-			metadata=izip(self._db_keys,
-			real_vardb.aux_get(cpv, self._db_keys)),
-			root_config=root_config,
-			type_name="installed")
-
-		try:
-			mycounter = long(pkg.metadata["COUNTER"])
-		except ValueError:
-			mycounter = 0
-			pkg.metadata["COUNTER"] = str(mycounter)
-
-		return pkg
-
-def grab_global_updates(portdir):
-	from portage.update import grab_updates, parse_updates
-	updpath = os.path.join(portdir, "profiles", "updates")
-	try:
-		rawupdates = grab_updates(updpath)
-	except portage.exception.DirectoryNotFound:
-		rawupdates = []
-	upd_commands = []
-	for mykey, mystat, mycontent in rawupdates:
-		commands, errors = parse_updates(mycontent)
-		upd_commands.extend(commands)
-	return upd_commands
-
-def perform_global_updates(mycpv, mydb, mycommands):
-	from portage.update import update_dbentries
-	aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
-	aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
-	updates = update_dbentries(mycommands, aux_dict)
-	if updates:
-		mydb.aux_update(mycpv, updates)
-
-def visible(pkgsettings, pkg):
-	"""
-	Check if a package is visible. This can raise an InvalidDependString
-	exception if LICENSE is invalid.
-	TODO: optionally generate a list of masking reasons
-	@rtype: Boolean
-	@returns: True if the package is visible, False otherwise.
-	"""
-	if not pkg.metadata["SLOT"]:
-		return False
-	if not pkg.installed:
-		if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
-			return False
-	if pkg.built and not pkg.installed:
-		# we can have an old binary which has no EPREFIX information
-		if "EPREFIX" not in pkg.metadata or not pkg.metadata["EPREFIX"]:
-			return False
-		if len(pkg.metadata["EPREFIX"].strip()) < len(pkgsettings["EPREFIX"]):
-			return False
-	eapi = pkg.metadata["EAPI"]
-	if not portage.eapi_is_supported(eapi):
-		return False
-	if not pkg.installed:
-		if portage._eapi_is_deprecated(eapi):
-			return False
-		if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
-			return False
-	if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
-		return False
-	if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
-		return False
-	try:
-		if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
-			return False
-	except portage.exception.InvalidDependString:
-		return False
-	return True
-
 def get_masking_status(pkg, pkgsettings, root_config):
 
 	mreasons = portage.getmaskingstatus(
@@ -1212,487 +474,6 @@
 			shown_licenses.add(l)
 	return have_eapi_mask
 
-class Package(Task):
-
-	__hash__ = Task.__hash__
-	__slots__ = ("built", "cpv", "depth",
-		"installed", "metadata", "onlydeps", "operation",
-		"root_config", "type_name",
-		"category", "counter", "cp", "cpv_split",
-		"inherited", "iuse", "mtime",
-		"pf", "pv_split", "root", "slot", "slot_atom", "use")
-
-	metadata_keys = [
-		"CHOST", "COUNTER", "DEPEND", "EAPI",
-		"INHERITED", "IUSE", "KEYWORDS",
-		"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
-		"repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_",
-		"EPREFIX" ]
-
-	def __init__(self, **kwargs):
-		Task.__init__(self, **kwargs)
-		self.root = self.root_config.root
-		self.metadata = _PackageMetadataWrapper(self, self.metadata)
-		self.cp = portage.cpv_getkey(self.cpv)
-		slot = self.slot
-		if not slot:
-			# Avoid an InvalidAtom exception when creating slot_atom.
-			# This package instance will be masked due to empty SLOT.
-			slot = '0'
-		self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
-		self.category, self.pf = portage.catsplit(self.cpv)
-		self.cpv_split = portage.catpkgsplit(self.cpv)
-		self.pv_split = self.cpv_split[1:]
-
-	class _use(object):
-
-		__slots__ = ("__weakref__", "enabled")
-
-		def __init__(self, use):
-			self.enabled = frozenset(use)
-
-	class _iuse(object):
-
-		__slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
-
-		def __init__(self, tokens, iuse_implicit):
-			self.tokens = tuple(tokens)
-			self.iuse_implicit = iuse_implicit
-			enabled = []
-			disabled = []
-			other = []
-			for x in tokens:
-				prefix = x[:1]
-				if prefix == "+":
-					enabled.append(x[1:])
-				elif prefix == "-":
-					disabled.append(x[1:])
-				else:
-					other.append(x)
-			self.enabled = frozenset(enabled)
-			self.disabled = frozenset(disabled)
-			self.all = frozenset(chain(enabled, disabled, other))
-
-		def __getattribute__(self, name):
-			if name == "regex":
-				try:
-					return object.__getattribute__(self, "regex")
-				except AttributeError:
-					all = object.__getattribute__(self, "all")
-					iuse_implicit = object.__getattribute__(self, "iuse_implicit")
-					# Escape anything except ".*" which is supposed
-					# to pass through from _get_implicit_iuse()
-					regex = (re.escape(x) for x in chain(all, iuse_implicit))
-					regex = "^(%s)$" % "|".join(regex)
-					regex = regex.replace("\\.\\*", ".*")
-					self.regex = re.compile(regex)
-			return object.__getattribute__(self, name)
-
-	def _get_hash_key(self):
-		hash_key = getattr(self, "_hash_key", None)
-		if hash_key is None:
-			if self.operation is None:
-				self.operation = "merge"
-				if self.onlydeps or self.installed:
-					self.operation = "nomerge"
-			self._hash_key = \
-				(self.type_name, self.root, self.cpv, self.operation)
-		return self._hash_key
-
-	def __lt__(self, other):
-		if other.cp != self.cp:
-			return False
-		if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
-			return True
-		return False
-
-	def __le__(self, other):
-		if other.cp != self.cp:
-			return False
-		if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
-			return True
-		return False
-
-	def __gt__(self, other):
-		if other.cp != self.cp:
-			return False
-		if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
-			return True
-		return False
-
-	def __ge__(self, other):
-		if other.cp != self.cp:
-			return False
-		if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
-			return True
-		return False
-
-_all_metadata_keys = set(x for x in portage.auxdbkeys \
-	if not x.startswith("UNUSED_"))
-_all_metadata_keys.discard("CDEPEND")
-_all_metadata_keys.update(Package.metadata_keys)
-
-from portage.cache.mappings import slot_dict_class
-_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
-
-class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
-	"""
-	Detect metadata updates and synchronize Package attributes.
-	"""
-
-	__slots__ = ("_pkg",)
-	_wrapped_keys = frozenset(
-		["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
-
-	def __init__(self, pkg, metadata):
-		_PackageMetadataWrapperBase.__init__(self)
-		self._pkg = pkg
-		self.update(metadata)
-
-	def __setitem__(self, k, v):
-		_PackageMetadataWrapperBase.__setitem__(self, k, v)
-		if k in self._wrapped_keys:
-			getattr(self, "_set_" + k.lower())(k, v)
-
-	def _set_inherited(self, k, v):
-		if isinstance(v, basestring):
-			v = frozenset(v.split())
-		self._pkg.inherited = v
-
-	def _set_iuse(self, k, v):
-		self._pkg.iuse = self._pkg._iuse(
-			v.split(), self._pkg.root_config.iuse_implicit)
-
-	def _set_slot(self, k, v):
-		self._pkg.slot = v
-
-	def _set_use(self, k, v):
-		self._pkg.use = self._pkg._use(v.split())
-
-	def _set_counter(self, k, v):
-		if isinstance(v, basestring):
-			try:
-				v = long(v.strip())
-			except ValueError:
-				v = 0
-		self._pkg.counter = v
-
-	def _set__mtime_(self, k, v):
-		if isinstance(v, basestring):
-			try:
-				v = long(v.strip())
-			except ValueError:
-				v = 0
-		self._pkg.mtime = v
-
-class PackageUninstall(AsynchronousTask):
-
-	__slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
-
-	def _start(self):
-		try:
-			unmerge(self.pkg.root_config, self.opts, "unmerge",
-				[self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
-				clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
-				writemsg_level=self._writemsg_level)
-		except UninstallFailure, e:
-			self.returncode = e.status
-		else:
-			self.returncode = os.EX_OK
-		self.wait()
-
-	def _writemsg_level(self, msg, level=0, noiselevel=0):
-
-		log_path = self.settings.get("PORTAGE_LOG_FILE")
-		background = self.background
-
-		if log_path is None:
-			if not (background and level < logging.WARNING):
-				portage.util.writemsg_level(msg,
-					level=level, noiselevel=noiselevel)
-		else:
-			if not background:
-				portage.util.writemsg_level(msg,
-					level=level, noiselevel=noiselevel)
-
-			f = open(log_path, 'a')
-			try:
-				f.write(msg)
-			finally:
-				f.close()
-
-class MergeListItem(CompositeTask):
-
-	"""
-	TODO: For parallel scheduling, everything here needs asynchronous
-	execution support (start, poll, and wait methods).
-	"""
-
-	__slots__ = ("args_set",
-		"binpkg_opts", "build_opts", "config_pool", "emerge_opts",
-		"find_blockers", "logger", "mtimedb", "pkg",
-		"pkg_count", "pkg_to_replace", "prefetcher",
-		"settings", "statusMessage", "world_atom") + \
-		("_install_task",)
-
-	def _start(self):
-
-		pkg = self.pkg
-		build_opts = self.build_opts
-
-		if pkg.installed:
-			# uninstall, executed by self.merge()
-			self.returncode = os.EX_OK
-			self.wait()
-			return
-
-		args_set = self.args_set
-		find_blockers = self.find_blockers
-		logger = self.logger
-		mtimedb = self.mtimedb
-		pkg_count = self.pkg_count
-		scheduler = self.scheduler
-		settings = self.settings
-		world_atom = self.world_atom
-		ldpath_mtimes = mtimedb["ldpath"]
-
-		action_desc = "Emerging"
-		preposition = "for"
-		if pkg.type_name == "binary":
-			action_desc += " binary"
-
-		if build_opts.fetchonly:
-			action_desc = "Fetching"
-
-		msg = "%s (%s of %s) %s" % \
-			(action_desc,
-			colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
-			colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
-			colorize("GOOD", pkg.cpv))
-
-		portdb = pkg.root_config.trees["porttree"].dbapi
-		portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
-		if portdir_repo_name:
-			pkg_repo_name = pkg.metadata.get("repository")
-			if pkg_repo_name != portdir_repo_name:
-				if not pkg_repo_name:
-					pkg_repo_name = "unknown repo"
-				msg += " from %s" % pkg_repo_name
-
-		if pkg.root != "/":
-			msg += " %s %s" % (preposition, pkg.root)
-
-		if not build_opts.pretend:
-			self.statusMessage(msg)
-			logger.log(" >>> emerge (%s of %s) %s to %s" % \
-				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
-
-		if pkg.type_name == "ebuild":
-
-			build = EbuildBuild(args_set=args_set,
-				background=self.background,
-				config_pool=self.config_pool,
-				find_blockers=find_blockers,
-				ldpath_mtimes=ldpath_mtimes, logger=logger,
-				opts=build_opts, pkg=pkg, pkg_count=pkg_count,
-				prefetcher=self.prefetcher, scheduler=scheduler,
-				settings=settings, world_atom=world_atom)
-
-			self._install_task = build
-			self._start_task(build, self._default_final_exit)
-			return
-
-		elif pkg.type_name == "binary":
-
-			binpkg = Binpkg(background=self.background,
-				find_blockers=find_blockers,
-				ldpath_mtimes=ldpath_mtimes, logger=logger,
-				opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
-				prefetcher=self.prefetcher, settings=settings,
-				scheduler=scheduler, world_atom=world_atom)
-
-			self._install_task = binpkg
-			self._start_task(binpkg, self._default_final_exit)
-			return
-
-	def _poll(self):
-		self._install_task.poll()
-		return self.returncode
-
-	def _wait(self):
-		self._install_task.wait()
-		return self.returncode
-
-	def merge(self):
-
-		pkg = self.pkg
-		build_opts = self.build_opts
-		find_blockers = self.find_blockers
-		logger = self.logger
-		mtimedb = self.mtimedb
-		pkg_count = self.pkg_count
-		prefetcher = self.prefetcher
-		scheduler = self.scheduler
-		settings = self.settings
-		world_atom = self.world_atom
-		ldpath_mtimes = mtimedb["ldpath"]
-
-		if pkg.installed:
-			if not (build_opts.buildpkgonly or \
-				build_opts.fetchonly or build_opts.pretend):
-
-				uninstall = PackageUninstall(background=self.background,
-					ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
-					pkg=pkg, scheduler=scheduler, settings=settings)
-
-				uninstall.start()
-				retval = uninstall.wait()
-				if retval != os.EX_OK:
-					return retval
-			return os.EX_OK
-
-		if build_opts.fetchonly or \
-			build_opts.buildpkgonly:
-			return self.returncode
-
-		retval = self._install_task.install()
-		return retval
-
-class BlockerDB(object):
-
-	def __init__(self, root_config):
-		self._root_config = root_config
-		self._vartree = root_config.trees["vartree"]
-		self._portdb = root_config.trees["porttree"].dbapi
-
-		self._dep_check_trees = None
-		self._fake_vartree = None
-
-	def _get_fake_vartree(self, acquire_lock=0):
-		fake_vartree = self._fake_vartree
-		if fake_vartree is None:
-			fake_vartree = FakeVartree(self._root_config,
-				acquire_lock=acquire_lock)
-			self._fake_vartree = fake_vartree
-			self._dep_check_trees = { self._vartree.root : {
-				"porttree"    :  fake_vartree,
-				"vartree"     :  fake_vartree,
-			}}
-		else:
-			fake_vartree.sync(acquire_lock=acquire_lock)
-		return fake_vartree
-
-	def findInstalledBlockers(self, new_pkg, acquire_lock=0):
-		blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
-		dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
-		settings = self._vartree.settings
-		stale_cache = set(blocker_cache)
-		fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
-		dep_check_trees = self._dep_check_trees
-		vardb = fake_vartree.dbapi
-		installed_pkgs = list(vardb)
-
-		for inst_pkg in installed_pkgs:
-			stale_cache.discard(inst_pkg.cpv)
-			cached_blockers = blocker_cache.get(inst_pkg.cpv)
-			if cached_blockers is not None and \
-				cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
-				cached_blockers = None
-			if cached_blockers is not None:
-				blocker_atoms = cached_blockers.atoms
-			else:
-				# Use aux_get() to trigger FakeVartree global
-				# updates on *DEPEND when appropriate.
-				depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
-				try:
-					portage.dep._dep_check_strict = False
-					success, atoms = portage.dep_check(depstr,
-						vardb, settings, myuse=inst_pkg.use.enabled,
-						trees=dep_check_trees, myroot=inst_pkg.root)
-				finally:
-					portage.dep._dep_check_strict = True
-				if not success:
-					pkg_location = os.path.join(inst_pkg.root,
-						portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
-					portage.writemsg("!!! %s/*DEPEND: %s\n" % \
-						(pkg_location, atoms), noiselevel=-1)
-					continue
-
-				blocker_atoms = [atom for atom in atoms \
-					if atom.startswith("!")]
-				blocker_atoms.sort()
-				counter = long(inst_pkg.metadata["COUNTER"])
-				blocker_cache[inst_pkg.cpv] = \
-					blocker_cache.BlockerData(counter, blocker_atoms)
-		for cpv in stale_cache:
-			del blocker_cache[cpv]
-		blocker_cache.flush()
-
-		blocker_parents = digraph()
-		blocker_atoms = []
-		for pkg in installed_pkgs:
-			for blocker_atom in blocker_cache[pkg.cpv].atoms:
-				blocker_atom = blocker_atom.lstrip("!")
-				blocker_atoms.append(blocker_atom)
-				blocker_parents.add(blocker_atom, pkg)
-
-		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
-		blocking_pkgs = set()
-		for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
-			blocking_pkgs.update(blocker_parents.parent_nodes(atom))
-
-		# Check for blockers in the other direction.
-		depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
-		try:
-			portage.dep._dep_check_strict = False
-			success, atoms = portage.dep_check(depstr,
-				vardb, settings, myuse=new_pkg.use.enabled,
-				trees=dep_check_trees, myroot=new_pkg.root)
-		finally:
-			portage.dep._dep_check_strict = True
-		if not success:
-			# We should never get this far with invalid deps.
-			show_invalid_depstring_notice(new_pkg, depstr, atoms)
-			assert False
-
-		blocker_atoms = [atom.lstrip("!") for atom in atoms \
-			if atom[:1] == "!"]
-		if blocker_atoms:
-			blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
-			for inst_pkg in installed_pkgs:
-				try:
-					blocker_atoms.iterAtomsForPackage(inst_pkg).next()
-				except (portage.exception.InvalidDependString, StopIteration):
-					continue
-				blocking_pkgs.add(inst_pkg)
-
-		return blocking_pkgs
-
-def show_invalid_depstring_notice(parent_node, depstring, error_msg):
-
-	msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
-		"\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
-	p_type, p_root, p_key, p_status = parent_node
-	msg = []
-	if p_status == "nomerge":
-		category, pf = portage.catsplit(p_key)
-		pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
-		msg.append("Portage is unable to process the dependencies of the ")
-		msg.append("'%s' package. " % p_key)
-		msg.append("In order to correct this problem, the package ")
-		msg.append("should be uninstalled, reinstalled, or upgraded. ")
-		msg.append("As a temporary workaround, the --nodeps option can ")
-		msg.append("be used to ignore all dependencies.  For reference, ")
-		msg.append("the problematic dependencies can be found in the ")
-		msg.append("*DEPEND files located in '%s/'." % pkg_location)
-	else:
-		msg.append("This package cannot be installed. ")
-		msg.append("Please notify the '%s' package maintainer " % p_key)
-		msg.append("about this problem.")
-
-	msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
-	writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
-
 class depgraph(object):
 
 	pkg_tree_map = RootConfig.pkg_tree_map
@@ -6400,78 +5181,6 @@
 			metadata = self._cpv_pkg_map[cpv].metadata
 			return [metadata.get(x, "") for x in wants]
 
-class PackageCounters(object):
-
-	def __init__(self):
-		self.upgrades   = 0
-		self.downgrades = 0
-		self.new        = 0
-		self.newslot    = 0
-		self.reinst     = 0
-		self.uninst     = 0
-		self.blocks     = 0
-		self.blocks_satisfied         = 0
-		self.totalsize  = 0
-		self.restrict_fetch           = 0
-		self.restrict_fetch_satisfied = 0
-		self.interactive              = 0
-
-	def __str__(self):
-		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
-		myoutput = []
-		details = []
-		myoutput.append("Total: %s package" % total_installs)
-		if total_installs != 1:
-			myoutput.append("s")
-		if total_installs != 0:
-			myoutput.append(" (")
-		if self.upgrades > 0:
-			details.append("%s upgrade" % self.upgrades)
-			if self.upgrades > 1:
-				details[-1] += "s"
-		if self.downgrades > 0:
-			details.append("%s downgrade" % self.downgrades)
-			if self.downgrades > 1:
-				details[-1] += "s"
-		if self.new > 0:
-			details.append("%s new" % self.new)
-		if self.newslot > 0:
-			details.append("%s in new slot" % self.newslot)
-			if self.newslot > 1:
-				details[-1] += "s"
-		if self.reinst > 0:
-			details.append("%s reinstall" % self.reinst)
-			if self.reinst > 1:
-				details[-1] += "s"
-		if self.uninst > 0:
-			details.append("%s uninstall" % self.uninst)
-			if self.uninst > 1:
-				details[-1] += "s"
-		if self.interactive > 0:
-			details.append("%s %s" % (self.interactive,
-				colorize("WARN", "interactive")))
-		myoutput.append(", ".join(details))
-		if total_installs != 0:
-			myoutput.append(")")
-		myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
-		if self.restrict_fetch:
-			myoutput.append("\nFetch Restriction: %s package" % \
-				self.restrict_fetch)
-			if self.restrict_fetch > 1:
-				myoutput.append("s")
-		if self.restrict_fetch_satisfied < self.restrict_fetch:
-			myoutput.append(bad(" (%s unsatisfied)") % \
-				(self.restrict_fetch - self.restrict_fetch_satisfied))
-		if self.blocks > 0:
-			myoutput.append("\nConflict: %s block" % \
-				self.blocks)
-			if self.blocks > 1:
-				myoutput.append("s")
-			if self.blocks_satisfied < self.blocks:
-				myoutput.append(bad(" (%s unsatisfied)") % \
-					(self.blocks - self.blocks_satisfied))
-		return "".join(myoutput)
-
 class Scheduler(PollScheduler):
 
 	_opts_ignore_blockers = \
@@ -8065,663 +6774,6 @@
 
 		return pkg
 
-class MetadataRegen(PollScheduler):
-
-	def __init__(self, portdb, cp_iter=None, consumer=None,
-		max_jobs=None, max_load=None):
-		PollScheduler.__init__(self)
-		self._portdb = portdb
-		self._global_cleanse = False
-		if cp_iter is None:
-			cp_iter = self._iter_every_cp()
-			# We can globally cleanse stale cache only if we
-			# iterate over every single cp.
-			self._global_cleanse = True
-		self._cp_iter = cp_iter
-		self._consumer = consumer
-
-		if max_jobs is None:
-			max_jobs = 1
-
-		self._max_jobs = max_jobs
-		self._max_load = max_load
-		self._sched_iface = self._sched_iface_class(
-			register=self._register,
-			schedule=self._schedule_wait,
-			unregister=self._unregister)
-
-		self._valid_pkgs = set()
-		self._cp_set = set()
-		self._process_iter = self._iter_metadata_processes()
-		self.returncode = os.EX_OK
-		self._error_count = 0
-
-	def _iter_every_cp(self):
-		every_cp = self._portdb.cp_all()
-		every_cp.sort(reverse=True)
-		try:
-			while True:
-				yield every_cp.pop()
-		except IndexError:
-			pass
-
-	def _iter_metadata_processes(self):
-		portdb = self._portdb
-		valid_pkgs = self._valid_pkgs
-		cp_set = self._cp_set
-		consumer = self._consumer
-
-		for cp in self._cp_iter:
-			cp_set.add(cp)
-			portage.writemsg_stdout("Processing %s\n" % cp)
-			cpv_list = portdb.cp_list(cp)
-			for cpv in cpv_list:
-				valid_pkgs.add(cpv)
-				ebuild_path, repo_path = portdb.findname2(cpv)
-				metadata, st, emtime = portdb._pull_valid_cache(
-					cpv, ebuild_path, repo_path)
-				if metadata is not None:
-					if consumer is not None:
-						consumer(cpv, ebuild_path,
-							repo_path, metadata)
-					continue
-
-				yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
-					ebuild_mtime=emtime,
-					metadata_callback=portdb._metadata_callback,
-					portdb=portdb, repo_path=repo_path,
-					settings=portdb.doebuild_settings)
-
-	def run(self):
-
-		portdb = self._portdb
-		from portage.cache.cache_errors import CacheError
-		dead_nodes = {}
-
-		while self._schedule():
-			self._poll_loop()
-
-		while self._jobs:
-			self._poll_loop()
-
-		if self._global_cleanse:
-			for mytree in portdb.porttrees:
-				try:
-					dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
-				except CacheError, e:
-					portage.writemsg("Error listing cache entries for " + \
-						"'%s': %s, continuing...\n" % (mytree, e),
-						noiselevel=-1)
-					del e
-					dead_nodes = None
-					break
-		else:
-			cp_set = self._cp_set
-			cpv_getkey = portage.cpv_getkey
-			for mytree in portdb.porttrees:
-				try:
-					dead_nodes[mytree] = set(cpv for cpv in \
-						portdb.auxdb[mytree].iterkeys() \
-						if cpv_getkey(cpv) in cp_set)
-				except CacheError, e:
-					portage.writemsg("Error listing cache entries for " + \
-						"'%s': %s, continuing...\n" % (mytree, e),
-						noiselevel=-1)
-					del e
-					dead_nodes = None
-					break
-
-		if dead_nodes:
-			for y in self._valid_pkgs:
-				for mytree in portdb.porttrees:
-					if portdb.findname2(y, mytree=mytree)[0]:
-						dead_nodes[mytree].discard(y)
-
-			for mytree, nodes in dead_nodes.iteritems():
-				auxdb = portdb.auxdb[mytree]
-				for y in nodes:
-					try:
-						del auxdb[y]
-					except (KeyError, CacheError):
-						pass
-
-	def _schedule_tasks(self):
-		"""
-		@rtype: bool
-		@returns: True if there may be remaining tasks to schedule,
-			False otherwise.
-		"""
-		while self._can_add_job():
-			try:
-				metadata_process = self._process_iter.next()
-			except StopIteration:
-				return False
-
-			self._jobs += 1
-			metadata_process.scheduler = self._sched_iface
-			metadata_process.addExitListener(self._metadata_exit)
-			metadata_process.start()
-		return True
-
-	def _metadata_exit(self, metadata_process):
-		self._jobs -= 1
-		if metadata_process.returncode != os.EX_OK:
-			self.returncode = 1
-			self._error_count += 1
-			self._valid_pkgs.discard(metadata_process.cpv)
-			portage.writemsg("Error processing %s, continuing...\n" % \
-				(metadata_process.cpv,), noiselevel=-1)
-
-		if self._consumer is not None:
-			# On failure, still notify the consumer (in this case the metadata
-			# argument is None).
-			self._consumer(metadata_process.cpv,
-				metadata_process.ebuild_path,
-				metadata_process.repo_path,
-				metadata_process.metadata)
-
-		self._schedule()
-
-def unmerge(root_config, myopts, unmerge_action,
-	unmerge_files, ldpath_mtimes, autoclean=0,
-	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
-	scheduler=None, writemsg_level=portage.util.writemsg_level):
-
-	if clean_world:
-		clean_world = myopts.get('--deselect') != 'n'
-	quiet = "--quiet" in myopts
-	settings = root_config.settings
-	sets = root_config.sets
-	vartree = root_config.trees["vartree"]
-	candidate_catpkgs=[]
-	global_unmerge=0
-	xterm_titles = "notitles" not in settings.features
-	out = portage.output.EOutput()
-	pkg_cache = {}
-	db_keys = list(vartree.dbapi._aux_cache_keys)
-
-	def _pkg(cpv):
-		pkg = pkg_cache.get(cpv)
-		if pkg is None:
-			pkg = Package(cpv=cpv, installed=True,
-				metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
-				root_config=root_config,
-				type_name="installed")
-			pkg_cache[cpv] = pkg
-		return pkg
-
-	vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
-	try:
-		# At least the parent needs to exist for the lock file.
-		portage.util.ensure_dirs(vdb_path)
-	except portage.exception.PortageException:
-		pass
-	vdb_lock = None
-	try:
-		if os.access(vdb_path, os.W_OK):
-			vdb_lock = portage.locks.lockdir(vdb_path)
-		realsyslist = sets["system"].getAtoms()
-		syslist = []
-		for x in realsyslist:
-			mycp = portage.dep_getkey(x)
-			if mycp in settings.getvirtuals():
-				providers = []
-				for provider in settings.getvirtuals()[mycp]:
-					if vartree.dbapi.match(provider):
-						providers.append(provider)
-				if len(providers) == 1:
-					syslist.extend(providers)
-			else:
-				syslist.append(mycp)
-	
-		mysettings = portage.config(clone=settings)
-	
-		if not unmerge_files:
-			if unmerge_action == "unmerge":
-				print
-				print bold("emerge unmerge") + " can only be used with specific package names"
-				print
-				return 0
-			else:
-				global_unmerge = 1
-	
-		localtree = vartree
-		# process all arguments and add all
-		# valid db entries to candidate_catpkgs
-		if global_unmerge:
-			if not unmerge_files:
-				candidate_catpkgs.extend(vartree.dbapi.cp_all())
-		else:
-			#we've got command-line arguments
-			if not unmerge_files:
-				print "\nNo packages to unmerge have been provided.\n"
-				return 0
-			for x in unmerge_files:
-				arg_parts = x.split('/')
-				if x[0] not in [".","/"] and \
-					arg_parts[-1][-7:] != ".ebuild":
-					#possible cat/pkg or dep; treat as such
-					candidate_catpkgs.append(x)
-				elif unmerge_action in ["prune","clean"]:
-					print "\n!!! Prune and clean do not accept individual" + \
-						" ebuilds as arguments;\n    skipping.\n"
-					continue
-				else:
-					# it appears that the user is specifying an installed
-					# ebuild and we're in "unmerge" mode, so it's ok.
-					if not os.path.exists(x):
-						print "\n!!! The path '"+x+"' doesn't exist.\n"
-						return 0
-	
-					absx   = os.path.abspath(x)
-					sp_absx = absx.split("/")
-					if sp_absx[-1][-7:] == ".ebuild":
-						del sp_absx[-1]
-						absx = "/".join(sp_absx)
-	
-					sp_absx_len = len(sp_absx)
-	
-					vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
-					vdb_len  = len(vdb_path)
-	
-					sp_vdb     = vdb_path.split("/")
-					sp_vdb_len = len(sp_vdb)
-	
-					if not os.path.exists(absx+"/CONTENTS"):
-						print "!!! Not a valid db dir: "+str(absx)
-						return 0
-	
-					if sp_absx_len <= sp_vdb_len:
-						# The Path is shorter... so it can't be inside the vdb.
-						print sp_absx
-						print absx
-						print "\n!!!",x,"cannot be inside "+ \
-							vdb_path+"; aborting.\n"
-						return 0
-	
-					for idx in range(0,sp_vdb_len):
-						if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
-							print sp_absx
-							print absx
-							print "\n!!!", x, "is not inside "+\
-								vdb_path+"; aborting.\n"
-							return 0
-	
-					print "="+"/".join(sp_absx[sp_vdb_len:])
-					candidate_catpkgs.append(
-						"="+"/".join(sp_absx[sp_vdb_len:]))
-	
-		newline=""
-		if (not "--quiet" in myopts):
-			newline="\n"
-		if settings["ROOT"] != "/":
-			writemsg_level(darkgreen(newline+ \
-				">>> Using system located in ROOT tree %s\n" % \
-				settings["ROOT"]))
-
-		if (("--pretend" in myopts) or ("--ask" in myopts)) and \
-			not ("--quiet" in myopts):
-			writemsg_level(darkgreen(newline+\
-				">>> These are the packages that would be unmerged:\n"))
-
-		# Preservation of order is required for --depclean and --prune so
-		# that dependencies are respected. Use all_selected to eliminate
-		# duplicate packages since the same package may be selected by
-		# multiple atoms.
-		pkgmap = []
-		all_selected = set()
-		for x in candidate_catpkgs:
-			# cycle through all our candidate deps and determine
-			# what will and will not get unmerged
-			try:
-				mymatch = vartree.dbapi.match(x)
-			except portage.exception.AmbiguousPackageName, errpkgs:
-				print "\n\n!!! The short ebuild name \"" + \
-					x + "\" is ambiguous.  Please specify"
-				print "!!! one of the following fully-qualified " + \
-					"ebuild names instead:\n"
-				for i in errpkgs[0]:
-					print "    " + green(i)
-				print
-				sys.exit(1)
-	
-			if not mymatch and x[0] not in "<>=~":
-				mymatch = localtree.dep_match(x)
-			if not mymatch:
-				portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
-					(x, unmerge_action), noiselevel=-1)
-				continue
-
-			pkgmap.append(
-				{"protected": set(), "selected": set(), "omitted": set()})
-			mykey = len(pkgmap) - 1
-			if unmerge_action=="unmerge":
-					for y in mymatch:
-						if y not in all_selected:
-							pkgmap[mykey]["selected"].add(y)
-							all_selected.add(y)
-			elif unmerge_action == "prune":
-				if len(mymatch) == 1:
-					continue
-				best_version = mymatch[0]
-				best_slot = vartree.getslot(best_version)
-				best_counter = vartree.dbapi.cpv_counter(best_version)
-				for mypkg in mymatch[1:]:
-					myslot = vartree.getslot(mypkg)
-					mycounter = vartree.dbapi.cpv_counter(mypkg)
-					if (myslot == best_slot and mycounter > best_counter) or \
-						mypkg == portage.best([mypkg, best_version]):
-						if myslot == best_slot:
-							if mycounter < best_counter:
-								# On slot collision, keep the one with the
-								# highest counter since it is the most
-								# recently installed.
-								continue
-						best_version = mypkg
-						best_slot = myslot
-						best_counter = mycounter
-				pkgmap[mykey]["protected"].add(best_version)
-				pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
-					if mypkg != best_version and mypkg not in all_selected)
-				all_selected.update(pkgmap[mykey]["selected"])
-			else:
-				# unmerge_action == "clean"
-				slotmap={}
-				for mypkg in mymatch:
-					if unmerge_action == "clean":
-						myslot = localtree.getslot(mypkg)
-					else:
-						# since we're pruning, we don't care about slots
-						# and put all the pkgs in together
-						myslot = 0
-					if myslot not in slotmap:
-						slotmap[myslot] = {}
-					slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
-
-				for mypkg in vartree.dbapi.cp_list(
-					portage.dep_getkey(mymatch[0])):
-					myslot = vartree.getslot(mypkg)
-					if myslot not in slotmap:
-						slotmap[myslot] = {}
-					slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
-
-				for myslot in slotmap:
-					counterkeys = slotmap[myslot].keys()
-					if not counterkeys:
-						continue
-					counterkeys.sort()
-					pkgmap[mykey]["protected"].add(
-						slotmap[myslot][counterkeys[-1]])
-					del counterkeys[-1]
-
-					for counter in counterkeys[:]:
-						mypkg = slotmap[myslot][counter]
-						if mypkg not in mymatch:
-							counterkeys.remove(counter)
-							pkgmap[mykey]["protected"].add(
-								slotmap[myslot][counter])
-
-					#be pretty and get them in order of merge:
-					for ckey in counterkeys:
-						mypkg = slotmap[myslot][ckey]
-						if mypkg not in all_selected:
-							pkgmap[mykey]["selected"].add(mypkg)
-							all_selected.add(mypkg)
-					# ok, now the last-merged package
-					# is protected, and the rest are selected
-		numselected = len(all_selected)
-		if global_unmerge and not numselected:
-			portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
-			return 0
-	
-		if not numselected:
-			portage.writemsg_stdout(
-				"\n>>> No packages selected for removal by " + \
-				unmerge_action + "\n")
-			return 0
-	finally:
-		if vdb_lock:
-			vartree.dbapi.flush_cache()
-			portage.locks.unlockdir(vdb_lock)
-	
-	from portage.sets.base import EditablePackageSet
-	
-	# generate a list of package sets that are directly or indirectly listed in "world",
-	# as there is no persistent list of "installed" sets
-	installed_sets = ["world"]
-	stop = False
-	pos = 0
-	while not stop:
-		stop = True
-		pos = len(installed_sets)
-		for s in installed_sets[pos - 1:]:
-			if s not in sets:
-				continue
-			candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
-			if candidates:
-				stop = False
-				installed_sets += candidates
-	installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
-	del stop, pos
-
-	# We don't want to unmerge packages that are still listed in user-editable
-	# package sets referenced by "world", as they would be remerged on the next
-	# update of "world" or of the relevant package sets.
-	unknown_sets = set()
-	for cp in xrange(len(pkgmap)):
-		for cpv in pkgmap[cp]["selected"].copy():
-			try:
-				pkg = _pkg(cpv)
-			except KeyError:
-				# It could have been uninstalled
-				# by a concurrent process.
-				continue
-
-			if unmerge_action != "clean" and \
-				root_config.root == "/" and \
-				portage.match_from_list(
-				portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
-				msg = ("Not unmerging package %s since there is no valid " + \
-				"reason for portage to unmerge itself.") % (pkg.cpv,)
-				for line in textwrap.wrap(msg, 75):
-					out.eerror(line)
-				# adjust pkgmap so the display output is correct
-				pkgmap[cp]["selected"].remove(cpv)
-				all_selected.remove(cpv)
-				pkgmap[cp]["protected"].add(cpv)
-				continue
-
-			parents = []
-			for s in installed_sets:
-				# skip sets that the user requested to unmerge, and skip world 
-				# unless we're unmerging a package set (as the package would be 
-				# removed from "world" later on)
-				if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
-					continue
-
-				if s not in sets:
-					if s in unknown_sets:
-						continue
-					unknown_sets.add(s)
-					out = portage.output.EOutput()
-					out.eerror(("Unknown set '@%s' in " + \
-						"%svar/lib/portage/world_sets") % \
-						(s, root_config.root))
-					continue
-
-				# only check instances of EditablePackageSet as other classes are generally used for
-				# special purposes and can be ignored here (and are usually generated dynamically, so the
-				# user can't do much about them anyway)
-				if isinstance(sets[s], EditablePackageSet):
-
-					# This is derived from a snippet of code in the
-					# depgraph._iter_atoms_for_pkg() method.
-					for atom in sets[s].iterAtomsForPackage(pkg):
-						inst_matches = vartree.dbapi.match(atom)
-						inst_matches.reverse() # descending order
-						higher_slot = None
-						for inst_cpv in inst_matches:
-							try:
-								inst_pkg = _pkg(inst_cpv)
-							except KeyError:
-								# It could have been uninstalled
-								# by a concurrent process.
-								continue
-
-							if inst_pkg.cp != atom.cp:
-								continue
-							if pkg >= inst_pkg:
-								# This is descending order, and we're not
-								# interested in any versions <= pkg given.
-								break
-							if pkg.slot_atom != inst_pkg.slot_atom:
-								higher_slot = inst_pkg
-								break
-						if higher_slot is None:
-							parents.append(s)
-							break
-			if parents:
-				#print colorize("WARN", "Package %s is going to be unmerged," % cpv)
-				#print colorize("WARN", "but still listed in the following package sets:")
-				#print "    %s\n" % ", ".join(parents)
-				print colorize("WARN", "Not unmerging package %s as it is" % cpv)
-				print colorize("WARN", "still referenced by the following package sets:")
-				print "    %s\n" % ", ".join(parents)
-				# adjust pkgmap so the display output is correct
-				pkgmap[cp]["selected"].remove(cpv)
-				all_selected.remove(cpv)
-				pkgmap[cp]["protected"].add(cpv)
-	
-	del installed_sets
-
-	numselected = len(all_selected)
-	if not numselected:
-		writemsg_level(
-			"\n>>> No packages selected for removal by " + \
-			unmerge_action + "\n")
-		return 0
-
-	# Unmerge order only matters in some cases
-	if not ordered:
-		unordered = {}
-		for d in pkgmap:
-			selected = d["selected"]
-			if not selected:
-				continue
-			cp = portage.cpv_getkey(iter(selected).next())
-			cp_dict = unordered.get(cp)
-			if cp_dict is None:
-				cp_dict = {}
-				unordered[cp] = cp_dict
-				for k in d:
-					cp_dict[k] = set()
-			for k, v in d.iteritems():
-				cp_dict[k].update(v)
-		pkgmap = [unordered[cp] for cp in sorted(unordered)]
-
-	for x in xrange(len(pkgmap)):
-		selected = pkgmap[x]["selected"]
-		if not selected:
-			continue
-		for mytype, mylist in pkgmap[x].iteritems():
-			if mytype == "selected":
-				continue
-			mylist.difference_update(all_selected)
-		cp = portage.cpv_getkey(iter(selected).next())
-		for y in localtree.dep_match(cp):
-			if y not in pkgmap[x]["omitted"] and \
-				y not in pkgmap[x]["selected"] and \
-				y not in pkgmap[x]["protected"] and \
-				y not in all_selected:
-				pkgmap[x]["omitted"].add(y)
-		if global_unmerge and not pkgmap[x]["selected"]:
-			#avoid cluttering the preview printout with stuff that isn't getting unmerged
-			continue
-		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
-			writemsg_level(colorize("BAD","\a\n\n!!! " + \
-				"'%s' is part of your system profile.\n" % cp),
-				level=logging.WARNING, noiselevel=-1)
-			writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
-				"be damaging to your system.\n\n"),
-				level=logging.WARNING, noiselevel=-1)
-			if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
-				countdown(int(settings["EMERGE_WARNING_DELAY"]),
-					colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
-		if not quiet:
-			writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
-		else:
-			writemsg_level(bold(cp) + ": ", noiselevel=-1)
-		for mytype in ["selected","protected","omitted"]:
-			if not quiet:
-				writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
-			if pkgmap[x][mytype]:
-				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
-				sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
-				for pn, ver, rev in sorted_pkgs:
-					if rev == "r0":
-						myversion = ver
-					else:
-						myversion = ver + "-" + rev
-					if mytype == "selected":
-						writemsg_level(
-							colorize("UNMERGE_WARN", myversion + " "),
-							noiselevel=-1)
-					else:
-						writemsg_level(
-							colorize("GOOD", myversion + " "), noiselevel=-1)
-			else:
-				writemsg_level("none ", noiselevel=-1)
-			if not quiet:
-				writemsg_level("\n", noiselevel=-1)
-		if quiet:
-			writemsg_level("\n", noiselevel=-1)
-
-	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
-		" packages are slated for removal.\n")
-	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
-			" and " + colorize("GOOD", "'omitted'") + \
-			" packages will not be removed.\n\n")
-
-	if "--pretend" in myopts:
-		#we're done... return
-		return 0
-	if "--ask" in myopts:
-		if userquery("Would you like to unmerge these packages?")=="No":
-			# enter pretend mode for correct formatting of results
-			myopts["--pretend"] = True
-			print
-			print "Quitting."
-			print
-			return 0
-	#the real unmerging begins, after a short delay....
-	if clean_delay and not autoclean:
-		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
-
-	for x in xrange(len(pkgmap)):
-		for y in pkgmap[x]["selected"]:
-			writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
-			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
-			mysplit = y.split("/")
-			#unmerge...
-			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
-				mysettings, unmerge_action not in ["clean","prune"],
-				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
-				scheduler=scheduler)
-
-			if retval != os.EX_OK:
-				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
-				if raise_on_error:
-					raise UninstallFailure(retval)
-				sys.exit(retval)
-			else:
-				if clean_world and hasattr(sets["world"], "cleanPackage"):
-					sets["world"].cleanPackage(vartree.dbapi, y)
-				emergelog(xterm_titles, " >>> unmerge success: "+y)
-	if clean_world and hasattr(sets["world"], "remove"):
-		for s in root_config.setconfig.active:
-			sets["world"].remove(SETPREFIX+s)
-	return 1
-
 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
 
 	if os.path.exists(EPREFIX + "/usr/bin/install-info"):

Copied: main/branches/prefix/pym/_emerge/countdown.py (from rev 13669, main/trunk/pym/_emerge/countdown.py)
===================================================================
--- main/branches/prefix/pym/_emerge/countdown.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/countdown.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,17 @@
+import sys
+import time
+
+from portage.output import colorize
+
+def countdown(secs=5, doing="Starting"):
+	if secs:
+		print ">>> Waiting",secs,"seconds before starting..."
+		print ">>> (Control-C to abort)...\n"+doing+" in: ",
+		ticks=range(secs)
+		ticks.reverse()
+		for sec in ticks:
+			sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
+			sys.stdout.flush()
+			time.sleep(1)
+		print
+
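
A minimal usage sketch of the helper above. The module path comes from this
commit; the three-second delay is illustrative, and the call mirrors the one
made from unmerge() earlier in this patch (which passes CLEAN_DELAY):

# Assumes portage's pym/ directory is on sys.path; Python 2 syntax to match
# the code above.
from _emerge.countdown import countdown

# Prints the warning lines, then counts "3 2 1" with one-second pauses.
countdown(3, ">>> Unmerging")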

Copied: main/branches/prefix/pym/_emerge/emergelog.py (from rev 13669, main/trunk/pym/_emerge/emergelog.py)
===================================================================
--- main/branches/prefix/pym/_emerge/emergelog.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/emergelog.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,43 @@
+import os
+import sys
+import time
+# for an explanation on this logic, see pym/_emerge/__init__.py
+import os
+import sys
+if os.environ.__contains__("PORTAGE_PYTHONPATH"):
+	sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
+else:
+	sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
+import portage
+
+from portage.data import secpass
+from portage.output import xtermTitle
+	
+_emerge_log_dir = '/var/log'
+
+def emergelog(xterm_titles, mystr, short_msg=None):
+	if xterm_titles and short_msg:
+		if "HOSTNAME" in os.environ:
+			short_msg = os.environ["HOSTNAME"]+": "+short_msg
+		xtermTitle(short_msg)
+	try:
+		file_path = os.path.join(_emerge_log_dir, 'emerge.log')
+		mylogfile = open(file_path, "a")
+		portage.util.apply_secpass_permissions(file_path,
+			uid=portage.portage_uid, gid=portage.portage_gid,
+			mode=0660)
+		mylock = None
+		try:
+			mylock = portage.locks.lockfile(mylogfile)
+			# seek because we may have gotten held up by the lock.
+			# if so, we may not be positioned at the end of the file.
+			mylogfile.seek(0, 2)
+			mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
+			mylogfile.flush()
+		finally:
+			if mylock:
+				portage.locks.unlockfile(mylock)
+			mylogfile.close()
+	except (IOError,OSError,portage.exception.PortageException), e:
+		if secpass >= 1:
+			print >> sys.stderr, "emergelog():",e
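
A minimal usage sketch, assuming the caller can append to /var/log/emerge.log
(the location hard-coded in _emerge_log_dir above); the message text is
illustrative, not taken from this commit:

from _emerge.emergelog import emergelog

# Appends a timestamped line to the log under a lock; failures are silently
# ignored unless secpass >= 1, in which case they are printed to stderr.
emergelog(False, "=== Example: recording an event")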

Copied: main/branches/prefix/pym/_emerge/format_size.py (from rev 13669, main/trunk/pym/_emerge/format_size.py)
===================================================================
--- main/branches/prefix/pym/_emerge/format_size.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/format_size.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,16 @@
+
+# formats a size given in bytes nicely
+def format_size(mysize):
+	if isinstance(mysize, basestring):
+		return mysize
+	if 0 != mysize % 1024:
+		# Always round up to the next kB so that it doesn't show 0 kB when
+		# some small file still needs to be fetched.
+		mysize += 1024 - mysize % 1024
+	mystr=str(mysize/1024)
+	mycount=len(mystr)
+	while (mycount > 3):
+		mycount-=3
+		mystr=mystr[:mycount]+","+mystr[mycount:]
+	return mystr+" kB"
+
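
A few worked examples of the rounding and comma grouping above; the input
values are illustrative, and the expected outputs follow directly from the
logic shown:

from _emerge.format_size import format_size

print format_size(1)          # "1 kB"     -- rounded up to the next full kB
print format_size(2500)       # "3 kB"     -- 2500 -> 3072 bytes -> 3 kB
print format_size(1500000)    # "1,465 kB" -- thousands are comma-grouped
print format_size("5000 kB")  # "5000 kB"  -- strings pass through unchanged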

Copied: main/branches/prefix/pym/_emerge/search.py (from rev 13669, main/trunk/pym/_emerge/search.py)
===================================================================
--- main/branches/prefix/pym/_emerge/search.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/search.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,379 @@
+import os
+import re
+from itertools import izip
+
+# for an explanation on this logic, see pym/_emerge/__init__.py
+import os
+import sys
+if os.environ.__contains__("PORTAGE_PYTHONPATH"):
+	sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
+else:
+	sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
+import portage
+
+from portage.output import  bold as white, darkgreen, green, red
+
+from _emerge.Package import Package
+from _emerge.visible import visible
+
+class search(object):
+
+	#
+	# class constants
+	#
+	VERSION_SHORT=1
+	VERSION_RELEASE=2
+
+	#
+	# public interface
+	#
+	def __init__(self, root_config, spinner, searchdesc,
+		verbose, usepkg, usepkgonly):
+		"""Searches the available and installed packages for the supplied search key.
+		The list of available and installed packages is created at object instantiation.
+		This makes successive searches faster."""
+		self.settings = root_config.settings
+		self.vartree = root_config.trees["vartree"]
+		self.spinner = spinner
+		self.verbose = verbose
+		self.searchdesc = searchdesc
+		self.root_config = root_config
+		self.setconfig = root_config.setconfig
+		self.matches = {"pkg" : []}
+		self.mlen = 0
+
+		def fake_portdb():
+			pass
+		self.portdb = fake_portdb
+		for attrib in ("aux_get", "cp_all",
+			"xmatch", "findname", "getFetchMap"):
+			setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
+
+		self._dbs = []
+
+		portdb = root_config.trees["porttree"].dbapi
+		bindb = root_config.trees["bintree"].dbapi
+		vardb = root_config.trees["vartree"].dbapi
+
+		if not usepkgonly and portdb._have_root_eclass_dir:
+			self._dbs.append(portdb)
+
+		if (usepkg or usepkgonly) and bindb.cp_all():
+			self._dbs.append(bindb)
+
+		self._dbs.append(vardb)
+		self._portdb = portdb
+
+	def _cp_all(self):
+		cp_all = set()
+		for db in self._dbs:
+			cp_all.update(db.cp_all())
+		return list(sorted(cp_all))
+
+	def _aux_get(self, *args, **kwargs):
+		for db in self._dbs:
+			try:
+				return db.aux_get(*args, **kwargs)
+			except KeyError:
+				pass
+		raise
+
+	def _findname(self, *args, **kwargs):
+		for db in self._dbs:
+			if db is not self._portdb:
+				# We don't want findname to return anything
+				# unless it's an ebuild in a portage tree.
+				# Otherwise, it's already built and we don't
+				# care about it.
+				continue
+			func = getattr(db, "findname", None)
+			if func:
+				value = func(*args, **kwargs)
+				if value:
+					return value
+		return None
+
+	def _getFetchMap(self, *args, **kwargs):
+		for db in self._dbs:
+			func = getattr(db, "getFetchMap", None)
+			if func:
+				value = func(*args, **kwargs)
+				if value:
+					return value
+		return {}
+
+	def _visible(self, db, cpv, metadata):
+		installed = db is self.vartree.dbapi
+		built = installed or db is not self._portdb
+		pkg_type = "ebuild"
+		if installed:
+			pkg_type = "installed"
+		elif built:
+			pkg_type = "binary"
+		return visible(self.settings,
+			Package(type_name=pkg_type, root_config=self.root_config,
+			cpv=cpv, built=built, installed=installed, metadata=metadata))
+
+	def _xmatch(self, level, atom):
+		"""
+		This method does not expand old-style virtuals because it
+		is restricted to returning matches for a single ${CATEGORY}/${PN}
+		and old-style virtual matches are unreliable for that when querying
+		multiple package databases. If necessary, old-style virtual expansion
+		can be performed on atoms prior to calling this method.
+		"""
+		cp = portage.dep_getkey(atom)
+		if level == "match-all":
+			matches = set()
+			for db in self._dbs:
+				if hasattr(db, "xmatch"):
+					matches.update(db.xmatch(level, atom))
+				else:
+					matches.update(db.match(atom))
+			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
+			db._cpv_sort_ascending(result)
+		elif level == "match-visible":
+			matches = set()
+			for db in self._dbs:
+				if hasattr(db, "xmatch"):
+					matches.update(db.xmatch(level, atom))
+				else:
+					db_keys = list(db._aux_cache_keys)
+					for cpv in db.match(atom):
+						metadata = izip(db_keys,
+							db.aux_get(cpv, db_keys))
+						if not self._visible(db, cpv, metadata):
+							continue
+						matches.add(cpv)
+			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
+			db._cpv_sort_ascending(result)
+		elif level == "bestmatch-visible":
+			result = None
+			for db in self._dbs:
+				if hasattr(db, "xmatch"):
+					cpv = db.xmatch("bestmatch-visible", atom)
+					if not cpv or portage.cpv_getkey(cpv) != cp:
+						continue
+					if not result or cpv == portage.best([cpv, result]):
+						result = cpv
+				else:
+					db_keys = Package.metadata_keys
+					# break out of this loop with the highest visible
+					# match, checked in descending order
+					for cpv in reversed(db.match(atom)):
+						if portage.cpv_getkey(cpv) != cp:
+							continue
+						metadata = izip(db_keys,
+							db.aux_get(cpv, db_keys))
+						if not self._visible(db, cpv, metadata):
+							continue
+						if not result or cpv == portage.best([cpv, result]):
+							result = cpv
+						break
+		else:
+			raise NotImplementedError(level)
+		return result
+
+	def execute(self,searchkey):
+		"""Performs the search for the supplied search key"""
+		match_category = 0
+		self.searchkey=searchkey
+		self.packagematches = []
+		if self.searchdesc:
+			self.searchdesc=1
+			self.matches = {"pkg":[], "desc":[], "set":[]}
+		else:
+			self.searchdesc=0
+			self.matches = {"pkg":[], "set":[]}
+		print "Searching...   ",
+
+		regexsearch = False
+		if self.searchkey.startswith('%'):
+			regexsearch = True
+			self.searchkey = self.searchkey[1:]
+		if self.searchkey.startswith('@'):
+			match_category = 1
+			self.searchkey = self.searchkey[1:]
+		if regexsearch:
+			self.searchre=re.compile(self.searchkey,re.I)
+		else:
+			self.searchre=re.compile(re.escape(self.searchkey), re.I)
+		for package in self.portdb.cp_all():
+			self.spinner.update()
+
+			if match_category:
+				match_string  = package[:]
+			else:
+				match_string  = package.split("/")[-1]
+
+			masked=0
+			if self.searchre.search(match_string):
+				if not self.portdb.xmatch("match-visible", package):
+					masked=1
+				self.matches["pkg"].append([package,masked])
+			elif self.searchdesc: # DESCRIPTION searching
+				full_package = self.portdb.xmatch("bestmatch-visible", package)
+				if not full_package:
+					#no match found; we don't want to query description
+					full_package = portage.best(
+						self.portdb.xmatch("match-all", package))
+					if not full_package:
+						continue
+					else:
+						masked=1
+				try:
+					full_desc = self.portdb.aux_get(
+						full_package, ["DESCRIPTION"])[0]
+				except KeyError:
+					print "emerge: search: aux_get() failed, skipping"
+					continue
+				if self.searchre.search(full_desc):
+					self.matches["desc"].append([full_package,masked])
+
+		self.sdict = self.setconfig.getSets()
+		for setname in self.sdict:
+			self.spinner.update()
+			if match_category:
+				match_string = setname
+			else:
+				match_string = setname.split("/")[-1]
+			
+			if self.searchre.search(match_string):
+				self.matches["set"].append([setname, False])
+			elif self.searchdesc:
+				if self.searchre.search(
+					self.sdict[setname].getMetadata("DESCRIPTION")):
+					self.matches["set"].append([setname, False])
+			
+		self.mlen=0
+		for mtype in self.matches:
+			self.matches[mtype].sort()
+			self.mlen += len(self.matches[mtype])
+
+	def addCP(self, cp):
+		if not self.portdb.xmatch("match-all", cp):
+			return
+		masked = 0
+		if not self.portdb.xmatch("bestmatch-visible", cp):
+			masked = 1
+		self.matches["pkg"].append([cp, masked])
+		self.mlen += 1
+
+	def output(self):
+		"""Outputs the results of the search."""
+		print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
+		print "[ Applications found : "+white(str(self.mlen))+" ]"
+		print " "
+		vardb = self.vartree.dbapi
+		for mtype in self.matches:
+			for match,masked in self.matches[mtype]:
+				full_package = None
+				if mtype == "pkg":
+					catpack = match
+					full_package = self.portdb.xmatch(
+						"bestmatch-visible", match)
+					if not full_package:
+						#no match found; we don't want to query description
+						masked=1
+						full_package = portage.best(
+							self.portdb.xmatch("match-all",match))
+				elif mtype == "desc":
+					full_package = match
+					match        = portage.cpv_getkey(match)
+				elif mtype == "set":
+					print green("*")+"  "+white(match)
+					print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
+					print
+				if full_package:
+					try:
+						desc, homepage, license = self.portdb.aux_get(
+							full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
+					except KeyError:
+						print "emerge: search: aux_get() failed, skipping"
+						continue
+					if masked:
+						print green("*")+"  "+white(match)+" "+red("[ Masked ]")
+					else:
+						print green("*")+"  "+white(match)
+					myversion = self.getVersion(full_package, search.VERSION_RELEASE)
+
+					mysum = [0,0]
+					file_size_str = None
+					mycat = match.split("/")[0]
+					mypkg = match.split("/")[1]
+					mycpv = match + "-" + myversion
+					myebuild = self.portdb.findname(mycpv)
+					if myebuild:
+						pkgdir = os.path.dirname(myebuild)
+						from portage import manifest
+						mf = manifest.Manifest(
+							pkgdir, self.settings["DISTDIR"])
+						try:
+							uri_map = self.portdb.getFetchMap(mycpv)
+						except portage.exception.InvalidDependString, e:
+							file_size_str = "Unknown (%s)" % (e,)
+							del e
+						else:
+							try:
+								mysum[0] = mf.getDistfilesSize(uri_map)
+							except KeyError, e:
+								file_size_str = "Unknown (missing " + \
+									"digest for %s)" % (e,)
+								del e
+
+					available = False
+					for db in self._dbs:
+						if db is not vardb and \
+							db.cpv_exists(mycpv):
+							available = True
+							if not myebuild and hasattr(db, "bintree"):
+								myebuild = db.bintree.getname(mycpv)
+								try:
+									mysum[0] = os.stat(myebuild).st_size
+								except OSError:
+									myebuild = None
+							break
+
+					if myebuild and file_size_str is None:
+						mystr = str(mysum[0] / 1024)
+						mycount = len(mystr)
+						while (mycount > 3):
+							mycount -= 3
+							mystr = mystr[:mycount] + "," + mystr[mycount:]
+						file_size_str = mystr + " kB"
+
+					if self.verbose:
+						if available:
+							print "     ", darkgreen("Latest version available:"),myversion
+						print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
+						if myebuild:
+							print "      %s %s" % \
+								(darkgreen("Size of files:"), file_size_str)
+						print "     ", darkgreen("Homepage:")+"     ",homepage
+						print "     ", darkgreen("Description:")+"  ",desc
+						print "     ", darkgreen("License:")+"      ",license
+						print
+	#
+	# private interface
+	#
+	def getInstallationStatus(self,package):
+		installed_package = self.vartree.dep_bestmatch(package)
+		result = ""
+		version = self.getVersion(installed_package,search.VERSION_RELEASE)
+		if len(version) > 0:
+			result = darkgreen("Latest version installed:")+" "+version
+		else:
+			result = darkgreen("Latest version installed:")+" [ Not Installed ]"
+		return result
+
+	def getVersion(self,full_package,detail):
+		if len(full_package) > 1:
+			package_parts = portage.catpkgsplit(full_package)
+			if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
+				result = package_parts[2]+ "-" + package_parts[3]
+			else:
+				result = package_parts[2]
+		else:
+			result = ""
+		return result
+
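
For reference, the _xmatch() wrapper above emulates portdbapi.xmatch() for trees that lack a native xmatch (binary and installed databases): "match-visible" collects every visible cpv for an atom, "bestmatch-visible" returns the single highest visible cpv, and "match-all" ignores visibility, which is what execute() and output() fall back to when nothing visible matches. A minimal sketch of those levels driven directly against the ebuild repository, assuming a configured Portage tree of this era and relying on the legacy portage.portdb global (the atom is only an example):

import portage

portdb = portage.portdb
atom = "app-shells/bash"

all_cpvs     = portdb.xmatch("match-all", atom)          # every version, masked or not
visible_cpvs = portdb.xmatch("match-visible", atom)      # only unmasked, keyword-accepted versions
best_cpv     = portdb.xmatch("bestmatch-visible", atom)  # highest visible version, "" if none

print "all:    ", all_cpvs
print "visible:", visible_cpvs
# fall back to the overall best version the same way execute() does
print "best:   ", best_cpv or portage.best(all_cpvs)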

Copied: main/branches/prefix/pym/_emerge/show_invalid_depstring_notice.py (from rev 13669, main/trunk/pym/_emerge/show_invalid_depstring_notice.py)
===================================================================
--- main/branches/prefix/pym/_emerge/show_invalid_depstring_notice.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/show_invalid_depstring_notice.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,40 @@
+import logging
+import os
+import textwrap
+
+# for an explanation on this logic, see pym/_emerge/__init__.py
+import os
+import sys
+if os.environ.__contains__("PORTAGE_PYTHONPATH"):
+	sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
+else:
+	sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
+import portage
+
+from portage.util import writemsg_level
+
+def show_invalid_depstring_notice(parent_node, depstring, error_msg):
+
+	msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
+		"\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
+	p_type, p_root, p_key, p_status = parent_node
+	msg = []
+	if p_status == "nomerge":
+		category, pf = portage.catsplit(p_key)
+		pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
+		msg.append("Portage is unable to process the dependencies of the ")
+		msg.append("'%s' package. " % p_key)
+		msg.append("In order to correct this problem, the package ")
+		msg.append("should be uninstalled, reinstalled, or upgraded. ")
+		msg.append("As a temporary workaround, the --nodeps option can ")
+		msg.append("be used to ignore all dependencies.  For reference, ")
+		msg.append("the problematic dependencies can be found in the ")
+		msg.append("*DEPEND files located in '%s/'." % pkg_location)
+	else:
+		msg.append("This package can not be installed. ")
+		msg.append("Please notify the '%s' package maintainer " % p_key)
+		msg.append("about this problem.")
+
+	msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
+	writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
+
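
For context, parent_node is unpacked above as a (type, root, cpv, status) tuple, and a status of "nomerge" selects the message that points at the installed package's *DEPEND files under the vdb. A hedged example call for a corrupt installed package (the cpv, dependency string and error text are made up):

from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice

parent_node = ("installed", "/", "app-misc/foo-1.0", "nomerge")
depstring   = ">=app-misc/bar-2.0 baz? ( app-misc/baz"   # unbalanced parenthesis
error_msg   = "missing closing parenthesis"

# prints the msg1 header plus the wrapped advice via writemsg_level()
show_invalid_depstring_notice(parent_node, depstring, error_msg)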

Copied: main/branches/prefix/pym/_emerge/unmerge.py (from rev 13669, main/trunk/pym/_emerge/unmerge.py)
===================================================================
--- main/branches/prefix/pym/_emerge/unmerge.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/unmerge.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,525 @@
+import logging
+import os
+import sys
+import textwrap
+from itertools import izip
+
+# for an explanation on this logic, see pym/_emerge/__init__.py
+import os
+import sys
+if os.environ.__contains__("PORTAGE_PYTHONPATH"):
+	sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
+else:
+	sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
+import portage
+
+from portage.output import bold, colorize, darkgreen, green
+from portage.sets import SETPREFIX
+from portage.util import cmp_sort_key
+
+from _emerge.emergelog import emergelog
+from _emerge.Package import Package
+from _emerge.UninstallFailure import UninstallFailure
+from _emerge.userquery import userquery
+from _emerge.countdown import countdown
+
+def unmerge(root_config, myopts, unmerge_action,
+	unmerge_files, ldpath_mtimes, autoclean=0,
+	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
+	scheduler=None, writemsg_level=portage.util.writemsg_level):
+
+	if clean_world:
+		clean_world = myopts.get('--deselect') != 'n'
+	quiet = "--quiet" in myopts
+	settings = root_config.settings
+	sets = root_config.sets
+	vartree = root_config.trees["vartree"]
+	candidate_catpkgs=[]
+	global_unmerge=0
+	xterm_titles = "notitles" not in settings.features
+	out = portage.output.EOutput()
+	pkg_cache = {}
+	db_keys = list(vartree.dbapi._aux_cache_keys)
+
+	def _pkg(cpv):
+		pkg = pkg_cache.get(cpv)
+		if pkg is None:
+			pkg = Package(cpv=cpv, installed=True,
+				metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
+				root_config=root_config,
+				type_name="installed")
+			pkg_cache[cpv] = pkg
+		return pkg
+
+	vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
+	try:
+		# At least the parent needs to exist for the lock file.
+		portage.util.ensure_dirs(vdb_path)
+	except portage.exception.PortageException:
+		pass
+	vdb_lock = None
+	try:
+		if os.access(vdb_path, os.W_OK):
+			vdb_lock = portage.locks.lockdir(vdb_path)
+		realsyslist = sets["system"].getAtoms()
+		syslist = []
+		for x in realsyslist:
+			mycp = portage.dep_getkey(x)
+			if mycp in settings.getvirtuals():
+				providers = []
+				for provider in settings.getvirtuals()[mycp]:
+					if vartree.dbapi.match(provider):
+						providers.append(provider)
+				if len(providers) == 1:
+					syslist.extend(providers)
+			else:
+				syslist.append(mycp)
+	
+		mysettings = portage.config(clone=settings)
+	
+		if not unmerge_files:
+			if unmerge_action == "unmerge":
+				print
+				print bold("emerge unmerge") + " can only be used with specific package names"
+				print
+				return 0
+			else:
+				global_unmerge = 1
+	
+		localtree = vartree
+		# process all arguments and add all
+		# valid db entries to candidate_catpkgs
+		if global_unmerge:
+			if not unmerge_files:
+				candidate_catpkgs.extend(vartree.dbapi.cp_all())
+		else:
+			#we've got command-line arguments
+			if not unmerge_files:
+				print "\nNo packages to unmerge have been provided.\n"
+				return 0
+			for x in unmerge_files:
+				arg_parts = x.split('/')
+				if x[0] not in [".","/"] and \
+					arg_parts[-1][-7:] != ".ebuild":
+					#possible cat/pkg or dep; treat as such
+					candidate_catpkgs.append(x)
+				elif unmerge_action in ["prune","clean"]:
+					print "\n!!! Prune and clean do not accept individual" + \
+						" ebuilds as arguments;\n    skipping.\n"
+					continue
+				else:
+					# it appears that the user is specifying an installed
+					# ebuild and we're in "unmerge" mode, so it's ok.
+					if not os.path.exists(x):
+						print "\n!!! The path '"+x+"' doesn't exist.\n"
+						return 0
+	
+					absx   = os.path.abspath(x)
+					sp_absx = absx.split("/")
+					if sp_absx[-1][-7:] == ".ebuild":
+						del sp_absx[-1]
+						absx = "/".join(sp_absx)
+	
+					sp_absx_len = len(sp_absx)
+	
+					vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
+					vdb_len  = len(vdb_path)
+	
+					sp_vdb     = vdb_path.split("/")
+					sp_vdb_len = len(sp_vdb)
+	
+					if not os.path.exists(absx+"/CONTENTS"):
+						print "!!! Not a valid db dir: "+str(absx)
+						return 0
+	
+					if sp_absx_len <= sp_vdb_len:
+						# The Path is shorter... so it can't be inside the vdb.
+						print sp_absx
+						print absx
+						print "\n!!!",x,"cannot be inside "+ \
+							vdb_path+"; aborting.\n"
+						return 0
+	
+					for idx in range(0,sp_vdb_len):
+						if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
+							print sp_absx
+							print absx
+							print "\n!!!", x, "is not inside "+\
+								vdb_path+"; aborting.\n"
+							return 0
+	
+					print "="+"/".join(sp_absx[sp_vdb_len:])
+					candidate_catpkgs.append(
+						"="+"/".join(sp_absx[sp_vdb_len:]))
+	
+		newline=""
+		if (not "--quiet" in myopts):
+			newline="\n"
+		if settings["ROOT"] != "/":
+			writemsg_level(darkgreen(newline+ \
+				">>> Using system located in ROOT tree %s\n" % \
+				settings["ROOT"]))
+
+		if (("--pretend" in myopts) or ("--ask" in myopts)) and \
+			not ("--quiet" in myopts):
+			writemsg_level(darkgreen(newline+\
+				">>> These are the packages that would be unmerged:\n"))
+
+		# Preservation of order is required for --depclean and --prune so
+		# that dependencies are respected. Use all_selected to eliminate
+		# duplicate packages since the same package may be selected by
+		# multiple atoms.
+		pkgmap = []
+		all_selected = set()
+		for x in candidate_catpkgs:
+			# cycle through all our candidate deps and determine
+			# what will and will not get unmerged
+			try:
+				mymatch = vartree.dbapi.match(x)
+			except portage.exception.AmbiguousPackageName, errpkgs:
+				print "\n\n!!! The short ebuild name \"" + \
+					x + "\" is ambiguous.  Please specify"
+				print "!!! one of the following fully-qualified " + \
+					"ebuild names instead:\n"
+				for i in errpkgs[0]:
+					print "    " + green(i)
+				print
+				sys.exit(1)
+	
+			if not mymatch and x[0] not in "<>=~":
+				mymatch = localtree.dep_match(x)
+			if not mymatch:
+				portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
+					(x, unmerge_action), noiselevel=-1)
+				continue
+
+			pkgmap.append(
+				{"protected": set(), "selected": set(), "omitted": set()})
+			mykey = len(pkgmap) - 1
+			if unmerge_action=="unmerge":
+					for y in mymatch:
+						if y not in all_selected:
+							pkgmap[mykey]["selected"].add(y)
+							all_selected.add(y)
+			elif unmerge_action == "prune":
+				if len(mymatch) == 1:
+					continue
+				best_version = mymatch[0]
+				best_slot = vartree.getslot(best_version)
+				best_counter = vartree.dbapi.cpv_counter(best_version)
+				for mypkg in mymatch[1:]:
+					myslot = vartree.getslot(mypkg)
+					mycounter = vartree.dbapi.cpv_counter(mypkg)
+					if (myslot == best_slot and mycounter > best_counter) or \
+						mypkg == portage.best([mypkg, best_version]):
+						if myslot == best_slot:
+							if mycounter < best_counter:
+								# On slot collision, keep the one with the
+								# highest counter since it is the most
+								# recently installed.
+								continue
+						best_version = mypkg
+						best_slot = myslot
+						best_counter = mycounter
+				pkgmap[mykey]["protected"].add(best_version)
+				pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
+					if mypkg != best_version and mypkg not in all_selected)
+				all_selected.update(pkgmap[mykey]["selected"])
+			else:
+				# unmerge_action == "clean"
+				slotmap={}
+				for mypkg in mymatch:
+					if unmerge_action == "clean":
+						myslot = localtree.getslot(mypkg)
+					else:
+						# since we're pruning, we don't care about slots
+						# and put all the pkgs in together
+						myslot = 0
+					if myslot not in slotmap:
+						slotmap[myslot] = {}
+					slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
+
+				for mypkg in vartree.dbapi.cp_list(
+					portage.dep_getkey(mymatch[0])):
+					myslot = vartree.getslot(mypkg)
+					if myslot not in slotmap:
+						slotmap[myslot] = {}
+					slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
+
+				for myslot in slotmap:
+					counterkeys = slotmap[myslot].keys()
+					if not counterkeys:
+						continue
+					counterkeys.sort()
+					pkgmap[mykey]["protected"].add(
+						slotmap[myslot][counterkeys[-1]])
+					del counterkeys[-1]
+
+					for counter in counterkeys[:]:
+						mypkg = slotmap[myslot][counter]
+						if mypkg not in mymatch:
+							counterkeys.remove(counter)
+							pkgmap[mykey]["protected"].add(
+								slotmap[myslot][counter])
+
+					#be pretty and get them in order of merge:
+					for ckey in counterkeys:
+						mypkg = slotmap[myslot][ckey]
+						if mypkg not in all_selected:
+							pkgmap[mykey]["selected"].add(mypkg)
+							all_selected.add(mypkg)
+					# ok, now the last-merged package
+					# is protected, and the rest are selected
+		numselected = len(all_selected)
+		if global_unmerge and not numselected:
+			portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
+			return 0
+	
+		if not numselected:
+			portage.writemsg_stdout(
+				"\n>>> No packages selected for removal by " + \
+				unmerge_action + "\n")
+			return 0
+	finally:
+		if vdb_lock:
+			vartree.dbapi.flush_cache()
+			portage.locks.unlockdir(vdb_lock)
+	
+	from portage.sets.base import EditablePackageSet
+	
+	# generate a list of package sets that are directly or indirectly listed in "world",
+	# as there is no persistent list of "installed" sets
+	installed_sets = ["world"]
+	stop = False
+	pos = 0
+	while not stop:
+		stop = True
+		pos = len(installed_sets)
+		for s in installed_sets[pos - 1:]:
+			if s not in sets:
+				continue
+			candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
+			if candidates:
+				stop = False
+				installed_sets += candidates
+	installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
+	del stop, pos
+
+	# we don't want to unmerge packages that are still listed in user-editable package sets
+	# listed in "world" as they would be remerged on the next update of "world" or the 
+	# relevant package sets.
+	unknown_sets = set()
+	for cp in xrange(len(pkgmap)):
+		for cpv in pkgmap[cp]["selected"].copy():
+			try:
+				pkg = _pkg(cpv)
+			except KeyError:
+				# It could have been uninstalled
+				# by a concurrent process.
+				continue
+
+			if unmerge_action != "clean" and \
+				root_config.root == "/" and \
+				portage.match_from_list(
+				portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
+				msg = ("Not unmerging package %s since there is no valid " + \
+				"reason for portage to unmerge itself.") % (pkg.cpv,)
+				for line in textwrap.wrap(msg, 75):
+					out.eerror(line)
+				# adjust pkgmap so the display output is correct
+				pkgmap[cp]["selected"].remove(cpv)
+				all_selected.remove(cpv)
+				pkgmap[cp]["protected"].add(cpv)
+				continue
+
+			parents = []
+			for s in installed_sets:
+				# skip sets that the user requested to unmerge, and skip world 
+				# unless we're unmerging a package set (as the package would be 
+				# removed from "world" later on)
+				if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
+					continue
+
+				if s not in sets:
+					if s in unknown_sets:
+						continue
+					unknown_sets.add(s)
+					out = portage.output.EOutput()
+					out.eerror(("Unknown set '@%s' in " + \
+						"%svar/lib/portage/world_sets") % \
+						(s, root_config.root))
+					continue
+
+				# only check instances of EditablePackageSet as other classes are generally used for
+				# special purposes and can be ignored here (and are usually generated dynamically, so the
+				# user can't do much about them anyway)
+				if isinstance(sets[s], EditablePackageSet):
+
+					# This is derived from a snippet of code in the
+					# depgraph._iter_atoms_for_pkg() method.
+					for atom in sets[s].iterAtomsForPackage(pkg):
+						inst_matches = vartree.dbapi.match(atom)
+						inst_matches.reverse() # descending order
+						higher_slot = None
+						for inst_cpv in inst_matches:
+							try:
+								inst_pkg = _pkg(inst_cpv)
+							except KeyError:
+								# It could have been uninstalled
+								# by a concurrent process.
+								continue
+
+							if inst_pkg.cp != atom.cp:
+								continue
+							if pkg >= inst_pkg:
+								# This is descending order, and we're not
+								# interested in any versions <= pkg given.
+								break
+							if pkg.slot_atom != inst_pkg.slot_atom:
+								higher_slot = inst_pkg
+								break
+						if higher_slot is None:
+							parents.append(s)
+							break
+			if parents:
+				#print colorize("WARN", "Package %s is going to be unmerged," % cpv)
+				#print colorize("WARN", "but still listed in the following package sets:")
+				#print "    %s\n" % ", ".join(parents)
+				print colorize("WARN", "Not unmerging package %s as it is" % cpv)
+				print colorize("WARN", "still referenced by the following package sets:")
+				print "    %s\n" % ", ".join(parents)
+				# adjust pkgmap so the display output is correct
+				pkgmap[cp]["selected"].remove(cpv)
+				all_selected.remove(cpv)
+				pkgmap[cp]["protected"].add(cpv)
+	
+	del installed_sets
+
+	numselected = len(all_selected)
+	if not numselected:
+		writemsg_level(
+			"\n>>> No packages selected for removal by " + \
+			unmerge_action + "\n")
+		return 0
+
+	# Unmerge order only matters in some cases
+	if not ordered:
+		unordered = {}
+		for d in pkgmap:
+			selected = d["selected"]
+			if not selected:
+				continue
+			cp = portage.cpv_getkey(iter(selected).next())
+			cp_dict = unordered.get(cp)
+			if cp_dict is None:
+				cp_dict = {}
+				unordered[cp] = cp_dict
+				for k in d:
+					cp_dict[k] = set()
+			for k, v in d.iteritems():
+				cp_dict[k].update(v)
+		pkgmap = [unordered[cp] for cp in sorted(unordered)]
+
+	for x in xrange(len(pkgmap)):
+		selected = pkgmap[x]["selected"]
+		if not selected:
+			continue
+		for mytype, mylist in pkgmap[x].iteritems():
+			if mytype == "selected":
+				continue
+			mylist.difference_update(all_selected)
+		cp = portage.cpv_getkey(iter(selected).next())
+		for y in localtree.dep_match(cp):
+			if y not in pkgmap[x]["omitted"] and \
+				y not in pkgmap[x]["selected"] and \
+				y not in pkgmap[x]["protected"] and \
+				y not in all_selected:
+				pkgmap[x]["omitted"].add(y)
+		if global_unmerge and not pkgmap[x]["selected"]:
+			#avoid cluttering the preview printout with stuff that isn't getting unmerged
+			continue
+		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
+			writemsg_level(colorize("BAD","\a\n\n!!! " + \
+				"'%s' is part of your system profile.\n" % cp),
+				level=logging.WARNING, noiselevel=-1)
+			writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
+				"be damaging to your system.\n\n"),
+				level=logging.WARNING, noiselevel=-1)
+			if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
+				countdown(int(settings["EMERGE_WARNING_DELAY"]),
+					colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
+		if not quiet:
+			writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
+		else:
+			writemsg_level(bold(cp) + ": ", noiselevel=-1)
+		for mytype in ["selected","protected","omitted"]:
+			if not quiet:
+				writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
+			if pkgmap[x][mytype]:
+				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
+				sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
+				for pn, ver, rev in sorted_pkgs:
+					if rev == "r0":
+						myversion = ver
+					else:
+						myversion = ver + "-" + rev
+					if mytype == "selected":
+						writemsg_level(
+							colorize("UNMERGE_WARN", myversion + " "),
+							noiselevel=-1)
+					else:
+						writemsg_level(
+							colorize("GOOD", myversion + " "), noiselevel=-1)
+			else:
+				writemsg_level("none ", noiselevel=-1)
+			if not quiet:
+				writemsg_level("\n", noiselevel=-1)
+		if quiet:
+			writemsg_level("\n", noiselevel=-1)
+
+	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
+		" packages are slated for removal.\n")
+	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
+			" and " + colorize("GOOD", "'omitted'") + \
+			" packages will not be removed.\n\n")
+
+	if "--pretend" in myopts:
+		#we're done... return
+		return 0
+	if "--ask" in myopts:
+		if userquery("Would you like to unmerge these packages?")=="No":
+			# enter pretend mode for correct formatting of results
+			myopts["--pretend"] = True
+			print
+			print "Quitting."
+			print
+			return 0
+	#the real unmerging begins, after a short delay....
+	if clean_delay and not autoclean:
+		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
+
+	for x in xrange(len(pkgmap)):
+		for y in pkgmap[x]["selected"]:
+			writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
+			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
+			mysplit = y.split("/")
+			#unmerge...
+			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
+				mysettings, unmerge_action not in ["clean","prune"],
+				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
+				scheduler=scheduler)
+
+			if retval != os.EX_OK:
+				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
+				if raise_on_error:
+					raise UninstallFailure(retval)
+				sys.exit(retval)
+			else:
+				if clean_world and hasattr(sets["world"], "cleanPackage"):
+					sets["world"].cleanPackage(vartree.dbapi, y)
+				emergelog(xterm_titles, " >>> unmerge success: "+y)
+	if clean_world and hasattr(sets["world"], "remove"):
+		for s in root_config.setconfig.active:
+			sets["world"].remove(SETPREFIX+s)
+	return 1
+
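
Each entry appended to pkgmap above is a dict with "selected", "protected" and "omitted" sets, and the preview loop prints one right-justified label per category for each cp. A simplified, colour-free sketch of that layout with a toy entry (package names are hypothetical; the real code sorts versions with portage.catpkgsplit/pkgcmp and colourizes them):

pkgmap_entry = {
	"selected":  set(["app-misc/foo-1.2", "app-misc/foo-1.3"]),
	"protected": set(["app-misc/foo-1.4"]),
	"omitted":   set(),
}

print
print " app-misc/foo"
for mytype in ["selected", "protected", "omitted"]:
	line = (mytype + ": ").rjust(14)
	if pkgmap_entry[mytype]:
		line += " ".join(sorted(pkgmap_entry[mytype]))
	else:
		line += "none"
	print line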

Copied: main/branches/prefix/pym/_emerge/userquery.py (from rev 13669, main/trunk/pym/_emerge/userquery.py)
===================================================================
--- main/branches/prefix/pym/_emerge/userquery.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/userquery.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,44 @@
+import sys
+
+from portage.output import bold, create_color_func
+
+def userquery(prompt, responses=None, colours=None):
+	"""Displays a prompt and a set of responses, then waits for a response
+	which is checked against the responses and the first to match is
+	returned.  An empty response will match the first value in responses.  The
+	input buffer is *not* cleared prior to the prompt!
+
+	prompt: a String.
+	responses: a List of Strings.
+	colours: a List of Functions taking and returning a String, used to
+	process the responses for display. Typically these will be functions
+	like red() but could be e.g. lambda x: "DisplayString".
+	If responses is omitted, defaults to ["Yes", "No"], [green, red].
+	If only colours is omitted, defaults to [bold, ...].
+
+	Returns a member of the List responses. (If called without optional
+	arguments, returns "Yes" or "No".)
+	KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
+	printed."""
+	if responses is None:
+		responses = ["Yes", "No"]
+		colours = [
+			create_color_func("PROMPT_CHOICE_DEFAULT"),
+			create_color_func("PROMPT_CHOICE_OTHER")
+		]
+	elif colours is None:
+		colours=[bold]
+	colours=(colours*len(responses))[:len(responses)]
+	print bold(prompt),
+	try:
+		while True:
+			response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
+			for key in responses:
+				# An empty response will match the first value in responses.
+				if response.upper()==key[:len(response)].upper():
+					return key
+			print "Sorry, response '%s' not understood." % response,
+	except (EOFError, KeyboardInterrupt):
+		print "Interrupted."
+		sys.exit(1)
+
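
As the docstring says, an empty response selects the first entry and matching is a case-insensitive prefix test against each response in order, so a lone "a" below would pick "Always". A short usage sketch, assuming an interactive terminal and the _emerge.userquery module path introduced by this commit:

from _emerge.userquery import userquery

# default Yes/No prompt; hitting enter returns "Yes"
if userquery("Would you like to merge these packages?") == "No":
	print "Quitting."
else:
	# custom responses; colours default to bold when only responses are given
	choice = userquery("Overwrite the existing file?",
		responses=["Yes", "No", "Always"])
	print "You chose:", choice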

Copied: main/branches/prefix/pym/_emerge/visible.py (from rev 13669, main/trunk/pym/_emerge/visible.py)
===================================================================
--- main/branches/prefix/pym/_emerge/visible.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/visible.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,47 @@
+# for an explanation on this logic, see pym/_emerge/__init__.py
+import os
+import sys
+if os.environ.__contains__("PORTAGE_PYTHONPATH"):
+	sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
+else:
+	sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
+import portage
+
+def visible(pkgsettings, pkg):
+	"""
+	Check if a package is visible. This can raise an InvalidDependString
+	exception if LICENSE is invalid.
+	TODO: optionally generate a list of masking reasons
+	@rtype: Boolean
+	@returns: True if the package is visible, False otherwise.
+	"""
+	if not pkg.metadata["SLOT"]:
+		return False
+	if not pkg.installed:
+		if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
+			return False
+	if pkg.built and not pkg.installed:
+		# we can have an old binary which has no EPREFIX information
+		if "EPREFIX" not in pkg.metadata or not pkg.metadata["EPREFIX"]:
+			return False
+		if len(pkg.metadata["EPREFIX"].strip()) < len(pkgsettings["EPREFIX"]):
+			return False
+	eapi = pkg.metadata["EAPI"]
+	if not portage.eapi_is_supported(eapi):
+		return False
+	if not pkg.installed:
+		if portage._eapi_is_deprecated(eapi):
+			return False
+		if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
+			return False
+	if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
+		return False
+	if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
+		return False
+	try:
+		if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
+			return False
+	except portage.exception.InvalidDependString:
+		return False
+	return True
+
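
visible() takes a portage.config instance and a Package whose metadata provides SLOT, EAPI and the keyword/license/mask-related keys (plus EPREFIX for prefix binaries). A rough sketch of filtering installed versions of an atom the way emerge's search and depgraph code does, assuming root_config and pkgsettings objects already prepared by emerge's normal setup (the atom is only an example):

from itertools import izip

from _emerge.Package import Package
from _emerge.visible import visible

vardb   = root_config.trees["vartree"].dbapi
db_keys = list(vardb._aux_cache_keys)

for cpv in vardb.match("app-misc/foo"):
	pkg = Package(cpv=cpv, installed=True,
		metadata=izip(db_keys, vardb.aux_get(cpv, db_keys)),
		root_config=root_config, type_name="installed")
	if visible(pkgsettings, pkg):
		print cpv, "is visible"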



