From mboxrd@z Thu Jan 1 00:00:00 1970
To: gentoo-commits@lists.gentoo.org
From: "Fabian Groffen (grobian)"
Subject: [gentoo-commits] portage r13708 - main/branches/prefix/pym/_emerge
Date: Sat, 27 Jun 2009 13:35:39 +0000
Content-Type: text/plain; charset=UTF-8
X-VCS-Repository: portage
X-VCS-Revision: 13708
X-VCS-Committer: grobian
X-VCS-Committer-Name: Fabian Groffen
X-VCS-Directories: main/branches/prefix/pym/_emerge
X-VCS-Files: main/branches/prefix/pym/_emerge/BlockerDB.py
	main/branches/prefix/pym/_emerge/FakeVartree.py
	main/branches/prefix/pym/_emerge/MergeListItem.py
	main/branches/prefix/pym/_emerge/MetadataRegen.py
	main/branches/prefix/pym/_emerge/Package.py
	main/branches/prefix/pym/_emerge/PackageCounters.py
	main/branches/prefix/pym/_emerge/PackageUninstall.py
	main/branches/prefix/pym/_emerge/RootConfig.py
	main/branches/prefix/pym/_emerge/countdown.py
	main/branches/prefix/pym/_emerge/emergelog.py
	main/branches/prefix/pym/_emerge/format_size.py
	main/branches/prefix/pym/_emerge/search.py
	main/branches/prefix/pym/_emerge/show_invalid_depstring_notice.py
	main/branches/prefix/pym/_emerge/unmerge.py
	main/branches/prefix/pym/_emerge/userquery.py
	main/branches/prefix/pym/_emerge/visible.py
	main/branches/prefix/pym/_emerge/__init__.py

Author: grobian
Date: 2009-06-27 13:35:38 +0000 (Sat, 27 Jun 2009)
New Revision: 13708

Added:
   main/branches/prefix/pym/_emerge/BlockerDB.py
   main/branches/prefix/pym/_emerge/FakeVartree.py
   main/branches/prefix/pym/_emerge/MergeListItem.py
   main/branches/prefix/pym/_emerge/MetadataRegen.py
   main/branches/prefix/pym/_emerge/Package.py
   main/branches/prefix/pym/_emerge/PackageCounters.py
   main/branches/prefix/pym/_emerge/PackageUninstall.py
   main/branches/prefix/pym/_emerge/RootConfig.py
   main/branches/prefix/pym/_emerge/countdown.py
   main/branches/prefix/pym/_emerge/emergelog.py
   main/branches/prefix/pym/_emerge/format_size.py
   main/branches/prefix/pym/_emerge/search.py
   main/branches/prefix/pym/_emerge/show_invalid_depstring_notice.py
   main/branches/prefix/pym/_emerge/unmerge.py
   main/branches/prefix/pym/_emerge/userquery.py
   main/branches/prefix/pym/_emerge/visible.py
Modified:
   main/branches/prefix/pym/_emerge/__init__.py
Log:
Merged from trunk -r13668:13669

 | 13669   | Bug #275047 - Split _emerge/__init__.py into smaller pieces |
 | zmedico | (part 4). Thanks to Sebastian Mingramm (few)                |
 |         | for this patch.                                             |

Copied: main/branches/prefix/pym/_emerge/BlockerDB.py (from rev 13669, main/trunk/pym/_emerge/BlockerDB.py)
===================================================================
--- main/branches/prefix/pym/_emerge/BlockerDB.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/BlockerDB.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,126 @@
+# for an explanation on this logic, see pym/_emerge/__init__.py
+import os
+import sys
+if os.environ.__contains__("PORTAGE_PYTHONPATH"):
+	sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
+else:
+	sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
+import portage
+
+from portage import digraph
+from portage.sets.base import InternalPackageSet
+
+from _emerge.BlockerCache import BlockerCache
+from _emerge.FakeVartree import FakeVartree
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+
+class BlockerDB(object):
+
+	def __init__(self, root_config):
+		self._root_config = root_config
+		self._vartree = root_config.trees["vartree"]
+		self._portdb = root_config.trees["porttree"].dbapi
+
+		self._dep_check_trees = None
+		self._fake_vartree = None
+
+	def _get_fake_vartree(self, acquire_lock=0):
+		fake_vartree = self._fake_vartree
+		if fake_vartree is None:
+			fake_vartree = FakeVartree(self._root_config,
+				acquire_lock=acquire_lock)
+			self._fake_vartree = fake_vartree
+			self._dep_check_trees = { self._vartree.root : {
+				"porttree" : fake_vartree,
+				"vartree" : fake_vartree,
+			}}
+		else:
+			fake_vartree.sync(acquire_lock=acquire_lock)
+		return fake_vartree
+
+	def findInstalledBlockers(self, new_pkg, acquire_lock=0):
+		blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
+		dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
+		settings = self._vartree.settings
+		stale_cache = set(blocker_cache)
+		fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
+		dep_check_trees = self._dep_check_trees
+		vardb = fake_vartree.dbapi
+		installed_pkgs = list(vardb)
+
+		for inst_pkg in installed_pkgs:
+			stale_cache.discard(inst_pkg.cpv)
+			cached_blockers = blocker_cache.get(inst_pkg.cpv)
+			if cached_blockers is not None and \
+				cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
+				cached_blockers = None
+			if cached_blockers is not None:
+				blocker_atoms = cached_blockers.atoms
+			else:
+				# Use aux_get() to trigger FakeVartree global
+				# updates on *DEPEND when appropriate.
+				depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
+				try:
+					portage.dep._dep_check_strict = False
+					success, atoms = portage.dep_check(depstr,
+						vardb, settings, myuse=inst_pkg.use.enabled,
+						trees=dep_check_trees, myroot=inst_pkg.root)
+				finally:
+					portage.dep._dep_check_strict = True
+				if not success:
+					pkg_location = os.path.join(inst_pkg.root,
+						portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
+					portage.writemsg("!!! %s/*DEPEND: %s\n" % \
+						(pkg_location, atoms), noiselevel=-1)
+					continue
+
+				blocker_atoms = [atom for atom in atoms \
+					if atom.startswith("!")]
+				blocker_atoms.sort()
+				counter = long(inst_pkg.metadata["COUNTER"])
+				blocker_cache[inst_pkg.cpv] = \
+					blocker_cache.BlockerData(counter, blocker_atoms)
+		for cpv in stale_cache:
+			del blocker_cache[cpv]
+		blocker_cache.flush()
+
+		blocker_parents = digraph()
+		blocker_atoms = []
+		for pkg in installed_pkgs:
+			for blocker_atom in blocker_cache[pkg.cpv].atoms:
+				blocker_atom = blocker_atom.lstrip("!")
+				blocker_atoms.append(blocker_atom)
+				blocker_parents.add(blocker_atom, pkg)
+
+		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+		blocking_pkgs = set()
+		for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
+			blocking_pkgs.update(blocker_parents.parent_nodes(atom))
+
+		# Check for blockers in the other direction.
+		depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
+		try:
+			portage.dep._dep_check_strict = False
+			success, atoms = portage.dep_check(depstr,
+				vardb, settings, myuse=new_pkg.use.enabled,
+				trees=dep_check_trees, myroot=new_pkg.root)
+		finally:
+			portage.dep._dep_check_strict = True
+		if not success:
+			# We should never get this far with invalid deps.
+			show_invalid_depstring_notice(new_pkg, depstr, atoms)
+			assert False
+
+		blocker_atoms = [atom.lstrip("!") for atom in atoms \
+			if atom[:1] == "!"]
+		if blocker_atoms:
+			blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+			for inst_pkg in installed_pkgs:
+				try:
+					blocker_atoms.iterAtomsForPackage(inst_pkg).next()
+				except (portage.exception.InvalidDependString, StopIteration):
+					continue
+				blocking_pkgs.add(inst_pkg)
+
+		return blocking_pkgs
+

Copied: main/branches/prefix/pym/_emerge/FakeVartree.py (from rev 13669, main/trunk/pym/_emerge/FakeVartree.py)
===================================================================
--- main/branches/prefix/pym/_emerge/FakeVartree.py	                        (rev 0)
+++ main/branches/prefix/pym/_emerge/FakeVartree.py	2009-06-27 13:35:38 UTC (rev 13708)
@@ -0,0 +1,235 @@
+import os
+from itertools import izip
+
+# for an explanation on this logic, see pym/_emerge/__init__.py
+import sys
+if os.environ.__contains__("PORTAGE_PYTHONPATH"):
+	sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])
+else:
+	sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "pym"))
+import portage
+
+from _emerge.Package import Package
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+
+class FakeVartree(portage.vartree):
+	"""This is implements an in-memory copy of a vartree instance that provides
+	all the interfaces required for use by the depgraph. The vardb is locked
+	during the constructor call just long enough to read a copy of the
+	installed package information. This allows the depgraph to do it's
+	dependency calculations without holding a lock on the vardb.
It also + allows things like vardb global updates to be done in memory so that th= e + user doesn't necessarily need write access to the vardb in cases where + global updates are necessary (updates are performed when necessary if t= here + is not a matching ebuild in the tree).""" + def __init__(self, root_config, pkg_cache=3DNone, acquire_lock=3D1): + self._root_config =3D root_config + if pkg_cache is None: + pkg_cache =3D {} + real_vartree =3D root_config.trees["vartree"] + portdb =3D root_config.trees["porttree"].dbapi + self.root =3D real_vartree.root + self.settings =3D real_vartree.settings + mykeys =3D list(real_vartree.dbapi._aux_cache_keys) + if "_mtime_" not in mykeys: + mykeys.append("_mtime_") + self._db_keys =3D mykeys + self._pkg_cache =3D pkg_cache + self.dbapi =3D PackageVirtualDbapi(real_vartree.settings) + vdb_path =3D os.path.join(self.root, portage.VDB_PATH) + try: + # At least the parent needs to exist for the lock file. + portage.util.ensure_dirs(vdb_path) + except portage.exception.PortageException: + pass + vdb_lock =3D None + try: + if acquire_lock and os.access(vdb_path, os.W_OK): + vdb_lock =3D portage.locks.lockdir(vdb_path) + real_dbapi =3D real_vartree.dbapi + slot_counters =3D {} + for cpv in real_dbapi.cpv_all(): + cache_key =3D ("installed", self.root, cpv, "nomerge") + pkg =3D self._pkg_cache.get(cache_key) + if pkg is not None: + metadata =3D pkg.metadata + else: + metadata =3D dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys))) + myslot =3D metadata["SLOT"] + mycp =3D portage.dep_getkey(cpv) + myslot_atom =3D "%s:%s" % (mycp, myslot) + try: + mycounter =3D long(metadata["COUNTER"]) + except ValueError: + mycounter =3D 0 + metadata["COUNTER"] =3D str(mycounter) + other_counter =3D slot_counters.get(myslot_atom, None) + if other_counter is not None: + if other_counter > mycounter: + continue + slot_counters[myslot_atom] =3D mycounter + if pkg is None: + pkg =3D Package(built=3DTrue, cpv=3Dcpv, + installed=3DTrue, metadata=3Dmetadata, + root_config=3Droot_config, type_name=3D"installed") + self._pkg_cache[pkg] =3D pkg + self.dbapi.cpv_inject(pkg) + real_dbapi.flush_cache() + finally: + if vdb_lock: + portage.locks.unlockdir(vdb_lock) + # Populate the old-style virtuals using the cached values. + if not self.settings.treeVirtuals: + self.settings.treeVirtuals =3D portage.util.map_dictlist_vals( + portage.getCPFromCPV, self.get_all_provides()) + + # Intialize variables needed for lazy cache pulls of the live ebuild + # metadata. This ensures that the vardb lock is released ASAP, withou= t + # being delayed in case cache generation is triggered. + self._aux_get =3D self.dbapi.aux_get + self.dbapi.aux_get =3D self._aux_get_wrapper + self._match =3D self.dbapi.match + self.dbapi.match =3D self._match_wrapper + self._aux_get_history =3D set() + self._portdb_keys =3D ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"] + self._portdb =3D portdb + self._global_updates =3D None + + def _match_wrapper(self, cpv, use_cache=3D1): + """ + Make sure the metadata in Package instances gets updated for any + cpv that is returned from a match() call, since the metadata can + be accessed directly from the Package instance instead of via + aux_get(). 
+ """ + matches =3D self._match(cpv, use_cache=3Duse_cache) + for cpv in matches: + if cpv in self._aux_get_history: + continue + self._aux_get_wrapper(cpv, []) + return matches + + def _aux_get_wrapper(self, pkg, wants): + if pkg in self._aux_get_history: + return self._aux_get(pkg, wants) + self._aux_get_history.add(pkg) + try: + # Use the live ebuild metadata if possible. + live_metadata =3D dict(izip(self._portdb_keys, + self._portdb.aux_get(pkg, self._portdb_keys))) + if not portage.eapi_is_supported(live_metadata["EAPI"]): + raise KeyError(pkg) + self.dbapi.aux_update(pkg, live_metadata) + except (KeyError, portage.exception.PortageException): + if self._global_updates is None: + self._global_updates =3D \ + grab_global_updates(self._portdb.porttree_root) + perform_global_updates( + pkg, self.dbapi, self._global_updates) + return self._aux_get(pkg, wants) + + def sync(self, acquire_lock=3D1): + """ + Call this method to synchronize state with the real vardb + after one or more packages may have been installed or + uninstalled. + """ + vdb_path =3D os.path.join(self.root, portage.VDB_PATH) + try: + # At least the parent needs to exist for the lock file. + portage.util.ensure_dirs(vdb_path) + except portage.exception.PortageException: + pass + vdb_lock =3D None + try: + if acquire_lock and os.access(vdb_path, os.W_OK): + vdb_lock =3D portage.locks.lockdir(vdb_path) + self._sync() + finally: + if vdb_lock: + portage.locks.unlockdir(vdb_lock) + + def _sync(self): + + real_vardb =3D self._root_config.trees["vartree"].dbapi + current_cpv_set =3D frozenset(real_vardb.cpv_all()) + pkg_vardb =3D self.dbapi + aux_get_history =3D self._aux_get_history + + # Remove any packages that have been uninstalled. + for pkg in list(pkg_vardb): + if pkg.cpv not in current_cpv_set: + pkg_vardb.cpv_remove(pkg) + aux_get_history.discard(pkg.cpv) + + # Validate counters and timestamps. 
+ slot_counters =3D {} + root =3D self.root + validation_keys =3D ["COUNTER", "_mtime_"] + for cpv in current_cpv_set: + + pkg_hash_key =3D ("installed", root, cpv, "nomerge") + pkg =3D pkg_vardb.get(pkg_hash_key) + if pkg is not None: + counter, mtime =3D real_vardb.aux_get(cpv, validation_keys) + try: + counter =3D long(counter) + except ValueError: + counter =3D 0 + + if counter !=3D pkg.counter or \ + mtime !=3D pkg.mtime: + pkg_vardb.cpv_remove(pkg) + aux_get_history.discard(pkg.cpv) + pkg =3D None + + if pkg is None: + pkg =3D self._pkg(cpv) + + other_counter =3D slot_counters.get(pkg.slot_atom) + if other_counter is not None: + if other_counter > pkg.counter: + continue + + slot_counters[pkg.slot_atom] =3D pkg.counter + pkg_vardb.cpv_inject(pkg) + + real_vardb.flush_cache() + + def _pkg(self, cpv): + root_config =3D self._root_config + real_vardb =3D root_config.trees["vartree"].dbapi + pkg =3D Package(cpv=3Dcpv, installed=3DTrue, + metadata=3Dizip(self._db_keys, + real_vardb.aux_get(cpv, self._db_keys)), + root_config=3Droot_config, + type_name=3D"installed") + + try: + mycounter =3D long(pkg.metadata["COUNTER"]) + except ValueError: + mycounter =3D 0 + pkg.metadata["COUNTER"] =3D str(mycounter) + + return pkg + +def grab_global_updates(portdir): + from portage.update import grab_updates, parse_updates + updpath =3D os.path.join(portdir, "profiles", "updates") + try: + rawupdates =3D grab_updates(updpath) + except portage.exception.DirectoryNotFound: + rawupdates =3D [] + upd_commands =3D [] + for mykey, mystat, mycontent in rawupdates: + commands, errors =3D parse_updates(mycontent) + upd_commands.extend(commands) + return upd_commands + +def perform_global_updates(mycpv, mydb, mycommands): + from portage.update import update_dbentries + aux_keys =3D ["DEPEND", "RDEPEND", "PDEPEND"] + aux_dict =3D dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys))) + updates =3D update_dbentries(mycommands, aux_dict) + if updates: + mydb.aux_update(mycpv, updates) Copied: main/branches/prefix/pym/_emerge/MergeListItem.py (from rev 13669= , main/trunk/pym/_emerge/MergeListItem.py) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/MergeListItem.py = (rev 0) +++ main/branches/prefix/pym/_emerge/MergeListItem.py 2009-06-27 13:35:38= UTC (rev 13708) @@ -0,0 +1,146 @@ +import os + +from portage.output import colorize + +from _emerge.Binpkg import Binpkg +from _emerge.CompositeTask import CompositeTask +from _emerge.EbuildBuild import EbuildBuild +from _emerge.PackageUninstall import PackageUninstall + +class MergeListItem(CompositeTask): + + """ + TODO: For parallel scheduling, everything here needs asynchronous + execution support (start, poll, and wait methods). 
+ """ + + __slots__ =3D ("args_set", + "binpkg_opts", "build_opts", "config_pool", "emerge_opts", + "find_blockers", "logger", "mtimedb", "pkg", + "pkg_count", "pkg_to_replace", "prefetcher", + "settings", "statusMessage", "world_atom") + \ + ("_install_task",) + + def _start(self): + + pkg =3D self.pkg + build_opts =3D self.build_opts + + if pkg.installed: + # uninstall, executed by self.merge() + self.returncode =3D os.EX_OK + self.wait() + return + + args_set =3D self.args_set + find_blockers =3D self.find_blockers + logger =3D self.logger + mtimedb =3D self.mtimedb + pkg_count =3D self.pkg_count + scheduler =3D self.scheduler + settings =3D self.settings + world_atom =3D self.world_atom + ldpath_mtimes =3D mtimedb["ldpath"] + + action_desc =3D "Emerging" + preposition =3D "for" + if pkg.type_name =3D=3D "binary": + action_desc +=3D " binary" + + if build_opts.fetchonly: + action_desc =3D "Fetching" + + msg =3D "%s (%s of %s) %s" % \ + (action_desc, + colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)), + colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)), + colorize("GOOD", pkg.cpv)) + + portdb =3D pkg.root_config.trees["porttree"].dbapi + portdir_repo_name =3D portdb._repository_map.get(portdb.porttree_root) + if portdir_repo_name: + pkg_repo_name =3D pkg.metadata.get("repository") + if pkg_repo_name !=3D portdir_repo_name: + if not pkg_repo_name: + pkg_repo_name =3D "unknown repo" + msg +=3D " from %s" % pkg_repo_name + + if pkg.root !=3D "/": + msg +=3D " %s %s" % (preposition, pkg.root) + + if not build_opts.pretend: + self.statusMessage(msg) + logger.log(" >>> emerge (%s of %s) %s to %s" % \ + (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root)) + + if pkg.type_name =3D=3D "ebuild": + + build =3D EbuildBuild(args_set=3Dargs_set, + background=3Dself.background, + config_pool=3Dself.config_pool, + find_blockers=3Dfind_blockers, + ldpath_mtimes=3Dldpath_mtimes, logger=3Dlogger, + opts=3Dbuild_opts, pkg=3Dpkg, pkg_count=3Dpkg_count, + prefetcher=3Dself.prefetcher, scheduler=3Dscheduler, + settings=3Dsettings, world_atom=3Dworld_atom) + + self._install_task =3D build + self._start_task(build, self._default_final_exit) + return + + elif pkg.type_name =3D=3D "binary": + + binpkg =3D Binpkg(background=3Dself.background, + find_blockers=3Dfind_blockers, + ldpath_mtimes=3Dldpath_mtimes, logger=3Dlogger, + opts=3Dself.binpkg_opts, pkg=3Dpkg, pkg_count=3Dpkg_count, + prefetcher=3Dself.prefetcher, settings=3Dsettings, + scheduler=3Dscheduler, world_atom=3Dworld_atom) + + self._install_task =3D binpkg + self._start_task(binpkg, self._default_final_exit) + return + + def _poll(self): + self._install_task.poll() + return self.returncode + + def _wait(self): + self._install_task.wait() + return self.returncode + + def merge(self): + + pkg =3D self.pkg + build_opts =3D self.build_opts + find_blockers =3D self.find_blockers + logger =3D self.logger + mtimedb =3D self.mtimedb + pkg_count =3D self.pkg_count + prefetcher =3D self.prefetcher + scheduler =3D self.scheduler + settings =3D self.settings + world_atom =3D self.world_atom + ldpath_mtimes =3D mtimedb["ldpath"] + + if pkg.installed: + if not (build_opts.buildpkgonly or \ + build_opts.fetchonly or build_opts.pretend): + + uninstall =3D PackageUninstall(background=3Dself.background, + ldpath_mtimes=3Dldpath_mtimes, opts=3Dself.emerge_opts, + pkg=3Dpkg, scheduler=3Dscheduler, settings=3Dsettings) + + uninstall.start() + retval =3D uninstall.wait() + if retval !=3D os.EX_OK: + return retval + return os.EX_OK + + if build_opts.fetchonly 
or \ + build_opts.buildpkgonly: + return self.returncode + + retval =3D self._install_task.install() + return retval + Copied: main/branches/prefix/pym/_emerge/MetadataRegen.py (from rev 13669= , main/trunk/pym/_emerge/MetadataRegen.py) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/MetadataRegen.py = (rev 0) +++ main/branches/prefix/pym/_emerge/MetadataRegen.py 2009-06-27 13:35:38= UTC (rev 13708) @@ -0,0 +1,169 @@ +# for an explanation on this logic, see pym/_emerge/__init__.py +import os +import sys +if os.environ.__contains__("PORTAGE_PYTHONPATH"): + sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"]) +else: + sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path= .realpath(__file__))), "pym")) +import portage + +from _emerge.EbuildMetadataPhase import EbuildMetadataPhase +from _emerge.PollScheduler import PollScheduler + +class MetadataRegen(PollScheduler): + + def __init__(self, portdb, cp_iter=3DNone, consumer=3DNone, + max_jobs=3DNone, max_load=3DNone): + PollScheduler.__init__(self) + self._portdb =3D portdb + self._global_cleanse =3D False + if cp_iter is None: + cp_iter =3D self._iter_every_cp() + # We can globally cleanse stale cache only if we + # iterate over every single cp. + self._global_cleanse =3D True + self._cp_iter =3D cp_iter + self._consumer =3D consumer + + if max_jobs is None: + max_jobs =3D 1 + + self._max_jobs =3D max_jobs + self._max_load =3D max_load + self._sched_iface =3D self._sched_iface_class( + register=3Dself._register, + schedule=3Dself._schedule_wait, + unregister=3Dself._unregister) + + self._valid_pkgs =3D set() + self._cp_set =3D set() + self._process_iter =3D self._iter_metadata_processes() + self.returncode =3D os.EX_OK + self._error_count =3D 0 + + def _iter_every_cp(self): + every_cp =3D self._portdb.cp_all() + every_cp.sort(reverse=3DTrue) + try: + while True: + yield every_cp.pop() + except IndexError: + pass + + def _iter_metadata_processes(self): + portdb =3D self._portdb + valid_pkgs =3D self._valid_pkgs + cp_set =3D self._cp_set + consumer =3D self._consumer + + for cp in self._cp_iter: + cp_set.add(cp) + portage.writemsg_stdout("Processing %s\n" % cp) + cpv_list =3D portdb.cp_list(cp) + for cpv in cpv_list: + valid_pkgs.add(cpv) + ebuild_path, repo_path =3D portdb.findname2(cpv) + metadata, st, emtime =3D portdb._pull_valid_cache( + cpv, ebuild_path, repo_path) + if metadata is not None: + if consumer is not None: + consumer(cpv, ebuild_path, + repo_path, metadata) + continue + + yield EbuildMetadataPhase(cpv=3Dcpv, ebuild_path=3Debuild_path, + ebuild_mtime=3Demtime, + metadata_callback=3Dportdb._metadata_callback, + portdb=3Dportdb, repo_path=3Drepo_path, + settings=3Dportdb.doebuild_settings) + + def run(self): + + portdb =3D self._portdb + from portage.cache.cache_errors import CacheError + dead_nodes =3D {} + + while self._schedule(): + self._poll_loop() + + while self._jobs: + self._poll_loop() + + if self._global_cleanse: + for mytree in portdb.porttrees: + try: + dead_nodes[mytree] =3D set(portdb.auxdb[mytree].iterkeys()) + except CacheError, e: + portage.writemsg("Error listing cache entries for " + \ + "'%s': %s, continuing...\n" % (mytree, e), + noiselevel=3D-1) + del e + dead_nodes =3D None + break + else: + cp_set =3D self._cp_set + cpv_getkey =3D portage.cpv_getkey + for mytree in portdb.porttrees: + try: + 
dead_nodes[mytree] =3D set(cpv for cpv in \ + portdb.auxdb[mytree].iterkeys() \ + if cpv_getkey(cpv) in cp_set) + except CacheError, e: + portage.writemsg("Error listing cache entries for " + \ + "'%s': %s, continuing...\n" % (mytree, e), + noiselevel=3D-1) + del e + dead_nodes =3D None + break + + if dead_nodes: + for y in self._valid_pkgs: + for mytree in portdb.porttrees: + if portdb.findname2(y, mytree=3Dmytree)[0]: + dead_nodes[mytree].discard(y) + + for mytree, nodes in dead_nodes.iteritems(): + auxdb =3D portdb.auxdb[mytree] + for y in nodes: + try: + del auxdb[y] + except (KeyError, CacheError): + pass + + def _schedule_tasks(self): + """ + @rtype: bool + @returns: True if there may be remaining tasks to schedule, + False otherwise. + """ + while self._can_add_job(): + try: + metadata_process =3D self._process_iter.next() + except StopIteration: + return False + + self._jobs +=3D 1 + metadata_process.scheduler =3D self._sched_iface + metadata_process.addExitListener(self._metadata_exit) + metadata_process.start() + return True + + def _metadata_exit(self, metadata_process): + self._jobs -=3D 1 + if metadata_process.returncode !=3D os.EX_OK: + self.returncode =3D 1 + self._error_count +=3D 1 + self._valid_pkgs.discard(metadata_process.cpv) + portage.writemsg("Error processing %s, continuing...\n" % \ + (metadata_process.cpv,), noiselevel=3D-1) + + if self._consumer is not None: + # On failure, still notify the consumer (in this case the metadata + # argument is None). + self._consumer(metadata_process.cpv, + metadata_process.ebuild_path, + metadata_process.repo_path, + metadata_process.metadata) + + self._schedule() + Copied: main/branches/prefix/pym/_emerge/Package.py (from rev 13669, main= /trunk/pym/_emerge/Package.py) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/Package.py (= rev 0) +++ main/branches/prefix/pym/_emerge/Package.py 2009-06-27 13:35:38 UTC (= rev 13708) @@ -0,0 +1,187 @@ +import re +from itertools import chain + +# for an explanation on this logic, see pym/_emerge/__init__.py +import os +import sys +if os.environ.__contains__("PORTAGE_PYTHONPATH"): + sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"]) +else: + sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path= .realpath(__file__))), "pym")) +import portage + +from portage.cache.mappings import slot_dict_class + +from _emerge.Task import Task + +class Package(Task): + + __hash__ =3D Task.__hash__ + __slots__ =3D ("built", "cpv", "depth", + "installed", "metadata", "onlydeps", "operation", + "root_config", "type_name", + "category", "counter", "cp", "cpv_split", + "inherited", "iuse", "mtime", + "pf", "pv_split", "root", "slot", "slot_atom", "use") + + metadata_keys =3D [ + "CHOST", "COUNTER", "DEPEND", "EAPI", + "INHERITED", "IUSE", "KEYWORDS", + "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND", + "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_", + "EPREFIX" ] + + def __init__(self, **kwargs): + Task.__init__(self, **kwargs) + self.root =3D self.root_config.root + self.metadata =3D _PackageMetadataWrapper(self, self.metadata) + self.cp =3D portage.cpv_getkey(self.cpv) + slot =3D self.slot + if not slot: + # Avoid an InvalidAtom exception when creating slot_atom. + # This package instance will be masked due to empty SLOT. 
+ slot =3D '0' + self.slot_atom =3D portage.dep.Atom("%s:%s" % (self.cp, slot)) + self.category, self.pf =3D portage.catsplit(self.cpv) + self.cpv_split =3D portage.catpkgsplit(self.cpv) + self.pv_split =3D self.cpv_split[1:] + + class _use(object): + + __slots__ =3D ("__weakref__", "enabled") + + def __init__(self, use): + self.enabled =3D frozenset(use) + + class _iuse(object): + + __slots__ =3D ("__weakref__", "all", "enabled", "disabled", "iuse_impl= icit", "regex", "tokens") + + def __init__(self, tokens, iuse_implicit): + self.tokens =3D tuple(tokens) + self.iuse_implicit =3D iuse_implicit + enabled =3D [] + disabled =3D [] + other =3D [] + for x in tokens: + prefix =3D x[:1] + if prefix =3D=3D "+": + enabled.append(x[1:]) + elif prefix =3D=3D "-": + disabled.append(x[1:]) + else: + other.append(x) + self.enabled =3D frozenset(enabled) + self.disabled =3D frozenset(disabled) + self.all =3D frozenset(chain(enabled, disabled, other)) + + def __getattribute__(self, name): + if name =3D=3D "regex": + try: + return object.__getattribute__(self, "regex") + except AttributeError: + all =3D object.__getattribute__(self, "all") + iuse_implicit =3D object.__getattribute__(self, "iuse_implicit") + # Escape anything except ".*" which is supposed + # to pass through from _get_implicit_iuse() + regex =3D (re.escape(x) for x in chain(all, iuse_implicit)) + regex =3D "^(%s)$" % "|".join(regex) + regex =3D regex.replace("\\.\\*", ".*") + self.regex =3D re.compile(regex) + return object.__getattribute__(self, name) + + def _get_hash_key(self): + hash_key =3D getattr(self, "_hash_key", None) + if hash_key is None: + if self.operation is None: + self.operation =3D "merge" + if self.onlydeps or self.installed: + self.operation =3D "nomerge" + self._hash_key =3D \ + (self.type_name, self.root, self.cpv, self.operation) + return self._hash_key + + def __lt__(self, other): + if other.cp !=3D self.cp: + return False + if portage.pkgcmp(self.pv_split, other.pv_split) < 0: + return True + return False + + def __le__(self, other): + if other.cp !=3D self.cp: + return False + if portage.pkgcmp(self.pv_split, other.pv_split) <=3D 0: + return True + return False + + def __gt__(self, other): + if other.cp !=3D self.cp: + return False + if portage.pkgcmp(self.pv_split, other.pv_split) > 0: + return True + return False + + def __ge__(self, other): + if other.cp !=3D self.cp: + return False + if portage.pkgcmp(self.pv_split, other.pv_split) >=3D 0: + return True + return False + +_all_metadata_keys =3D set(x for x in portage.auxdbkeys \ + if not x.startswith("UNUSED_")) +_all_metadata_keys.discard("CDEPEND") +_all_metadata_keys.update(Package.metadata_keys) + +_PackageMetadataWrapperBase =3D slot_dict_class(_all_metadata_keys) + +class _PackageMetadataWrapper(_PackageMetadataWrapperBase): + """ + Detect metadata updates and synchronize Package attributes. 
+ """ + + __slots__ =3D ("_pkg",) + _wrapped_keys =3D frozenset( + ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"]) + + def __init__(self, pkg, metadata): + _PackageMetadataWrapperBase.__init__(self) + self._pkg =3D pkg + self.update(metadata) + + def __setitem__(self, k, v): + _PackageMetadataWrapperBase.__setitem__(self, k, v) + if k in self._wrapped_keys: + getattr(self, "_set_" + k.lower())(k, v) + + def _set_inherited(self, k, v): + if isinstance(v, basestring): + v =3D frozenset(v.split()) + self._pkg.inherited =3D v + + def _set_iuse(self, k, v): + self._pkg.iuse =3D self._pkg._iuse( + v.split(), self._pkg.root_config.iuse_implicit) + + def _set_slot(self, k, v): + self._pkg.slot =3D v + + def _set_use(self, k, v): + self._pkg.use =3D self._pkg._use(v.split()) + + def _set_counter(self, k, v): + if isinstance(v, basestring): + try: + v =3D long(v.strip()) + except ValueError: + v =3D 0 + self._pkg.counter =3D v + + def _set__mtime_(self, k, v): + if isinstance(v, basestring): + try: + v =3D long(v.strip()) + except ValueError: + v =3D 0 + self._pkg.mtime =3D v Copied: main/branches/prefix/pym/_emerge/PackageCounters.py (from rev 136= 69, main/trunk/pym/_emerge/PackageCounters.py) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/PackageCounters.py = (rev 0) +++ main/branches/prefix/pym/_emerge/PackageCounters.py 2009-06-27 13:35:= 38 UTC (rev 13708) @@ -0,0 +1,77 @@ +from portage.output import colorize, create_color_func +bad =3D create_color_func("BAD") + +from _emerge.format_size import format_size + +class PackageCounters(object): + + def __init__(self): + self.upgrades =3D 0 + self.downgrades =3D 0 + self.new =3D 0 + self.newslot =3D 0 + self.reinst =3D 0 + self.uninst =3D 0 + self.blocks =3D 0 + self.blocks_satisfied =3D 0 + self.totalsize =3D 0 + self.restrict_fetch =3D 0 + self.restrict_fetch_satisfied =3D 0 + self.interactive =3D 0 + + def __str__(self): + total_installs =3D self.upgrades + self.downgrades + self.newslot + se= lf.new + self.reinst + myoutput =3D [] + details =3D [] + myoutput.append("Total: %s package" % total_installs) + if total_installs !=3D 1: + myoutput.append("s") + if total_installs !=3D 0: + myoutput.append(" (") + if self.upgrades > 0: + details.append("%s upgrade" % self.upgrades) + if self.upgrades > 1: + details[-1] +=3D "s" + if self.downgrades > 0: + details.append("%s downgrade" % self.downgrades) + if self.downgrades > 1: + details[-1] +=3D "s" + if self.new > 0: + details.append("%s new" % self.new) + if self.newslot > 0: + details.append("%s in new slot" % self.newslot) + if self.newslot > 1: + details[-1] +=3D "s" + if self.reinst > 0: + details.append("%s reinstall" % self.reinst) + if self.reinst > 1: + details[-1] +=3D "s" + if self.uninst > 0: + details.append("%s uninstall" % self.uninst) + if self.uninst > 1: + details[-1] +=3D "s" + if self.interactive > 0: + details.append("%s %s" % (self.interactive, + colorize("WARN", "interactive"))) + myoutput.append(", ".join(details)) + if total_installs !=3D 0: + myoutput.append(")") + myoutput.append(", Size of downloads: %s" % format_size(self.totalsize= )) + if self.restrict_fetch: + myoutput.append("\nFetch Restriction: %s package" % \ + self.restrict_fetch) + if self.restrict_fetch > 1: + myoutput.append("s") + if self.restrict_fetch_satisfied < self.restrict_fetch: + 
myoutput.append(bad(" (%s unsatisfied)") % \ + (self.restrict_fetch - self.restrict_fetch_satisfied)) + if self.blocks > 0: + myoutput.append("\nConflict: %s block" % \ + self.blocks) + if self.blocks > 1: + myoutput.append("s") + if self.blocks_satisfied < self.blocks: + myoutput.append(bad(" (%s unsatisfied)") % \ + (self.blocks - self.blocks_satisfied)) + return "".join(myoutput) + Copied: main/branches/prefix/pym/_emerge/PackageUninstall.py (from rev 13= 669, main/trunk/pym/_emerge/PackageUninstall.py) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/PackageUninstall.py = (rev 0) +++ main/branches/prefix/pym/_emerge/PackageUninstall.py 2009-06-27 13:35= :38 UTC (rev 13708) @@ -0,0 +1,50 @@ +import logging +# for an explanation on this logic, see pym/_emerge/__init__.py +import os +import sys +if os.environ.__contains__("PORTAGE_PYTHONPATH"): + sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"]) +else: + sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path= .realpath(__file__))), "pym")) +import portage + +from _emerge.AsynchronousTask import AsynchronousTask +from _emerge.unmerge import unmerge +from _emerge.UninstallFailure import UninstallFailure + +class PackageUninstall(AsynchronousTask): + + __slots__ =3D ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings") + + def _start(self): + try: + unmerge(self.pkg.root_config, self.opts, "unmerge", + [self.pkg.cpv], self.ldpath_mtimes, clean_world=3D0, + clean_delay=3D0, raise_on_error=3D1, scheduler=3Dself.scheduler, + writemsg_level=3Dself._writemsg_level) + except UninstallFailure, e: + self.returncode =3D e.status + else: + self.returncode =3D os.EX_OK + self.wait() + + def _writemsg_level(self, msg, level=3D0, noiselevel=3D0): + + log_path =3D self.settings.get("PORTAGE_LOG_FILE") + background =3D self.background + + if log_path is None: + if not (background and level < logging.WARNING): + portage.util.writemsg_level(msg, + level=3Dlevel, noiselevel=3Dnoiselevel) + else: + if not background: + portage.util.writemsg_level(msg, + level=3Dlevel, noiselevel=3Dnoiselevel) + + f =3D open(log_path, 'a') + try: + f.write(msg) + finally: + f.close() + Copied: main/branches/prefix/pym/_emerge/RootConfig.py (from rev 13669, m= ain/trunk/pym/_emerge/RootConfig.py) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/RootConfig.py = (rev 0) +++ main/branches/prefix/pym/_emerge/RootConfig.py 2009-06-27 13:35:38 UT= C (rev 13708) @@ -0,0 +1,28 @@ +from _emerge.PackageVirtualDbapi import PackageVirtualDbapi + +class RootConfig(object): + """This is used internally by depgraph to track information about a + particular $ROOT.""" + + pkg_tree_map =3D { + "ebuild" : "porttree", + "binary" : "bintree", + "installed" : "vartree" + } + + tree_pkg_map =3D {} + for k, v in pkg_tree_map.iteritems(): + tree_pkg_map[v] =3D k + + def __init__(self, settings, trees, setconfig): + self.trees =3D trees + self.settings =3D settings + self.iuse_implicit =3D tuple(sorted(settings._get_implicit_iuse())) + self.root =3D self.settings["ROOT"] + self.setconfig =3D setconfig + if setconfig is None: + self.sets =3D {} + else: + self.sets =3D self.setconfig.getSets() + 
self.visible_pkgs =3D PackageVirtualDbapi(self.settings) + Modified: main/branches/prefix/pym/_emerge/__init__.py =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/__init__.py 2009-06-27 13:19:35 UTC = (rev 13707) +++ main/branches/prefix/pym/_emerge/__init__.py 2009-06-27 13:35:38 UTC = (rev 13708) @@ -35,17 +35,16 @@ =20 from portage import digraph from portage.const import NEWS_LIB_PATH +from portage.cache.mappings import slot_dict_class =20 import _emerge.help import portage.xpak, commands, errno, re, socket, time from portage.output import blue, bold, colorize, darkblue, darkgreen, gr= een, \ - nc_len, red, teal, turquoise, xtermTitle, \ + nc_len, red, teal, turquoise, \ xtermTitleReset, yellow from portage.output import create_color_func good =3D create_color_func("GOOD") bad =3D create_color_func("BAD") -# white looks bad on terminals with white background -from portage.output import bold as white =20 import portage.elog import portage.dep @@ -70,15 +69,11 @@ from _emerge.UnmergeDepPriority import UnmergeDepPriority from _emerge.DepPriorityNormalRange import DepPriorityNormalRange from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange -from _emerge.Task import Task +from _emerge.Package import Package from _emerge.Blocker import Blocker -from _emerge.AsynchronousTask import AsynchronousTask -from _emerge.CompositeTask import CompositeTask +from _emerge.BlockerDB import BlockerDB from _emerge.EbuildFetcher import EbuildFetcher -from _emerge.EbuildBuild import EbuildBuild -from _emerge.EbuildMetadataPhase import EbuildMetadataPhase from _emerge.EbuildPhase import EbuildPhase -from _emerge.Binpkg import Binpkg from _emerge.BinpkgPrefetcher import BinpkgPrefetcher from _emerge.PackageMerge import PackageMerge from _emerge.DependencyArg import DependencyArg @@ -93,50 +88,22 @@ from _emerge.SequentialTaskQueue import SequentialTaskQueue from _emerge.ProgressHandler import ProgressHandler from _emerge.stdout_spinner import stdout_spinner -from _emerge.UninstallFailure import UninstallFailure from _emerge.JobStatusDisplay import JobStatusDisplay from _emerge.PollScheduler import PollScheduler +from _emerge.search import search +from _emerge.visible import visible +from _emerge.emergelog import emergelog, _emerge_log_dir +from _emerge.userquery import userquery +from _emerge.countdown import countdown +from _emerge.unmerge import unmerge +from _emerge.MergeListItem import MergeListItem +from _emerge.MetadataRegen import MetadataRegen +from _emerge.RootConfig import RootConfig +from _emerge.format_size import format_size +from _emerge.PackageCounters import PackageCounters +from _emerge.FakeVartree import FakeVartree +from _emerge.show_invalid_depstring_notice import show_invalid_depstring= _notice =20 -def userquery(prompt, responses=3DNone, colours=3DNone): - """Displays a prompt and a set of responses, then waits for a response - which is checked against the responses and the first to match is - returned. An empty response will match the first value in responses. = The - input buffer is *not* cleared prior to the prompt! - - prompt: a String. - responses: a List of Strings. - colours: a List of Functions taking and returning a String, used to - process the responses for display. Typically these will be functions - like red() but could be e.g. lambda x: "DisplayString". 
- If responses is omitted, defaults to ["Yes", "No"], [green, red]. - If only colours is omitted, defaults to [bold, ...]. - - Returns a member of the List responses. (If called without optional - arguments, returns "Yes" or "No".) - KeyboardInterrupt is converted to SystemExit to avoid tracebacks being - printed.""" - if responses is None: - responses =3D ["Yes", "No"] - colours =3D [ - create_color_func("PROMPT_CHOICE_DEFAULT"), - create_color_func("PROMPT_CHOICE_OTHER") - ] - elif colours is None: - colours=3D[bold] - colours=3D(colours*len(responses))[:len(responses)] - print bold(prompt), - try: - while True: - response=3Draw_input("["+"/".join([colours[i](responses[i]) for i in = range(len(responses))])+"] ") - for key in responses: - # An empty response will match the first value in responses. - if response.upper()=3D=3Dkey[:len(response)].upper(): - return key - print "Sorry, response '%s' not understood." % response, - except (EOFError, KeyboardInterrupt): - print "Interrupted." - sys.exit(1) - actions =3D frozenset([ "clean", "config", "depclean", "info", "list-sets", "metadata", @@ -191,63 +158,6 @@ "v":"--verbose", "V":"--version" } =20 -_emerge_log_dir =3D '/var/log' - -def emergelog(xterm_titles, mystr, short_msg=3DNone): - if xterm_titles and short_msg: - if "HOSTNAME" in os.environ: - short_msg =3D os.environ["HOSTNAME"]+": "+short_msg - xtermTitle(short_msg) - try: - file_path =3D os.path.join(_emerge_log_dir, 'emerge.log') - mylogfile =3D open(file_path, "a") - portage.util.apply_secpass_permissions(file_path, - uid=3Dportage.portage_uid, gid=3Dportage.portage_gid, - mode=3D0660) - mylock =3D None - try: - mylock =3D portage.locks.lockfile(mylogfile) - # seek because we may have gotten held up by the lock. - # if so, we may not be positioned at the end of the file. - mylogfile.seek(0, 2) - mylogfile.write(str(time.time())[:10]+": "+mystr+"\n") - mylogfile.flush() - finally: - if mylock: - portage.locks.unlockfile(mylock) - mylogfile.close() - except (IOError,OSError,portage.exception.PortageException), e: - if secpass >=3D 1: - print >> sys.stderr, "emergelog():",e - -def countdown(secs=3D5, doing=3D"Starting"): - if secs: - print ">>> Waiting",secs,"seconds before starting..." - print ">>> (Control-C to abort)...\n"+doing+" in: ", - ticks=3Drange(secs) - ticks.reverse() - for sec in ticks: - sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" ")) - sys.stdout.flush() - time.sleep(1) - print - -# formats a size given in bytes nicely -def format_size(mysize): - if isinstance(mysize, basestring): - return mysize - if 0 !=3D mysize % 1024: - # Always round up to the next kB so that it doesn't show 0 kB when - # some small file still needs to be fetched. - mysize +=3D 1024 - mysize % 1024 - mystr=3Dstr(mysize/1024) - mycount=3Dlen(mystr) - while (mycount > 3): - mycount-=3D3 - mystr=3Dmystr[:mycount]+","+mystr[mycount:] - return mystr+" kB" - - def getgccversion(chost): """ rtype: C{str} @@ -344,394 +254,6 @@ myparams.add("complete") return myparams =20 -# search functionality -class search(object): - - # - # class constants - # - VERSION_SHORT=3D1 - VERSION_RELEASE=3D2 - - # - # public interface - # - def __init__(self, root_config, spinner, searchdesc, - verbose, usepkg, usepkgonly): - """Searches the available and installed packages for the supplied sear= ch key. - The list of available and installed packages is created at object inst= antiation. 
- This makes successive searches faster.""" - self.settings =3D root_config.settings - self.vartree =3D root_config.trees["vartree"] - self.spinner =3D spinner - self.verbose =3D verbose - self.searchdesc =3D searchdesc - self.root_config =3D root_config - self.setconfig =3D root_config.setconfig - self.matches =3D {"pkg" : []} - self.mlen =3D 0 - - def fake_portdb(): - pass - self.portdb =3D fake_portdb - for attrib in ("aux_get", "cp_all", - "xmatch", "findname", "getFetchMap"): - setattr(fake_portdb, attrib, getattr(self, "_"+attrib)) - - self._dbs =3D [] - - portdb =3D root_config.trees["porttree"].dbapi - bindb =3D root_config.trees["bintree"].dbapi - vardb =3D root_config.trees["vartree"].dbapi - - if not usepkgonly and portdb._have_root_eclass_dir: - self._dbs.append(portdb) - - if (usepkg or usepkgonly) and bindb.cp_all(): - self._dbs.append(bindb) - - self._dbs.append(vardb) - self._portdb =3D portdb - - def _cp_all(self): - cp_all =3D set() - for db in self._dbs: - cp_all.update(db.cp_all()) - return list(sorted(cp_all)) - - def _aux_get(self, *args, **kwargs): - for db in self._dbs: - try: - return db.aux_get(*args, **kwargs) - except KeyError: - pass - raise - - def _findname(self, *args, **kwargs): - for db in self._dbs: - if db is not self._portdb: - # We don't want findname to return anything - # unless it's an ebuild in a portage tree. - # Otherwise, it's already built and we don't - # care about it. - continue - func =3D getattr(db, "findname", None) - if func: - value =3D func(*args, **kwargs) - if value: - return value - return None - - def _getFetchMap(self, *args, **kwargs): - for db in self._dbs: - func =3D getattr(db, "getFetchMap", None) - if func: - value =3D func(*args, **kwargs) - if value: - return value - return {} - - def _visible(self, db, cpv, metadata): - installed =3D db is self.vartree.dbapi - built =3D installed or db is not self._portdb - pkg_type =3D "ebuild" - if installed: - pkg_type =3D "installed" - elif built: - pkg_type =3D "binary" - return visible(self.settings, - Package(type_name=3Dpkg_type, root_config=3Dself.root_config, - cpv=3Dcpv, built=3Dbuilt, installed=3Dinstalled, metadata=3Dmetadata)= ) - - def _xmatch(self, level, atom): - """ - This method does not expand old-style virtuals because it - is restricted to returning matches for a single ${CATEGORY}/${PN} - and old-style virual matches unreliable for that when querying - multiple package databases. If necessary, old-style virtuals - can be performed on atoms prior to calling this method. 
- """ - cp =3D portage.dep_getkey(atom) - if level =3D=3D "match-all": - matches =3D set() - for db in self._dbs: - if hasattr(db, "xmatch"): - matches.update(db.xmatch(level, atom)) - else: - matches.update(db.match(atom)) - result =3D list(x for x in matches if portage.cpv_getkey(x) =3D=3D cp= ) - db._cpv_sort_ascending(result) - elif level =3D=3D "match-visible": - matches =3D set() - for db in self._dbs: - if hasattr(db, "xmatch"): - matches.update(db.xmatch(level, atom)) - else: - db_keys =3D list(db._aux_cache_keys) - for cpv in db.match(atom): - metadata =3D izip(db_keys, - db.aux_get(cpv, db_keys)) - if not self._visible(db, cpv, metadata): - continue - matches.add(cpv) - result =3D list(x for x in matches if portage.cpv_getkey(x) =3D=3D cp= ) - db._cpv_sort_ascending(result) - elif level =3D=3D "bestmatch-visible": - result =3D None - for db in self._dbs: - if hasattr(db, "xmatch"): - cpv =3D db.xmatch("bestmatch-visible", atom) - if not cpv or portage.cpv_getkey(cpv) !=3D cp: - continue - if not result or cpv =3D=3D portage.best([cpv, result]): - result =3D cpv - else: - db_keys =3D Package.metadata_keys - # break out of this loop with highest visible - # match, checked in descending order - for cpv in reversed(db.match(atom)): - if portage.cpv_getkey(cpv) !=3D cp: - continue - metadata =3D izip(db_keys, - db.aux_get(cpv, db_keys)) - if not self._visible(db, cpv, metadata): - continue - if not result or cpv =3D=3D portage.best([cpv, result]): - result =3D cpv - break - else: - raise NotImplementedError(level) - return result - - def execute(self,searchkey): - """Performs the search for the supplied search key""" - match_category =3D 0 - self.searchkey=3Dsearchkey - self.packagematches =3D [] - if self.searchdesc: - self.searchdesc=3D1 - self.matches =3D {"pkg":[], "desc":[], "set":[]} - else: - self.searchdesc=3D0 - self.matches =3D {"pkg":[], "set":[]} - print "Searching... 
", - - regexsearch =3D False - if self.searchkey.startswith('%'): - regexsearch =3D True - self.searchkey =3D self.searchkey[1:] - if self.searchkey.startswith('@'): - match_category =3D 1 - self.searchkey =3D self.searchkey[1:] - if regexsearch: - self.searchre=3Dre.compile(self.searchkey,re.I) - else: - self.searchre=3Dre.compile(re.escape(self.searchkey), re.I) - for package in self.portdb.cp_all(): - self.spinner.update() - - if match_category: - match_string =3D package[:] - else: - match_string =3D package.split("/")[-1] - - masked=3D0 - if self.searchre.search(match_string): - if not self.portdb.xmatch("match-visible", package): - masked=3D1 - self.matches["pkg"].append([package,masked]) - elif self.searchdesc: # DESCRIPTION searching - full_package =3D self.portdb.xmatch("bestmatch-visible", package) - if not full_package: - #no match found; we don't want to query description - full_package =3D portage.best( - self.portdb.xmatch("match-all", package)) - if not full_package: - continue - else: - masked=3D1 - try: - full_desc =3D self.portdb.aux_get( - full_package, ["DESCRIPTION"])[0] - except KeyError: - print "emerge: search: aux_get() failed, skipping" - continue - if self.searchre.search(full_desc): - self.matches["desc"].append([full_package,masked]) - - self.sdict =3D self.setconfig.getSets() - for setname in self.sdict: - self.spinner.update() - if match_category: - match_string =3D setname - else: - match_string =3D setname.split("/")[-1] - =09 - if self.searchre.search(match_string): - self.matches["set"].append([setname, False]) - elif self.searchdesc: - if self.searchre.search( - self.sdict[setname].getMetadata("DESCRIPTION")): - self.matches["set"].append([setname, False]) - =09 - self.mlen=3D0 - for mtype in self.matches: - self.matches[mtype].sort() - self.mlen +=3D len(self.matches[mtype]) - - def addCP(self, cp): - if not self.portdb.xmatch("match-all", cp): - return - masked =3D 0 - if not self.portdb.xmatch("bestmatch-visible", cp): - masked =3D 1 - self.matches["pkg"].append([cp, masked]) - self.mlen +=3D 1 - - def output(self): - """Outputs the results of the search.""" - print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]" - print "[ Applications found : "+white(str(self.mlen))+" ]" - print " " - vardb =3D self.vartree.dbapi - for mtype in self.matches: - for match,masked in self.matches[mtype]: - full_package =3D None - if mtype =3D=3D "pkg": - catpack =3D match - full_package =3D self.portdb.xmatch( - "bestmatch-visible", match) - if not full_package: - #no match found; we don't want to query description - masked=3D1 - full_package =3D portage.best( - self.portdb.xmatch("match-all",match)) - elif mtype =3D=3D "desc": - full_package =3D match - match =3D portage.cpv_getkey(match) - elif mtype =3D=3D "set": - print green("*")+" "+white(match) - print " ", darkgreen("Description:")+" ", self.sdict[match].ge= tMetadata("DESCRIPTION") - print - if full_package: - try: - desc, homepage, license =3D self.portdb.aux_get( - full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"]) - except KeyError: - print "emerge: search: aux_get() failed, skipping" - continue - if masked: - print green("*")+" "+white(match)+" "+red("[ Masked ]") - else: - print green("*")+" "+white(match) - myversion =3D self.getVersion(full_package, search.VERSION_RELEASE) - - mysum =3D [0,0] - file_size_str =3D None - mycat =3D match.split("/")[0] - mypkg =3D match.split("/")[1] - mycpv =3D match + "-" + myversion - myebuild =3D self.portdb.findname(mycpv) - if myebuild: - pkgdir =3D 
os.path.dirname(myebuild) - from portage import manifest - mf =3D manifest.Manifest( - pkgdir, self.settings["DISTDIR"]) - try: - uri_map =3D self.portdb.getFetchMap(mycpv) - except portage.exception.InvalidDependString, e: - file_size_str =3D "Unknown (%s)" % (e,) - del e - else: - try: - mysum[0] =3D mf.getDistfilesSize(uri_map) - except KeyError, e: - file_size_str =3D "Unknown (missing " + \ - "digest for %s)" % (e,) - del e - - available =3D False - for db in self._dbs: - if db is not vardb and \ - db.cpv_exists(mycpv): - available =3D True - if not myebuild and hasattr(db, "bintree"): - myebuild =3D db.bintree.getname(mycpv) - try: - mysum[0] =3D os.stat(myebuild).st_size - except OSError: - myebuild =3D None - break - - if myebuild and file_size_str is None: - mystr =3D str(mysum[0] / 1024) - mycount =3D len(mystr) - while (mycount > 3): - mycount -=3D 3 - mystr =3D mystr[:mycount] + "," + mystr[mycount:] - file_size_str =3D mystr + " kB" - - if self.verbose: - if available: - print " ", darkgreen("Latest version available:"),myversion - print " ", self.getInstallationStatus(mycat+'/'+mypkg) - if myebuild: - print " %s %s" % \ - (darkgreen("Size of files:"), file_size_str) - print " ", darkgreen("Homepage:")+" ",homepage - print " ", darkgreen("Description:")+" ",desc - print " ", darkgreen("License:")+" ",license - print - # - # private interface - # - def getInstallationStatus(self,package): - installed_package =3D self.vartree.dep_bestmatch(package) - result =3D "" - version =3D self.getVersion(installed_package,search.VERSION_RELEASE) - if len(version) > 0: - result =3D darkgreen("Latest version installed:")+" "+version - else: - result =3D darkgreen("Latest version installed:")+" [ Not Installed ]= " - return result - - def getVersion(self,full_package,detail): - if len(full_package) > 1: - package_parts =3D portage.catpkgsplit(full_package) - if detail =3D=3D search.VERSION_RELEASE and package_parts[3] !=3D 'r0= ': - result =3D package_parts[2]+ "-" + package_parts[3] - else: - result =3D package_parts[2] - else: - result =3D "" - return result - -class RootConfig(object): - """This is used internally by depgraph to track information about a - particular $ROOT.""" - - pkg_tree_map =3D { - "ebuild" : "porttree", - "binary" : "bintree", - "installed" : "vartree" - } - - tree_pkg_map =3D {} - for k, v in pkg_tree_map.iteritems(): - tree_pkg_map[v] =3D k - - def __init__(self, settings, trees, setconfig): - self.trees =3D trees - self.settings =3D settings - self.iuse_implicit =3D tuple(sorted(settings._get_implicit_iuse())) - self.root =3D self.settings["ROOT"] - self.setconfig =3D setconfig - if setconfig is None: - self.sets =3D {} - else: - self.sets =3D self.setconfig.getSets() - self.visible_pkgs =3D PackageVirtualDbapi(self.settings) - def create_world_atom(pkg, args_set, root_config): """Create a new atom for the world file if one does not exist. If the argument atom is precise enough to identify a specific slot then a slot @@ -854,266 +376,6 @@ =20 return deep_system_deps =20 -class FakeVartree(portage.vartree): - """This is implements an in-memory copy of a vartree instance that prov= ides - all the interfaces required for use by the depgraph. The vardb is lock= ed - during the constructor call just long enough to read a copy of the - installed package information. This allows the depgraph to do it's - dependency calculations without holding a lock on the vardb. 
It also - allows things like vardb global updates to be done in memory so that th= e - user doesn't necessarily need write access to the vardb in cases where - global updates are necessary (updates are performed when necessary if t= here - is not a matching ebuild in the tree).""" - def __init__(self, root_config, pkg_cache=3DNone, acquire_lock=3D1): - self._root_config =3D root_config - if pkg_cache is None: - pkg_cache =3D {} - real_vartree =3D root_config.trees["vartree"] - portdb =3D root_config.trees["porttree"].dbapi - self.root =3D real_vartree.root - self.settings =3D real_vartree.settings - mykeys =3D list(real_vartree.dbapi._aux_cache_keys) - if "_mtime_" not in mykeys: - mykeys.append("_mtime_") - self._db_keys =3D mykeys - self._pkg_cache =3D pkg_cache - self.dbapi =3D PackageVirtualDbapi(real_vartree.settings) - vdb_path =3D os.path.join(self.root, portage.VDB_PATH) - try: - # At least the parent needs to exist for the lock file. - portage.util.ensure_dirs(vdb_path) - except portage.exception.PortageException: - pass - vdb_lock =3D None - try: - if acquire_lock and os.access(vdb_path, os.W_OK): - vdb_lock =3D portage.locks.lockdir(vdb_path) - real_dbapi =3D real_vartree.dbapi - slot_counters =3D {} - for cpv in real_dbapi.cpv_all(): - cache_key =3D ("installed", self.root, cpv, "nomerge") - pkg =3D self._pkg_cache.get(cache_key) - if pkg is not None: - metadata =3D pkg.metadata - else: - metadata =3D dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys))) - myslot =3D metadata["SLOT"] - mycp =3D portage.dep_getkey(cpv) - myslot_atom =3D "%s:%s" % (mycp, myslot) - try: - mycounter =3D long(metadata["COUNTER"]) - except ValueError: - mycounter =3D 0 - metadata["COUNTER"] =3D str(mycounter) - other_counter =3D slot_counters.get(myslot_atom, None) - if other_counter is not None: - if other_counter > mycounter: - continue - slot_counters[myslot_atom] =3D mycounter - if pkg is None: - pkg =3D Package(built=3DTrue, cpv=3Dcpv, - installed=3DTrue, metadata=3Dmetadata, - root_config=3Droot_config, type_name=3D"installed") - self._pkg_cache[pkg] =3D pkg - self.dbapi.cpv_inject(pkg) - real_dbapi.flush_cache() - finally: - if vdb_lock: - portage.locks.unlockdir(vdb_lock) - # Populate the old-style virtuals using the cached values. - if not self.settings.treeVirtuals: - self.settings.treeVirtuals =3D portage.util.map_dictlist_vals( - portage.getCPFromCPV, self.get_all_provides()) - - # Intialize variables needed for lazy cache pulls of the live ebuild - # metadata. This ensures that the vardb lock is released ASAP, withou= t - # being delayed in case cache generation is triggered. - self._aux_get =3D self.dbapi.aux_get - self.dbapi.aux_get =3D self._aux_get_wrapper - self._match =3D self.dbapi.match - self.dbapi.match =3D self._match_wrapper - self._aux_get_history =3D set() - self._portdb_keys =3D ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"] - self._portdb =3D portdb - self._global_updates =3D None - - def _match_wrapper(self, cpv, use_cache=3D1): - """ - Make sure the metadata in Package instances gets updated for any - cpv that is returned from a match() call, since the metadata can - be accessed directly from the Package instance instead of via - aux_get(). 
- """ - matches =3D self._match(cpv, use_cache=3Duse_cache) - for cpv in matches: - if cpv in self._aux_get_history: - continue - self._aux_get_wrapper(cpv, []) - return matches - - def _aux_get_wrapper(self, pkg, wants): - if pkg in self._aux_get_history: - return self._aux_get(pkg, wants) - self._aux_get_history.add(pkg) - try: - # Use the live ebuild metadata if possible. - live_metadata =3D dict(izip(self._portdb_keys, - self._portdb.aux_get(pkg, self._portdb_keys))) - if not portage.eapi_is_supported(live_metadata["EAPI"]): - raise KeyError(pkg) - self.dbapi.aux_update(pkg, live_metadata) - except (KeyError, portage.exception.PortageException): - if self._global_updates is None: - self._global_updates =3D \ - grab_global_updates(self._portdb.porttree_root) - perform_global_updates( - pkg, self.dbapi, self._global_updates) - return self._aux_get(pkg, wants) - - def sync(self, acquire_lock=3D1): - """ - Call this method to synchronize state with the real vardb - after one or more packages may have been installed or - uninstalled. - """ - vdb_path =3D os.path.join(self.root, portage.VDB_PATH) - try: - # At least the parent needs to exist for the lock file. - portage.util.ensure_dirs(vdb_path) - except portage.exception.PortageException: - pass - vdb_lock =3D None - try: - if acquire_lock and os.access(vdb_path, os.W_OK): - vdb_lock =3D portage.locks.lockdir(vdb_path) - self._sync() - finally: - if vdb_lock: - portage.locks.unlockdir(vdb_lock) - - def _sync(self): - - real_vardb =3D self._root_config.trees["vartree"].dbapi - current_cpv_set =3D frozenset(real_vardb.cpv_all()) - pkg_vardb =3D self.dbapi - aux_get_history =3D self._aux_get_history - - # Remove any packages that have been uninstalled. - for pkg in list(pkg_vardb): - if pkg.cpv not in current_cpv_set: - pkg_vardb.cpv_remove(pkg) - aux_get_history.discard(pkg.cpv) - - # Validate counters and timestamps. 
- slot_counters =3D {} - root =3D self.root - validation_keys =3D ["COUNTER", "_mtime_"] - for cpv in current_cpv_set: - - pkg_hash_key =3D ("installed", root, cpv, "nomerge") - pkg =3D pkg_vardb.get(pkg_hash_key) - if pkg is not None: - counter, mtime =3D real_vardb.aux_get(cpv, validation_keys) - try: - counter =3D long(counter) - except ValueError: - counter =3D 0 - - if counter !=3D pkg.counter or \ - mtime !=3D pkg.mtime: - pkg_vardb.cpv_remove(pkg) - aux_get_history.discard(pkg.cpv) - pkg =3D None - - if pkg is None: - pkg =3D self._pkg(cpv) - - other_counter =3D slot_counters.get(pkg.slot_atom) - if other_counter is not None: - if other_counter > pkg.counter: - continue - - slot_counters[pkg.slot_atom] =3D pkg.counter - pkg_vardb.cpv_inject(pkg) - - real_vardb.flush_cache() - - def _pkg(self, cpv): - root_config =3D self._root_config - real_vardb =3D root_config.trees["vartree"].dbapi - pkg =3D Package(cpv=3Dcpv, installed=3DTrue, - metadata=3Dizip(self._db_keys, - real_vardb.aux_get(cpv, self._db_keys)), - root_config=3Droot_config, - type_name=3D"installed") - - try: - mycounter =3D long(pkg.metadata["COUNTER"]) - except ValueError: - mycounter =3D 0 - pkg.metadata["COUNTER"] =3D str(mycounter) - - return pkg - -def grab_global_updates(portdir): - from portage.update import grab_updates, parse_updates - updpath =3D os.path.join(portdir, "profiles", "updates") - try: - rawupdates =3D grab_updates(updpath) - except portage.exception.DirectoryNotFound: - rawupdates =3D [] - upd_commands =3D [] - for mykey, mystat, mycontent in rawupdates: - commands, errors =3D parse_updates(mycontent) - upd_commands.extend(commands) - return upd_commands - -def perform_global_updates(mycpv, mydb, mycommands): - from portage.update import update_dbentries - aux_keys =3D ["DEPEND", "RDEPEND", "PDEPEND"] - aux_dict =3D dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys))) - updates =3D update_dbentries(mycommands, aux_dict) - if updates: - mydb.aux_update(mycpv, updates) - -def visible(pkgsettings, pkg): - """ - Check if a package is visible. This can raise an InvalidDependString - exception if LICENSE is invalid. - TODO: optionally generate a list of masking reasons - @rtype: Boolean - @returns: True if the package is visible, False otherwise. 
- """ - if not pkg.metadata["SLOT"]: - return False - if not pkg.installed: - if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata): - return False - if pkg.built and not pkg.installed: - # we can have an old binary which has no EPREFIX information - if "EPREFIX" not in pkg.metadata or not pkg.metadata["EPREFIX"]: - return False - if len(pkg.metadata["EPREFIX"].strip()) < len(pkgsettings["EPREFIX"]): - return False - eapi =3D pkg.metadata["EAPI"] - if not portage.eapi_is_supported(eapi): - return False - if not pkg.installed: - if portage._eapi_is_deprecated(eapi): - return False - if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata): - return False - if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata): - return False - if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata): - return False - try: - if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata): - return False - except portage.exception.InvalidDependString: - return False - return True - def get_masking_status(pkg, pkgsettings, root_config): =20 mreasons =3D portage.getmaskingstatus( @@ -1212,487 +474,6 @@ shown_licenses.add(l) return have_eapi_mask =20 -class Package(Task): - - __hash__ =3D Task.__hash__ - __slots__ =3D ("built", "cpv", "depth", - "installed", "metadata", "onlydeps", "operation", - "root_config", "type_name", - "category", "counter", "cp", "cpv_split", - "inherited", "iuse", "mtime", - "pf", "pv_split", "root", "slot", "slot_atom", "use") - - metadata_keys =3D [ - "CHOST", "COUNTER", "DEPEND", "EAPI", - "INHERITED", "IUSE", "KEYWORDS", - "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND", - "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_", - "EPREFIX" ] - - def __init__(self, **kwargs): - Task.__init__(self, **kwargs) - self.root =3D self.root_config.root - self.metadata =3D _PackageMetadataWrapper(self, self.metadata) - self.cp =3D portage.cpv_getkey(self.cpv) - slot =3D self.slot - if not slot: - # Avoid an InvalidAtom exception when creating slot_atom. - # This package instance will be masked due to empty SLOT. 
- slot =3D '0' - self.slot_atom =3D portage.dep.Atom("%s:%s" % (self.cp, slot)) - self.category, self.pf =3D portage.catsplit(self.cpv) - self.cpv_split =3D portage.catpkgsplit(self.cpv) - self.pv_split =3D self.cpv_split[1:] - - class _use(object): - - __slots__ =3D ("__weakref__", "enabled") - - def __init__(self, use): - self.enabled =3D frozenset(use) - - class _iuse(object): - - __slots__ =3D ("__weakref__", "all", "enabled", "disabled", "iuse_impl= icit", "regex", "tokens") - - def __init__(self, tokens, iuse_implicit): - self.tokens =3D tuple(tokens) - self.iuse_implicit =3D iuse_implicit - enabled =3D [] - disabled =3D [] - other =3D [] - for x in tokens: - prefix =3D x[:1] - if prefix =3D=3D "+": - enabled.append(x[1:]) - elif prefix =3D=3D "-": - disabled.append(x[1:]) - else: - other.append(x) - self.enabled =3D frozenset(enabled) - self.disabled =3D frozenset(disabled) - self.all =3D frozenset(chain(enabled, disabled, other)) - - def __getattribute__(self, name): - if name =3D=3D "regex": - try: - return object.__getattribute__(self, "regex") - except AttributeError: - all =3D object.__getattribute__(self, "all") - iuse_implicit =3D object.__getattribute__(self, "iuse_implicit") - # Escape anything except ".*" which is supposed - # to pass through from _get_implicit_iuse() - regex =3D (re.escape(x) for x in chain(all, iuse_implicit)) - regex =3D "^(%s)$" % "|".join(regex) - regex =3D regex.replace("\\.\\*", ".*") - self.regex =3D re.compile(regex) - return object.__getattribute__(self, name) - - def _get_hash_key(self): - hash_key =3D getattr(self, "_hash_key", None) - if hash_key is None: - if self.operation is None: - self.operation =3D "merge" - if self.onlydeps or self.installed: - self.operation =3D "nomerge" - self._hash_key =3D \ - (self.type_name, self.root, self.cpv, self.operation) - return self._hash_key - - def __lt__(self, other): - if other.cp !=3D self.cp: - return False - if portage.pkgcmp(self.pv_split, other.pv_split) < 0: - return True - return False - - def __le__(self, other): - if other.cp !=3D self.cp: - return False - if portage.pkgcmp(self.pv_split, other.pv_split) <=3D 0: - return True - return False - - def __gt__(self, other): - if other.cp !=3D self.cp: - return False - if portage.pkgcmp(self.pv_split, other.pv_split) > 0: - return True - return False - - def __ge__(self, other): - if other.cp !=3D self.cp: - return False - if portage.pkgcmp(self.pv_split, other.pv_split) >=3D 0: - return True - return False - -_all_metadata_keys =3D set(x for x in portage.auxdbkeys \ - if not x.startswith("UNUSED_")) -_all_metadata_keys.discard("CDEPEND") -_all_metadata_keys.update(Package.metadata_keys) - -from portage.cache.mappings import slot_dict_class -_PackageMetadataWrapperBase =3D slot_dict_class(_all_metadata_keys) - -class _PackageMetadataWrapper(_PackageMetadataWrapperBase): - """ - Detect metadata updates and synchronize Package attributes. 
- """ - - __slots__ =3D ("_pkg",) - _wrapped_keys =3D frozenset( - ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"]) - - def __init__(self, pkg, metadata): - _PackageMetadataWrapperBase.__init__(self) - self._pkg =3D pkg - self.update(metadata) - - def __setitem__(self, k, v): - _PackageMetadataWrapperBase.__setitem__(self, k, v) - if k in self._wrapped_keys: - getattr(self, "_set_" + k.lower())(k, v) - - def _set_inherited(self, k, v): - if isinstance(v, basestring): - v =3D frozenset(v.split()) - self._pkg.inherited =3D v - - def _set_iuse(self, k, v): - self._pkg.iuse =3D self._pkg._iuse( - v.split(), self._pkg.root_config.iuse_implicit) - - def _set_slot(self, k, v): - self._pkg.slot =3D v - - def _set_use(self, k, v): - self._pkg.use =3D self._pkg._use(v.split()) - - def _set_counter(self, k, v): - if isinstance(v, basestring): - try: - v =3D long(v.strip()) - except ValueError: - v =3D 0 - self._pkg.counter =3D v - - def _set__mtime_(self, k, v): - if isinstance(v, basestring): - try: - v =3D long(v.strip()) - except ValueError: - v =3D 0 - self._pkg.mtime =3D v - -class PackageUninstall(AsynchronousTask): - - __slots__ =3D ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings") - - def _start(self): - try: - unmerge(self.pkg.root_config, self.opts, "unmerge", - [self.pkg.cpv], self.ldpath_mtimes, clean_world=3D0, - clean_delay=3D0, raise_on_error=3D1, scheduler=3Dself.scheduler, - writemsg_level=3Dself._writemsg_level) - except UninstallFailure, e: - self.returncode =3D e.status - else: - self.returncode =3D os.EX_OK - self.wait() - - def _writemsg_level(self, msg, level=3D0, noiselevel=3D0): - - log_path =3D self.settings.get("PORTAGE_LOG_FILE") - background =3D self.background - - if log_path is None: - if not (background and level < logging.WARNING): - portage.util.writemsg_level(msg, - level=3Dlevel, noiselevel=3Dnoiselevel) - else: - if not background: - portage.util.writemsg_level(msg, - level=3Dlevel, noiselevel=3Dnoiselevel) - - f =3D open(log_path, 'a') - try: - f.write(msg) - finally: - f.close() - -class MergeListItem(CompositeTask): - - """ - TODO: For parallel scheduling, everything here needs asynchronous - execution support (start, poll, and wait methods). 
- """ - - __slots__ =3D ("args_set", - "binpkg_opts", "build_opts", "config_pool", "emerge_opts", - "find_blockers", "logger", "mtimedb", "pkg", - "pkg_count", "pkg_to_replace", "prefetcher", - "settings", "statusMessage", "world_atom") + \ - ("_install_task",) - - def _start(self): - - pkg =3D self.pkg - build_opts =3D self.build_opts - - if pkg.installed: - # uninstall, executed by self.merge() - self.returncode =3D os.EX_OK - self.wait() - return - - args_set =3D self.args_set - find_blockers =3D self.find_blockers - logger =3D self.logger - mtimedb =3D self.mtimedb - pkg_count =3D self.pkg_count - scheduler =3D self.scheduler - settings =3D self.settings - world_atom =3D self.world_atom - ldpath_mtimes =3D mtimedb["ldpath"] - - action_desc =3D "Emerging" - preposition =3D "for" - if pkg.type_name =3D=3D "binary": - action_desc +=3D " binary" - - if build_opts.fetchonly: - action_desc =3D "Fetching" - - msg =3D "%s (%s of %s) %s" % \ - (action_desc, - colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)), - colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)), - colorize("GOOD", pkg.cpv)) - - portdb =3D pkg.root_config.trees["porttree"].dbapi - portdir_repo_name =3D portdb._repository_map.get(portdb.porttree_root) - if portdir_repo_name: - pkg_repo_name =3D pkg.metadata.get("repository") - if pkg_repo_name !=3D portdir_repo_name: - if not pkg_repo_name: - pkg_repo_name =3D "unknown repo" - msg +=3D " from %s" % pkg_repo_name - - if pkg.root !=3D "/": - msg +=3D " %s %s" % (preposition, pkg.root) - - if not build_opts.pretend: - self.statusMessage(msg) - logger.log(" >>> emerge (%s of %s) %s to %s" % \ - (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root)) - - if pkg.type_name =3D=3D "ebuild": - - build =3D EbuildBuild(args_set=3Dargs_set, - background=3Dself.background, - config_pool=3Dself.config_pool, - find_blockers=3Dfind_blockers, - ldpath_mtimes=3Dldpath_mtimes, logger=3Dlogger, - opts=3Dbuild_opts, pkg=3Dpkg, pkg_count=3Dpkg_count, - prefetcher=3Dself.prefetcher, scheduler=3Dscheduler, - settings=3Dsettings, world_atom=3Dworld_atom) - - self._install_task =3D build - self._start_task(build, self._default_final_exit) - return - - elif pkg.type_name =3D=3D "binary": - - binpkg =3D Binpkg(background=3Dself.background, - find_blockers=3Dfind_blockers, - ldpath_mtimes=3Dldpath_mtimes, logger=3Dlogger, - opts=3Dself.binpkg_opts, pkg=3Dpkg, pkg_count=3Dpkg_count, - prefetcher=3Dself.prefetcher, settings=3Dsettings, - scheduler=3Dscheduler, world_atom=3Dworld_atom) - - self._install_task =3D binpkg - self._start_task(binpkg, self._default_final_exit) - return - - def _poll(self): - self._install_task.poll() - return self.returncode - - def _wait(self): - self._install_task.wait() - return self.returncode - - def merge(self): - - pkg =3D self.pkg - build_opts =3D self.build_opts - find_blockers =3D self.find_blockers - logger =3D self.logger - mtimedb =3D self.mtimedb - pkg_count =3D self.pkg_count - prefetcher =3D self.prefetcher - scheduler =3D self.scheduler - settings =3D self.settings - world_atom =3D self.world_atom - ldpath_mtimes =3D mtimedb["ldpath"] - - if pkg.installed: - if not (build_opts.buildpkgonly or \ - build_opts.fetchonly or build_opts.pretend): - - uninstall =3D PackageUninstall(background=3Dself.background, - ldpath_mtimes=3Dldpath_mtimes, opts=3Dself.emerge_opts, - pkg=3Dpkg, scheduler=3Dscheduler, settings=3Dsettings) - - uninstall.start() - retval =3D uninstall.wait() - if retval !=3D os.EX_OK: - return retval - return os.EX_OK - - if build_opts.fetchonly 
or \ - build_opts.buildpkgonly: - return self.returncode - - retval =3D self._install_task.install() - return retval - -class BlockerDB(object): - - def __init__(self, root_config): - self._root_config =3D root_config - self._vartree =3D root_config.trees["vartree"] - self._portdb =3D root_config.trees["porttree"].dbapi - - self._dep_check_trees =3D None - self._fake_vartree =3D None - - def _get_fake_vartree(self, acquire_lock=3D0): - fake_vartree =3D self._fake_vartree - if fake_vartree is None: - fake_vartree =3D FakeVartree(self._root_config, - acquire_lock=3Dacquire_lock) - self._fake_vartree =3D fake_vartree - self._dep_check_trees =3D { self._vartree.root : { - "porttree" : fake_vartree, - "vartree" : fake_vartree, - }} - else: - fake_vartree.sync(acquire_lock=3Dacquire_lock) - return fake_vartree - - def findInstalledBlockers(self, new_pkg, acquire_lock=3D0): - blocker_cache =3D BlockerCache(self._vartree.root, self._vartree.dbapi= ) - dep_keys =3D ["DEPEND", "RDEPEND", "PDEPEND"] - settings =3D self._vartree.settings - stale_cache =3D set(blocker_cache) - fake_vartree =3D self._get_fake_vartree(acquire_lock=3Dacquire_lock) - dep_check_trees =3D self._dep_check_trees - vardb =3D fake_vartree.dbapi - installed_pkgs =3D list(vardb) - - for inst_pkg in installed_pkgs: - stale_cache.discard(inst_pkg.cpv) - cached_blockers =3D blocker_cache.get(inst_pkg.cpv) - if cached_blockers is not None and \ - cached_blockers.counter !=3D long(inst_pkg.metadata["COUNTER"]): - cached_blockers =3D None - if cached_blockers is not None: - blocker_atoms =3D cached_blockers.atoms - else: - # Use aux_get() to trigger FakeVartree global - # updates on *DEPEND when appropriate. - depstr =3D " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys)) - try: - portage.dep._dep_check_strict =3D False - success, atoms =3D portage.dep_check(depstr, - vardb, settings, myuse=3Dinst_pkg.use.enabled, - trees=3Ddep_check_trees, myroot=3Dinst_pkg.root) - finally: - portage.dep._dep_check_strict =3D True - if not success: - pkg_location =3D os.path.join(inst_pkg.root, - portage.VDB_PATH, inst_pkg.category, inst_pkg.pf) - portage.writemsg("!!! %s/*DEPEND: %s\n" % \ - (pkg_location, atoms), noiselevel=3D-1) - continue - - blocker_atoms =3D [atom for atom in atoms \ - if atom.startswith("!")] - blocker_atoms.sort() - counter =3D long(inst_pkg.metadata["COUNTER"]) - blocker_cache[inst_pkg.cpv] =3D \ - blocker_cache.BlockerData(counter, blocker_atoms) - for cpv in stale_cache: - del blocker_cache[cpv] - blocker_cache.flush() - - blocker_parents =3D digraph() - blocker_atoms =3D [] - for pkg in installed_pkgs: - for blocker_atom in blocker_cache[pkg.cpv].atoms: - blocker_atom =3D blocker_atom.lstrip("!") - blocker_atoms.append(blocker_atom) - blocker_parents.add(blocker_atom, pkg) - - blocker_atoms =3D InternalPackageSet(initial_atoms=3Dblocker_atoms) - blocking_pkgs =3D set() - for atom in blocker_atoms.iterAtomsForPackage(new_pkg): - blocking_pkgs.update(blocker_parents.parent_nodes(atom)) - - # Check for blockers in the other direction. - depstr =3D " ".join(new_pkg.metadata[k] for k in dep_keys) - try: - portage.dep._dep_check_strict =3D False - success, atoms =3D portage.dep_check(depstr, - vardb, settings, myuse=3Dnew_pkg.use.enabled, - trees=3Ddep_check_trees, myroot=3Dnew_pkg.root) - finally: - portage.dep._dep_check_strict =3D True - if not success: - # We should never get this far with invalid deps. 
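findInstalledBlockers() above uses the same primitive in both directions: flatten the *DEPEND strings, keep only the atoms that carry a leading "!", and strip that marker before matching them against the other side. A standalone sketch of that filtering step (toy atom strings, no dep_check involved):

    def blocker_atoms(flattened_atoms):
        # Keep only blockers ("!"-prefixed atoms) and drop the marker so the
        # remainder can be matched like any ordinary dependency atom.
        return sorted(atom.lstrip("!") for atom in flattened_atoms
                      if atom[:1] == "!")

    atoms = ["!app-editors/nano", ">=dev-lang/python-2.4", "!sys-apps/foo"]
    print blocker_atoms(atoms)   # ['app-editors/nano', 'sys-apps/foo']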
- show_invalid_depstring_notice(new_pkg, depstr, atoms) - assert False - - blocker_atoms =3D [atom.lstrip("!") for atom in atoms \ - if atom[:1] =3D=3D "!"] - if blocker_atoms: - blocker_atoms =3D InternalPackageSet(initial_atoms=3Dblocker_atoms) - for inst_pkg in installed_pkgs: - try: - blocker_atoms.iterAtomsForPackage(inst_pkg).next() - except (portage.exception.InvalidDependString, StopIteration): - continue - blocking_pkgs.add(inst_pkg) - - return blocking_pkgs - -def show_invalid_depstring_notice(parent_node, depstring, error_msg): - - msg1 =3D "\n\n!!! Invalid or corrupt dependency specification: " + \ - "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring) - p_type, p_root, p_key, p_status =3D parent_node - msg =3D [] - if p_status =3D=3D "nomerge": - category, pf =3D portage.catsplit(p_key) - pkg_location =3D os.path.join(p_root, portage.VDB_PATH, category, pf) - msg.append("Portage is unable to process the dependencies of the ") - msg.append("'%s' package. " % p_key) - msg.append("In order to correct this problem, the package ") - msg.append("should be uninstalled, reinstalled, or upgraded. ") - msg.append("As a temporary workaround, the --nodeps option can ") - msg.append("be used to ignore all dependencies. For reference, ") - msg.append("the problematic dependencies can be found in the ") - msg.append("*DEPEND files located in '%s/'." % pkg_location) - else: - msg.append("This package can not be installed. ") - msg.append("Please notify the '%s' package maintainer " % p_key) - msg.append("about this problem.") - - msg2 =3D "".join("%s\n" % line for line in textwrap.wrap("".join(msg), = 72)) - writemsg_level(msg1 + msg2, level=3Dlogging.ERROR, noiselevel=3D-1) - class depgraph(object): =20 pkg_tree_map =3D RootConfig.pkg_tree_map @@ -6400,78 +5181,6 @@ metadata =3D self._cpv_pkg_map[cpv].metadata return [metadata.get(x, "") for x in wants] =20 -class PackageCounters(object): - - def __init__(self): - self.upgrades =3D 0 - self.downgrades =3D 0 - self.new =3D 0 - self.newslot =3D 0 - self.reinst =3D 0 - self.uninst =3D 0 - self.blocks =3D 0 - self.blocks_satisfied =3D 0 - self.totalsize =3D 0 - self.restrict_fetch =3D 0 - self.restrict_fetch_satisfied =3D 0 - self.interactive =3D 0 - - def __str__(self): - total_installs =3D self.upgrades + self.downgrades + self.newslot + se= lf.new + self.reinst - myoutput =3D [] - details =3D [] - myoutput.append("Total: %s package" % total_installs) - if total_installs !=3D 1: - myoutput.append("s") - if total_installs !=3D 0: - myoutput.append(" (") - if self.upgrades > 0: - details.append("%s upgrade" % self.upgrades) - if self.upgrades > 1: - details[-1] +=3D "s" - if self.downgrades > 0: - details.append("%s downgrade" % self.downgrades) - if self.downgrades > 1: - details[-1] +=3D "s" - if self.new > 0: - details.append("%s new" % self.new) - if self.newslot > 0: - details.append("%s in new slot" % self.newslot) - if self.newslot > 1: - details[-1] +=3D "s" - if self.reinst > 0: - details.append("%s reinstall" % self.reinst) - if self.reinst > 1: - details[-1] +=3D "s" - if self.uninst > 0: - details.append("%s uninstall" % self.uninst) - if self.uninst > 1: - details[-1] +=3D "s" - if self.interactive > 0: - details.append("%s %s" % (self.interactive, - colorize("WARN", "interactive"))) - myoutput.append(", ".join(details)) - if total_installs !=3D 0: - myoutput.append(")") - myoutput.append(", Size of downloads: %s" % format_size(self.totalsize= )) - if self.restrict_fetch: - myoutput.append("\nFetch Restriction: %s package" 
% \ - self.restrict_fetch) - if self.restrict_fetch > 1: - myoutput.append("s") - if self.restrict_fetch_satisfied < self.restrict_fetch: - myoutput.append(bad(" (%s unsatisfied)") % \ - (self.restrict_fetch - self.restrict_fetch_satisfied)) - if self.blocks > 0: - myoutput.append("\nConflict: %s block" % \ - self.blocks) - if self.blocks > 1: - myoutput.append("s") - if self.blocks_satisfied < self.blocks: - myoutput.append(bad(" (%s unsatisfied)") % \ - (self.blocks - self.blocks_satisfied)) - return "".join(myoutput) - class Scheduler(PollScheduler): =20 _opts_ignore_blockers =3D \ @@ -8065,663 +6774,6 @@ =20 return pkg =20 -class MetadataRegen(PollScheduler): - - def __init__(self, portdb, cp_iter=3DNone, consumer=3DNone, - max_jobs=3DNone, max_load=3DNone): - PollScheduler.__init__(self) - self._portdb =3D portdb - self._global_cleanse =3D False - if cp_iter is None: - cp_iter =3D self._iter_every_cp() - # We can globally cleanse stale cache only if we - # iterate over every single cp. - self._global_cleanse =3D True - self._cp_iter =3D cp_iter - self._consumer =3D consumer - - if max_jobs is None: - max_jobs =3D 1 - - self._max_jobs =3D max_jobs - self._max_load =3D max_load - self._sched_iface =3D self._sched_iface_class( - register=3Dself._register, - schedule=3Dself._schedule_wait, - unregister=3Dself._unregister) - - self._valid_pkgs =3D set() - self._cp_set =3D set() - self._process_iter =3D self._iter_metadata_processes() - self.returncode =3D os.EX_OK - self._error_count =3D 0 - - def _iter_every_cp(self): - every_cp =3D self._portdb.cp_all() - every_cp.sort(reverse=3DTrue) - try: - while True: - yield every_cp.pop() - except IndexError: - pass - - def _iter_metadata_processes(self): - portdb =3D self._portdb - valid_pkgs =3D self._valid_pkgs - cp_set =3D self._cp_set - consumer =3D self._consumer - - for cp in self._cp_iter: - cp_set.add(cp) - portage.writemsg_stdout("Processing %s\n" % cp) - cpv_list =3D portdb.cp_list(cp) - for cpv in cpv_list: - valid_pkgs.add(cpv) - ebuild_path, repo_path =3D portdb.findname2(cpv) - metadata, st, emtime =3D portdb._pull_valid_cache( - cpv, ebuild_path, repo_path) - if metadata is not None: - if consumer is not None: - consumer(cpv, ebuild_path, - repo_path, metadata) - continue - - yield EbuildMetadataPhase(cpv=3Dcpv, ebuild_path=3Debuild_path, - ebuild_mtime=3Demtime, - metadata_callback=3Dportdb._metadata_callback, - portdb=3Dportdb, repo_path=3Drepo_path, - settings=3Dportdb.doebuild_settings) - - def run(self): - - portdb =3D self._portdb - from portage.cache.cache_errors import CacheError - dead_nodes =3D {} - - while self._schedule(): - self._poll_loop() - - while self._jobs: - self._poll_loop() - - if self._global_cleanse: - for mytree in portdb.porttrees: - try: - dead_nodes[mytree] =3D set(portdb.auxdb[mytree].iterkeys()) - except CacheError, e: - portage.writemsg("Error listing cache entries for " + \ - "'%s': %s, continuing...\n" % (mytree, e), - noiselevel=3D-1) - del e - dead_nodes =3D None - break - else: - cp_set =3D self._cp_set - cpv_getkey =3D portage.cpv_getkey - for mytree in portdb.porttrees: - try: - dead_nodes[mytree] =3D set(cpv for cpv in \ - portdb.auxdb[mytree].iterkeys() \ - if cpv_getkey(cpv) in cp_set) - except CacheError, e: - portage.writemsg("Error listing cache entries for " + \ - "'%s': %s, continuing...\n" % (mytree, e), - noiselevel=3D-1) - del e - dead_nodes =3D None - break - - if dead_nodes: - for y in self._valid_pkgs: - for mytree in portdb.porttrees: - if portdb.findname2(y, mytree=3Dmytree)[0]: 
- dead_nodes[mytree].discard(y) - - for mytree, nodes in dead_nodes.iteritems(): - auxdb =3D portdb.auxdb[mytree] - for y in nodes: - try: - del auxdb[y] - except (KeyError, CacheError): - pass - - def _schedule_tasks(self): - """ - @rtype: bool - @returns: True if there may be remaining tasks to schedule, - False otherwise. - """ - while self._can_add_job(): - try: - metadata_process =3D self._process_iter.next() - except StopIteration: - return False - - self._jobs +=3D 1 - metadata_process.scheduler =3D self._sched_iface - metadata_process.addExitListener(self._metadata_exit) - metadata_process.start() - return True - - def _metadata_exit(self, metadata_process): - self._jobs -=3D 1 - if metadata_process.returncode !=3D os.EX_OK: - self.returncode =3D 1 - self._error_count +=3D 1 - self._valid_pkgs.discard(metadata_process.cpv) - portage.writemsg("Error processing %s, continuing...\n" % \ - (metadata_process.cpv,), noiselevel=3D-1) - - if self._consumer is not None: - # On failure, still notify the consumer (in this case the metadata - # argument is None). - self._consumer(metadata_process.cpv, - metadata_process.ebuild_path, - metadata_process.repo_path, - metadata_process.metadata) - - self._schedule() - -def unmerge(root_config, myopts, unmerge_action, - unmerge_files, ldpath_mtimes, autoclean=3D0, - clean_world=3D1, clean_delay=3D1, ordered=3D0, raise_on_error=3D0, - scheduler=3DNone, writemsg_level=3Dportage.util.writemsg_level): - - if clean_world: - clean_world =3D myopts.get('--deselect') !=3D 'n' - quiet =3D "--quiet" in myopts - settings =3D root_config.settings - sets =3D root_config.sets - vartree =3D root_config.trees["vartree"] - candidate_catpkgs=3D[] - global_unmerge=3D0 - xterm_titles =3D "notitles" not in settings.features - out =3D portage.output.EOutput() - pkg_cache =3D {} - db_keys =3D list(vartree.dbapi._aux_cache_keys) - - def _pkg(cpv): - pkg =3D pkg_cache.get(cpv) - if pkg is None: - pkg =3D Package(cpv=3Dcpv, installed=3DTrue, - metadata=3Dizip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)), - root_config=3Droot_config, - type_name=3D"installed") - pkg_cache[cpv] =3D pkg - return pkg - - vdb_path =3D os.path.join(settings["ROOT"], portage.VDB_PATH) - try: - # At least the parent needs to exist for the lock file. 
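MetadataRegen.run() above cleanses the metadata cache with a two-step set computation: start from every cached cpv that is a cleanup candidate, discard the ones still backed by an ebuild on disk, and delete whatever remains. The same idea with plain sets and dictionaries (hypothetical data, not the portage cache API):

    # cpv -> cached metadata (stand-in for portdb.auxdb[mytree])
    cache = {
        "app-misc/foo-1.0": "stale metadata",
        "app-misc/foo-1.1": "current metadata",
        "app-misc/bar-2.0": "current metadata",
    }
    # cpvs for which an ebuild still exists (stand-in for findname2() hits)
    still_in_tree = set(["app-misc/foo-1.1", "app-misc/bar-2.0"])

    dead_nodes = set(cache)        # start from every cached entry
    dead_nodes -= still_in_tree    # keep entries still backed by an ebuild
    for cpv in dead_nodes:
        del cache[cpv]             # drop the orphaned cache entries

    print sorted(cache)            # ['app-misc/bar-2.0', 'app-misc/foo-1.1']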
- portage.util.ensure_dirs(vdb_path) - except portage.exception.PortageException: - pass - vdb_lock =3D None - try: - if os.access(vdb_path, os.W_OK): - vdb_lock =3D portage.locks.lockdir(vdb_path) - realsyslist =3D sets["system"].getAtoms() - syslist =3D [] - for x in realsyslist: - mycp =3D portage.dep_getkey(x) - if mycp in settings.getvirtuals(): - providers =3D [] - for provider in settings.getvirtuals()[mycp]: - if vartree.dbapi.match(provider): - providers.append(provider) - if len(providers) =3D=3D 1: - syslist.extend(providers) - else: - syslist.append(mycp) -=09 - mysettings =3D portage.config(clone=3Dsettings) -=09 - if not unmerge_files: - if unmerge_action =3D=3D "unmerge": - print - print bold("emerge unmerge") + " can only be used with specific pack= age names" - print - return 0 - else: - global_unmerge =3D 1 -=09 - localtree =3D vartree - # process all arguments and add all - # valid db entries to candidate_catpkgs - if global_unmerge: - if not unmerge_files: - candidate_catpkgs.extend(vartree.dbapi.cp_all()) - else: - #we've got command-line arguments - if not unmerge_files: - print "\nNo packages to unmerge have been provided.\n" - return 0 - for x in unmerge_files: - arg_parts =3D x.split('/') - if x[0] not in [".","/"] and \ - arg_parts[-1][-7:] !=3D ".ebuild": - #possible cat/pkg or dep; treat as such - candidate_catpkgs.append(x) - elif unmerge_action in ["prune","clean"]: - print "\n!!! Prune and clean do not accept individual" + \ - " ebuilds as arguments;\n skipping.\n" - continue - else: - # it appears that the user is specifying an installed - # ebuild and we're in "unmerge" mode, so it's ok. - if not os.path.exists(x): - print "\n!!! The path '"+x+"' doesn't exist.\n" - return 0 -=09 - absx =3D os.path.abspath(x) - sp_absx =3D absx.split("/") - if sp_absx[-1][-7:] =3D=3D ".ebuild": - del sp_absx[-1] - absx =3D "/".join(sp_absx) -=09 - sp_absx_len =3D len(sp_absx) -=09 - vdb_path =3D os.path.join(settings["ROOT"], portage.VDB_PATH) - vdb_len =3D len(vdb_path) -=09 - sp_vdb =3D vdb_path.split("/") - sp_vdb_len =3D len(sp_vdb) -=09 - if not os.path.exists(absx+"/CONTENTS"): - print "!!! Not a valid db dir: "+str(absx) - return 0 -=09 - if sp_absx_len <=3D sp_vdb_len: - # The Path is shorter... so it can't be inside the vdb. - print sp_absx - print absx - print "\n!!!",x,"cannot be inside "+ \ - vdb_path+"; aborting.\n" - return 0 -=09 - for idx in range(0,sp_vdb_len): - if idx >=3D sp_absx_len or sp_vdb[idx] !=3D sp_absx[idx]: - print sp_absx - print absx - print "\n!!!", x, "is not inside "+\ - vdb_path+"; aborting.\n" - return 0 -=09 - print "=3D"+"/".join(sp_absx[sp_vdb_len:]) - candidate_catpkgs.append( - "=3D"+"/".join(sp_absx[sp_vdb_len:])) -=09 - newline=3D"" - if (not "--quiet" in myopts): - newline=3D"\n" - if settings["ROOT"] !=3D "/": - writemsg_level(darkgreen(newline+ \ - ">>> Using system located in ROOT tree %s\n" % \ - settings["ROOT"])) - - if (("--pretend" in myopts) or ("--ask" in myopts)) and \ - not ("--quiet" in myopts): - writemsg_level(darkgreen(newline+\ - ">>> These are the packages that would be unmerged:\n")) - - # Preservation of order is required for --depclean and --prune so - # that dependencies are respected. Use all_selected to eliminate - # duplicate packages since the same package may be selected by - # multiple atoms. 
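The comment block above explains why the selection that follows is both ordered and de-duplicated: pkgmap keeps one entry per requested atom, in the order given, while a single all_selected set ensures a package matched by several atoms is queued only once. A compact standalone sketch of that pattern (toy matches instead of vartree lookups):

    atoms_and_matches = [
        ("app-editors/vim", ["app-editors/vim-7.2"]),
        ("=app-editors/vim-7.2", ["app-editors/vim-7.2"]),
    ]

    pkgmap = []
    all_selected = set()
    for atom, matches in atoms_and_matches:
        selected = set()
        for cpv in matches:
            if cpv not in all_selected:   # each package is unmerged only once
                selected.add(cpv)
                all_selected.add(cpv)
        pkgmap.append({"atom": atom, "selected": selected})

    # vim is attributed to the first atom that selected it; the second,
    # overlapping atom selects nothing new.
    for entry in pkgmap:
        print entry["atom"], sorted(entry["selected"])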
- pkgmap =3D [] - all_selected =3D set() - for x in candidate_catpkgs: - # cycle through all our candidate deps and determine - # what will and will not get unmerged - try: - mymatch =3D vartree.dbapi.match(x) - except portage.exception.AmbiguousPackageName, errpkgs: - print "\n\n!!! The short ebuild name \"" + \ - x + "\" is ambiguous. Please specify" - print "!!! one of the following fully-qualified " + \ - "ebuild names instead:\n" - for i in errpkgs[0]: - print " " + green(i) - print - sys.exit(1) -=09 - if not mymatch and x[0] not in "<>=3D~": - mymatch =3D localtree.dep_match(x) - if not mymatch: - portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \ - (x, unmerge_action), noiselevel=3D-1) - continue - - pkgmap.append( - {"protected": set(), "selected": set(), "omitted": set()}) - mykey =3D len(pkgmap) - 1 - if unmerge_action=3D=3D"unmerge": - for y in mymatch: - if y not in all_selected: - pkgmap[mykey]["selected"].add(y) - all_selected.add(y) - elif unmerge_action =3D=3D "prune": - if len(mymatch) =3D=3D 1: - continue - best_version =3D mymatch[0] - best_slot =3D vartree.getslot(best_version) - best_counter =3D vartree.dbapi.cpv_counter(best_version) - for mypkg in mymatch[1:]: - myslot =3D vartree.getslot(mypkg) - mycounter =3D vartree.dbapi.cpv_counter(mypkg) - if (myslot =3D=3D best_slot and mycounter > best_counter) or \ - mypkg =3D=3D portage.best([mypkg, best_version]): - if myslot =3D=3D best_slot: - if mycounter < best_counter: - # On slot collision, keep the one with the - # highest counter since it is the most - # recently installed. - continue - best_version =3D mypkg - best_slot =3D myslot - best_counter =3D mycounter - pkgmap[mykey]["protected"].add(best_version) - pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \ - if mypkg !=3D best_version and mypkg not in all_selected) - all_selected.update(pkgmap[mykey]["selected"]) - else: - # unmerge_action =3D=3D "clean" - slotmap=3D{} - for mypkg in mymatch: - if unmerge_action =3D=3D "clean": - myslot =3D localtree.getslot(mypkg) - else: - # since we're pruning, we don't care about slots - # and put all the pkgs in together - myslot =3D 0 - if myslot not in slotmap: - slotmap[myslot] =3D {} - slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] =3D mypkg - - for mypkg in vartree.dbapi.cp_list( - portage.dep_getkey(mymatch[0])): - myslot =3D vartree.getslot(mypkg) - if myslot not in slotmap: - slotmap[myslot] =3D {} - slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] =3D mypkg - - for myslot in slotmap: - counterkeys =3D slotmap[myslot].keys() - if not counterkeys: - continue - counterkeys.sort() - pkgmap[mykey]["protected"].add( - slotmap[myslot][counterkeys[-1]]) - del counterkeys[-1] - - for counter in counterkeys[:]: - mypkg =3D slotmap[myslot][counter] - if mypkg not in mymatch: - counterkeys.remove(counter) - pkgmap[mykey]["protected"].add( - slotmap[myslot][counter]) - - #be pretty and get them in order of merge: - for ckey in counterkeys: - mypkg =3D slotmap[myslot][ckey] - if mypkg not in all_selected: - pkgmap[mykey]["selected"].add(mypkg) - all_selected.add(mypkg) - # ok, now the last-merged package - # is protected, and the rest are selected - numselected =3D len(all_selected) - if global_unmerge and not numselected: - portage.writemsg_stdout("\n>>> No outdated packages were found on you= r system.\n") - return 0 -=09 - if not numselected: - portage.writemsg_stdout( - "\n>>> No packages selected for removal by " + \ - unmerge_action + "\n") - return 0 - finally: - if vdb_lock: - 
vartree.dbapi.flush_cache() - portage.locks.unlockdir(vdb_lock) -=09 - from portage.sets.base import EditablePackageSet -=09 - # generate a list of package sets that are directly or indirectly liste= d in "world", - # as there is no persistent list of "installed" sets - installed_sets =3D ["world"] - stop =3D False - pos =3D 0 - while not stop: - stop =3D True - pos =3D len(installed_sets) - for s in installed_sets[pos - 1:]: - if s not in sets: - continue - candidates =3D [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if = x.startswith(SETPREFIX)] - if candidates: - stop =3D False - installed_sets +=3D candidates - installed_sets =3D [x for x in installed_sets if x not in root_config.s= etconfig.active] - del stop, pos - - # we don't want to unmerge packages that are still listed in user-edita= ble package sets - # listed in "world" as they would be remerged on the next update of "wo= rld" or the=20 - # relevant package sets. - unknown_sets =3D set() - for cp in xrange(len(pkgmap)): - for cpv in pkgmap[cp]["selected"].copy(): - try: - pkg =3D _pkg(cpv) - except KeyError: - # It could have been uninstalled - # by a concurrent process. - continue - - if unmerge_action !=3D "clean" and \ - root_config.root =3D=3D "/" and \ - portage.match_from_list( - portage.const.PORTAGE_PACKAGE_ATOM, [pkg]): - msg =3D ("Not unmerging package %s since there is no valid " + \ - "reason for portage to unmerge itself.") % (pkg.cpv,) - for line in textwrap.wrap(msg, 75): - out.eerror(line) - # adjust pkgmap so the display output is correct - pkgmap[cp]["selected"].remove(cpv) - all_selected.remove(cpv) - pkgmap[cp]["protected"].add(cpv) - continue - - parents =3D [] - for s in installed_sets: - # skip sets that the user requested to unmerge, and skip world=20 - # unless we're unmerging a package set (as the package would be=20 - # removed from "world" later on) - if s in root_config.setconfig.active or (s =3D=3D "world" and not ro= ot_config.setconfig.active): - continue - - if s not in sets: - if s in unknown_sets: - continue - unknown_sets.add(s) - out =3D portage.output.EOutput() - out.eerror(("Unknown set '@%s' in " + \ - "%svar/lib/portage/world_sets") % \ - (s, root_config.root)) - continue - - # only check instances of EditablePackageSet as other classes are ge= nerally used for - # special purposes and can be ignored here (and are usually generate= d dynamically, so the - # user can't do much about them anyway) - if isinstance(sets[s], EditablePackageSet): - - # This is derived from a snippet of code in the - # depgraph._iter_atoms_for_pkg() method. - for atom in sets[s].iterAtomsForPackage(pkg): - inst_matches =3D vartree.dbapi.match(atom) - inst_matches.reverse() # descending order - higher_slot =3D None - for inst_cpv in inst_matches: - try: - inst_pkg =3D _pkg(inst_cpv) - except KeyError: - # It could have been uninstalled - # by a concurrent process. - continue - - if inst_pkg.cp !=3D atom.cp: - continue - if pkg >=3D inst_pkg: - # This is descending order, and we're not - # interested in any versions <=3D pkg given. 
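The unmerge logic above first expands every package set reachable from "world", since nested set references (entries written with the SETPREFIX, "@") are the only persistent record of which sets are "installed". A standalone sketch of that transitive expansion, written as a simple worklist loop over plain dictionaries standing in for the set configuration (hypothetical set names):

    SETPREFIX = "@"
    # set name -> entries; entries starting with "@" reference another set
    sets = {
        "world": ["app-editors/vim", "@kde"],
        "kde":   ["kde-base/kdelibs", "@qt"],
        "qt":    ["x11-libs/qt"],
    }

    installed_sets = ["world"]
    frontier = ["world"]
    while frontier:
        next_frontier = []
        for name in frontier:
            for entry in sets.get(name, []):
                if entry.startswith(SETPREFIX):
                    nested = entry[len(SETPREFIX):]
                    if nested not in installed_sets:
                        installed_sets.append(nested)
                        next_frontier.append(nested)
        frontier = next_frontier

    print installed_sets   # ['world', 'kde', 'qt']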
- break - if pkg.slot_atom !=3D inst_pkg.slot_atom: - higher_slot =3D inst_pkg - break - if higher_slot is None: - parents.append(s) - break - if parents: - #print colorize("WARN", "Package %s is going to be unmerged," % cpv) - #print colorize("WARN", "but still listed in the following package s= ets:") - #print " %s\n" % ", ".join(parents) - print colorize("WARN", "Not unmerging package %s as it is" % cpv) - print colorize("WARN", "still referenced by the following package se= ts:") - print " %s\n" % ", ".join(parents) - # adjust pkgmap so the display output is correct - pkgmap[cp]["selected"].remove(cpv) - all_selected.remove(cpv) - pkgmap[cp]["protected"].add(cpv) -=09 - del installed_sets - - numselected =3D len(all_selected) - if not numselected: - writemsg_level( - "\n>>> No packages selected for removal by " + \ - unmerge_action + "\n") - return 0 - - # Unmerge order only matters in some cases - if not ordered: - unordered =3D {} - for d in pkgmap: - selected =3D d["selected"] - if not selected: - continue - cp =3D portage.cpv_getkey(iter(selected).next()) - cp_dict =3D unordered.get(cp) - if cp_dict is None: - cp_dict =3D {} - unordered[cp] =3D cp_dict - for k in d: - cp_dict[k] =3D set() - for k, v in d.iteritems(): - cp_dict[k].update(v) - pkgmap =3D [unordered[cp] for cp in sorted(unordered)] - - for x in xrange(len(pkgmap)): - selected =3D pkgmap[x]["selected"] - if not selected: - continue - for mytype, mylist in pkgmap[x].iteritems(): - if mytype =3D=3D "selected": - continue - mylist.difference_update(all_selected) - cp =3D portage.cpv_getkey(iter(selected).next()) - for y in localtree.dep_match(cp): - if y not in pkgmap[x]["omitted"] and \ - y not in pkgmap[x]["selected"] and \ - y not in pkgmap[x]["protected"] and \ - y not in all_selected: - pkgmap[x]["omitted"].add(y) - if global_unmerge and not pkgmap[x]["selected"]: - #avoid cluttering the preview printout with stuff that isn't getting = unmerged - continue - if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in sysl= ist: - writemsg_level(colorize("BAD","\a\n\n!!! " + \ - "'%s' is part of your system profile.\n" % cp), - level=3Dlogging.WARNING, noiselevel=3D-1) - writemsg_level(colorize("WARN","\a!!! 
Unmerging it may " + \ - "be damaging to your system.\n\n"), - level=3Dlogging.WARNING, noiselevel=3D-1) - if clean_delay and "--pretend" not in myopts and "--ask" not in myopt= s: - countdown(int(settings["EMERGE_WARNING_DELAY"]), - colorize("UNMERGE_WARN", "Press Ctrl-C to Stop")) - if not quiet: - writemsg_level("\n %s\n" % (bold(cp),), noiselevel=3D-1) - else: - writemsg_level(bold(cp) + ": ", noiselevel=3D-1) - for mytype in ["selected","protected","omitted"]: - if not quiet: - writemsg_level((mytype + ": ").rjust(14), noiselevel=3D-1) - if pkgmap[x][mytype]: - sorted_pkgs =3D [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[= x][mytype]] - sorted_pkgs.sort(key=3Dcmp_sort_key(portage.pkgcmp)) - for pn, ver, rev in sorted_pkgs: - if rev =3D=3D "r0": - myversion =3D ver - else: - myversion =3D ver + "-" + rev - if mytype =3D=3D "selected": - writemsg_level( - colorize("UNMERGE_WARN", myversion + " "), - noiselevel=3D-1) - else: - writemsg_level( - colorize("GOOD", myversion + " "), noiselevel=3D-1) - else: - writemsg_level("none ", noiselevel=3D-1) - if not quiet: - writemsg_level("\n", noiselevel=3D-1) - if quiet: - writemsg_level("\n", noiselevel=3D-1) - - writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \ - " packages are slated for removal.\n") - writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \ - " and " + colorize("GOOD", "'omitted'") + \ - " packages will not be removed.\n\n") - - if "--pretend" in myopts: - #we're done... return - return 0 - if "--ask" in myopts: - if userquery("Would you like to unmerge these packages?")=3D=3D"No": - # enter pretend mode for correct formatting of results - myopts["--pretend"] =3D True - print - print "Quitting." - print - return 0 - #the real unmerging begins, after a short delay.... - if clean_delay and not autoclean: - countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging") - - for x in xrange(len(pkgmap)): - for y in pkgmap[x]["selected"]: - writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=3D-1) - emergelog(xterm_titles, "=3D=3D=3D Unmerging... ("+y+")") - mysplit =3D y.split("/") - #unmerge... - retval =3D portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"], - mysettings, unmerge_action not in ["clean","prune"], - vartree=3Dvartree, ldpath_mtimes=3Dldpath_mtimes, - scheduler=3Dscheduler) - - if retval !=3D os.EX_OK: - emergelog(xterm_titles, " !!! 
unmerge FAILURE: "+y) - if raise_on_error: - raise UninstallFailure(retval) - sys.exit(retval) - else: - if clean_world and hasattr(sets["world"], "cleanPackage"): - sets["world"].cleanPackage(vartree.dbapi, y) - emergelog(xterm_titles, " >>> unmerge success: "+y) - if clean_world and hasattr(sets["world"], "remove"): - for s in root_config.setconfig.active: - sets["world"].remove(SETPREFIX+s) - return 1 - def chk_updated_info_files(root, infodirs, prev_mtimes, retval): =20 if os.path.exists(EPREFIX + "/usr/bin/install-info"): Copied: main/branches/prefix/pym/_emerge/countdown.py (from rev 13669, ma= in/trunk/pym/_emerge/countdown.py) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/countdown.py = (rev 0) +++ main/branches/prefix/pym/_emerge/countdown.py 2009-06-27 13:35:38 UTC= (rev 13708) @@ -0,0 +1,17 @@ +import sys +import time + +from portage.output import colorize + +def countdown(secs=3D5, doing=3D"Starting"): + if secs: + print ">>> Waiting",secs,"seconds before starting..." + print ">>> (Control-C to abort)...\n"+doing+" in: ", + ticks=3Drange(secs) + ticks.reverse() + for sec in ticks: + sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" ")) + sys.stdout.flush() + time.sleep(1) + print + Copied: main/branches/prefix/pym/_emerge/emergelog.py (from rev 13669, ma= in/trunk/pym/_emerge/emergelog.py) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/emergelog.py = (rev 0) +++ main/branches/prefix/pym/_emerge/emergelog.py 2009-06-27 13:35:38 UTC= (rev 13708) @@ -0,0 +1,43 @@ +import os +import sys +import time +# for an explanation on this logic, see pym/_emerge/__init__.py +import os +import sys +if os.environ.__contains__("PORTAGE_PYTHONPATH"): + sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"]) +else: + sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path= .realpath(__file__))), "pym")) +import portage + +from portage.data import secpass +from portage.output import xtermTitle +=09 +_emerge_log_dir =3D '/var/log' + +def emergelog(xterm_titles, mystr, short_msg=3DNone): + if xterm_titles and short_msg: + if "HOSTNAME" in os.environ: + short_msg =3D os.environ["HOSTNAME"]+": "+short_msg + xtermTitle(short_msg) + try: + file_path =3D os.path.join(_emerge_log_dir, 'emerge.log') + mylogfile =3D open(file_path, "a") + portage.util.apply_secpass_permissions(file_path, + uid=3Dportage.portage_uid, gid=3Dportage.portage_gid, + mode=3D0660) + mylock =3D None + try: + mylock =3D portage.locks.lockfile(mylogfile) + # seek because we may have gotten held up by the lock. + # if so, we may not be positioned at the end of the file. 
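The seek noted in the comment above is the subtle part of emergelog(): the file is opened for append before the lock is taken, so by the time the lock is finally acquired another writer may already have extended the file, and the position must be moved to the true end before writing. A standalone sketch of the same open / lock / seek-to-end / write / unlock sequence, using fcntl in place of portage.locks (hypothetical log path):

    import fcntl
    import time

    def append_log_entry(path, message):
        logfile = open(path, "a")
        try:
            fcntl.flock(logfile, fcntl.LOCK_EX)   # block until we own the file
            try:
                # Another process may have appended while we waited for the
                # lock, so reposition at the real end of the file first.
                logfile.seek(0, 2)
                logfile.write("%d: %s\n" % (time.time(), message))
                logfile.flush()
            finally:
                fcntl.flock(logfile, fcntl.LOCK_UN)
        finally:
            logfile.close()

    append_log_entry("/tmp/emerge-example.log", ">>> example entry")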
+ mylogfile.seek(0, 2) + mylogfile.write(str(time.time())[:10]+": "+mystr+"\n") + mylogfile.flush() + finally: + if mylock: + portage.locks.unlockfile(mylock) + mylogfile.close() + except (IOError,OSError,portage.exception.PortageException), e: + if secpass >=3D 1: + print >> sys.stderr, "emergelog():",e Copied: main/branches/prefix/pym/_emerge/format_size.py (from rev 13669, = main/trunk/pym/_emerge/format_size.py) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/format_size.py = (rev 0) +++ main/branches/prefix/pym/_emerge/format_size.py 2009-06-27 13:35:38 U= TC (rev 13708) @@ -0,0 +1,16 @@ + +# formats a size given in bytes nicely +def format_size(mysize): + if isinstance(mysize, basestring): + return mysize + if 0 !=3D mysize % 1024: + # Always round up to the next kB so that it doesn't show 0 kB when + # some small file still needs to be fetched. + mysize +=3D 1024 - mysize % 1024 + mystr=3Dstr(mysize/1024) + mycount=3Dlen(mystr) + while (mycount > 3): + mycount-=3D3 + mystr=3Dmystr[:mycount]+","+mystr[mycount:] + return mystr+" kB" + Copied: main/branches/prefix/pym/_emerge/search.py (from rev 13669, main/= trunk/pym/_emerge/search.py) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/search.py (r= ev 0) +++ main/branches/prefix/pym/_emerge/search.py 2009-06-27 13:35:38 UTC (r= ev 13708) @@ -0,0 +1,379 @@ +import os +import re +from itertools import izip + +# for an explanation on this logic, see pym/_emerge/__init__.py +import os +import sys +if os.environ.__contains__("PORTAGE_PYTHONPATH"): + sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"]) +else: + sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path= .realpath(__file__))), "pym")) +import portage + +from portage.output import bold as white, darkgreen, green, red + +from _emerge.Package import Package +from _emerge.visible import visible + +class search(object): + + # + # class constants + # + VERSION_SHORT=3D1 + VERSION_RELEASE=3D2 + + # + # public interface + # + def __init__(self, root_config, spinner, searchdesc, + verbose, usepkg, usepkgonly): + """Searches the available and installed packages for the supplied sear= ch key. + The list of available and installed packages is created at object inst= antiation. 
+ This makes successive searches faster.""" + self.settings =3D root_config.settings + self.vartree =3D root_config.trees["vartree"] + self.spinner =3D spinner + self.verbose =3D verbose + self.searchdesc =3D searchdesc + self.root_config =3D root_config + self.setconfig =3D root_config.setconfig + self.matches =3D {"pkg" : []} + self.mlen =3D 0 + + def fake_portdb(): + pass + self.portdb =3D fake_portdb + for attrib in ("aux_get", "cp_all", + "xmatch", "findname", "getFetchMap"): + setattr(fake_portdb, attrib, getattr(self, "_"+attrib)) + + self._dbs =3D [] + + portdb =3D root_config.trees["porttree"].dbapi + bindb =3D root_config.trees["bintree"].dbapi + vardb =3D root_config.trees["vartree"].dbapi + + if not usepkgonly and portdb._have_root_eclass_dir: + self._dbs.append(portdb) + + if (usepkg or usepkgonly) and bindb.cp_all(): + self._dbs.append(bindb) + + self._dbs.append(vardb) + self._portdb =3D portdb + + def _cp_all(self): + cp_all =3D set() + for db in self._dbs: + cp_all.update(db.cp_all()) + return list(sorted(cp_all)) + + def _aux_get(self, *args, **kwargs): + for db in self._dbs: + try: + return db.aux_get(*args, **kwargs) + except KeyError: + pass + raise + + def _findname(self, *args, **kwargs): + for db in self._dbs: + if db is not self._portdb: + # We don't want findname to return anything + # unless it's an ebuild in a portage tree. + # Otherwise, it's already built and we don't + # care about it. + continue + func =3D getattr(db, "findname", None) + if func: + value =3D func(*args, **kwargs) + if value: + return value + return None + + def _getFetchMap(self, *args, **kwargs): + for db in self._dbs: + func =3D getattr(db, "getFetchMap", None) + if func: + value =3D func(*args, **kwargs) + if value: + return value + return {} + + def _visible(self, db, cpv, metadata): + installed =3D db is self.vartree.dbapi + built =3D installed or db is not self._portdb + pkg_type =3D "ebuild" + if installed: + pkg_type =3D "installed" + elif built: + pkg_type =3D "binary" + return visible(self.settings, + Package(type_name=3Dpkg_type, root_config=3Dself.root_config, + cpv=3Dcpv, built=3Dbuilt, installed=3Dinstalled, metadata=3Dmetadata)= ) + + def _xmatch(self, level, atom): + """ + This method does not expand old-style virtuals because it + is restricted to returning matches for a single ${CATEGORY}/${PN} + and old-style virual matches unreliable for that when querying + multiple package databases. If necessary, old-style virtuals + can be performed on atoms prior to calling this method. 
+ """ + cp =3D portage.dep_getkey(atom) + if level =3D=3D "match-all": + matches =3D set() + for db in self._dbs: + if hasattr(db, "xmatch"): + matches.update(db.xmatch(level, atom)) + else: + matches.update(db.match(atom)) + result =3D list(x for x in matches if portage.cpv_getkey(x) =3D=3D cp= ) + db._cpv_sort_ascending(result) + elif level =3D=3D "match-visible": + matches =3D set() + for db in self._dbs: + if hasattr(db, "xmatch"): + matches.update(db.xmatch(level, atom)) + else: + db_keys =3D list(db._aux_cache_keys) + for cpv in db.match(atom): + metadata =3D izip(db_keys, + db.aux_get(cpv, db_keys)) + if not self._visible(db, cpv, metadata): + continue + matches.add(cpv) + result =3D list(x for x in matches if portage.cpv_getkey(x) =3D=3D cp= ) + db._cpv_sort_ascending(result) + elif level =3D=3D "bestmatch-visible": + result =3D None + for db in self._dbs: + if hasattr(db, "xmatch"): + cpv =3D db.xmatch("bestmatch-visible", atom) + if not cpv or portage.cpv_getkey(cpv) !=3D cp: + continue + if not result or cpv =3D=3D portage.best([cpv, result]): + result =3D cpv + else: + db_keys =3D Package.metadata_keys + # break out of this loop with highest visible + # match, checked in descending order + for cpv in reversed(db.match(atom)): + if portage.cpv_getkey(cpv) !=3D cp: + continue + metadata =3D izip(db_keys, + db.aux_get(cpv, db_keys)) + if not self._visible(db, cpv, metadata): + continue + if not result or cpv =3D=3D portage.best([cpv, result]): + result =3D cpv + break + else: + raise NotImplementedError(level) + return result + + def execute(self,searchkey): + """Performs the search for the supplied search key""" + match_category =3D 0 + self.searchkey=3Dsearchkey + self.packagematches =3D [] + if self.searchdesc: + self.searchdesc=3D1 + self.matches =3D {"pkg":[], "desc":[], "set":[]} + else: + self.searchdesc=3D0 + self.matches =3D {"pkg":[], "set":[]} + print "Searching... 
", + + regexsearch =3D False + if self.searchkey.startswith('%'): + regexsearch =3D True + self.searchkey =3D self.searchkey[1:] + if self.searchkey.startswith('@'): + match_category =3D 1 + self.searchkey =3D self.searchkey[1:] + if regexsearch: + self.searchre=3Dre.compile(self.searchkey,re.I) + else: + self.searchre=3Dre.compile(re.escape(self.searchkey), re.I) + for package in self.portdb.cp_all(): + self.spinner.update() + + if match_category: + match_string =3D package[:] + else: + match_string =3D package.split("/")[-1] + + masked=3D0 + if self.searchre.search(match_string): + if not self.portdb.xmatch("match-visible", package): + masked=3D1 + self.matches["pkg"].append([package,masked]) + elif self.searchdesc: # DESCRIPTION searching + full_package =3D self.portdb.xmatch("bestmatch-visible", package) + if not full_package: + #no match found; we don't want to query description + full_package =3D portage.best( + self.portdb.xmatch("match-all", package)) + if not full_package: + continue + else: + masked=3D1 + try: + full_desc =3D self.portdb.aux_get( + full_package, ["DESCRIPTION"])[0] + except KeyError: + print "emerge: search: aux_get() failed, skipping" + continue + if self.searchre.search(full_desc): + self.matches["desc"].append([full_package,masked]) + + self.sdict =3D self.setconfig.getSets() + for setname in self.sdict: + self.spinner.update() + if match_category: + match_string =3D setname + else: + match_string =3D setname.split("/")[-1] + =09 + if self.searchre.search(match_string): + self.matches["set"].append([setname, False]) + elif self.searchdesc: + if self.searchre.search( + self.sdict[setname].getMetadata("DESCRIPTION")): + self.matches["set"].append([setname, False]) + =09 + self.mlen=3D0 + for mtype in self.matches: + self.matches[mtype].sort() + self.mlen +=3D len(self.matches[mtype]) + + def addCP(self, cp): + if not self.portdb.xmatch("match-all", cp): + return + masked =3D 0 + if not self.portdb.xmatch("bestmatch-visible", cp): + masked =3D 1 + self.matches["pkg"].append([cp, masked]) + self.mlen +=3D 1 + + def output(self): + """Outputs the results of the search.""" + print "\b\b \n[ Results for search key : "+white(self.searchkey)+" ]" + print "[ Applications found : "+white(str(self.mlen))+" ]" + print " " + vardb =3D self.vartree.dbapi + for mtype in self.matches: + for match,masked in self.matches[mtype]: + full_package =3D None + if mtype =3D=3D "pkg": + catpack =3D match + full_package =3D self.portdb.xmatch( + "bestmatch-visible", match) + if not full_package: + #no match found; we don't want to query description + masked=3D1 + full_package =3D portage.best( + self.portdb.xmatch("match-all",match)) + elif mtype =3D=3D "desc": + full_package =3D match + match =3D portage.cpv_getkey(match) + elif mtype =3D=3D "set": + print green("*")+" "+white(match) + print " ", darkgreen("Description:")+" ", self.sdict[match].ge= tMetadata("DESCRIPTION") + print + if full_package: + try: + desc, homepage, license =3D self.portdb.aux_get( + full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"]) + except KeyError: + print "emerge: search: aux_get() failed, skipping" + continue + if masked: + print green("*")+" "+white(match)+" "+red("[ Masked ]") + else: + print green("*")+" "+white(match) + myversion =3D self.getVersion(full_package, search.VERSION_RELEASE) + + mysum =3D [0,0] + file_size_str =3D None + mycat =3D match.split("/")[0] + mypkg =3D match.split("/")[1] + mycpv =3D match + "-" + myversion + myebuild =3D self.portdb.findname(mycpv) + if myebuild: + pkgdir =3D 
os.path.dirname(myebuild) + from portage import manifest + mf =3D manifest.Manifest( + pkgdir, self.settings["DISTDIR"]) + try: + uri_map =3D self.portdb.getFetchMap(mycpv) + except portage.exception.InvalidDependString, e: + file_size_str =3D "Unknown (%s)" % (e,) + del e + else: + try: + mysum[0] =3D mf.getDistfilesSize(uri_map) + except KeyError, e: + file_size_str =3D "Unknown (missing " + \ + "digest for %s)" % (e,) + del e + + available =3D False + for db in self._dbs: + if db is not vardb and \ + db.cpv_exists(mycpv): + available =3D True + if not myebuild and hasattr(db, "bintree"): + myebuild =3D db.bintree.getname(mycpv) + try: + mysum[0] =3D os.stat(myebuild).st_size + except OSError: + myebuild =3D None + break + + if myebuild and file_size_str is None: + mystr =3D str(mysum[0] / 1024) + mycount =3D len(mystr) + while (mycount > 3): + mycount -=3D 3 + mystr =3D mystr[:mycount] + "," + mystr[mycount:] + file_size_str =3D mystr + " kB" + + if self.verbose: + if available: + print " ", darkgreen("Latest version available:"),myversion + print " ", self.getInstallationStatus(mycat+'/'+mypkg) + if myebuild: + print " %s %s" % \ + (darkgreen("Size of files:"), file_size_str) + print " ", darkgreen("Homepage:")+" ",homepage + print " ", darkgreen("Description:")+" ",desc + print " ", darkgreen("License:")+" ",license + print + # + # private interface + # + def getInstallationStatus(self,package): + installed_package =3D self.vartree.dep_bestmatch(package) + result =3D "" + version =3D self.getVersion(installed_package,search.VERSION_RELEASE) + if len(version) > 0: + result =3D darkgreen("Latest version installed:")+" "+version + else: + result =3D darkgreen("Latest version installed:")+" [ Not Installed ]= " + return result + + def getVersion(self,full_package,detail): + if len(full_package) > 1: + package_parts =3D portage.catpkgsplit(full_package) + if detail =3D=3D search.VERSION_RELEASE and package_parts[3] !=3D 'r0= ': + result =3D package_parts[2]+ "-" + package_parts[3] + else: + result =3D package_parts[2] + else: + result =3D "" + return result + Copied: main/branches/prefix/pym/_emerge/show_invalid_depstring_notice.py= (from rev 13669, main/trunk/pym/_emerge/show_invalid_depstring_notice.py= ) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/show_invalid_depstring_notice.py = (rev 0) +++ main/branches/prefix/pym/_emerge/show_invalid_depstring_notice.py 200= 9-06-27 13:35:38 UTC (rev 13708) @@ -0,0 +1,40 @@ +import logging +import os +import textwrap + +# for an explanation on this logic, see pym/_emerge/__init__.py +import os +import sys +if os.environ.__contains__("PORTAGE_PYTHONPATH"): + sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"]) +else: + sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path= .realpath(__file__))), "pym")) +import portage + +from portage.util import writemsg_level + +def show_invalid_depstring_notice(parent_node, depstring, error_msg): + + msg1 =3D "\n\n!!! 
Invalid or corrupt dependency specification: " + \ + "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring) + p_type, p_root, p_key, p_status =3D parent_node + msg =3D [] + if p_status =3D=3D "nomerge": + category, pf =3D portage.catsplit(p_key) + pkg_location =3D os.path.join(p_root, portage.VDB_PATH, category, pf) + msg.append("Portage is unable to process the dependencies of the ") + msg.append("'%s' package. " % p_key) + msg.append("In order to correct this problem, the package ") + msg.append("should be uninstalled, reinstalled, or upgraded. ") + msg.append("As a temporary workaround, the --nodeps option can ") + msg.append("be used to ignore all dependencies. For reference, ") + msg.append("the problematic dependencies can be found in the ") + msg.append("*DEPEND files located in '%s/'." % pkg_location) + else: + msg.append("This package can not be installed. ") + msg.append("Please notify the '%s' package maintainer " % p_key) + msg.append("about this problem.") + + msg2 =3D "".join("%s\n" % line for line in textwrap.wrap("".join(msg), = 72)) + writemsg_level(msg1 + msg2, level=3Dlogging.ERROR, noiselevel=3D-1) + Copied: main/branches/prefix/pym/_emerge/unmerge.py (from rev 13669, main= /trunk/pym/_emerge/unmerge.py) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/unmerge.py (= rev 0) +++ main/branches/prefix/pym/_emerge/unmerge.py 2009-06-27 13:35:38 UTC (= rev 13708) @@ -0,0 +1,525 @@ +import logging +import os +import sys +import textwrap +from itertools import izip + +# for an explanation on this logic, see pym/_emerge/__init__.py +import os +import sys +if os.environ.__contains__("PORTAGE_PYTHONPATH"): + sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"]) +else: + sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path= .realpath(__file__))), "pym")) +import portage + +from portage.output import bold, colorize, darkgreen, green +from portage.sets import SETPREFIX +from portage.util import cmp_sort_key + +from _emerge.emergelog import emergelog +from _emerge.Package import Package +from _emerge.UninstallFailure import UninstallFailure +from _emerge.userquery import userquery +from _emerge.countdown import countdown + +def unmerge(root_config, myopts, unmerge_action, + unmerge_files, ldpath_mtimes, autoclean=3D0, + clean_world=3D1, clean_delay=3D1, ordered=3D0, raise_on_error=3D0, + scheduler=3DNone, writemsg_level=3Dportage.util.writemsg_level): + + if clean_world: + clean_world =3D myopts.get('--deselect') !=3D 'n' + quiet =3D "--quiet" in myopts + settings =3D root_config.settings + sets =3D root_config.sets + vartree =3D root_config.trees["vartree"] + candidate_catpkgs=3D[] + global_unmerge=3D0 + xterm_titles =3D "notitles" not in settings.features + out =3D portage.output.EOutput() + pkg_cache =3D {} + db_keys =3D list(vartree.dbapi._aux_cache_keys) + + def _pkg(cpv): + pkg =3D pkg_cache.get(cpv) + if pkg is None: + pkg =3D Package(cpv=3Dcpv, installed=3DTrue, + metadata=3Dizip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)), + root_config=3Droot_config, + type_name=3D"installed") + pkg_cache[cpv] =3D pkg + return pkg + + vdb_path =3D os.path.join(settings["ROOT"], portage.VDB_PATH) + try: + # At least the parent needs to exist for the lock file. 
+ portage.util.ensure_dirs(vdb_path) + except portage.exception.PortageException: + pass + vdb_lock =3D None + try: + if os.access(vdb_path, os.W_OK): + vdb_lock =3D portage.locks.lockdir(vdb_path) + realsyslist =3D sets["system"].getAtoms() + syslist =3D [] + for x in realsyslist: + mycp =3D portage.dep_getkey(x) + if mycp in settings.getvirtuals(): + providers =3D [] + for provider in settings.getvirtuals()[mycp]: + if vartree.dbapi.match(provider): + providers.append(provider) + if len(providers) =3D=3D 1: + syslist.extend(providers) + else: + syslist.append(mycp) +=09 + mysettings =3D portage.config(clone=3Dsettings) +=09 + if not unmerge_files: + if unmerge_action =3D=3D "unmerge": + print + print bold("emerge unmerge") + " can only be used with specific pack= age names" + print + return 0 + else: + global_unmerge =3D 1 +=09 + localtree =3D vartree + # process all arguments and add all + # valid db entries to candidate_catpkgs + if global_unmerge: + if not unmerge_files: + candidate_catpkgs.extend(vartree.dbapi.cp_all()) + else: + #we've got command-line arguments + if not unmerge_files: + print "\nNo packages to unmerge have been provided.\n" + return 0 + for x in unmerge_files: + arg_parts =3D x.split('/') + if x[0] not in [".","/"] and \ + arg_parts[-1][-7:] !=3D ".ebuild": + #possible cat/pkg or dep; treat as such + candidate_catpkgs.append(x) + elif unmerge_action in ["prune","clean"]: + print "\n!!! Prune and clean do not accept individual" + \ + " ebuilds as arguments;\n skipping.\n" + continue + else: + # it appears that the user is specifying an installed + # ebuild and we're in "unmerge" mode, so it's ok. + if not os.path.exists(x): + print "\n!!! The path '"+x+"' doesn't exist.\n" + return 0 +=09 + absx =3D os.path.abspath(x) + sp_absx =3D absx.split("/") + if sp_absx[-1][-7:] =3D=3D ".ebuild": + del sp_absx[-1] + absx =3D "/".join(sp_absx) +=09 + sp_absx_len =3D len(sp_absx) +=09 + vdb_path =3D os.path.join(settings["ROOT"], portage.VDB_PATH) + vdb_len =3D len(vdb_path) +=09 + sp_vdb =3D vdb_path.split("/") + sp_vdb_len =3D len(sp_vdb) +=09 + if not os.path.exists(absx+"/CONTENTS"): + print "!!! Not a valid db dir: "+str(absx) + return 0 +=09 + if sp_absx_len <=3D sp_vdb_len: + # The Path is shorter... so it can't be inside the vdb. + print sp_absx + print absx + print "\n!!!",x,"cannot be inside "+ \ + vdb_path+"; aborting.\n" + return 0 +=09 + for idx in range(0,sp_vdb_len): + if idx >=3D sp_absx_len or sp_vdb[idx] !=3D sp_absx[idx]: + print sp_absx + print absx + print "\n!!!", x, "is not inside "+\ + vdb_path+"; aborting.\n" + return 0 +=09 + print "=3D"+"/".join(sp_absx[sp_vdb_len:]) + candidate_catpkgs.append( + "=3D"+"/".join(sp_absx[sp_vdb_len:])) +=09 + newline=3D"" + if (not "--quiet" in myopts): + newline=3D"\n" + if settings["ROOT"] !=3D "/": + writemsg_level(darkgreen(newline+ \ + ">>> Using system located in ROOT tree %s\n" % \ + settings["ROOT"])) + + if (("--pretend" in myopts) or ("--ask" in myopts)) and \ + not ("--quiet" in myopts): + writemsg_level(darkgreen(newline+\ + ">>> These are the packages that would be unmerged:\n")) + + # Preservation of order is required for --depclean and --prune so + # that dependencies are respected. Use all_selected to eliminate + # duplicate packages since the same package may be selected by + # multiple atoms. 
+ pkgmap =3D [] + all_selected =3D set() + for x in candidate_catpkgs: + # cycle through all our candidate deps and determine + # what will and will not get unmerged + try: + mymatch =3D vartree.dbapi.match(x) + except portage.exception.AmbiguousPackageName, errpkgs: + print "\n\n!!! The short ebuild name \"" + \ + x + "\" is ambiguous. Please specify" + print "!!! one of the following fully-qualified " + \ + "ebuild names instead:\n" + for i in errpkgs[0]: + print " " + green(i) + print + sys.exit(1) +=09 + if not mymatch and x[0] not in "<>=3D~": + mymatch =3D localtree.dep_match(x) + if not mymatch: + portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \ + (x, unmerge_action), noiselevel=3D-1) + continue + + pkgmap.append( + {"protected": set(), "selected": set(), "omitted": set()}) + mykey =3D len(pkgmap) - 1 + if unmerge_action=3D=3D"unmerge": + for y in mymatch: + if y not in all_selected: + pkgmap[mykey]["selected"].add(y) + all_selected.add(y) + elif unmerge_action =3D=3D "prune": + if len(mymatch) =3D=3D 1: + continue + best_version =3D mymatch[0] + best_slot =3D vartree.getslot(best_version) + best_counter =3D vartree.dbapi.cpv_counter(best_version) + for mypkg in mymatch[1:]: + myslot =3D vartree.getslot(mypkg) + mycounter =3D vartree.dbapi.cpv_counter(mypkg) + if (myslot =3D=3D best_slot and mycounter > best_counter) or \ + mypkg =3D=3D portage.best([mypkg, best_version]): + if myslot =3D=3D best_slot: + if mycounter < best_counter: + # On slot collision, keep the one with the + # highest counter since it is the most + # recently installed. + continue + best_version =3D mypkg + best_slot =3D myslot + best_counter =3D mycounter + pkgmap[mykey]["protected"].add(best_version) + pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \ + if mypkg !=3D best_version and mypkg not in all_selected) + all_selected.update(pkgmap[mykey]["selected"]) + else: + # unmerge_action =3D=3D "clean" + slotmap=3D{} + for mypkg in mymatch: + if unmerge_action =3D=3D "clean": + myslot =3D localtree.getslot(mypkg) + else: + # since we're pruning, we don't care about slots + # and put all the pkgs in together + myslot =3D 0 + if myslot not in slotmap: + slotmap[myslot] =3D {} + slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] =3D mypkg + + for mypkg in vartree.dbapi.cp_list( + portage.dep_getkey(mymatch[0])): + myslot =3D vartree.getslot(mypkg) + if myslot not in slotmap: + slotmap[myslot] =3D {} + slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] =3D mypkg + + for myslot in slotmap: + counterkeys =3D slotmap[myslot].keys() + if not counterkeys: + continue + counterkeys.sort() + pkgmap[mykey]["protected"].add( + slotmap[myslot][counterkeys[-1]]) + del counterkeys[-1] + + for counter in counterkeys[:]: + mypkg =3D slotmap[myslot][counter] + if mypkg not in mymatch: + counterkeys.remove(counter) + pkgmap[mykey]["protected"].add( + slotmap[myslot][counter]) + + #be pretty and get them in order of merge: + for ckey in counterkeys: + mypkg =3D slotmap[myslot][ckey] + if mypkg not in all_selected: + pkgmap[mykey]["selected"].add(mypkg) + all_selected.add(mypkg) + # ok, now the last-merged package + # is protected, and the rest are selected + numselected =3D len(all_selected) + if global_unmerge and not numselected: + portage.writemsg_stdout("\n>>> No outdated packages were found on you= r system.\n") + return 0 +=09 + if not numselected: + portage.writemsg_stdout( + "\n>>> No packages selected for removal by " + \ + unmerge_action + "\n") + return 0 + finally: + if vdb_lock: + 
vartree.dbapi.flush_cache() + portage.locks.unlockdir(vdb_lock) +=09 + from portage.sets.base import EditablePackageSet +=09 + # generate a list of package sets that are directly or indirectly liste= d in "world", + # as there is no persistent list of "installed" sets + installed_sets =3D ["world"] + stop =3D False + pos =3D 0 + while not stop: + stop =3D True + pos =3D len(installed_sets) + for s in installed_sets[pos - 1:]: + if s not in sets: + continue + candidates =3D [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if = x.startswith(SETPREFIX)] + if candidates: + stop =3D False + installed_sets +=3D candidates + installed_sets =3D [x for x in installed_sets if x not in root_config.s= etconfig.active] + del stop, pos + + # we don't want to unmerge packages that are still listed in user-edita= ble package sets + # listed in "world" as they would be remerged on the next update of "wo= rld" or the=20 + # relevant package sets. + unknown_sets =3D set() + for cp in xrange(len(pkgmap)): + for cpv in pkgmap[cp]["selected"].copy(): + try: + pkg =3D _pkg(cpv) + except KeyError: + # It could have been uninstalled + # by a concurrent process. + continue + + if unmerge_action !=3D "clean" and \ + root_config.root =3D=3D "/" and \ + portage.match_from_list( + portage.const.PORTAGE_PACKAGE_ATOM, [pkg]): + msg =3D ("Not unmerging package %s since there is no valid " + \ + "reason for portage to unmerge itself.") % (pkg.cpv,) + for line in textwrap.wrap(msg, 75): + out.eerror(line) + # adjust pkgmap so the display output is correct + pkgmap[cp]["selected"].remove(cpv) + all_selected.remove(cpv) + pkgmap[cp]["protected"].add(cpv) + continue + + parents =3D [] + for s in installed_sets: + # skip sets that the user requested to unmerge, and skip world=20 + # unless we're unmerging a package set (as the package would be=20 + # removed from "world" later on) + if s in root_config.setconfig.active or (s =3D=3D "world" and not ro= ot_config.setconfig.active): + continue + + if s not in sets: + if s in unknown_sets: + continue + unknown_sets.add(s) + out =3D portage.output.EOutput() + out.eerror(("Unknown set '@%s' in " + \ + "%svar/lib/portage/world_sets") % \ + (s, root_config.root)) + continue + + # only check instances of EditablePackageSet as other classes are ge= nerally used for + # special purposes and can be ignored here (and are usually generate= d dynamically, so the + # user can't do much about them anyway) + if isinstance(sets[s], EditablePackageSet): + + # This is derived from a snippet of code in the + # depgraph._iter_atoms_for_pkg() method. + for atom in sets[s].iterAtomsForPackage(pkg): + inst_matches =3D vartree.dbapi.match(atom) + inst_matches.reverse() # descending order + higher_slot =3D None + for inst_cpv in inst_matches: + try: + inst_pkg =3D _pkg(inst_cpv) + except KeyError: + # It could have been uninstalled + # by a concurrent process. + continue + + if inst_pkg.cp !=3D atom.cp: + continue + if pkg >=3D inst_pkg: + # This is descending order, and we're not + # interested in any versions <=3D pkg given. 
+ break + if pkg.slot_atom !=3D inst_pkg.slot_atom: + higher_slot =3D inst_pkg + break + if higher_slot is None: + parents.append(s) + break + if parents: + #print colorize("WARN", "Package %s is going to be unmerged," % cpv) + #print colorize("WARN", "but still listed in the following package s= ets:") + #print " %s\n" % ", ".join(parents) + print colorize("WARN", "Not unmerging package %s as it is" % cpv) + print colorize("WARN", "still referenced by the following package se= ts:") + print " %s\n" % ", ".join(parents) + # adjust pkgmap so the display output is correct + pkgmap[cp]["selected"].remove(cpv) + all_selected.remove(cpv) + pkgmap[cp]["protected"].add(cpv) +=09 + del installed_sets + + numselected =3D len(all_selected) + if not numselected: + writemsg_level( + "\n>>> No packages selected for removal by " + \ + unmerge_action + "\n") + return 0 + + # Unmerge order only matters in some cases + if not ordered: + unordered =3D {} + for d in pkgmap: + selected =3D d["selected"] + if not selected: + continue + cp =3D portage.cpv_getkey(iter(selected).next()) + cp_dict =3D unordered.get(cp) + if cp_dict is None: + cp_dict =3D {} + unordered[cp] =3D cp_dict + for k in d: + cp_dict[k] =3D set() + for k, v in d.iteritems(): + cp_dict[k].update(v) + pkgmap =3D [unordered[cp] for cp in sorted(unordered)] + + for x in xrange(len(pkgmap)): + selected =3D pkgmap[x]["selected"] + if not selected: + continue + for mytype, mylist in pkgmap[x].iteritems(): + if mytype =3D=3D "selected": + continue + mylist.difference_update(all_selected) + cp =3D portage.cpv_getkey(iter(selected).next()) + for y in localtree.dep_match(cp): + if y not in pkgmap[x]["omitted"] and \ + y not in pkgmap[x]["selected"] and \ + y not in pkgmap[x]["protected"] and \ + y not in all_selected: + pkgmap[x]["omitted"].add(y) + if global_unmerge and not pkgmap[x]["selected"]: + #avoid cluttering the preview printout with stuff that isn't getting = unmerged + continue + if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in sysl= ist: + writemsg_level(colorize("BAD","\a\n\n!!! " + \ + "'%s' is part of your system profile.\n" % cp), + level=3Dlogging.WARNING, noiselevel=3D-1) + writemsg_level(colorize("WARN","\a!!! 
Unmerging it may " + \ + "be damaging to your system.\n\n"), + level=3Dlogging.WARNING, noiselevel=3D-1) + if clean_delay and "--pretend" not in myopts and "--ask" not in myopt= s: + countdown(int(settings["EMERGE_WARNING_DELAY"]), + colorize("UNMERGE_WARN", "Press Ctrl-C to Stop")) + if not quiet: + writemsg_level("\n %s\n" % (bold(cp),), noiselevel=3D-1) + else: + writemsg_level(bold(cp) + ": ", noiselevel=3D-1) + for mytype in ["selected","protected","omitted"]: + if not quiet: + writemsg_level((mytype + ": ").rjust(14), noiselevel=3D-1) + if pkgmap[x][mytype]: + sorted_pkgs =3D [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[= x][mytype]] + sorted_pkgs.sort(key=3Dcmp_sort_key(portage.pkgcmp)) + for pn, ver, rev in sorted_pkgs: + if rev =3D=3D "r0": + myversion =3D ver + else: + myversion =3D ver + "-" + rev + if mytype =3D=3D "selected": + writemsg_level( + colorize("UNMERGE_WARN", myversion + " "), + noiselevel=3D-1) + else: + writemsg_level( + colorize("GOOD", myversion + " "), noiselevel=3D-1) + else: + writemsg_level("none ", noiselevel=3D-1) + if not quiet: + writemsg_level("\n", noiselevel=3D-1) + if quiet: + writemsg_level("\n", noiselevel=3D-1) + + writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \ + " packages are slated for removal.\n") + writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \ + " and " + colorize("GOOD", "'omitted'") + \ + " packages will not be removed.\n\n") + + if "--pretend" in myopts: + #we're done... return + return 0 + if "--ask" in myopts: + if userquery("Would you like to unmerge these packages?")=3D=3D"No": + # enter pretend mode for correct formatting of results + myopts["--pretend"] =3D True + print + print "Quitting." + print + return 0 + #the real unmerging begins, after a short delay.... + if clean_delay and not autoclean: + countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging") + + for x in xrange(len(pkgmap)): + for y in pkgmap[x]["selected"]: + writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=3D-1) + emergelog(xterm_titles, "=3D=3D=3D Unmerging... ("+y+")") + mysplit =3D y.split("/") + #unmerge... + retval =3D portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"], + mysettings, unmerge_action not in ["clean","prune"], + vartree=3Dvartree, ldpath_mtimes=3Dldpath_mtimes, + scheduler=3Dscheduler) + + if retval !=3D os.EX_OK: + emergelog(xterm_titles, " !!! unmerge FAILURE: "+y) + if raise_on_error: + raise UninstallFailure(retval) + sys.exit(retval) + else: + if clean_world and hasattr(sets["world"], "cleanPackage"): + sets["world"].cleanPackage(vartree.dbapi, y) + emergelog(xterm_titles, " >>> unmerge success: "+y) + if clean_world and hasattr(sets["world"], "remove"): + for s in root_config.setconfig.active: + sets["world"].remove(SETPREFIX+s) + return 1 + Copied: main/branches/prefix/pym/_emerge/userquery.py (from rev 13669, ma= in/trunk/pym/_emerge/userquery.py) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/userquery.py = (rev 0) +++ main/branches/prefix/pym/_emerge/userquery.py 2009-06-27 13:35:38 UTC= (rev 13708) @@ -0,0 +1,44 @@ +import sys + +from portage.output import bold, create_color_func + +def userquery(prompt, responses=3DNone, colours=3DNone): + """Displays a prompt and a set of responses, then waits for a response + which is checked against the responses and the first to match is + returned. 
An empty response will match the first value in responses. = The + input buffer is *not* cleared prior to the prompt! + + prompt: a String. + responses: a List of Strings. + colours: a List of Functions taking and returning a String, used to + process the responses for display. Typically these will be functions + like red() but could be e.g. lambda x: "DisplayString". + If responses is omitted, defaults to ["Yes", "No"], [green, red]. + If only colours is omitted, defaults to [bold, ...]. + + Returns a member of the List responses. (If called without optional + arguments, returns "Yes" or "No".) + KeyboardInterrupt is converted to SystemExit to avoid tracebacks being + printed.""" + if responses is None: + responses =3D ["Yes", "No"] + colours =3D [ + create_color_func("PROMPT_CHOICE_DEFAULT"), + create_color_func("PROMPT_CHOICE_OTHER") + ] + elif colours is None: + colours=3D[bold] + colours=3D(colours*len(responses))[:len(responses)] + print bold(prompt), + try: + while True: + response=3Draw_input("["+"/".join([colours[i](responses[i]) for i in = range(len(responses))])+"] ") + for key in responses: + # An empty response will match the first value in responses. + if response.upper()=3D=3Dkey[:len(response)].upper(): + return key + print "Sorry, response '%s' not understood." % response, + except (EOFError, KeyboardInterrupt): + print "Interrupted." + sys.exit(1) + Copied: main/branches/prefix/pym/_emerge/visible.py (from rev 13669, main= /trunk/pym/_emerge/visible.py) =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D= =3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D --- main/branches/prefix/pym/_emerge/visible.py (= rev 0) +++ main/branches/prefix/pym/_emerge/visible.py 2009-06-27 13:35:38 UTC (= rev 13708) @@ -0,0 +1,47 @@ +# for an explanation on this logic, see pym/_emerge/__init__.py +import os +import sys +if os.environ.__contains__("PORTAGE_PYTHONPATH"): + sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"]) +else: + sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path= .realpath(__file__))), "pym")) +import portage + +def visible(pkgsettings, pkg): + """ + Check if a package is visible. This can raise an InvalidDependString + exception if LICENSE is invalid. + TODO: optionally generate a list of masking reasons + @rtype: Boolean + @returns: True if the package is visible, False otherwise. + """ + if not pkg.metadata["SLOT"]: + return False + if not pkg.installed: + if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata): + return False + if pkg.built and not pkg.installed: + # we can have an old binary which has no EPREFIX information + if "EPREFIX" not in pkg.metadata or not pkg.metadata["EPREFIX"]: + return False + if len(pkg.metadata["EPREFIX"].strip()) < len(pkgsettings["EPREFIX"]): + return False + eapi =3D pkg.metadata["EAPI"] + if not portage.eapi_is_supported(eapi): + return False + if not pkg.installed: + if portage._eapi_is_deprecated(eapi): + return False + if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata): + return False + if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata): + return False + if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata): + return False + try: + if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata): + return False + except portage.exception.InvalidDependString: + return False + return True +
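
For reference, the sketch below shows one way the newly split helpers could be driven from a standalone script. It is an illustration only and is not part of revision 13708: it assumes PORTAGE_PYTHONPATH points at a portage checkout (the same bootstrap convention used at the top of each new module), and the confirm_with_delay wrapper and the 5 second delay are hypothetical names and values chosen for the example.

# illustrative sketch only -- not part of the committed sources
import os
import sys

# reuse the bootstrap convention from the split _emerge modules
if "PORTAGE_PYTHONPATH" in os.environ:
    sys.path.insert(0, os.environ["PORTAGE_PYTHONPATH"])

from _emerge.userquery import userquery
from _emerge.countdown import countdown

def confirm_with_delay(prompt, delay=5):
    # userquery() returns the matching entry from its responses list
    # ("Yes"/"No" by default); an empty reply selects the first entry.
    if userquery(prompt) == "No":
        print "Quitting."
        return False
    # countdown() prints a short warning delay, the same way unmerge()
    # uses CLEAN_DELAY before the real unmerging begins.
    countdown(delay, ">>> Proceeding")
    return True

if __name__ == "__main__":
    confirm_with_delay("Would you like to continue?")

As in unmerge() above, comparing the return value against "No" keeps the default (empty input) path on the affirmative branch; when responses is not passed, the prompt colours default to PROMPT_CHOICE_DEFAULT and PROMPT_CHOICE_OTHER.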