public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/autodep:master commit in: portage_with_autodep/pym/portage/tests/news/, ...
@ 2011-08-21 11:37 Александр Берсенев
  0 siblings, 0 replies; 2+ messages in thread
From: Александр Берсенев @ 2011-08-21 11:37 UTC (permalink / raw)
  To: gentoo-commits

commit:     91ffc6c50001d41fe1d16981baa32fb557463375
Author:     Alexander Bersenev <bay <AT> hackerdom <DOT> ru>
AuthorDate: Sun Aug 21 17:35:50 2011 +0000
Commit:     Александр Берсенев <bay <AT> hackerdom <DOT> ru>
CommitDate: Sun Aug 21 17:35:50 2011 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/autodep.git;a=commit;h=91ffc6c5

add a patched version of portage

---
 portage_with_autodep/bin/archive-conf              |  111 +
 portage_with_autodep/bin/banned-helper             |    6 +
 portage_with_autodep/bin/binhost-snapshot          |  142 +
 .../bin/check-implicit-pointer-usage.py            |   84 +
 portage_with_autodep/bin/clean_locks               |   47 +
 portage_with_autodep/bin/dispatch-conf             |  434 ++
 portage_with_autodep/bin/dohtml.py                 |  191 +
 portage_with_autodep/bin/ebuild                    |  346 +
 portage_with_autodep/bin/ebuild-helpers/4/dodoc    |    1 +
 portage_with_autodep/bin/ebuild-helpers/4/dohard   |    1 +
 portage_with_autodep/bin/ebuild-helpers/4/dosed    |    1 +
 .../bin/ebuild-helpers/4/prepalldocs               |    1 +
 portage_with_autodep/bin/ebuild-helpers/die        |    7 +
 portage_with_autodep/bin/ebuild-helpers/dobin      |   29 +
 portage_with_autodep/bin/ebuild-helpers/doconfd    |   14 +
 portage_with_autodep/bin/ebuild-helpers/dodir      |   10 +
 portage_with_autodep/bin/ebuild-helpers/dodoc      |   31 +
 portage_with_autodep/bin/ebuild-helpers/doenvd     |   14 +
 portage_with_autodep/bin/ebuild-helpers/doexe      |   43 +
 portage_with_autodep/bin/ebuild-helpers/dohard     |   13 +
 portage_with_autodep/bin/ebuild-helpers/dohtml     |   14 +
 portage_with_autodep/bin/ebuild-helpers/doinfo     |   24 +
 portage_with_autodep/bin/ebuild-helpers/doinitd    |   14 +
 portage_with_autodep/bin/ebuild-helpers/doins      |  155 +
 portage_with_autodep/bin/ebuild-helpers/dolib      |   43 +
 portage_with_autodep/bin/ebuild-helpers/dolib.a    |    6 +
 portage_with_autodep/bin/ebuild-helpers/dolib.so   |    6 +
 portage_with_autodep/bin/ebuild-helpers/doman      |   64 +
 portage_with_autodep/bin/ebuild-helpers/domo       |   34 +
 portage_with_autodep/bin/ebuild-helpers/dosbin     |   29 +
 portage_with_autodep/bin/ebuild-helpers/dosed      |   35 +
 portage_with_autodep/bin/ebuild-helpers/dosym      |   18 +
 portage_with_autodep/bin/ebuild-helpers/ecompress  |  161 +
 .../bin/ebuild-helpers/ecompressdir                |  143 +
 portage_with_autodep/bin/ebuild-helpers/eerror     |    1 +
 portage_with_autodep/bin/ebuild-helpers/einfo      |    1 +
 portage_with_autodep/bin/ebuild-helpers/elog       |    7 +
 portage_with_autodep/bin/ebuild-helpers/emake      |   28 +
 portage_with_autodep/bin/ebuild-helpers/eqawarn    |    1 +
 portage_with_autodep/bin/ebuild-helpers/ewarn      |    1 +
 portage_with_autodep/bin/ebuild-helpers/fowners    |   13 +
 portage_with_autodep/bin/ebuild-helpers/fperms     |   13 +
 portage_with_autodep/bin/ebuild-helpers/newbin     |   19 +
 portage_with_autodep/bin/ebuild-helpers/newconfd   |   19 +
 portage_with_autodep/bin/ebuild-helpers/newdoc     |   19 +
 portage_with_autodep/bin/ebuild-helpers/newenvd    |   19 +
 portage_with_autodep/bin/ebuild-helpers/newexe     |   19 +
 portage_with_autodep/bin/ebuild-helpers/newinitd   |   19 +
 portage_with_autodep/bin/ebuild-helpers/newins     |   35 +
 portage_with_autodep/bin/ebuild-helpers/newlib.a   |   19 +
 portage_with_autodep/bin/ebuild-helpers/newlib.so  |   19 +
 portage_with_autodep/bin/ebuild-helpers/newman     |   19 +
 portage_with_autodep/bin/ebuild-helpers/newsbin    |   19 +
 portage_with_autodep/bin/ebuild-helpers/portageq   |    8 +
 portage_with_autodep/bin/ebuild-helpers/prepall    |   23 +
 .../bin/ebuild-helpers/prepalldocs                 |   15 +
 .../bin/ebuild-helpers/prepallinfo                 |    9 +
 portage_with_autodep/bin/ebuild-helpers/prepallman |   19 +
 .../bin/ebuild-helpers/prepallstrip                |    5 +
 portage_with_autodep/bin/ebuild-helpers/prepinfo   |   34 +
 portage_with_autodep/bin/ebuild-helpers/preplib    |   28 +
 portage_with_autodep/bin/ebuild-helpers/prepman    |   32 +
 portage_with_autodep/bin/ebuild-helpers/prepstrip  |  193 +
 portage_with_autodep/bin/ebuild-helpers/sed        |   27 +
 portage_with_autodep/bin/ebuild-ipc                |    8 +
 portage_with_autodep/bin/ebuild-ipc.py             |  276 +
 portage_with_autodep/bin/ebuild.sh                 | 2424 +++++++
 portage_with_autodep/bin/egencache                 |  851 +++
 portage_with_autodep/bin/emaint                    |  654 ++
 portage_with_autodep/bin/emerge                    |   66 +
 portage_with_autodep/bin/emerge-webrsync           |  457 ++
 portage_with_autodep/bin/env-update                |   41 +
 portage_with_autodep/bin/etc-update                |  616 ++
 .../bin/filter-bash-environment.py                 |  150 +
 portage_with_autodep/bin/fixpackages               |   45 +
 portage_with_autodep/bin/glsa-check                |  316 +
 portage_with_autodep/bin/isolated-functions.sh     |  630 ++
 portage_with_autodep/bin/lock-helper.py            |   28 +
 portage_with_autodep/bin/misc-functions.sh         | 1002 +++
 portage_with_autodep/bin/portageq                  |  822 +++
 portage_with_autodep/bin/quickpkg                  |  291 +
 portage_with_autodep/bin/regenworld                |  144 +
 portage_with_autodep/bin/repoman                   | 2672 ++++++++
 portage_with_autodep/bin/xpak-helper.py            |   68 +
 .../pym/_emerge/AbstractDepPriority.py             |   29 +
 .../pym/_emerge/AbstractEbuildProcess.py           |  266 +
 .../pym/_emerge/AbstractPollTask.py                |   62 +
 .../pym/_emerge/AsynchronousLock.py                |  288 +
 .../pym/_emerge/AsynchronousTask.py                |  129 +
 portage_with_autodep/pym/_emerge/AtomArg.py        |   11 +
 portage_with_autodep/pym/_emerge/Binpkg.py         |  333 +
 .../pym/_emerge/BinpkgEnvExtractor.py              |   66 +
 .../pym/_emerge/BinpkgExtractorAsync.py            |   31 +
 portage_with_autodep/pym/_emerge/BinpkgFetcher.py  |  181 +
 .../pym/_emerge/BinpkgPrefetcher.py                |   43 +
 portage_with_autodep/pym/_emerge/BinpkgVerifier.py |   75 +
 portage_with_autodep/pym/_emerge/Blocker.py        |   15 +
 portage_with_autodep/pym/_emerge/BlockerCache.py   |  182 +
 portage_with_autodep/pym/_emerge/BlockerDB.py      |  124 +
 .../pym/_emerge/BlockerDepPriority.py              |   13 +
 portage_with_autodep/pym/_emerge/CompositeTask.py  |  157 +
 portage_with_autodep/pym/_emerge/DepPriority.py    |   49 +
 .../pym/_emerge/DepPriorityNormalRange.py          |   47 +
 .../pym/_emerge/DepPrioritySatisfiedRange.py       |   85 +
 portage_with_autodep/pym/_emerge/Dependency.py     |   20 +
 portage_with_autodep/pym/_emerge/DependencyArg.py  |   33 +
 portage_with_autodep/pym/_emerge/EbuildBinpkg.py   |   46 +
 portage_with_autodep/pym/_emerge/EbuildBuild.py    |  426 ++
 portage_with_autodep/pym/_emerge/EbuildBuildDir.py |  109 +
 portage_with_autodep/pym/_emerge/EbuildExecuter.py |   99 +
 portage_with_autodep/pym/_emerge/EbuildFetcher.py  |  302 +
 .../pym/_emerge/EbuildFetchonly.py                 |   32 +
 .../pym/_emerge/EbuildIpcDaemon.py                 |  108 +
 portage_with_autodep/pym/_emerge/EbuildMerge.py    |   56 +
 .../pym/_emerge/EbuildMetadataPhase.py             |  133 +
 portage_with_autodep/pym/_emerge/EbuildPhase.py    |  350 +
 portage_with_autodep/pym/_emerge/EbuildProcess.py  |   21 +
 .../pym/_emerge/EbuildSpawnProcess.py              |   16 +
 portage_with_autodep/pym/_emerge/EventsAnalyser.py |  511 ++
 portage_with_autodep/pym/_emerge/EventsLogger.py   |  180 +
 portage_with_autodep/pym/_emerge/FakeVartree.py    |  265 +
 portage_with_autodep/pym/_emerge/FifoIpcDaemon.py  |   81 +
 .../pym/_emerge/JobStatusDisplay.py                |  292 +
 portage_with_autodep/pym/_emerge/MergeListItem.py  |  135 +
 portage_with_autodep/pym/_emerge/MetadataRegen.py  |  184 +
 .../pym/_emerge/MiscFunctionsProcess.py            |   33 +
 portage_with_autodep/pym/_emerge/Package.py        |  700 ++
 portage_with_autodep/pym/_emerge/PackageArg.py     |   19 +
 portage_with_autodep/pym/_emerge/PackageMerge.py   |   40 +
 .../pym/_emerge/PackageUninstall.py                |  110 +
 .../pym/_emerge/PackageVirtualDbapi.py             |  145 +
 portage_with_autodep/pym/_emerge/PipeReader.py     |   96 +
 portage_with_autodep/pym/_emerge/PollConstants.py  |   18 +
 portage_with_autodep/pym/_emerge/PollScheduler.py  |  398 ++
 .../pym/_emerge/PollSelectAdapter.py               |   73 +
 .../pym/_emerge/ProgressHandler.py                 |   22 +
 portage_with_autodep/pym/_emerge/QueueScheduler.py |  116 +
 portage_with_autodep/pym/_emerge/RootConfig.py     |   34 +
 portage_with_autodep/pym/_emerge/Scheduler.py      | 1975 ++++++
 .../pym/_emerge/SequentialTaskQueue.py             |   89 +
 portage_with_autodep/pym/_emerge/SetArg.py         |   11 +
 portage_with_autodep/pym/_emerge/SlotObject.py     |   42 +
 portage_with_autodep/pym/_emerge/SpawnProcess.py   |  235 +
 portage_with_autodep/pym/_emerge/SubProcess.py     |  141 +
 portage_with_autodep/pym/_emerge/Task.py           |   42 +
 portage_with_autodep/pym/_emerge/TaskScheduler.py  |   25 +
 portage_with_autodep/pym/_emerge/TaskSequence.py   |   44 +
 .../pym/_emerge/UninstallFailure.py                |   15 +
 .../pym/_emerge/UnmergeDepPriority.py              |   41 +
 portage_with_autodep/pym/_emerge/UseFlagDisplay.py |  122 +
 portage_with_autodep/pym/_emerge/__init__.py       |    2 +
 .../pym/_emerge/_find_deep_system_runtime_deps.py  |   38 +
 .../pym/_emerge/_flush_elog_mod_echo.py            |   15 +
 portage_with_autodep/pym/_emerge/actions.py        | 3123 +++++++++
 portage_with_autodep/pym/_emerge/clear_caches.py   |   19 +
 portage_with_autodep/pym/_emerge/countdown.py      |   22 +
 .../pym/_emerge/create_depgraph_params.py          |   72 +
 .../pym/_emerge/create_world_atom.py               |   92 +
 portage_with_autodep/pym/_emerge/depgraph.py       | 7029 ++++++++++++++++++++
 portage_with_autodep/pym/_emerge/emergelog.py      |   63 +
 portage_with_autodep/pym/_emerge/getloadavg.py     |   27 +
 portage_with_autodep/pym/_emerge/help.py           |  815 +++
 .../pym/_emerge/is_valid_package_atom.py           |   21 +
 portage_with_autodep/pym/_emerge/main.py           | 1910 ++++++
 .../pym/_emerge/resolver/__init__.py               |    2 +
 .../pym/_emerge/resolver/backtracking.py           |  197 +
 .../pym/_emerge/resolver/circular_dependency.py    |  267 +
 .../pym/_emerge/resolver/output.py                 |  888 +++
 .../pym/_emerge/resolver/output_helpers.py         |  576 ++
 .../pym/_emerge/resolver/slot_collision.py         |  978 +++
 portage_with_autodep/pym/_emerge/search.py         |  385 ++
 .../pym/_emerge/show_invalid_depstring_notice.py   |   35 +
 portage_with_autodep/pym/_emerge/stdout_spinner.py |   83 +
 portage_with_autodep/pym/_emerge/sync/__init__.py  |    2 +
 .../pym/_emerge/sync/getaddrinfo_validate.py       |   29 +
 .../pym/_emerge/sync/old_tree_timestamp.py         |   98 +
 portage_with_autodep/pym/_emerge/unmerge.py        |  578 ++
 portage_with_autodep/pym/_emerge/userquery.py      |   55 +
 portage_with_autodep/pym/portage/__init__.py       |  610 ++
 .../pym/portage/_global_updates.py                 |  250 +
 .../pym/portage/_legacy_globals.py                 |   81 +
 portage_with_autodep/pym/portage/_selinux.py       |  129 +
 portage_with_autodep/pym/portage/_sets/__init__.py |  245 +
 portage_with_autodep/pym/portage/_sets/base.py     |  264 +
 portage_with_autodep/pym/portage/_sets/dbapi.py    |  383 ++
 portage_with_autodep/pym/portage/_sets/files.py    |  341 +
 portage_with_autodep/pym/portage/_sets/libs.py     |   98 +
 portage_with_autodep/pym/portage/_sets/profiles.py |   53 +
 portage_with_autodep/pym/portage/_sets/security.py |   86 +
 portage_with_autodep/pym/portage/_sets/shell.py    |   44 +
 portage_with_autodep/pym/portage/cache/__init__.py |    4 +
 portage_with_autodep/pym/portage/cache/anydbm.py   |  113 +
 .../pym/portage/cache/cache_errors.py              |   62 +
 .../pym/portage/cache/ebuild_xattr.py              |  171 +
 .../pym/portage/cache/flat_hash.py                 |  155 +
 .../pym/portage/cache/flat_list.py                 |  134 +
 .../pym/portage/cache/fs_template.py               |   90 +
 portage_with_autodep/pym/portage/cache/mappings.py |  485 ++
 portage_with_autodep/pym/portage/cache/metadata.py |  154 +
 .../pym/portage/cache/metadata_overlay.py          |  105 +
 .../pym/portage/cache/sql_template.py              |  301 +
 portage_with_autodep/pym/portage/cache/sqlite.py   |  245 +
 portage_with_autodep/pym/portage/cache/template.py |  236 +
 portage_with_autodep/pym/portage/cache/util.py     |  170 +
 portage_with_autodep/pym/portage/cache/volatile.py |   25 +
 portage_with_autodep/pym/portage/checksum.py       |  291 +
 portage_with_autodep/pym/portage/const.py          |  143 +
 portage_with_autodep/pym/portage/cvstree.py        |  293 +
 portage_with_autodep/pym/portage/data.py           |  122 +
 .../pym/portage/dbapi/_MergeProcess.py             |  282 +
 portage_with_autodep/pym/portage/dbapi/__init__.py |  302 +
 .../pym/portage/dbapi/_expand_new_virt.py          |   72 +
 portage_with_autodep/pym/portage/dbapi/bintree.py  | 1366 ++++
 .../pym/portage/dbapi/cpv_expand.py                |  106 +
 .../pym/portage/dbapi/dep_expand.py                |   56 +
 portage_with_autodep/pym/portage/dbapi/porttree.py | 1168 ++++
 portage_with_autodep/pym/portage/dbapi/vartree.py  | 4527 +++++++++++++
 portage_with_autodep/pym/portage/dbapi/virtual.py  |  131 +
 portage_with_autodep/pym/portage/debug.py          |  120 +
 portage_with_autodep/pym/portage/dep/__init__.py   | 2432 +++++++
 portage_with_autodep/pym/portage/dep/dep_check.py  |  679 ++
 portage_with_autodep/pym/portage/dispatch_conf.py  |  188 +
 portage_with_autodep/pym/portage/eapi.py           |   50 +
 portage_with_autodep/pym/portage/eclass_cache.py   |  123 +
 portage_with_autodep/pym/portage/elog/__init__.py  |  182 +
 portage_with_autodep/pym/portage/elog/filtering.py |   15 +
 portage_with_autodep/pym/portage/elog/messages.py  |  172 +
 .../pym/portage/elog/mod_custom.py                 |   19 +
 portage_with_autodep/pym/portage/elog/mod_echo.py  |   46 +
 portage_with_autodep/pym/portage/elog/mod_mail.py  |   43 +
 .../pym/portage/elog/mod_mail_summary.py           |   89 +
 portage_with_autodep/pym/portage/elog/mod_save.py  |   51 +
 .../pym/portage/elog/mod_save_summary.py           |   59 +
 .../pym/portage/elog/mod_syslog.py                 |   32 +
 portage_with_autodep/pym/portage/env/__init__.py   |    3 +
 portage_with_autodep/pym/portage/env/config.py     |  105 +
 portage_with_autodep/pym/portage/env/loaders.py    |  319 +
 portage_with_autodep/pym/portage/env/validators.py |   20 +
 portage_with_autodep/pym/portage/exception.py      |  186 +
 portage_with_autodep/pym/portage/getbinpkg.py      |  861 +++
 portage_with_autodep/pym/portage/glsa.py           |  699 ++
 portage_with_autodep/pym/portage/localization.py   |   20 +
 portage_with_autodep/pym/portage/locks.py          |  395 ++
 portage_with_autodep/pym/portage/mail.py           |  177 +
 portage_with_autodep/pym/portage/manifest.py       |  538 ++
 portage_with_autodep/pym/portage/news.py           |  351 +
 portage_with_autodep/pym/portage/output.py         |  794 +++
 .../pym/portage/package/__init__.py                |    2 +
 .../pym/portage/package/ebuild/__init__.py         |    2 +
 .../package/ebuild/_config/KeywordsManager.py      |  284 +
 .../package/ebuild/_config/LicenseManager.py       |  236 +
 .../package/ebuild/_config/LocationsManager.py     |  182 +
 .../portage/package/ebuild/_config/MaskManager.py  |  189 +
 .../portage/package/ebuild/_config/UseManager.py   |  235 +
 .../package/ebuild/_config/VirtualsManager.py      |  233 +
 .../pym/portage/package/ebuild/_config/__init__.py |    2 +
 .../package/ebuild/_config/env_var_validation.py   |   23 +
 .../portage/package/ebuild/_config/features_set.py |  128 +
 .../pym/portage/package/ebuild/_config/helper.py   |   64 +
 .../package/ebuild/_config/special_env_vars.py     |  185 +
 .../pym/portage/package/ebuild/_ipc/ExitCommand.py |   27 +
 .../pym/portage/package/ebuild/_ipc/IpcCommand.py  |    9 +
 .../portage/package/ebuild/_ipc/QueryCommand.py    |   98 +
 .../pym/portage/package/ebuild/_ipc/__init__.py    |    2 +
 .../pym/portage/package/ebuild/_spawn_nofetch.py   |   82 +
 .../pym/portage/package/ebuild/config.py           | 2224 +++++++
 .../package/ebuild/deprecated_profile_check.py     |   42 +
 .../pym/portage/package/ebuild/digestcheck.py      |  167 +
 .../pym/portage/package/ebuild/digestgen.py        |  202 +
 .../pym/portage/package/ebuild/doebuild.py         | 1791 +++++
 .../pym/portage/package/ebuild/fetch.py            | 1129 ++++
 .../pym/portage/package/ebuild/getmaskingreason.py |  124 +
 .../pym/portage/package/ebuild/getmaskingstatus.py |  174 +
 .../portage/package/ebuild/prepare_build_dirs.py   |  370 +
 portage_with_autodep/pym/portage/process.py        |  427 ++
 portage_with_autodep/pym/portage/proxy/__init__.py |    2 +
 .../pym/portage/proxy/lazyimport.py                |  212 +
 .../pym/portage/proxy/objectproxy.py               |   91 +
 .../pym/portage/repository/__init__.py             |    2 +
 .../pym/portage/repository/config.py               |  504 ++
 portage_with_autodep/pym/portage/tests/__init__.py |  244 +
 .../pym/portage/tests/bin/setup_env.py             |   85 +
 .../pym/portage/tests/bin/test_dobin.py            |   16 +
 .../pym/portage/tests/bin/test_dodir.py            |   16 +
 .../pym/portage/tests/dbapi/__init__.py            |    2 +
 .../pym/portage/tests/dbapi/test_fakedbapi.py      |   58 +
 .../pym/portage/tests/dep/__init__.py              |    3 +
 .../pym/portage/tests/dep/testAtom.py              |  315 +
 .../pym/portage/tests/dep/testCheckRequiredUse.py  |  219 +
 .../pym/portage/tests/dep/testExtendedAtomDict.py  |   18 +
 .../portage/tests/dep/testExtractAffectingUSE.py   |   75 +
 .../pym/portage/tests/dep/testStandalone.py        |   36 +
 .../portage/tests/dep/test_best_match_to_list.py   |   43 +
 .../pym/portage/tests/dep/test_dep_getcpv.py       |   35 +
 .../pym/portage/tests/dep/test_dep_getrepo.py      |   29 +
 .../pym/portage/tests/dep/test_dep_getslot.py      |   28 +
 .../pym/portage/tests/dep/test_dep_getusedeps.py   |   35 +
 .../pym/portage/tests/dep/test_get_operator.py     |   33 +
 .../tests/dep/test_get_required_use_flags.py       |   42 +
 .../pym/portage/tests/dep/test_isjustname.py       |   24 +
 .../pym/portage/tests/dep/test_isvalidatom.py      |  146 +
 .../pym/portage/tests/dep/test_match_from_list.py  |  108 +
 .../pym/portage/tests/dep/test_paren_reduce.py     |   66 +
 .../pym/portage/tests/dep/test_use_reduce.py       |  627 ++
 .../pym/portage/tests/ebuild/__init__.py           |    2 +
 .../tests/ebuild/test_array_fromfile_eof.py        |   43 +
 .../pym/portage/tests/ebuild/test_config.py        |  198 +
 .../portage/tests/ebuild/test_doebuild_spawn.py    |   82 +
 .../pym/portage/tests/ebuild/test_ipc_daemon.py    |  124 +
 .../pym/portage/tests/ebuild/test_pty_eof.py       |   32 +
 .../pym/portage/tests/ebuild/test_spawn.py         |   52 +
 .../pym/portage/tests/env/__init__.py              |    4 +
 .../pym/portage/tests/env/config/__init__.py       |    4 +
 .../tests/env/config/test_PackageKeywordsFile.py   |   40 +
 .../tests/env/config/test_PackageMaskFile.py       |   29 +
 .../tests/env/config/test_PackageUseFile.py        |   37 +
 .../tests/env/config/test_PortageModulesFile.py    |   39 +
 .../portage/tests/lafilefixer/test_lafilefixer.py  |  145 +
 .../test_lazy_import_portage_baseline.py           |   81 +
 .../lazyimport/test_preload_portage_submodules.py  |   16 +
 .../pym/portage/tests/lint/test_bash_syntax.py     |   42 +
 .../pym/portage/tests/lint/test_compile_modules.py |   46 +
 .../pym/portage/tests/lint/test_import_modules.py  |   40 +
 .../pym/portage/tests/locks/__init__.py            |    2 +
 .../portage/tests/locks/test_asynchronous_lock.py  |  124 +
 .../pym/portage/tests/locks/test_lock_nonblock.py  |   46 +
 .../pym/portage/tests/news/__init__.py             |    3 +
 .../pym/portage/tests/news/test_NewsItem.py        |   95 +
 .../pym/portage/tests/process/__init__.py          |    2 +
 .../pym/portage/tests/process/test_poll.py         |   39 +
 .../portage/tests/resolver/ResolverPlayground.py   |  690 ++
 .../pym/portage/tests/resolver/__init__.py         |    2 +
 .../pym/portage/tests/resolver/test_autounmask.py  |  326 +
 .../portage/tests/resolver/test_backtracking.py    |  169 +
 .../tests/resolver/test_circular_dependencies.py   |   84 +
 .../pym/portage/tests/resolver/test_depclean.py    |  285 +
 .../pym/portage/tests/resolver/test_depth.py       |  252 +
 .../pym/portage/tests/resolver/test_eapi.py        |  115 +
 .../pym/portage/tests/resolver/test_merge_order.py |  453 ++
 .../test_missing_iuse_and_evaluated_atoms.py       |   31 +
 .../pym/portage/tests/resolver/test_multirepo.py   |  318 +
 .../pym/portage/tests/resolver/test_multislot.py   |   40 +
 .../tests/resolver/test_old_dep_chain_display.py   |   35 +
 .../pym/portage/tests/resolver/test_output.py      |   88 +
 .../pym/portage/tests/resolver/test_rebuild.py     |  138 +
 .../portage/tests/resolver/test_required_use.py    |  114 +
 .../pym/portage/tests/resolver/test_simple.py      |   57 +
 .../portage/tests/resolver/test_slot_collisions.py |  143 +
 .../tests/resolver/test_use_dep_defaults.py        |   40 +
 portage_with_autodep/pym/portage/tests/runTests    |   46 +
 .../tests/sets/base/testInternalPackageSet.py      |   61 +
 .../portage/tests/sets/files/testConfigFileSet.py  |   32 +
 .../portage/tests/sets/files/testStaticFileSet.py  |   27 +
 .../pym/portage/tests/sets/shell/testShell.py      |   28 +
 .../pym/portage/tests/unicode/__init__.py          |    2 +
 .../portage/tests/unicode/test_string_format.py    |  108 +
 .../pym/portage/tests/util/__init__.py             |    4 +
 .../pym/portage/tests/util/test_digraph.py         |  201 +
 .../pym/portage/tests/util/test_getconfig.py       |   29 +
 .../pym/portage/tests/util/test_grabdict.py        |   11 +
 .../pym/portage/tests/util/test_normalizedPath.py  |   14 +
 .../pym/portage/tests/util/test_stackDictList.py   |   17 +
 .../pym/portage/tests/util/test_stackDicts.py      |   36 +
 .../pym/portage/tests/util/test_stackLists.py      |   19 +
 .../pym/portage/tests/util/test_uniqueArray.py     |   24 +
 .../pym/portage/tests/util/test_varExpand.py       |   92 +
 .../pym/portage/tests/versions/__init__.py         |    3 +
 .../portage/tests/versions/test_cpv_sort_key.py    |   16 +
 .../pym/portage/tests/versions/test_vercmp.py      |   80 +
 .../pym/portage/tests/xpak/__init__.py             |    3 +
 .../pym/portage/tests/xpak/test_decodeint.py       |   16 +
 portage_with_autodep/pym/portage/update.py         |  320 +
 .../pym/portage/util/ExtractKernelVersion.py       |   76 +
 portage_with_autodep/pym/portage/util/__init__.py  | 1602 +++++
 .../pym/portage/util/_dyn_libs/LinkageMapELF.py    |  805 +++
 .../util/_dyn_libs/PreservedLibsRegistry.py        |  172 +
 .../pym/portage/util/_dyn_libs/__init__.py         |    2 +
 portage_with_autodep/pym/portage/util/_pty.py      |  212 +
 portage_with_autodep/pym/portage/util/digraph.py   |  342 +
 .../pym/portage/util/env_update.py                 |  293 +
 .../pym/portage/util/lafilefixer.py                |  185 +
 portage_with_autodep/pym/portage/util/listdir.py   |  151 +
 portage_with_autodep/pym/portage/util/movefile.py  |  242 +
 portage_with_autodep/pym/portage/util/mtimedb.py   |   81 +
 portage_with_autodep/pym/portage/versions.py       |  403 ++
 portage_with_autodep/pym/portage/xml/__init__.py   |    2 +
 portage_with_autodep/pym/portage/xml/metadata.py   |  376 ++
 portage_with_autodep/pym/portage/xpak.py           |  497 ++
 portage_with_autodep/pym/repoman/checks.py         |  707 ++
 portage_with_autodep/pym/repoman/errors.py         |   26 +
 portage_with_autodep/pym/repoman/herdbase.py       |  110 +
 portage_with_autodep/pym/repoman/utilities.py      |  511 ++
 392 files changed, 89247 insertions(+), 0 deletions(-)

diff --git a/portage_with_autodep/bin/archive-conf b/portage_with_autodep/bin/archive-conf
new file mode 100755
index 0000000..5a03b85
--- /dev/null
+++ b/portage_with_autodep/bin/archive-conf
@@ -0,0 +1,111 @@
+#!/usr/bin/python
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+#
+# archive-conf -- save off a config file in the dispatch-conf archive dir
+#
+#  Written by Wayne Davison <gentoo@blorf.net> with code snagged from
+#  Jeremy Wohl's dispatch-conf script and the portage chkcontents script.
+#
+
+from __future__ import print_function
+
+import sys
+try:
+    import portage
+except ImportError:
+    from os import path as osp
+    sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+    import portage
+
+from portage import os
+import dispatch_conf
+
+FIND_EXTANT_CONTENTS  = "find %s -name CONTENTS"
+
+MANDATORY_OPTS  = [ 'archive-dir' ]
+
+try:
+    import fchksum
+    def perform_checksum(filename): return fchksum.fmd5t(filename)
+except ImportError:
+    import md5
+    def md5_to_hex(md5sum):
+        hexform = ""
+        for ix in range(len(md5sum)):
+            hexform = hexform + "%02x" % ord(md5sum[ix])
+        return hexform.lower()
+    
+    def perform_checksum(filename):
+        f = open(filename, 'rb')
+        blocksize=32768
+        data = f.read(blocksize)
+        size = 0
+        sum = md5.new()
+        while data:
+            sum.update(data)
+            size = size + len(data)
+            data = f.read(blocksize)
+        return (md5_to_hex(sum.digest()),size)
+
+def archive_conf():
+    args = []
+    content_files = []
+    md5_match_hash = {}
+
+    options = portage.dispatch_conf.read_config(MANDATORY_OPTS)
+
+    for conf in sys.argv[1:]:
+        if not os.path.isabs(conf):
+            conf = os.path.abspath(conf)
+        args += [ conf ]
+        md5_match_hash[conf] = ''
+
+    # Find all the CONTENTS files in VDB_PATH.
+    content_files += os.popen(FIND_EXTANT_CONTENTS %
+        (os.path.join(portage.settings['EROOT'], portage.VDB_PATH))).readlines()
+
+    # Search for the saved md5 checksum of all the specified config files
+    # and see if the current file is unmodified or not.
+    try:
+        todo_cnt = len(args)
+        for file in content_files:
+            file = file.rstrip()
+            try:
+                contents = open(file, "r")
+            except IOError as e:
+                print('archive-conf: Unable to open %s: %s' % (file, e), file=sys.stderr)
+                sys.exit(1)
+            lines = contents.readlines()
+            for line in lines:
+                items = line.split()
+                if items[0] == 'obj':
+                    for conf in args:
+                        if items[1] == conf:
+                            stored = items[2].lower()
+                            real = perform_checksum(conf)[0].lower()
+                            if stored == real:
+                                md5_match_hash[conf] = conf
+                            todo_cnt -= 1
+                            if todo_cnt == 0:
+                                raise StopIteration()
+    except StopIteration:
+        pass
+
+    for conf in args:
+        archive = os.path.join(options['archive-dir'], conf.lstrip('/'))
+        if options['use-rcs'] == 'yes':
+            portage.dispatch_conf.rcs_archive(archive, conf, md5_match_hash[conf], '')
+            if md5_match_hash[conf]:
+                portage.dispatch_conf.rcs_archive_post_process(archive)
+        else:
+            portage.dispatch_conf.file_archive(archive, conf, md5_match_hash[conf], '')
+            if md5_match_hash[conf]:
+                portage.dispatch_conf.file_archive_post_process(archive)
+
+# run
+if len(sys.argv) > 1:
+    archive_conf()
+else:
+    print('Usage: archive-conf /CONFIG/FILE [/CONFIG/FILE...]', file=sys.stderr)

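For reference, the CONTENTS comparison above reduces to the following
minimal sketch (hashlib stands in here for the fchksum/md5 fallbacks the
script actually imports; "obj <path> <md5> <mtime>" is the VDB CONTENTS
line format the loop parses):

    import hashlib

    def file_md5(filename, blocksize=32768):
        # Stream the file through MD5, as perform_checksum() does above.
        digest = hashlib.md5()
        with open(filename, 'rb') as f:
            for block in iter(lambda: f.read(blocksize), b''):
                digest.update(block)
        return digest.hexdigest()

    def is_unmodified(contents_file, conf):
        # CONTENTS records regular files as: obj <path> <md5> <mtime>
        with open(contents_file) as contents:
            for line in contents:
                items = line.split()
                if items and items[0] == 'obj' and items[1] == conf:
                    return items[2].lower() == file_md5(conf)
        return False
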
diff --git a/portage_with_autodep/bin/banned-helper b/portage_with_autodep/bin/banned-helper
new file mode 100755
index 0000000..17ea991
--- /dev/null
+++ b/portage_with_autodep/bin/banned-helper
@@ -0,0 +1,6 @@
+#!/bin/bash
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+die "'${0##*/}' has been banned for EAPI '$EAPI'"
+exit 1

diff --git a/portage_with_autodep/bin/binhost-snapshot b/portage_with_autodep/bin/binhost-snapshot
new file mode 100755
index 0000000..9d2697d
--- /dev/null
+++ b/portage_with_autodep/bin/binhost-snapshot
@@ -0,0 +1,142 @@
+#!/usr/bin/python
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import optparse
+import os
+import sys
+import textwrap
+
+try:
+	from urllib.parse import urlparse
+except ImportError:
+	from urlparse import urlparse
+
+try:
+	import portage
+except ImportError:
+	from os import path as osp
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(
+		osp.realpath(__file__))), "pym"))
+	import portage
+
+def parse_args(argv):
+	prog_name = os.path.basename(argv[0])
+	usage = prog_name + ' [options] ' + \
+		'<src_pkg_dir> <snapshot_dir> <snapshot_uri> <binhost_dir>'
+
+	prog_desc = "This program will copy src_pkg_dir to snapshot_dir " + \
+		"and inside binhost_dir it will create a Packages index file " + \
+		"which refers to snapshot_uri. This is intended to solve race " + \
+		"conditions on binhosts as described at http://crosbug.com/3225."
+
+	usage += "\n\n"
+	for line in textwrap.wrap(prog_desc, 70):
+		usage += line + "\n" 
+
+	usage += "\n"
+	usage += "Required Arguments:\n\n"
+	usage += "  src_pkg_dir  - the source $PKGDIR\n"
+	usage += "  snapshot_dir - destination snapshot " + \
+		"directory (must not exist)\n"
+	usage += "  snapshot_uri - URI which refers to " + \
+		"snapshot_dir from the\n" + \
+		"                 client side\n"
+	usage += "  binhost_dir  - directory in which to " + \
+		"write Packages index with\n" + \
+		"                 snapshot_uri"
+
+	parser = optparse.OptionParser(usage=usage)
+	parser.add_option('--hardlinks', help='create hardlinks (y or n, default is y)',
+		choices=('y', 'n'))
+	parser.set_defaults(hardlinks='y')
+	options, args = parser.parse_args(argv[1:])
+
+	if len(args) != 4:
+		parser.error("Requires 4 arguments, got %d" % (len(args),))
+
+	return parser, options, args
+
+def main(argv):
+	parser, options, args = parse_args(argv)
+
+	src_pkg_dir, snapshot_dir, snapshot_uri, binhost_dir = args
+	src_pkgs_index = os.path.join(src_pkg_dir, 'Packages')
+
+	if not os.path.isdir(src_pkg_dir):
+		parser.error("src_pkg_dir is not a directory: '%s'" % (src_pkg_dir,))
+
+	if not os.path.isfile(src_pkgs_index):
+		parser.error("src_pkg_dir does not contain a " + \
+			"'Packages' index: '%s'" % (src_pkg_dir,))
+
+	parse_result = urlparse(snapshot_uri)
+	if not (parse_result.scheme and parse_result.netloc and parse_result.path):
+		parser.error("snapshot_uri is not a valid URI: '%s'" % (snapshot_uri,))
+
+	if os.path.isdir(snapshot_dir):
+		parser.error("snapshot_dir already exists: '%s'" % snapshot_dir)
+
+	try:
+		os.makedirs(os.path.dirname(snapshot_dir))
+	except OSError:
+		pass
+	if not os.path.isdir(os.path.dirname(snapshot_dir)):
+		parser.error("snapshot_dir parent could not be created: '%s'" % \
+			os.path.dirname(snapshot_dir))
+
+	try:
+		os.makedirs(binhost_dir)
+	except OSError:
+		pass
+	if not os.path.isdir(binhost_dir):
+		parser.error("binhost_dir could not be created: '%s'" % binhost_dir)
+
+	cp_opts = 'RP'
+	if options.hardlinks == 'n':
+		cp_opts += 'p'
+	else:
+		cp_opts += 'l'
+
+	cp_cmd = 'cp -%s %s %s' % (
+		cp_opts,
+		portage._shell_quote(src_pkg_dir),
+		portage._shell_quote(snapshot_dir)
+	)
+
+	ret = os.system(cp_cmd)
+	if not (os.WIFEXITED(ret) and os.WEXITSTATUS(ret) == os.EX_OK):
+		return 1
+
+	infile = io.open(portage._unicode_encode(src_pkgs_index,
+		encoding=portage._encodings['fs'], errors='strict'),
+		mode='r', encoding=portage._encodings['repo.content'],
+		errors='strict')
+
+	outfile = portage.util.atomic_ofstream(
+		os.path.join(binhost_dir, "Packages"),
+		encoding=portage._encodings['repo.content'],
+		errors='strict')
+
+	for line in infile:
+		if line[:4] == 'URI:':
+			# skip existing URI line
+			pass
+		else:
+			if not line.strip():
+				# end of header
+				outfile.write("URI: %s\n\n" % snapshot_uri)
+				break
+			outfile.write(line)
+
+	for line in infile:
+		outfile.write(line)
+
+	infile.close()
+	outfile.close()
+
+	return os.EX_OK
+
+if __name__ == "__main__":
+	sys.exit(main(sys.argv))

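The one non-obvious step in binhost-snapshot is the header rewrite in the
two loops above: a Packages index opens with a header block terminated by
a blank line, any pre-existing URI: line is dropped, and a fresh one is
appended at the end of the header. A condensed sketch of just that
transformation (file handling and encodings stripped out):

    def rewrite_packages_header(lines, snapshot_uri):
        # Drop any existing "URI:" header line, then close the header
        # with the new URI and the blank separator line.
        out = []
        lines = iter(lines)
        for line in lines:
            if line.startswith('URI:'):
                continue
            if not line.strip():
                out.append('URI: %s\n\n' % snapshot_uri)
                break
            out.append(line)
        out.extend(lines)  # package entries pass through unchanged
        return out
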
diff --git a/portage_with_autodep/bin/check-implicit-pointer-usage.py b/portage_with_autodep/bin/check-implicit-pointer-usage.py
new file mode 100755
index 0000000..8822c45
--- /dev/null
+++ b/portage_with_autodep/bin/check-implicit-pointer-usage.py
@@ -0,0 +1,84 @@
+#!/usr/bin/python
+
+# Ripped from HP and updated from Debian
+# Update by Gentoo to support unicode output
+
+#
+# Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
+#	David Mosberger <davidm@hpl.hp.com>
+#
+# Scan standard input for GCC warning messages that are likely to be a
+# source of real 64-bit problems.  In particular, see whether there
+# are any implicitly declared functions whose return values are later
+# interpreted as pointers.  Those are almost guaranteed to cause
+# crashes.
+#
+
+from __future__ import print_function
+
+import re
+import sys
+
+implicit_pattern = re.compile(r"([^:]*):(\d+): warning: implicit declaration "
+                              + "of function [`']([^']*)'")
+pointer_pattern = (
+    r"([^:]*):(\d+): warning: "
+    + "("
+    +  "(assignment"
+    +  "|initialization"
+    +  "|return"
+    +  r"|passing arg \d+ of `[^']*'"
+    +  r"|passing arg \d+ of pointer to function"
+    +  ") makes pointer from integer without a cast"
+    + "|"
+    + "cast to pointer from integer of different size)")
+
+if sys.hexversion < 0x3000000:
+    # Use encoded byte strings in python-2.x, since the python ebuilds are
+    # known to remove the encodings module when USE=build is enabled (thus
+    # disabling unicode decoding/encoding). The portage module has a
+    # workaround for this, but currently we don't import that here since we
+    # don't want to trigger potential sandbox violations due to stale pyc
+    # files for the portage module.
+    unicode_quote_open = '\xE2\x80\x98'
+    unicode_quote_close = '\xE2\x80\x99'
+    def write(msg):
+        sys.stdout.write(msg)
+else:
+    unicode_quote_open = '\u2018'
+    unicode_quote_close = '\u2019'
+    def write(msg):
+        sys.stdout.buffer.write(msg.encode('utf_8', 'backslashreplace'))
+
+pointer_pattern = re.compile(pointer_pattern)
+
+last_implicit_filename = ""
+last_implicit_linenum = -1
+last_implicit_func = ""
+
+while True:
+    if sys.hexversion >= 0x3000000:
+        line = sys.stdin.buffer.readline().decode('utf_8', 'replace')
+    else:
+        line = sys.stdin.readline()
+    if not line:
+        break
+    # translate unicode open/close quotes to ascii ones
+    line = line.replace(unicode_quote_open, "`")
+    line = line.replace(unicode_quote_close, "'")
+    m = implicit_pattern.match(line)
+    if m:
+        last_implicit_filename = m.group(1)
+        last_implicit_linenum = int(m.group(2))
+        last_implicit_func = m.group(3)
+    else:
+        m = pointer_pattern.match(line)
+        if m:
+            pointer_filename = m.group(1)
+            pointer_linenum = int(m.group(2))
+            if (last_implicit_filename == pointer_filename
+                and last_implicit_linenum == pointer_linenum):
+                write("Function `%s' implicitly converted to pointer at " \
+                      "%s:%d\n" % (last_implicit_func,
+                                   last_implicit_filename,
+                                   last_implicit_linenum))

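In use, the script sits at the end of a compile pipeline, e.g.
'gcc -c foo.c 2>&1 | check-implicit-pointer-usage.py'. For illustration
(file and function names fabricated, in the old-style message format the
regexes above target), the warning pair

    foo.c:10: warning: implicit declaration of function `malloc'
    foo.c:10: warning: assignment makes pointer from integer without a cast

matches implicit_pattern and then pointer_pattern on the same file:line
pair, producing:

    Function `malloc' implicitly converted to pointer at foo.c:10
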
diff --git a/portage_with_autodep/bin/clean_locks b/portage_with_autodep/bin/clean_locks
new file mode 100755
index 0000000..8c4299c
--- /dev/null
+++ b/portage_with_autodep/bin/clean_locks
@@ -0,0 +1,47 @@
+#!/usr/bin/python -O
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys, errno
+try:
+	import portage
+except ImportError:
+	from os import path as osp
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+	import portage
+
+from portage import os
+
+if not sys.argv[1:] or "--help" in sys.argv or "-h" in sys.argv:
+	import portage
+	print()
+	print("You must specify directories with hardlink-locks to clean.")
+	print("You may optionally specify --force, which will remove all")
+	print("of the locks, even if we can't establish if they are in use.")
+	print("Please attempt cleaning without force first.")
+	print()
+	print("%s %s/.locks" % (sys.argv[0], portage.settings["DISTDIR"]))
+	print("%s --force %s/.locks" % (sys.argv[0], portage.settings["DISTDIR"]))
+	print()
+	sys.exit(1)
+	
+force = False
+if "--force" in sys.argv[1:]:
+	force=True
+	
+for x in sys.argv[1:]:
+	if x == "--force":
+		continue
+	try:
+		for y in portage.locks.hardlock_cleanup(x, remove_all_locks=force):
+			print(y)
+		print()
+		
+	except OSError as e:
+		if e.errno in (errno.ENOENT, errno.ENOTDIR):
+			print("!!! %s is not a directory or does not exist" % x)
+		else:
+			raise
+		sys.exit(e.errno)

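clean_locks is a thin wrapper around a single portage API call; the
equivalent from Python (the DISTDIR path here is illustrative) is:

    import portage

    # hardlock_cleanup() returns printable status messages;
    # remove_all_locks=True corresponds to --force.
    for msg in portage.locks.hardlock_cleanup(
            "/usr/portage/distfiles/.locks", remove_all_locks=False):
        print(msg)
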
diff --git a/portage_with_autodep/bin/dispatch-conf b/portage_with_autodep/bin/dispatch-conf
new file mode 100755
index 0000000..1e21a52
--- /dev/null
+++ b/portage_with_autodep/bin/dispatch-conf
@@ -0,0 +1,434 @@
+#!/usr/bin/python -O
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+#
+# dispatch-conf -- Integrate modified configs, post-emerge
+#
+#  Jeremy Wohl (http://igmus.org)
+#
+# TODO
+#  dialog menus
+#
+
+from __future__ import print_function
+
+from stat import ST_GID, ST_MODE, ST_UID
+from random import random
+import atexit, re, shutil, stat, sys
+
+try:
+    import portage
+except ImportError:
+    from os import path as osp
+    sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+    import portage
+
+from portage import os
+from portage import dispatch_conf
+from portage import _unicode_decode
+from portage.dispatch_conf import diffstatusoutput_len
+from portage.process import find_binary
+
+FIND_EXTANT_CONFIGS  = "find '%s' %s -name '._cfg????_%s' ! -name '.*~' ! -iname '.*.bak' -print"
+DIFF_CONTENTS        = "diff -Nu '%s' '%s'"
+DIFF_CVS_INTERP      = "diff -Nu '%s' '%s' | grep '^[+-][^+-]' | grep -v '# .Header:.*'"
+DIFF_WSCOMMENTS      = "diff -Nu '%s' '%s' | grep '^[+-][^+-]' | grep -v '^[-+]#' | grep -v '^[-+][[:space:]]*$'"
+
+# We need a secure scratch dir; python emits noisy warnings on use of tempnam
+oldmask = os.umask(0o077)
+SCRATCH_DIR = None
+while SCRATCH_DIR is None:
+    try:
+        mydir = "/tmp/dispatch-conf."
+        for x in range(0,8):
+            if int(random() * 3) == 0:
+                mydir += chr(int(65+random()*26.0))
+            elif int(random() * 2) == 0:
+                mydir += chr(int(97+random()*26.0))
+            else:
+                mydir += chr(int(48+random()*10.0))
+        if os.path.exists(mydir):
+            continue
+        os.mkdir(mydir)
+        SCRATCH_DIR = mydir
+    except OSError as e:
+        if e.errno != 17:
+            raise
+os.umask(oldmask)
+
+# Ensure the scratch dir is deleted
+def cleanup(mydir=SCRATCH_DIR):
+    shutil.rmtree(mydir)
+atexit.register(cleanup)
+
+MANDATORY_OPTS  = [ 'archive-dir', 'diff', 'replace-cvs', 'replace-wscomments', 'merge' ]
+
+class dispatch:
+    options = {}
+
+    def grind (self, config_paths):
+        confs = []
+        count = 0
+
+        config_root = '/'
+        self.options = portage.dispatch_conf.read_config(MANDATORY_OPTS)
+
+        if "log-file" in self.options:
+            if os.path.isfile(self.options["log-file"]):
+                shutil.copy(self.options["log-file"], self.options["log-file"] + '.old')
+            if os.path.isfile(self.options["log-file"]) \
+               or not os.path.exists(self.options["log-file"]):
+                open(self.options["log-file"], 'w').close() # Truncate it
+                os.chmod(self.options["log-file"], 0o600)
+        else:
+            self.options["log-file"] = "/dev/null"
+
+        #
+        # Build list of extant configs
+        #
+
+        for path in config_paths:
+            path = portage.normalize_path(path)
+            try:
+                mymode = os.stat(path).st_mode
+            except OSError:
+                continue
+            basename = "*"
+            find_opts = "-name '.*' -type d -prune -o"
+            if not stat.S_ISDIR(mymode):
+                path, basename = os.path.split(path)
+                find_opts = "-maxdepth 1"
+
+            confs += self.massage(os.popen(FIND_EXTANT_CONFIGS % (path, find_opts, basename)).readlines())
+
+        if self.options['use-rcs'] == 'yes':
+            for rcs_util in ("rcs", "ci", "co", "rcsmerge"):
+                if not find_binary(rcs_util):
+                    print("dispatch-conf: Error finding all RCS utils and " + \
+                        "use-rcs=yes in config; fatal", file=sys.stderr)
+                    return False
+
+
+        # config file freezing support
+        frozen_files = set(self.options.get("frozen-files", "").split())
+        auto_zapped = []
+        protect_obj = portage.util.ConfigProtect(
+            config_root, config_paths,
+            portage.util.shlex_split(
+            portage.settings.get('CONFIG_PROTECT_MASK', '')))
+
+        #
+        # Remove new configs identical to current
+        #                  and
+        # Auto-replace configs a) whose differences are simply CVS interpolations,
+        #                  or  b) whose differences are simply ws or comments,
+        #                  or  c) in paths now unprotected by CONFIG_PROTECT_MASK,
+        #
+
+        def f (conf):
+            mrgconf = re.sub(r'\._cfg', '._mrg', conf['new'])
+            archive = os.path.join(self.options['archive-dir'], conf['current'].lstrip('/'))
+            if self.options['use-rcs'] == 'yes':
+                mrgfail = portage.dispatch_conf.rcs_archive(archive, conf['current'], conf['new'], mrgconf)
+            else:
+                mrgfail = portage.dispatch_conf.file_archive(archive, conf['current'], conf['new'], mrgconf)
+            if os.path.exists(archive + '.dist'):
+                unmodified = diffstatusoutput_len(DIFF_CONTENTS % (conf['current'], archive + '.dist'))[1] == 0
+            else:
+                unmodified = 0
+            if os.path.exists(mrgconf):
+                if mrgfail or diffstatusoutput_len(DIFF_CONTENTS % (conf['new'], mrgconf))[1] == 0:
+                    os.unlink(mrgconf)
+                    newconf = conf['new']
+                else:
+                    newconf = mrgconf
+            else:
+                newconf = conf['new']
+
+            if newconf == mrgconf and \
+                self.options.get('ignore-previously-merged') != 'yes' and \
+                os.path.exists(archive+'.dist') and \
+                diffstatusoutput_len(DIFF_CONTENTS % (archive+'.dist', conf['new']))[1] == 0:
+                # The current update is identical to the archived .dist
+                # version that has previously been merged.
+                os.unlink(mrgconf)
+                newconf = conf['new']
+
+            mystatus, myoutput_len = diffstatusoutput_len(
+                DIFF_CONTENTS  % (conf ['current'], newconf))
+            same_file = 0 == myoutput_len
+            if mystatus >> 8 == 2:
+                # Binary files differ
+                same_cvs = False
+                same_wsc = False
+            else:
+                same_cvs = 0 == diffstatusoutput_len(
+                    DIFF_CVS_INTERP % (conf ['current'], newconf))[1]
+                same_wsc = 0 == diffstatusoutput_len(
+                    DIFF_WSCOMMENTS % (conf ['current'], newconf))[1]
+
+            # Do options permit?
+            same_cvs = same_cvs and self.options['replace-cvs'] == 'yes'
+            same_wsc = same_wsc and self.options['replace-wscomments'] == 'yes'
+            unmodified = unmodified and self.options['replace-unmodified'] == 'yes'
+
+            if same_file:
+                os.unlink (conf ['new'])
+                self.post_process(conf['current'])
+                if os.path.exists(mrgconf):
+                    os.unlink(mrgconf)
+                return False
+            elif conf['current'] in frozen_files:
+                """Frozen files are automatically zapped. The new config has
+                already been archived with a .new suffix.  When zapped, it is
+                left with the .new suffix (post_process is skipped), since it
+                hasn't been merged into the current config."""
+                auto_zapped.append(conf['current'])
+                os.unlink(conf['new'])
+                try:
+                    os.unlink(mrgconf)
+                except OSError:
+                    pass
+                return False
+            elif unmodified or same_cvs or same_wsc or \
+                not protect_obj.isprotected(conf['current']):
+                self.replace(newconf, conf['current'])
+                self.post_process(conf['current'])
+                if newconf == mrgconf:
+                    os.unlink(conf['new'])
+                elif os.path.exists(mrgconf):
+                    os.unlink(mrgconf)
+                return False
+            else:
+                return True
+
+        confs = [x for x in confs if f(x)]
+
+        #
+        # Interactively process remaining
+        #
+
+        valid_input = "qhtnmlezu"
+
+        for conf in confs:
+            count = count + 1
+
+            newconf = conf['new']
+            mrgconf = re.sub(r'\._cfg', '._mrg', newconf)
+            if os.path.exists(mrgconf):
+                newconf = mrgconf
+            show_new_diff = 0
+
+            while 1:
+                clear_screen()
+                if show_new_diff:
+                    cmd = self.options['diff'] % (conf['new'], mrgconf)
+                    spawn_shell(cmd)
+                    show_new_diff = 0
+                else:
+                    cmd = self.options['diff'] % (conf['current'], newconf)
+                    spawn_shell(cmd)
+
+                print()
+                print('>> (%i of %i) -- %s' % (count, len(confs), conf ['current']))
+                print('>> q quit, h help, n next, e edit-new, z zap-new, u use-new\n   m merge, t toggle-merge, l look-merge: ', end=' ')
+
+                # In some cases getch() will return some spurious characters
+                # that do not represent valid input. If we don't validate the
+                # input then the spurious characters can cause us to jump
+                # back into the above "diff" command immediately after the user
+                # has exited it (which can be quite confusing and gives an
+                # "out of control" feeling).
+                while True:
+                    c = getch()
+                    if c in valid_input:
+                        sys.stdout.write('\n')
+                        sys.stdout.flush()
+                        break
+
+                if c == 'q':
+                    sys.exit (0)
+                if c == 'h':
+                    self.do_help ()
+                    continue
+                elif c == 't':
+                    if newconf == mrgconf:
+                        newconf = conf['new']
+                    elif os.path.exists(mrgconf):
+                        newconf = mrgconf
+                    continue
+                elif c == 'n':
+                    break
+                elif c == 'm':
+                    merged = SCRATCH_DIR+"/"+os.path.basename(conf['current'])
+                    print()
+                    ret = os.system (self.options['merge'] % (merged, conf ['current'], newconf))
+                    ret = os.WEXITSTATUS(ret)
+                    if ret < 2:
+                        ret = 0
+                    if ret:
+                        print("Failure running 'merge' command")
+                        continue
+                    shutil.copyfile(merged, mrgconf)
+                    os.remove(merged)
+                    mystat = os.lstat(conf['new'])
+                    os.chmod(mrgconf, mystat[ST_MODE])
+                    os.chown(mrgconf, mystat[ST_UID], mystat[ST_GID])
+                    newconf = mrgconf
+                    continue
+                elif c == 'l':
+                    show_new_diff = 1
+                    continue
+                elif c == 'e':
+                    if 'EDITOR' not in os.environ:
+                        os.environ['EDITOR']='nano'
+                    os.system(os.environ['EDITOR'] + ' ' + newconf)
+                    continue
+                elif c == 'z':
+                    os.unlink(conf['new'])
+                    if os.path.exists(mrgconf):
+                        os.unlink(mrgconf)
+                    break
+                elif c == 'u':
+                    self.replace(newconf, conf ['current'])
+                    self.post_process(conf['current'])
+                    if newconf == mrgconf:
+                        os.unlink(conf['new'])
+                    elif os.path.exists(mrgconf):
+                        os.unlink(mrgconf)
+                    break
+                else:
+                    raise AssertionError("Invalid Input: %s" % c)
+
+        if auto_zapped:
+            print()
+            print(" One or more updates are frozen and have been automatically zapped:")
+            print()
+            for frozen in auto_zapped:
+                print("  * '%s'" % frozen)
+            print()
+
+    def replace (self, newconf, curconf):
+        """Replace current config with the new/merged version.  Also logs
+        the diff of what changed into the configured log file."""
+        os.system((DIFF_CONTENTS % (curconf, newconf)) + '>>' + self.options["log-file"])
+        try:
+            os.rename(newconf, curconf)
+        except (IOError, os.error) as why:
+            print('dispatch-conf: Error renaming %s to %s: %s; fatal' % \
+                  (newconf, curconf, str(why)), file=sys.stderr)
+
+
+    def post_process(self, curconf):
+        archive = os.path.join(self.options['archive-dir'], curconf.lstrip('/'))
+        if self.options['use-rcs'] == 'yes':
+            portage.dispatch_conf.rcs_archive_post_process(archive)
+        else:
+            portage.dispatch_conf.file_archive_post_process(archive)
+
+
+    def massage (self, newconfigs):
+        """Sort, rstrip, remove old versions, break into triad hash.
+
+        Triad is dictionary of current (/etc/make.conf), new (/etc/._cfg0003_make.conf)
+        and dir (/etc).
+
+        We keep ._cfg0002_conf over ._cfg0001_conf and ._cfg0000_conf.
+        """
+        h = {}
+        configs = []
+        newconfigs.sort ()
+
+        for nconf in newconfigs:
+            nconf = nconf.rstrip ()
+            conf  = re.sub (r'\._cfg\d+_', '', nconf)
+            dirname   = os.path.dirname(nconf)
+            conf_map  = {
+                'current' : conf,
+                'dir'     : dirname,
+                'new'     : nconf,
+            }
+
+            if conf in h:
+                mrgconf = re.sub(r'\._cfg', '._mrg', h[conf]['new'])
+                if os.path.exists(mrgconf):
+                    os.unlink(mrgconf)
+                os.unlink(h[conf]['new'])
+                h[conf].update(conf_map)
+            else:
+                h[conf] = conf_map
+                configs.append(conf_map)
+
+        return configs
+
+
+    def do_help (self):
+        print(); print()
+
+        print('  u -- update current config with new config and continue')
+        print('  z -- zap (delete) new config and continue')
+        print('  n -- skip to next config, leave all intact')
+        print('  e -- edit new config')
+        print('  m -- interactively merge current and new configs')
+        print('  l -- look at diff between pre-merged and merged configs')
+        print('  t -- toggle new config between merged and pre-merged state')
+        print('  h -- this screen')
+        print('  q -- quit')
+
+        print(); print('press any key to return to diff...', end=' ')
+
+        getch ()
+
+
+def getch ():
+    # from ASPN - Danny Yoo
+    #
+    import sys, tty, termios
+
+    fd = sys.stdin.fileno()
+    old_settings = termios.tcgetattr(fd)
+    try:
+        tty.setraw(sys.stdin.fileno())
+        ch = sys.stdin.read(1)
+    finally:
+        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+    return ch
+
+def clear_screen():
+    try:
+        import curses
+        try:
+            curses.setupterm()
+            sys.stdout.write(_unicode_decode(curses.tigetstr("clear")))
+            sys.stdout.flush()
+            return
+        except curses.error:
+            pass
+    except ImportError:
+        pass
+    os.system("clear 2>/dev/null")
+
+from portage.process import find_binary, spawn
+shell = os.environ.get("SHELL")
+if not shell or not os.access(shell, os.EX_OK):
+    shell = find_binary("sh")
+
+def spawn_shell(cmd):
+    if shell:
+        spawn([shell, "-c", cmd], env=os.environ,
+            fd_pipes = {  0 : sys.stdin.fileno(),
+                          1 : sys.stdout.fileno(),
+                          2 : sys.stderr.fileno()})
+    else:
+        os.system(cmd)
+
+# run
+d = dispatch ()
+
+if len(sys.argv) > 1:
+    # for testing
+    d.grind(sys.argv[1:])
+else:
+    d.grind(portage.util.shlex_split(
+        portage.settings.get('CONFIG_PROTECT', '')))

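Most of dispatch-conf's bookkeeping hangs off the naming scheme that
massage() documents above; a minimal illustration (paths taken from the
docstring's own example):

    import re

    nconf = '/etc/._cfg0003_make.conf'           # pending update
    conf = re.sub(r'\._cfg\d+_', '', nconf)      # live config: /etc/make.conf
    mrgconf = re.sub(r'\._cfg', '._mrg', nconf)  # merged copy: /etc/._mrg0003_make.conf
    print(conf, mrgconf)
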
diff --git a/portage_with_autodep/bin/dohtml.py b/portage_with_autodep/bin/dohtml.py
new file mode 100755
index 0000000..00258ec
--- /dev/null
+++ b/portage_with_autodep/bin/dohtml.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+#
+# Typical usage:
+# dohtml -r docs/*
+#  - put all files and directories in docs into /usr/share/doc/${PF}/html
+# dohtml foo.html
+#  - put foo.html into /usr/share/doc/${PF}/html
+#
+#
+# Detailed usage:
+# dohtml <list-of-files> 
+#  - will install the files in the list of files (space-separated list) into 
+#    /usr/share/doc/${PF}/html, provided the file ends in .htm, .html, .css,
+#      .js, .gif, .jpeg, .jpg, or .png.
+# dohtml -r <list-of-files-and-directories>
+#  - will do as 'dohtml', but recurse into all directories, as long as the 
+#    directory name is not CVS
+# dohtml -A jpe,java [-r] <list-of-files[-and-directories]>
+#  - will do as 'dohtml' but add .jpe,.java (default filter list is
+#    added to your list)
+# dohtml -a png,gif,html,htm [-r] <list-of-files[-and-directories]>
+#  - will do as 'dohtml' but filter on .png,.gif,.html,.htm (default filter 
+#    list is ignored)
+# dohtml -x CVS,SCCS,RCS -r <list-of-files-and-directories>
+#  - will do as 'dohtml -r', but ignore directories named CVS, SCCS, RCS
+#
+
+from __future__ import print_function
+
+import os
+import sys
+
+def dodir(path):
+	os.spawnlp(os.P_WAIT, "install", "install", "-d", path)
+
+def dofile(src,dst):
+	os.spawnlp(os.P_WAIT, "install", "install", "-m0644", src, dst)
+
+def eqawarn(lines):
+	cmd = "source '%s/isolated-functions.sh' ; " % \
+		os.environ["PORTAGE_BIN_PATH"]
+	for line in lines:
+		cmd += "eqawarn \"%s\" ; " % line
+	os.spawnlp(os.P_WAIT, "bash", "bash", "-c", cmd)
+
+skipped_directories = []
+
+def install(basename, dirname, options, prefix=""):
+	fullpath = basename
+	if prefix:
+		fullpath = prefix + "/" + fullpath
+	if dirname:
+		fullpath = dirname + "/" + fullpath
+
+	if options.DOCDESTTREE:
+		destdir = options.D + "usr/share/doc/" + options.PF + "/" + options.DOCDESTTREE + "/" + options.doc_prefix + "/" + prefix
+	else:
+		destdir = options.D + "usr/share/doc/" + options.PF + "/html/" + options.doc_prefix + "/" + prefix
+
+	if not os.path.exists(fullpath):
+		sys.stderr.write("!!! dohtml: %s does not exist\n" % fullpath)
+		return False
+	elif os.path.isfile(fullpath):
+		ext = os.path.splitext(basename)[1]
+		if (len(ext) and ext[1:] in options.allowed_exts) or basename in options.allowed_files:
+			dodir(destdir)
+			dofile(fullpath, destdir + "/" + basename)
+	elif options.recurse and os.path.isdir(fullpath) and \
+	     basename not in options.disallowed_dirs:
+		for i in os.listdir(fullpath):
+			pfx = basename
+			if prefix: pfx = prefix + "/" + pfx
+			install(i, dirname, options, pfx)
+	elif not options.recurse and os.path.isdir(fullpath):
+		global skipped_directories
+		skipped_directories.append(fullpath)
+		return False
+	else:
+		return False
+	return True
+
+
+class OptionsClass:
+	def __init__(self):
+		self.PF = ""
+		self.D = ""
+		self.DOCDESTTREE = ""
+		
+		if "PF" in os.environ:
+			self.PF = os.environ["PF"]
+		if "D" in os.environ:
+			self.D = os.environ["D"]
+		if "_E_DOCDESTTREE_" in os.environ:
+			self.DOCDESTTREE = os.environ["_E_DOCDESTTREE_"]
+		
+		self.allowed_exts = [ 'htm', 'html', 'css', 'js',
+			'gif', 'jpeg', 'jpg', 'png' ]
+		self.allowed_files = []
+		self.disallowed_dirs = [ 'CVS' ]
+		self.recurse = False
+		self.verbose = False
+		self.doc_prefix = ""
+
+def print_help():
+	opts = OptionsClass()
+
+	print("dohtml [-a .foo,.bar] [-A .foo,.bar] [-f foo,bar] [-x foo,bar]")
+	print("       [-r] [-V] <file> [file ...]")
+	print()
+	print(" -a   Set the list of allowed to those that are specified.")
+	print("      Default:", ",".join(opts.allowed_exts))
+	print(" -A   Extend the list of allowed file types.")
+	print(" -f   Set list of allowed extensionless file names.")
+	print(" -x   Set directories to be excluded from recursion.")
+	print("      Default:", ",".join(opts.disallowed_dirs))
+	print(" -p   Set a document prefix for installed files (empty by default).")
+	print(" -r   Install files and directories recursively.")
+	print(" -V   Be verbose.")
+	print()
+
+def parse_args():
+	options = OptionsClass()
+	args = []
+	
+	x = 1
+	while x < len(sys.argv):
+		arg = sys.argv[x]
+		if arg in ["-h","-r","-V"]:
+			if arg == "-h":
+				print_help()
+				sys.exit(0)
+			elif arg == "-r":
+				options.recurse = True
+			elif arg == "-V":
+				options.verbose = True
+		elif sys.argv[x] in ["-A","-a","-f","-x","-p"]:
+			x += 1
+			if x == len(sys.argv):
+				print_help()
+				sys.exit(0)
+			elif arg == "-p":
+				options.doc_prefix = sys.argv[x]
+			else:
+				values = sys.argv[x].split(",")
+				if arg == "-A":
+					options.allowed_exts.extend(values)
+				elif arg == "-a":
+					options.allowed_exts = values
+				elif arg == "-f":
+					options.allowed_files = values
+				elif arg == "-x":
+					options.disallowed_dirs = values
+		else:
+			args.append(sys.argv[x])
+		x += 1
+	
+	return (options, args)
+
+def main():
+
+	(options, args) = parse_args()
+
+	if options.verbose:
+		print("Allowed extensions:", options.allowed_exts)
+		print("Document prefix : '" + options.doc_prefix	 + "'")
+		print("Allowed files :", options.allowed_files)
+
+	success = False
+	
+	for x in args:
+		basename = os.path.basename(x)
+		dirname  = os.path.dirname(x)
+		success |= install(basename, dirname, options)
+
+	global skipped_directories
+	for x in skipped_directories:
+		eqawarn(["QA Notice: dohtml on directory " + \
+			"'%s' without recursion option" % x])
+
+	if success:
+		retcode = 0
+	else:
+		retcode = 1
+
+	sys.exit(retcode)
+
+if __name__ == "__main__":
+	main()

diff --git a/portage_with_autodep/bin/ebuild b/portage_with_autodep/bin/ebuild
new file mode 100755
index 0000000..f8b6d79
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild
@@ -0,0 +1,346 @@
+#!/usr/bin/python -O
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import signal
+import sys
+# This block ensures that ^C interrupts are handled quietly.
+try:
+
+	def exithandler(signum,frame):
+		signal.signal(signal.SIGINT, signal.SIG_IGN)
+		signal.signal(signal.SIGTERM, signal.SIG_IGN)
+		sys.exit(128 + signum)
+
+	signal.signal(signal.SIGINT, exithandler)
+	signal.signal(signal.SIGTERM, exithandler)
+	# Prevent "[Errno 32] Broken pipe" exceptions when
+	# writing to a pipe.
+	signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+except KeyboardInterrupt:
+	sys.exit(128 + signal.SIGINT)
+
+def debug_signal(signum, frame):
+	import pdb
+	pdb.set_trace()
+signal.signal(signal.SIGUSR1, debug_signal)
+
+import imp
+import optparse
+import os
+
+description = "See the ebuild(1) man page for more info"
+usage = "Usage: ebuild <ebuild file> <command> [command] ..."
+parser = optparse.OptionParser(description=description, usage=usage)
+
+force_help = "When used together with the digest or manifest " + \
+	"command, this option forces regeneration of digests for all " + \
+	"distfiles associated with the current ebuild. Any distfiles " + \
+	"that do not already exist in ${DISTDIR} will be automatically fetched."
+
+parser.add_option("--force", help=force_help, action="store_true", dest="force")
+parser.add_option("--color", help="enable or disable color output",
+	type="choice", choices=("y", "n"))
+parser.add_option("--debug", help="show debug output",
+	action="store_true", dest="debug")
+parser.add_option("--ignore-default-opts",
+	action="store_true",
+	help="do not use the EBUILD_DEFAULT_OPTS environment variable")
+parser.add_option("--skip-manifest", help="skip all manifest checks",
+	action="store_true", dest="skip_manifest")
+
+opts, pargs = parser.parse_args(args=sys.argv[1:])
+
+if len(pargs) < 2:
+	parser.error("missing required args")
+
+if "merge" in pargs:
+	print("Disabling noauto in features... merge disables it. (qmerge doesn't)")
+	os.environ["FEATURES"] = os.environ.get("FEATURES", "") + " -noauto"
+
+os.environ["PORTAGE_CALLER"]="ebuild"
+try:
+	import portage
+except ImportError:
+	from os import path as osp
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+	import portage
+
+portage.dep._internal_warnings = True
+from portage import os
+from portage import _encodings
+from portage import _shell_quote
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.const import VDB_PATH
+from _emerge.Package import Package
+from _emerge.RootConfig import RootConfig
+
+if not opts.ignore_default_opts:
+	default_opts = portage.settings.get("EBUILD_DEFAULT_OPTS", "").split()
+	opts, pargs = parser.parse_args(default_opts + sys.argv[1:])
+
+debug = opts.debug
+force = opts.force
+
+import portage.util, portage.const
+
+# do this _after_ 'import portage' to prevent unnecessary tracing
+if debug and "python-trace" in portage.features:
+	import portage.debug
+	portage.debug.set_trace(True)
+
+if not opts.color == 'y' and \
+	(opts.color == 'n' or \
+	portage.settings.get('NOCOLOR') in ('yes', 'true') or \
+	portage.settings.get('TERM') == 'dumb' or \
+	not sys.stdout.isatty()):
+	portage.output.nocolor()
+	portage.settings.unlock()
+	portage.settings['NOCOLOR'] = 'true'
+	portage.settings.lock()
+
+ebuild = pargs.pop(0)
+
+pf = None
+if ebuild.endswith(".ebuild"):
+	pf = os.path.basename(ebuild)[:-7]
+
+if pf is None:
+	portage.writemsg("'%s' does not end with '.ebuild'.\n" % \
+		(ebuild,), noiselevel=-1)
+	sys.exit(1)
+
+if not os.path.isabs(ebuild):
+	mycwd = os.getcwd()
+	# Try to get the non-canonical path from the PWD environment variable, since
+	# the canonical path returned from os.getcwd() may be unusable in
+	# cases where the directory structure is built from symlinks.
+	pwd = os.environ.get('PWD', '')
+	if sys.hexversion < 0x3000000:
+		pwd = _unicode_decode(pwd, encoding=_encodings['content'],
+			errors='strict')
+	if pwd and pwd != mycwd and \
+		os.path.realpath(pwd) == mycwd:
+		mycwd = portage.normalize_path(pwd)
+	ebuild = os.path.join(mycwd, ebuild)
+ebuild = portage.normalize_path(ebuild)
+# portdbapi uses the canonical path for the base of the portage tree, but
+# subdirectories of the base can be built from symlinks (like crossdev does).
+ebuild_portdir = os.path.realpath(
+	os.path.dirname(os.path.dirname(os.path.dirname(ebuild))))
+ebuild = os.path.join(ebuild_portdir, *ebuild.split(os.path.sep)[-3:])
+vdb_path = os.path.realpath(os.path.join(portage.settings['EROOT'], VDB_PATH))
+
+# Make sure that portdb.findname() returns the correct ebuild.
+if ebuild_portdir != vdb_path and \
+	ebuild_portdir not in portage.portdb.porttrees:
+	if sys.hexversion >= 0x3000000:
+		os.environ["PORTDIR_OVERLAY"] = \
+			os.environ.get("PORTDIR_OVERLAY","") + \
+			" " + _shell_quote(ebuild_portdir)
+	else:
+		os.environ["PORTDIR_OVERLAY"] = \
+			os.environ.get("PORTDIR_OVERLAY","") + \
+			" " + _unicode_encode(_shell_quote(ebuild_portdir),
+			encoding=_encodings['content'], errors='strict')
+
+	print("Appending %s to PORTDIR_OVERLAY..." % ebuild_portdir)
+	imp.reload(portage)
+
+# Constrain eclass resolution to the master(s)
+# that are specified in layout.conf (using an
+# approach similar to repoman's).
+myrepo = None
+if ebuild_portdir != vdb_path:
+	myrepo = portage.portdb.getRepositoryName(ebuild_portdir)
+	repo_info = portage.portdb._repo_info[ebuild_portdir]
+	portage.portdb.porttrees = list(repo_info.eclass_db.porttrees)
+
+if not os.path.exists(ebuild):
+	print("'%s' does not exist." % ebuild)
+	sys.exit(1)
+
+ebuild_split = ebuild.split("/")
+cpv = "%s/%s" % (ebuild_split[-3], pf)
+
+if not portage.catpkgsplit(cpv):
+	print("!!! %s does not follow correct package syntax." % (cpv))
+	sys.exit(1)
+
+if ebuild.startswith(vdb_path):
+	mytree = "vartree"
+	pkg_type = "installed"
+
+	portage_ebuild = portage.db[portage.root][mytree].dbapi.findname(cpv, myrepo=myrepo)
+
+	if os.path.realpath(portage_ebuild) != ebuild:
+		print("!!! Portage seems to think that %s is at %s" % (cpv, portage_ebuild))
+		sys.exit(1)
+
+else:
+	mytree = "porttree"
+	pkg_type = "ebuild"
+
+	portage_ebuild = portage.portdb.findname(cpv, myrepo=myrepo)
+
+	if not portage_ebuild or portage_ebuild != ebuild:
+		print("!!! %s does not seem to have a valid PORTDIR structure." % ebuild)
+		sys.exit(1)
+
+if len(pargs) > 1 and "config" in pargs:
+	print("config must be called on it's own, not combined with any other phase")
+	sys.exit(1)
+
+def discard_digests(myebuild, mysettings, mydbapi):
+	"""Discard all distfiles digests for the given ebuild.  This is useful when
+	upstream has changed the identity of the distfiles and the user would
+	otherwise have to manually remove the Manifest and files/digest-* files in
+	order to ensure correct results."""
+	try:
+		portage._doebuild_manifest_exempt_depend += 1
+		pkgdir = os.path.dirname(myebuild)
+		fetchlist_dict = portage.FetchlistDict(pkgdir, mysettings, mydbapi)
+		from portage.manifest import Manifest
+		mf = Manifest(pkgdir, mysettings["DISTDIR"],
+			fetchlist_dict=fetchlist_dict, manifest1_compat=False)
+		mf.create(requiredDistfiles=None,
+			assumeDistHashesSometimes=True, assumeDistHashesAlways=True)
+		distfiles = fetchlist_dict[cpv]
+		for myfile in distfiles:
+			try:
+				del mf.fhashdict["DIST"][myfile]
+			except KeyError:
+				pass
+		mf.write()
+	finally:
+		portage._doebuild_manifest_exempt_depend -= 1
+
+portage.settings.validate() # generate warning messages if necessary
+
+build_dir_phases = set(["setup", "unpack", "prepare", "configure", "compile",
+	"test", "install", "package", "rpm", "merge", "qmerge"])
+
+# If the current metadata is invalid then force the ebuild to be
+# sourced again even if $T/environment already exists.
+ebuild_changed = False
+if mytree == "porttree" and build_dir_phases.intersection(pargs):
+	metadata, st, emtime = \
+		portage.portdb._pull_valid_cache(cpv, ebuild, ebuild_portdir)
+	if metadata is None:
+		ebuild_changed = True
+
+tmpsettings = portage.config(clone=portage.settings)
+tmpsettings["PORTAGE_VERBOSE"] = "1"
+tmpsettings.backup_changes("PORTAGE_VERBOSE")
+
+if opts.skip_manifest:
+	tmpsettings["EBUILD_SKIP_MANIFEST"] = "1"
+	tmpsettings.backup_changes("EBUILD_SKIP_MANIFEST")
+
+if opts.skip_manifest or \
+	"digest" in tmpsettings.features or \
+	"digest" in pargs or \
+	"manifest" in pargs:
+	portage._doebuild_manifest_exempt_depend += 1
+
+if "test" in pargs:
+	# This variable is a signal to config.regenerate() to
+	# indicate that the test phase should be enabled regardless
+	# of problems such as masked "test" USE flag.
+	tmpsettings["EBUILD_FORCE_TEST"] = "1"
+	tmpsettings.backup_changes("EBUILD_FORCE_TEST")
+	tmpsettings.features.add("test")
+
+tmpsettings.features.discard("fail-clean")
+
+try:
+	metadata = dict(zip(Package.metadata_keys,
+		portage.db[portage.settings["ROOT"]][mytree].dbapi.aux_get(
+		cpv, Package.metadata_keys, myrepo=myrepo)))
+except KeyError:
+	# aux_get failure, message should have been shown on stderr.
+	sys.exit(1)
+
+root_config = RootConfig(portage.settings,
+	portage.db[portage.settings["ROOT"]], None)
+
+pkg = Package(built=(pkg_type != "ebuild"), cpv=cpv,
+	installed=(pkg_type=="installed"),
+	metadata=metadata, root_config=root_config,
+	type_name=pkg_type)
+
+# Apply package.env and repo-level settings. This allows per-package
+# FEATURES and other variables (possibly PORTAGE_TMPDIR) to be
+# available as soon as possible.
+tmpsettings.setcpv(pkg)
+
+def stale_env_warning():
+	if "clean" not in pargs and \
+		"noauto" not in tmpsettings.features and \
+		build_dir_phases.intersection(pargs):
+		portage.doebuild_environment(ebuild, "setup", portage.root,
+			tmpsettings, debug, 1, portage.portdb)
+		env_filename = os.path.join(tmpsettings["T"], "environment")
+		if os.path.exists(env_filename):
+			msg = ("Existing ${T}/environment for '%s' will be sourced. " + \
+				"Run 'clean' to start with a fresh environment.") % \
+				(tmpsettings["PF"], )
+			from textwrap import wrap
+			msg = wrap(msg, 70)
+			for x in msg:
+				portage.writemsg(">>> %s\n" % x)
+
+			if ebuild_changed:
+				open(os.path.join(tmpsettings['PORTAGE_BUILDDIR'],
+					'.ebuild_changed'), 'w')
+
+from portage.exception import PermissionDenied, \
+	PortagePackageException, UnsupportedAPIException
+
+if 'digest' in tmpsettings.features and \
+	not set(["digest", "manifest"]).intersection(pargs):
+	pargs = ['digest'] + pargs
+
+checked_for_stale_env = False
+
+for arg in pargs:
+	try:
+		if not checked_for_stale_env and arg not in ("digest","manifest"):
+			# This has to go after manifest generation since otherwise
+			# aux_get() might fail due to invalid ebuild digests.
+			stale_env_warning()
+			checked_for_stale_env = True
+
+		if arg in ("digest", "manifest") and force:
+			discard_digests(ebuild, tmpsettings, portage.portdb)
+		a = portage.doebuild(ebuild, arg, portage.root, tmpsettings,
+			debug=debug, tree=mytree,
+			vartree=portage.db[portage.root]['vartree'])
+	except KeyboardInterrupt:
+		print("Interrupted.")
+		a = 1
+	except KeyError:
+		# aux_get error
+		a = 1
+	except UnsupportedAPIException as e:
+		from textwrap import wrap
+		msg = wrap(str(e), 70)
+		del e
+		for x in msg:
+			portage.writemsg("!!! %s\n" % x, noiselevel=-1)
+		a = 1
+	except PortagePackageException as e:
+		portage.writemsg("!!! %s\n" % (e,), noiselevel=-1)
+		a = 1
+	except PermissionDenied as e:
+		portage.writemsg("!!! Permission Denied: %s\n" % (e,), noiselevel=-1)
+		a = 1
+	if a is None:
+		print("Could not run the required binary?")
+		a = 127
+	if a:
+		sys.exit(a)

diff --git a/portage_with_autodep/bin/ebuild-helpers/4/dodoc b/portage_with_autodep/bin/ebuild-helpers/4/dodoc
new file mode 120000
index 0000000..35080ad
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/4/dodoc
@@ -0,0 +1 @@
+../doins
\ No newline at end of file

diff --git a/portage_with_autodep/bin/ebuild-helpers/4/dohard b/portage_with_autodep/bin/ebuild-helpers/4/dohard
new file mode 120000
index 0000000..1a6b57a
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/4/dohard
@@ -0,0 +1 @@
+../../banned-helper
\ No newline at end of file

diff --git a/portage_with_autodep/bin/ebuild-helpers/4/dosed b/portage_with_autodep/bin/ebuild-helpers/4/dosed
new file mode 120000
index 0000000..1a6b57a
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/4/dosed
@@ -0,0 +1 @@
+../../banned-helper
\ No newline at end of file

diff --git a/portage_with_autodep/bin/ebuild-helpers/4/prepalldocs b/portage_with_autodep/bin/ebuild-helpers/4/prepalldocs
new file mode 120000
index 0000000..1a6b57a
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/4/prepalldocs
@@ -0,0 +1 @@
+../../banned-helper
\ No newline at end of file

diff --git a/portage_with_autodep/bin/ebuild-helpers/die b/portage_with_autodep/bin/ebuild-helpers/die
new file mode 100755
index 0000000..9869141
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/die
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+die "$@"
+exit 1

diff --git a/portage_with_autodep/bin/ebuild-helpers/dobin b/portage_with_autodep/bin/ebuild-helpers/dobin
new file mode 100755
index 0000000..e385455
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dobin
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -lt 1 ]] ; then
+	helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+
+if [[ ! -d ${D}${DESTTREE}/bin ]] ; then
+	install -d "${D}${DESTTREE}/bin" || { helpers_die "${0##*/}: failed to install ${D}${DESTTREE}/bin"; exit 2; }
+fi
+
+ret=0
+
+for x in "$@" ; do
+	if [[ -e ${x} ]] ; then
+		install -m0755 -o ${PORTAGE_INST_UID:-0} -g ${PORTAGE_INST_GID:-0} "${x}" "${D}${DESTTREE}/bin"
+	else
+		echo "!!! ${0##*/}: $x does not exist" 1>&2
+		false
+	fi
+	((ret|=$?))
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit ${ret}

diff --git a/portage_with_autodep/bin/ebuild-helpers/doconfd b/portage_with_autodep/bin/ebuild-helpers/doconfd
new file mode 100755
index 0000000..e146000
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doconfd
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -lt 1 ]] ; then
+	source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+	helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+
+exec \
+env \
+INSDESTTREE="/etc/conf.d/" \
+doins "$@"

diff --git a/portage_with_autodep/bin/ebuild-helpers/dodir b/portage_with_autodep/bin/ebuild-helpers/dodir
new file mode 100755
index 0000000..f40bee7
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dodir
@@ -0,0 +1,10 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+install -d ${DIROPTIONS} "${@/#/${D}/}"
+ret=$?
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret

diff --git a/portage_with_autodep/bin/ebuild-helpers/dodoc b/portage_with_autodep/bin/ebuild-helpers/dodoc
new file mode 100755
index 0000000..65713db
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dodoc
@@ -0,0 +1,31 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [ $# -lt 1 ] ; then
+	helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+
+dir="${D}usr/share/doc/${PF}/${_E_DOCDESTTREE_}"
+if [ ! -d "${dir}" ] ; then
+	install -d "${dir}"
+fi
+
+ret=0
+for x in "$@" ; do
+	if [ -d "${x}" ] ; then
+		eqawarn "QA Notice: dodoc argument '${x}' is a directory"
+	elif [ -s "${x}" ] ; then
+		install -m0644 "${x}" "${dir}" || { ((ret|=1)); continue; }
+		ecompress --queue "${dir}/${x##*/}"
+	elif [ ! -e "${x}" ] ; then
+		echo "!!! ${0##*/}: $x does not exist" 1>&2
+		((ret|=1))
+	fi
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit ${ret}

diff --git a/portage_with_autodep/bin/ebuild-helpers/doenvd b/portage_with_autodep/bin/ebuild-helpers/doenvd
new file mode 100755
index 0000000..28ab5d2
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doenvd
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -lt 1 ]] ; then
+	source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+	helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+
+exec \
+env \
+INSDESTTREE="/etc/env.d/" \
+doins "$@"

diff --git a/portage_with_autodep/bin/ebuild-helpers/doexe b/portage_with_autodep/bin/ebuild-helpers/doexe
new file mode 100755
index 0000000..360800e
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doexe
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -lt 1 ]] ; then
+	helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+
+if [[ ! -d ${D}${_E_EXEDESTTREE_} ]] ; then
+	install -d "${D}${_E_EXEDESTTREE_}"
+fi
+
+TMP=$T/.doexe_tmp
+mkdir "$TMP"
+
+ret=0
+
+for x in "$@" ; do
+	if [ -L "${x}" ] ; then
+		cp "$x" "$TMP"
+		mysrc=$TMP/${x##*/}
+	elif [ -d "${x}" ] ; then
+		vecho "doexe: warning, skipping directory ${x}"
+		continue
+	else
+		mysrc="${x}"
+	fi
+	if [ -e "$mysrc" ] ; then
+		install $EXEOPTIONS "$mysrc" "$D$_E_EXEDESTTREE_"
+	else
+		echo "!!! ${0##*/}: $mysrc does not exist" 1>&2
+		false
+	fi
+	((ret|=$?))
+done
+
+rm -rf "$TMP"
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret

diff --git a/portage_with_autodep/bin/ebuild-helpers/dohard b/portage_with_autodep/bin/ebuild-helpers/dohard
new file mode 100755
index 0000000..2270487
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dohard
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -ne 2 ]] ; then
+	echo "$0: two arguments needed" 1>&2
+	exit 1
+fi
+
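+# Creates a hard link inside ${D}: e.g. 'dohard /usr/bin/foo /usr/bin/bar'
+# links ${D}usr/bin/bar to ${D}usr/bin/foo. Banned in EAPI 4
+# (see ebuild-helpers/4/dohard above).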
+destdir=${2%/*}
+[[ ! -d ${D}${destdir} ]] && dodir "${destdir}"
+
+exec ln -f "${D}$1" "${D}$2"

diff --git a/portage_with_autodep/bin/ebuild-helpers/dohtml b/portage_with_autodep/bin/ebuild-helpers/dohtml
new file mode 100755
index 0000000..630629a
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dohtml
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 2009-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
+PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
+PYTHONPATH=$PORTAGE_PYM_PATH${PYTHONPATH:+:}$PYTHONPATH \
+	"${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH/dohtml.py" "$@"
+
+ret=$?
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret

diff --git a/portage_with_autodep/bin/ebuild-helpers/doinfo b/portage_with_autodep/bin/ebuild-helpers/doinfo
new file mode 100755
index 0000000..54fb8da
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doinfo
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z $1 ]] ; then
+	helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+
+if [[ ! -d ${D}usr/share/info ]] ; then
+	install -d "${D}usr/share/info" || { helpers_die "${0##*/}: failed to install ${D}usr/share/info"; exit 1; }
+fi
+
+install -m0644 "$@" "${D}usr/share/info"
+rval=$?
+if [ $rval -ne 0 ] ; then
+	for x in "$@" ; do
+		[ -e "$x" ] || echo "!!! ${0##*/}: $x does not exist" 1>&2
+	done
+	helpers_die "${0##*/} failed"
+fi
+exit $rval

diff --git a/portage_with_autodep/bin/ebuild-helpers/doinitd b/portage_with_autodep/bin/ebuild-helpers/doinitd
new file mode 100755
index 0000000..b711e19
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doinitd
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -lt 1 ]] ; then
+	source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+	helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+
+exec \
+env \
+_E_EXEDESTTREE_="/etc/init.d/" \
+doexe "$@"

diff --git a/portage_with_autodep/bin/ebuild-helpers/doins b/portage_with_autodep/bin/ebuild-helpers/doins
new file mode 100755
index 0000000..7dec146
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doins
@@ -0,0 +1,155 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ ${0##*/} == dodoc ]] ; then
+	if [ $# -eq 0 ] ; then
+		# default_src_install may call dodoc with no arguments
+		# when DOC is defined but empty, so simply return
+		# successfully in this case.
+		exit 0
+	fi
+	export INSOPTIONS=-m0644
+	export INSDESTTREE=usr/share/doc/${PF}/${_E_DOCDESTTREE_}
+fi
+
+if [ $# -lt 1 ] ; then
+	helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+
+if [[ "$1" == "-r" ]] ; then
+	DOINSRECUR=y
+	shift
+else
+	DOINSRECUR=n
+fi
+
+if [[ ${INSDESTTREE#${D}} != "${INSDESTTREE}" ]]; then
+	vecho "-------------------------------------------------------" 1>&2
+	vecho "You should not use \${D} with helpers." 1>&2
+	vecho "  --> ${INSDESTTREE}" 1>&2
+	vecho "-------------------------------------------------------" 1>&2
+	helpers_die "${0##*/} used with \${D}"
+	exit 1
+fi
+
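+# EAPIs 4 and later install a symlink argument as a symlink;
+# EAPIs 0-3 dereference it and install the target's contents.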
+case "$EAPI" in
+	0|1|2|3|3_pre2)
+		PRESERVE_SYMLINKS=n
+		;;
+	*)
+		PRESERVE_SYMLINKS=y
+		;;
+esac
+
+export TMP=$T/.doins_tmp
+# Use separate directories to avoid potential name collisions.
+mkdir -p "$TMP"/{1,2}
+
+[[ ! -d ${D}${INSDESTTREE} ]] && dodir "${INSDESTTREE}"
+
+_doins() {
+	local mysrc="$1" mydir="$2" cleanup="" rval
+
+	if [ -L "$mysrc" ] ; then
+		# Our fake $DISTDIR contains symlinks that should
+		# not be reproduced inside $D. In order to ensure
+		# that things like dodoc "$DISTDIR"/foo.pdf work
+		# as expected, we dereference symlinked files that
+		# refer to absolute paths inside
+		# $PORTAGE_ACTUAL_DISTDIR/.
+		if [ $PRESERVE_SYMLINKS = y ] && \
+			! [[ $(readlink "$mysrc") == "$PORTAGE_ACTUAL_DISTDIR"/* ]] ; then
+			rm -rf "$D$INSDESTTREE/$mydir/${mysrc##*/}" || return $?
+			cp -P "$mysrc" "$D$INSDESTTREE/$mydir/${mysrc##*/}"
+			return $?
+		else
+			cp "$mysrc" "$TMP/2/${mysrc##*/}" || return $?
+			mysrc="$TMP/2/${mysrc##*/}"
+			cleanup=$mysrc
+		fi
+	fi
+
+	install ${INSOPTIONS} "${mysrc}" "${D}${INSDESTTREE}/${mydir}"
+	rval=$?
+	[[ -n ${cleanup} ]] && rm -f "${cleanup}"
+	[ $rval -ne 0 ] && echo "!!! ${0##*/}: $mysrc does not exist" 1>&2
+	return $rval
+}
+
+_xdoins() {
+	local -i failed=0
+	while read -r -d $'\0' x ; do
+		_doins "$x" "${x%/*}"
+		((failed|=$?))
+	done
+	return $failed
+}
+
+success=0
+failed=0
+
+for x in "$@" ; do
+	if [[ $PRESERVE_SYMLINKS = n && -d $x ]] || \
+		[[ $PRESERVE_SYMLINKS = y && -d $x && ! -L $x ]] ; then
+		if [ "${DOINSRECUR}" == "n" ] ; then
+			if [[ ${0##*/} == dodoc ]] ; then
+				echo "!!! ${0##*/}: $x is a directory" 1>&2
+				((failed|=1))
+			fi
+			continue
+		fi
+
+		while [ "$x" != "${x%/}" ] ; do
+			x=${x%/}
+		done
+		if [ "$x" = "${x%/*}" ] ; then
+			pushd "$PWD" >/dev/null
+		else
+			pushd "${x%/*}" >/dev/null
+		fi
+		x=${x##*/}
+		x_orig=$x
+		# Follow any symlinks recursively until we've got
+		# a normal directory for 'find' to traverse. The
+		# name of the symlink will be used for the name
+		# of the installed directory, as discussed in
+		# bug #239529.
+		while [ -L "$x" ] ; do
+			pushd "$(readlink "$x")" >/dev/null
+			x=${PWD##*/}
+			pushd "${PWD%/*}" >/dev/null
+		done
+		if [[ $x != $x_orig ]] ; then
+			mv "$x" "$TMP/1/$x_orig"
+			pushd "$TMP/1" >/dev/null
+		fi
+		find "$x_orig" -type d -exec dodir "${INSDESTTREE}/{}" \;
+		find "$x_orig" \( -type f -or -type l \) -print0 | _xdoins
+		if [[ ${PIPESTATUS[1]} -eq 0 ]] ; then
+			# NOTE: Even if only an empty directory is installed here, it
+			# still counts as success, since an empty directory given as
+			# an argument to doins -r should not trigger failure.
+			((success|=1))
+		else
+			((failed|=1))
+		fi
+		if [[ $x != $x_orig ]] ; then
+			popd >/dev/null
+			mv "$TMP/1/$x_orig" "$x"
+		fi
+		while popd >/dev/null 2>&1 ; do true ; done
+	else
+		_doins "${x}"
+		if [[ $? -eq 0 ]] ; then
+			((success|=1))
+		else
+			((failed|=1))
+		fi
+	fi
+done
+rm -rf "$TMP"
+[[ $failed -ne 0 || $success -eq 0 ]] && { helpers_die "${0##*/} failed"; exit 1; } || exit 0

diff --git a/portage_with_autodep/bin/ebuild-helpers/dolib b/portage_with_autodep/bin/ebuild-helpers/dolib
new file mode 100755
index 0000000..87ade42
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dolib
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+# Setup ABI cruft
+LIBDIR_VAR="LIBDIR_${ABI}"
+if [[ -n ${ABI} && -n ${!LIBDIR_VAR} ]] ; then
+	CONF_LIBDIR=${!LIBDIR_VAR}
+fi
+unset LIBDIR_VAR
+# we need this to default to lib so that things don't break
+CONF_LIBDIR=${CONF_LIBDIR:-lib}
+libdir="${D}${DESTTREE}/${CONF_LIBDIR}"
+
+if [[ $# -lt 1 ]] ; then
+	helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+if [[ ! -d ${libdir} ]] ; then
+	install -d "${libdir}" || { helpers_die "${0##*/}: failed to install ${libdir}"; exit 1; }
+fi
+
+ret=0
+
+for x in "$@" ; do
+	if [[ -e ${x} ]] ; then
+		if [[ ! -L ${x} ]] ; then
+			install ${LIBOPTIONS} "${x}" "${libdir}"
+		else
+			ln -s "$(readlink "${x}")" "${libdir}/${x##*/}"
+		fi
+	else
+		echo "!!! ${0##*/}: ${x} does not exist" 1>&2
+		false
+	fi
+	((ret|=$?))
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit ${ret}

diff --git a/portage_with_autodep/bin/ebuild-helpers/dolib.a b/portage_with_autodep/bin/ebuild-helpers/dolib.a
new file mode 100755
index 0000000..d2279dc
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dolib.a
@@ -0,0 +1,6 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+exec env LIBOPTIONS="-m0644" \
+	dolib "$@"

diff --git a/portage_with_autodep/bin/ebuild-helpers/dolib.so b/portage_with_autodep/bin/ebuild-helpers/dolib.so
new file mode 100755
index 0000000..4bdbfab
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dolib.so
@@ -0,0 +1,6 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+exec env LIBOPTIONS="-m0755" \
+	dolib "$@"

diff --git a/portage_with_autodep/bin/ebuild-helpers/doman b/portage_with_autodep/bin/ebuild-helpers/doman
new file mode 100755
index 0000000..4561bef
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doman
@@ -0,0 +1,64 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -lt 1 ]] ; then
+	helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+
+i18n=""
+
+ret=0
+
+for x in "$@" ; do
+	if [[ ${x:0:6} == "-i18n=" ]] ; then
+		i18n=${x:6}/
+		continue
+	fi
+	if [[ ${x:0:6} == ".keep_" ]] ; then
+		continue
+	fi
+
+	suffix=${x##*.}
+
+	# These will be automatically decompressed by ecompressdir.
+	if has ${suffix} Z gz bz2 ; then
+		realname=${x%.*}
+		suffix=${realname##*.}
+	fi
+
+	if has "${EAPI:-0}" 2 3 || [[ -z ${i18n} ]] \
+		&& ! has "${EAPI:-0}" 0 1 \
+		&& [[ $x =~ (.*)\.([a-z][a-z](_[A-Z][A-Z])?)\.(.*) ]]
+	then
+		name=${BASH_REMATCH[1]##*/}.${BASH_REMATCH[4]}
+		mandir=${BASH_REMATCH[2]}/man${suffix:0:1}
+	else
+		name=${x##*/}
+		mandir=${i18n#/}man${suffix:0:1}
+	fi
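+	# e.g. with EAPI 4 and no -i18n option, foo.de.1 is installed as
+	# de/man1/foo.1, while plain foo.1 goes to man1/foo.1.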
+
+	if [[ ${mandir} == *man[0-9n] ]] ; then
+		if [[ -s ${x} ]] ; then
+			if [[ ! -d ${D}/usr/share/man/${mandir} ]] ; then
+				install -d "${D}/usr/share/man/${mandir}"
+			fi
+
+			install -m0644 "${x}" "${D}/usr/share/man/${mandir}/${name}"
+			((ret|=$?))
+		elif [[ ! -e ${x} ]] ; then
+			echo "!!! ${0##*/}: $x does not exist" 1>&2
+			((ret|=1))
+		fi
+	else
+		vecho "doman: '${x}' is probably not a man page; skipping" 1>&2
+		((ret|=1))
+	fi
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit ${ret}

diff --git a/portage_with_autodep/bin/ebuild-helpers/domo b/portage_with_autodep/bin/ebuild-helpers/domo
new file mode 100755
index 0000000..4737f44
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/domo
@@ -0,0 +1,34 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+mynum=${#}
+if [ ${mynum} -lt 1 ] ; then
+	helpers_die "${0}: at least one argument needed"
+	exit 1
+fi
+if [ ! -d "${D}${DESTTREE}/share/locale" ] ; then
+	install -d "${D}${DESTTREE}/share/locale/"
+fi
+
+ret=0
+
+for x in "$@" ; do
+	if [ -e "${x}" ] ; then
+		mytiny="${x##*/}"
+		mydir="${D}${DESTTREE}/share/locale/${mytiny%.*}/LC_MESSAGES"
+		if [ ! -d "${mydir}" ] ; then
+			install -d "${mydir}"
+		fi
+		install -m0644 "${x}" "${mydir}/${MOPREFIX}.mo"
+	else
+		echo "!!! ${0##*/}: $x does not exist" 1>&2
+		false
+	fi
+	((ret|=$?))
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret

diff --git a/portage_with_autodep/bin/ebuild-helpers/dosbin b/portage_with_autodep/bin/ebuild-helpers/dosbin
new file mode 100755
index 0000000..87a3091
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dosbin
@@ -0,0 +1,29 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -lt 1 ]] ; then
+	helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+
+if [[ ! -d ${D}${DESTTREE}/sbin ]] ; then
+	install -d "${D}${DESTTREE}/sbin" || { helpers_die "${0##*/}: failed to install ${D}${DESTTREE}/sbin"; exit 2; }
+fi
+
+ret=0
+
+for x in "$@" ; do
+	if [[ -e ${x} ]] ; then
+		install -m0755 -o ${PORTAGE_INST_UID:-0} -g ${PORTAGE_INST_GID:-0} "${x}" "${D}${DESTTREE}/sbin"
+	else
+		echo "!!! ${0##*/}: ${x} does not exist" 1>&2
+		false
+	fi
+	((ret|=$?))
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit ${ret}

diff --git a/portage_with_autodep/bin/ebuild-helpers/dosed b/portage_with_autodep/bin/ebuild-helpers/dosed
new file mode 100755
index 0000000..afc949b
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dosed
@@ -0,0 +1,35 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+if [[ $# -lt 1 ]] ; then
+	echo "!!! ${0##*/}: at least one argument needed" >&2
+	exit 1
+fi
+
+ret=0
+file_found=0
+mysed="s:${D}::g"
+
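+# Arguments that name existing files under ${D} are edited in place
+# with the current expression; any other argument replaces the
+# expression, e.g. 'dosed "s:/usr/local:/usr:g" /usr/bin/foo-config'
+# rewrites ${D}usr/bin/foo-config.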
+for x in "$@" ; do
+	y=$D${x#/}
+	if [ -e "${y}" ] ; then
+		if [ -f "${y}" ] ; then
+			file_found=1
+			sed -i -e "${mysed}" "${y}"
+		else
+			echo "${y} is not a regular file!" >&2
+			false
+		fi
+		((ret|=$?))
+	else
+		mysed="${x}"
+	fi
+done
+
+if [ $file_found = 0 ] ; then
+	echo "!!! ${0##*/}: $y does not exist" 1>&2
+	((ret|=1))
+fi
+
+exit $ret

diff --git a/portage_with_autodep/bin/ebuild-helpers/dosym b/portage_with_autodep/bin/ebuild-helpers/dosym
new file mode 100755
index 0000000..500dad0
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/dosym
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $# -ne 2 ]] ; then
+	helpers_die "${0##*/}: two arguments needed"
+	exit 1
+fi
+
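+# e.g. 'dosym ../foo /usr/bin/bar' creates the link
+# ${D}usr/bin/bar -> ../foo; the first argument is used verbatim.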
+destdir=${2%/*}
+[[ ! -d ${D}${destdir} ]] && dodir "${destdir}"
+
+ln -snf "$1" "${D}$2"
+ret=$?
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret

diff --git a/portage_with_autodep/bin/ebuild-helpers/ecompress b/portage_with_autodep/bin/ebuild-helpers/ecompress
new file mode 100755
index 0000000..b61421b
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/ecompress
@@ -0,0 +1,161 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z $1 ]] ; then
+	helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+
+# setup compression stuff
+PORTAGE_COMPRESS=${PORTAGE_COMPRESS-bzip2}
+[[ -z ${PORTAGE_COMPRESS} ]] && exit 0
+
+if [[ ${PORTAGE_COMPRESS_FLAGS+set} != "set" ]] ; then
+	case ${PORTAGE_COMPRESS} in
+		bzip2|gzip)  PORTAGE_COMPRESS_FLAGS="-9";;
+	esac
+fi
+
+# decompress_args(suffix, binary)
+#	- suffix: the compression suffix to work with
+#	- binary: the program to execute that'll compress/decompress
+# new_args: global array used to return revised arguments
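+#
+# e.g. decompress_args ".gz" "gunzip -f" a.txt b.txt.gz runs
+# 'gunzip -f b.txt.gz' and, if it succeeds, leaves
+# new_args=(a.txt b.txt).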
+decompress_args() {
+	local suffix=$1 binary=$2
+	shift 2
+
+	# Initialize the global new_args array.
+	new_args=()
+	declare -a decompress_args=()
+	local x i=0 decompress_count=0
+	for x in "$@" ; do
+		if [[ ${x%$suffix} = $x ]] ; then
+			new_args[$i]=$x
+		else
+			new_args[$i]=${x%$suffix}
+			decompress_args[$decompress_count]=$x
+			((decompress_count++))
+		fi
+		((i++))
+	done
+
+	if [ $decompress_count -gt 0 ] ; then
+		${binary} "${decompress_args[@]}"
+		if [ $? -ne 0 ] ; then
+			# Apparently decompression failed for one or more files, so
+			# drop those since we don't want to compress them twice.
+			new_args=()
+			local x i=0
+			for x in "$@" ; do
+				if [[ ${x%$suffix} = $x ]] ; then
+					new_args[$i]=$x
+					((i++))
+				elif [[ -f ${x%$suffix} ]] ; then
+					new_args[$i]=${x%$suffix}
+					((i++))
+				else
+					# Apparently decompression failed for this one, so drop
+					# it since we don't want to compress it twice.
+					true
+				fi
+			done
+		fi
+	fi
+}
+
+case $1 in
+	--suffix)
+		[[ -n $2 ]] && vecho "${0##*/}: --suffix takes no additional arguments" 1>&2
+
+		if [[ ! -e ${T}/.ecompress.suffix ]] ; then
+			set -e
+			tmpdir="${T}"/.ecompress$$.${RANDOM}
+			mkdir "${tmpdir}"
+			cd "${tmpdir}"
+			# we have to fill the file enough so that there is something
+			# to compress as some programs will refuse to do compression
+			# if it cannot actually compress the file
+			echo {0..1000} > compressme
+			${PORTAGE_COMPRESS} ${PORTAGE_COMPRESS_FLAGS} compressme > /dev/null
+			# If PORTAGE_COMPRESS_FLAGS contains -k then we need to avoid
+			# having our glob match the uncompressed file here.
+			suffix=$(echo compressme.*)
+			[[ -z $suffix || "$suffix" == "compressme.*" ]] && \
+				suffix=$(echo compressme*)
+			suffix=${suffix#compressme}
+			cd /
+			rm -rf "${tmpdir}"
+			echo "${suffix}" > "${T}/.ecompress.suffix"
+		fi
+		cat "${T}/.ecompress.suffix"
+		;;
+	--bin)
+		[[ -n $2 ]] && vecho "${0##*/}: --bin takes no additional arguments" 1>&2
+
+		echo "${PORTAGE_COMPRESS} ${PORTAGE_COMPRESS_FLAGS}"
+		;;
+	--queue)
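+		# Create an empty '<name>.ecompress.file' marker next to each
+		# queued file; the actual compression is deferred to --dequeue.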
+		shift
+		ret=0
+		for x in "${@/%/.ecompress.file}" ; do
+			>> "$x"
+			((ret|=$?))
+		done
+		[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+		exit $ret
+		;;
+	--dequeue)
+		[[ -n $2 ]] && vecho "${0##*/}: --dequeue takes no additional arguments" 1>&2
+		find "${D}" -name '*.ecompress.file' -print0 \
+			| sed -e 's:\.ecompress\.file::g' \
+			| ${XARGS} -0 ecompress
+		find "${D}" -name '*.ecompress.file' -print0 | ${XARGS} -0 rm -f
+		;;
+	--*)
+		helpers_die "${0##*/}: unknown arguments '$*'"
+		exit 1
+		;;
+	*)
+		# Since dodoc calls ecompress on files that are already compressed,
+		# perform decompression here (similar to ecompressdir behavior).
+		decompress_args ".Z" "gunzip -f" "$@"
+		set -- "${new_args[@]}"
+		decompress_args ".gz" "gunzip -f" "$@"
+		set -- "${new_args[@]}"
+		decompress_args ".bz2" "bunzip2 -f" "$@"
+		set -- "${new_args[@]}"
+
+		mask_ext_re=""
+		set -f
+		for x in $PORTAGE_COMPRESS_EXCLUDE_SUFFIXES ; do
+			mask_ext_re+="|$x"
+		done
+		set +f
+		mask_ext_re="^(${mask_ext_re:1})\$"
+		declare -a filtered_args=()
+		i=0
+		for x in "$@" ; do
+			[[ ${x##*.} =~ $mask_ext_re ]] && continue
+			[[ -s ${x} ]] || continue
+			filtered_args[$i]=$x
+			((i++))
+		done
+		[ $i -eq 0 ] && exit 0
+		set -- "${filtered_args[@]}"
+
+		# If a compressed version of the file already exists, simply
+		# delete it so that the compressor doesn't whine (bzip2 will
+		# complain and skip, gzip will prompt for input)
+		suffix=$(ecompress --suffix)
+		[[ -n ${suffix} ]] && echo -n "${@/%/${suffix}$'\001'}" | \
+			tr '\001' '\000' | ${XARGS} -0 rm -f
+		# Finally, let's actually do some real work
+		"${PORTAGE_COMPRESS}" ${PORTAGE_COMPRESS_FLAGS} "$@"
+		ret=$?
+		[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+		exit $ret
+		;;
+esac

diff --git a/portage_with_autodep/bin/ebuild-helpers/ecompressdir b/portage_with_autodep/bin/ebuild-helpers/ecompressdir
new file mode 100755
index 0000000..7a95120
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/ecompressdir
@@ -0,0 +1,143 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z $1 ]] ; then
+	helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+
+case $1 in
+	--ignore)
+		shift
+		for skip in "$@" ; do
+			[[ -d ${D}${skip} || -f ${D}${skip} ]] \
+				&& >> "${D}${skip}.ecompress.skip"
+		done
+		exit 0
+		;;
+	--queue)
+		shift
+		set -- "${@/%/.ecompress.dir}"
+		set -- "${@/#/${D}}"
+		ret=0
+		for x in "$@" ; do
+			>> "$x"
+			((ret|=$?))
+		done
+		[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+		exit $ret
+		;;
+	--dequeue)
+		[[ -n $2 ]] && vecho "${0##*/}: --dequeue takes no additional arguments" 1>&2
+		find "${D}" -name '*.ecompress.dir' -print0 \
+			| sed -e 's:\.ecompress\.dir::g' -e "s:${D}:/:g" \
+			| ${XARGS} -0 ecompressdir
+		find "${D}" -name '*.ecompress.skip' -print0 | ${XARGS} -0 rm -f
+		exit 0
+		;;
+	--*)
+		helpers_die "${0##*/}: unknown arguments '$*'"
+		exit 1
+		;;
+esac
+
+# figure out the new suffix
+suffix=$(ecompress --suffix)
+
+# funk_up_dir(action, suffix, binary)
+#	- action: compress or decompress
+#	- suffix: the compression suffix to work with
+#	- binary: the program to execute that'll compress/decompress
+# The directory we act on is implied in the ${dir} variable
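+#
+# e.g. funk_up_dir "decompress" ".gz" "gunzip -f" gunzips every *.gz
+# under ${dir} and repoints any symlinks left dangling by the rename.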
+funk_up_dir() {
+	local act=$1 suffix=$2 binary=$3
+
+	local negate=""
+	[[ ${act} == "compress" ]] && negate="!"
+
+	# first we act on all the files
+	find "${dir}" -type f ${negate} -iname '*'${suffix} -print0 | ${XARGS} -0 ${binary}
+	((ret|=$?))
+
+	find "${dir}" -type l -print0 | \
+	while read -r -d $'\0' brokenlink ; do
+		[[ -e ${brokenlink} ]] && continue
+		olddest=$(readlink "${brokenlink}")
+		[[ ${act} == "compress" ]] \
+			&& newdest="${olddest}${suffix}" \
+			|| newdest="${olddest%${suffix}}"
+		rm -f "${brokenlink}"
+		[[ ${act} == "compress" ]] \
+			&& ln -snf "${newdest}" "${brokenlink}${suffix}" \
+			|| ln -snf "${newdest}" "${brokenlink%${suffix}}"
+		((ret|=$?))
+	done
+}
+
+# _relocate_skip_dirs(srctree, dsttree)
+# Move all files and directories we want to skip running compression
+# on from srctree to dsttree.
+_relocate_skip_dirs() {
+	local srctree="$1" dsttree="$2"
+
+	[[ -d ${srctree} ]] || return 0
+
+	find "${srctree}" -name '*.ecompress.skip' -print0 | \
+	while read -r -d $'\0' src ; do
+		src=${src%.ecompress.skip}
+		dst="${dsttree}${src#${srctree}}"
+		parent=${dst%/*}
+		mkdir -p "${parent}"
+		mv "${src}" "${dst}"
+		mv "${src}.ecompress.skip" "${dst}.ecompress.skip"
+	done
+}
+hide_skip_dirs()    { _relocate_skip_dirs "${D}" "${T}"/ecompress-skip/ ; }
+restore_skip_dirs() { _relocate_skip_dirs "${T}"/ecompress-skip/ "${D}" ; }
+
+ret=0
+
+rm -rf "${T}"/ecompress-skip
+
+for dir in "$@" ; do
+	dir=${dir#/}
+	dir="${D}${dir}"
+	if [[ ! -d ${dir} ]] ; then
+		vecho "${0##*/}: /${dir#${D}} does not exist!"
+		continue
+	fi
+	cd "${dir}"
+	actual_dir=${dir}
+	dir=. # use relative path to avoid 'Argument list too long' errors
+
+	# hide all the stuff we want to skip
+	hide_skip_dirs "${dir}"
+
+	# since we've been requested to compress the whole dir,
+	# delete any individual queued requests
+	rm -f "${actual_dir}.ecompress.dir"
+	find "${dir}" -type f -name '*.ecompress.file' -print0 | ${XARGS} -0 rm -f
+
+	# not uncommon for packages to compress doc files themselves
+	funk_up_dir "decompress" ".Z" "gunzip -f"
+	funk_up_dir "decompress" ".gz" "gunzip -f"
+	funk_up_dir "decompress" ".bz2" "bunzip2 -f"
+
+	# forcibly break all hard links as some compressors whine about it
+	find "${dir}" -type f -links +1 -exec env file="{}" sh -c \
+		'cp -p "${file}" "${file}.ecompress.break" ; mv -f "${file}.ecompress.break" "${file}"' \;
+
+	# now let's do our work
+	[[ -z ${suffix} ]] && continue
+	vecho "${0##*/}: $(ecompress --bin) /${actual_dir#${D}}"
+	funk_up_dir "compress" "${suffix}" "ecompress"
+
+	# finally, restore the skipped stuff
+	restore_skip_dirs
+done
+
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit ${ret}

diff --git a/portage_with_autodep/bin/ebuild-helpers/eerror b/portage_with_autodep/bin/ebuild-helpers/eerror
new file mode 120000
index 0000000..a403c75
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/eerror
@@ -0,0 +1 @@
+elog
\ No newline at end of file

diff --git a/portage_with_autodep/bin/ebuild-helpers/einfo b/portage_with_autodep/bin/ebuild-helpers/einfo
new file mode 120000
index 0000000..a403c75
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/einfo
@@ -0,0 +1 @@
+elog
\ No newline at end of file

diff --git a/portage_with_autodep/bin/ebuild-helpers/elog b/portage_with_autodep/bin/ebuild-helpers/elog
new file mode 100755
index 0000000..a2303af
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/elog
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+${0##*/} "$@"

diff --git a/portage_with_autodep/bin/ebuild-helpers/emake b/portage_with_autodep/bin/ebuild-helpers/emake
new file mode 100755
index 0000000..d842781
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/emake
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# emake: Supplies some default parameters to GNU make. At the moment the
+#        only parameter supplied is -jN, where N is a number of
+#        parallel processes that should be ideal for the running host
+#        (e.g. on a single-CPU machine, N=2).  The MAKEOPTS variable
+#        is set in make.globals. We don't source make.globals
+#        here because emake is only called from an ebuild.
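+#
+# e.g. with MAKEOPTS="-j4", 'emake install' runs
+# 'make -j4 install' (plus anything set in EXTRA_EMAKE).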
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ $PORTAGE_QUIET != 1 ]] ; then
+	(
+	for arg in ${MAKE:-make} $MAKEOPTS $EXTRA_EMAKE "$@" ; do
+		[[ ${arg} == *" "* ]] \
+			&& printf "'%s' " "${arg}" \
+			|| printf "%s " "${arg}"
+	done
+	printf "\n"
+	) >&2
+fi
+
+${MAKE:-make} ${MAKEOPTS} ${EXTRA_EMAKE} "$@"
+ret=$?
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret

diff --git a/portage_with_autodep/bin/ebuild-helpers/eqawarn b/portage_with_autodep/bin/ebuild-helpers/eqawarn
new file mode 120000
index 0000000..a403c75
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/eqawarn
@@ -0,0 +1 @@
+elog
\ No newline at end of file

diff --git a/portage_with_autodep/bin/ebuild-helpers/ewarn b/portage_with_autodep/bin/ebuild-helpers/ewarn
new file mode 120000
index 0000000..a403c75
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/ewarn
@@ -0,0 +1 @@
+elog
\ No newline at end of file

diff --git a/portage_with_autodep/bin/ebuild-helpers/fowners b/portage_with_autodep/bin/ebuild-helpers/fowners
new file mode 100755
index 0000000..4cc6bfa
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/fowners
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+# We can't blindly prefix every argument with ${D}, because chown
+# also accepts options; only arguments beginning with / are rewritten.
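+# e.g. 'fowners root:root /usr/bin/foo' runs
+# 'chown root:root "${D}/usr/bin/foo"'.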
+slash="/"
+chown "${@/#${slash}/${D}${slash}}"
+ret=$?
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret

diff --git a/portage_with_autodep/bin/ebuild-helpers/fperms b/portage_with_autodep/bin/ebuild-helpers/fperms
new file mode 100755
index 0000000..0260bdc
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/fperms
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+# We can't blindly prefix every argument with ${D}, because chmod
+# also accepts options; only arguments beginning with / are rewritten.
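+# e.g. 'fperms 0755 /usr/bin/foo' runs 'chmod 0755 "${D}/usr/bin/foo"'.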
+slash="/"
+chmod "${@/#${slash}/${D}${slash}}"
+ret=$?
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret

diff --git a/portage_with_autodep/bin/ebuild-helpers/newbin b/portage_with_autodep/bin/ebuild-helpers/newbin
new file mode 100755
index 0000000..30f19b0
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newbin
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+	helpers_die "${0##*/}: Need two arguments, old file and new file"
+	exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+	helpers_die "!!! ${0##*/}: $1 does not exist"
+	exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec dobin "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newconfd b/portage_with_autodep/bin/ebuild-helpers/newconfd
new file mode 100755
index 0000000..5752cfa
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newconfd
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+	helpers_die "${0##*/}: Need two arguments, old file and new file"
+	exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+	helpers_die "!!! ${0##*/}: $1 does not exist"
+	exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec doconfd "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newdoc b/portage_with_autodep/bin/ebuild-helpers/newdoc
new file mode 100755
index 0000000..f97ce0d
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newdoc
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+	helpers_die "${0##*/}: Need two arguments, old file and new file"
+	exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+	helpers_die "!!! ${0##*/}: $1 does not exist"
+	exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec dodoc "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newenvd b/portage_with_autodep/bin/ebuild-helpers/newenvd
new file mode 100755
index 0000000..83c556e
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newenvd
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+	helpers_die "${0##*/}: Need two arguments, old file and new file"
+	exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+	helpers_die "!!! ${0##*/}: $1 does not exist"
+	exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec doenvd "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newexe b/portage_with_autodep/bin/ebuild-helpers/newexe
new file mode 100755
index 0000000..92dbe9f
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newexe
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+	helpers_die "${0##*/}: Need two arguments, old file and new file"
+	exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+	helpers_die "!!! ${0##*/}: $1 does not exist"
+	exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec doexe "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newinitd b/portage_with_autodep/bin/ebuild-helpers/newinitd
new file mode 100755
index 0000000..fc6003a
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newinitd
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+	helpers_die "${0##*/}: Need two arguments, old file and new file"
+	exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+	helpers_die "!!! ${0##*/}: $1 does not exist"
+	exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec doinitd "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newins b/portage_with_autodep/bin/ebuild-helpers/newins
new file mode 100755
index 0000000..065477f
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newins
@@ -0,0 +1,35 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+	helpers_die "${0##*/}: Need two arguments, old file and new file"
+	exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+	helpers_die "!!! ${0##*/}: $1 does not exist"
+	exit 1
+fi
+
+rm -rf "${T}/${2}" || exit $?
+case "$EAPI" in
+	0|1|2|3|3_pre2)
+		cp "$1" "$T/$2" || exit $?
+		;;
+	*)
+		cp -P "$1" "$T/$2"
+		ret=$?
+		if [[ $ret -ne 0 ]] ; then
+			helpers_die "${0##*/} failed"
+			exit $ret
+		fi
+		;;
+esac
+doins "${T}/${2}"
+ret=$?
+rm -rf "${T}/${2}"
+[[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
+exit $ret

diff --git a/portage_with_autodep/bin/ebuild-helpers/newlib.a b/portage_with_autodep/bin/ebuild-helpers/newlib.a
new file mode 100755
index 0000000..eef4104
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newlib.a
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+	helpers_die "${0##*/}: Need two arguments, old file and new file"
+	exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+	helpers_die "!!! ${0##*/}: $1 does not exist"
+	exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec dolib.a "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newlib.so b/portage_with_autodep/bin/ebuild-helpers/newlib.so
new file mode 100755
index 0000000..c8696f3
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newlib.so
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+	helpers_die "${0##*/}: Need two arguments, old file and new file"
+	exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+	helpers_die "!!! ${0##*/}: $1 does not exist"
+	exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec dolib.so "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newman b/portage_with_autodep/bin/ebuild-helpers/newman
new file mode 100755
index 0000000..ffb8a2d
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newman
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+	helpers_die "${0##*/}: Need two arguments, old file and new file"
+	exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+	helpers_die "!!! ${0##*/}: $1 does not exist"
+	exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec doman "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newsbin b/portage_with_autodep/bin/ebuild-helpers/newsbin
new file mode 100755
index 0000000..82242aa
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newsbin
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z ${T} ]] || [[ -z ${2} ]] ; then
+	helpers_die "${0##*/}: Need two arguments, old file and new file"
+	exit 1
+fi
+
+if [ ! -e "$1" ] ; then
+	helpers_die "!!! ${0##*/}: $1 does not exist"
+	exit 1
+fi
+
+rm -rf "${T}/${2}" && \
+cp -f "${1}" "${T}/${2}" && \
+exec dosbin "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/portageq b/portage_with_autodep/bin/ebuild-helpers/portageq
new file mode 100755
index 0000000..ec30b66
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/portageq
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Copyright 2009-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
+PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
+PYTHONPATH=$PORTAGE_PYM_PATH${PYTHONPATH:+:}$PYTHONPATH \
+	exec "${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH/portageq" "$@"
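
This wrapper just re-execs the real Python portageq with PYTHONPATH pointing
at the bundled pym tree. With the default paths above, the call is roughly
equivalent to this sketch:

    PYTHONPATH=/usr/lib/portage/pym \
        /usr/bin/python /usr/lib/portage/bin/portageq envvar DISTDIR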

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepall b/portage_with_autodep/bin/ebuild-helpers/prepall
new file mode 100755
index 0000000..701ecba
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepall
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if has chflags $FEATURES ; then
+	# Save all the file flags for restoration at the end of prepall.
+	mtree -c -p "${D}" -k flags > "${T}/bsdflags.mtree"
+	# Remove all the file flags so that prepall can do anything necessary.
+	chflags -R noschg,nouchg,nosappnd,nouappnd "${D}"
+	chflags -R nosunlnk,nouunlnk "${D}" 2>/dev/null
+fi
+
+prepallman
+prepallinfo
+
+prepallstrip
+
+if has chflags $FEATURES ; then
+	# Restore all the file flags that were saved at the beginning of prepall.
+	mtree -U -e -p "${D}" -k flags < "${T}/bsdflags.mtree" &> /dev/null
+fi
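
The chflags handling above matters on BSD userlands, where flags such as
schg would otherwise make the prep* helpers fail. The same
save/clear/restore pattern in isolation (a sketch assuming BSD mtree and
chflags are available):

    mtree -c -p "${D}" -k flags > "${T}/flags.mtree"     # snapshot file flags
    chflags -R noschg,nouchg "${D}"                      # clear them for editing
    # ... modify files under ${D} ...
    mtree -U -e -p "${D}" -k flags < "${T}/flags.mtree"  # restore the snapshot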

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepalldocs b/portage_with_autodep/bin/ebuild-helpers/prepalldocs
new file mode 100755
index 0000000..fdc735d
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepalldocs
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -n $1 ]] ; then
+	vecho "${0##*/}: invalid usage; takes no arguments" 1>&2
+fi
+
+cd "${D}"
+[[ -d usr/share/doc ]] || exit 0
+
+ecompressdir --ignore /usr/share/doc/${PF}/html
+ecompressdir --queue /usr/share/doc

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepallinfo b/portage_with_autodep/bin/ebuild-helpers/prepallinfo
new file mode 100755
index 0000000..0d97803
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepallinfo
@@ -0,0 +1,9 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+[[ ! -d ${D}usr/share/info ]] && exit 0
+
+exec prepinfo

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepallman b/portage_with_autodep/bin/ebuild-helpers/prepallman
new file mode 100755
index 0000000..e50de6d
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepallman
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+# replaced by controllable compression in EAPI 4
+has "${EAPI}" 0 1 2 3 || exit 0
+
+ret=0
+
+find "${D}" -type d -name man > "${T}"/prepallman.filelist
+while read -r mandir ; do
+	mandir=${mandir#${D}}
+	prepman "${mandir%/man}"
+	((ret|=$?))
+done < "${T}"/prepallman.filelist
+
+exit ${ret}

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepallstrip b/portage_with_autodep/bin/ebuild-helpers/prepallstrip
new file mode 100755
index 0000000..ec12ce6
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepallstrip
@@ -0,0 +1,5 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+exec prepstrip "${D}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepinfo b/portage_with_autodep/bin/ebuild-helpers/prepinfo
new file mode 100755
index 0000000..691fd13
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepinfo
@@ -0,0 +1,34 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z $1 ]] ; then
+	infodir="/usr/share/info"
+else
+	if [[ -d ${D}$1/share/info ]] ; then
+		infodir="$1/share/info"
+	else
+		infodir="$1/info"
+	fi
+fi
+
+if [[ ! -d ${D}${infodir} ]] ; then
+	if [[ -n $1 ]] ; then
+		vecho "${0##*/}: '${infodir}' does not exist!"
+		exit 1
+	else
+		exit 0
+	fi
+fi
+
+find "${D}${infodir}" -type d -print0 | while read -r -d $'\0' x ; do
+	for f in "${x}"/.keepinfodir*; do
+		[[ -e ${f} ]] && continue 2
+	done
+	rm -f "${x}"/dir{,.info}{,.gz,.bz2}
+done
+
+has "${EAPI}" 0 1 2 3 || exit 0
+exec ecompressdir --queue "${infodir}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/preplib b/portage_with_autodep/bin/ebuild-helpers/preplib
new file mode 100755
index 0000000..76aabe6
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/preplib
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+eqawarn "QA Notice: Deprecated call to 'preplib'"
+
+LIBDIR_VAR="LIBDIR_${ABI}"
+if [ -n "${ABI}" -a -n "${!LIBDIR_VAR}" ]; then
+	CONF_LIBDIR="${!LIBDIR_VAR}"
+fi
+unset LIBDIR_VAR
+
+if [ -z "${CONF_LIBDIR}" ]; then
+	# we need this to default to lib so that things don't break
+	CONF_LIBDIR="lib"
+fi
+
+if [ -z "$1" ] ; then
+	z="${D}usr/${CONF_LIBDIR}"
+else
+	z="${D}$1/${CONF_LIBDIR}"
+fi
+
+if [ -d "${z}" ] ; then
+	ldconfig -n -N "${z}"
+fi

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepman b/portage_with_autodep/bin/ebuild-helpers/prepman
new file mode 100755
index 0000000..c9add8a
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepman
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if [[ -z $1 ]] ; then 
+	mandir="${D}usr/share/man"
+else
+	mandir="${D}$1/man"
+fi
+
+if [[ ! -d ${mandir} ]] ; then
+	eqawarn "QA Notice: prepman called with non-existent dir '${mandir#${D}}'"
+	exit 0
+fi
+
+# replaced by controllable compression in EAPI 4
+has "${EAPI}" 0 1 2 3 || exit 0
+
+shopt -s nullglob
+
+really_is_mandir=0
+
+# use some heuristics to test if this is a real mandir
+for subdir in "${mandir}"/man* "${mandir}"/*/man* ; do
+	[[ -d ${subdir} ]] && really_is_mandir=1 && break
+done
+
+[[ ${really_is_mandir} == 1 ]] && exec ecompressdir --queue "${mandir#${D}}"
+
+exit 0

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepstrip b/portage_with_autodep/bin/ebuild-helpers/prepstrip
new file mode 100755
index 0000000..d25259d
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/prepstrip
@@ -0,0 +1,193 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+banner=false
+SKIP_STRIP=false
+if has nostrip ${FEATURES} || \
+   has strip ${RESTRICT}
+then
+	SKIP_STRIP=true
+	banner=true
+	has installsources ${FEATURES} || exit 0
+fi
+
+STRIP=${STRIP:-${CHOST}-strip}
+type -P -- ${STRIP} > /dev/null || STRIP=strip
+OBJCOPY=${OBJCOPY:-${CHOST}-objcopy}
+type -P -- ${OBJCOPY} > /dev/null || OBJCOPY=objcopy
+
+# We'll leave out -R .note for now until we can check out the relevance
+# of the section when it has the ALLOC flag set on it ...
+export SAFE_STRIP_FLAGS="--strip-unneeded"
+export PORTAGE_STRIP_FLAGS=${PORTAGE_STRIP_FLAGS-${SAFE_STRIP_FLAGS} -R .comment}
+prepstrip_sources_dir=/usr/src/debug/${CATEGORY}/${PF}
+
+if has installsources ${FEATURES} && ! type -P debugedit >/dev/null ; then
+	ewarn "FEATURES=installsources is enabled but the debugedit binary could not"
+	ewarn "be found. This feature will not work unless debugedit is installed!"
+fi
+
+unset ${!INODE_*}
+
+inode_var_name() {
+	if [[ $USERLAND = BSD ]] ; then
+		stat -f 'INODE_%d_%i' "$1"
+	else
+		stat -c 'INODE_%d_%i' "$1"
+	fi
+}
+
+save_elf_sources() {
+	has installsources ${FEATURES} || return 0
+	has installsources ${RESTRICT} && return 0
+	type -P debugedit >/dev/null || return 0
+
+	local x=$1
+	local inode=$(inode_var_name "$x")
+	[[ -n ${!inode} ]] && return 0
+	debugedit -b "${WORKDIR}" -d "${prepstrip_sources_dir}" \
+		-l "${T}"/debug.sources "${x}"
+}
+
+save_elf_debug() {
+	has splitdebug ${FEATURES} || return 0
+
+	local x=$1
+	local y="${D}usr/lib/debug/${x:${#D}}.debug"
+
+	# don't save debug info twice
+	[[ ${x} == *".debug" ]] && return 0
+
+	# this will recompute the build-id, but for now that's ok
+	local buildid="$( type -P debugedit >/dev/null && debugedit -i "${x}" )"
+
+	mkdir -p $(dirname "${y}")
+
+	local inode=$(inode_var_name "$x")
+	if [[ -n ${!inode} ]] ; then
+		ln "${D}usr/lib/debug/${!inode:${#D}}.debug" "$y"
+	else
+		eval $inode=\$x
+		${OBJCOPY} --only-keep-debug "${x}" "${y}"
+		${OBJCOPY} --add-gnu-debuglink="${y}" "${x}"
+		[[ -g ${x} ]] && chmod go-r "${y}"
+		[[ -u ${x} ]] && chmod go-r "${y}"
+		chmod a-x,o-w "${y}"
+	fi
+
+	if [[ -n ${buildid} ]] ; then
+		local buildid_dir="${D}usr/lib/debug/.build-id/${buildid:0:2}"
+		local buildid_file="${buildid_dir}/${buildid:2}"
+		mkdir -p "${buildid_dir}"
+		ln -s "../../${x:${#D}}.debug" "${buildid_file}.debug"
+		ln -s "/${x:${#D}}" "${buildid_file}"
+	fi
+}
+
+# The absence of the .symtab section tells us that a binary has already
+# been stripped. We want to log pre-stripped binaries, as this may be a QA
+# violation: they prevent us from getting the splitdebug data.
+if ! has binchecks ${RESTRICT} && \
+	! has strip ${RESTRICT} ; then
+	log=$T/scanelf-already-stripped.log
+	qa_var="QA_PRESTRIPPED_${ARCH/-/_}"
+	[[ -n ${!qa_var} ]] && QA_PRESTRIPPED="${!qa_var}"
+	scanelf -yqRBF '#k%F' -k '!.symtab' "$@" | sed -e "s#^$D##" > "$log"
+	if [[ -n $QA_PRESTRIPPED && -s $log && \
+		${QA_STRICT_PRESTRIPPED-unset} = unset ]] ; then
+		shopts=$-
+		set -o noglob
+		for x in $QA_PRESTRIPPED ; do
+			sed -e "s#^${x#/}\$##" -i "$log"
+		done
+		set +o noglob
+		set -$shopts
+	fi
+	sed -e "/^\$/d" -e "s#^#/#" -i "$log"
+	if [[ -s $log ]] ; then
+		vecho -e "\n"
+		eqawarn "QA Notice: Pre-stripped files found:"
+		eqawarn "$(<"$log")"
+	else
+		rm -f "$log"
+	fi
+fi
+
+# Now we look for unstripped binaries.
+for x in \
+	$(scanelf -yqRBF '#k%F' -k '.symtab' "$@") \
+	$(find "$@" -type f -name '*.a')
+do
+	if ! ${banner} ; then
+		vecho "strip: ${STRIP} ${PORTAGE_STRIP_FLAGS}"
+		banner=true
+	fi
+
+	f=$(file "${x}") || continue
+	[[ -z ${f} ]] && continue
+
+	if ! ${SKIP_STRIP} ; then
+		# The noglob handling supports STRIP_MASK="/*/booga" and keeps
+		# the for loop from expanding the globs.
+		# The eval echo supports STRIP_MASK="/*/{booga,bar}" brace syntax.
+		set -o noglob
+		strip_this=true
+		for m in $(eval echo ${STRIP_MASK}) ; do
+			[[ /${x#${D}} == ${m} ]] && strip_this=false && break
+		done
+		set +o noglob
+	else
+		strip_this=false
+	fi
+
+	# Only split debug info for final linked objects or kernel
+	# modules, as debuginfo for intermediary files (think crt*.o
+	# from gcc/glibc) is useless and actually causes problems.
+	# Install sources for all ELF types, though, since that data
+	# is still useful.
+
+	if [[ ${f} == *"current ar archive"* ]] ; then
+		vecho "   ${x:${#D}}"
+		if ${strip_this} ; then
+			# hmm, can we split debug/sources for .a ?
+			${STRIP} -g "${x}"
+		fi
+	elif [[ ${f} == *"SB executable"* || ${f} == *"SB shared object"* ]] ; then
+		vecho "   ${x:${#D}}"
+		save_elf_sources "${x}"
+		if ${strip_this} ; then
+			save_elf_debug "${x}"
+			${STRIP} ${PORTAGE_STRIP_FLAGS} "${x}"
+		fi
+	elif [[ ${f} == *"SB relocatable"* ]] ; then
+		vecho "   ${x:${#D}}"
+		save_elf_sources "${x}"
+		if ${strip_this} ; then
+			[[ ${x} == *.ko ]] && save_elf_debug "${x}"
+			${STRIP} ${SAFE_STRIP_FLAGS} "${x}"
+		fi
+	fi
+done
+
+if [[ -s ${T}/debug.sources ]] && \
+        has installsources ${FEATURES} && \
+        ! has installsources ${RESTRICT} && \
+        type -P debugedit >/dev/null
+then
+	vecho "installsources: rsyncing source files"
+	[[ -d ${D}${prepstrip_sources_dir} ]] || mkdir -p "${D}${prepstrip_sources_dir}"
+	grep -zv '/<[^/>]*>$' "${T}"/debug.sources | \
+		(cd "${WORKDIR}"; LANG=C sort -z -u | \
+		rsync -tL0 --files-from=- "${WORKDIR}/" "${D}${prepstrip_sources_dir}/" )
+
+	# Preserve directory structure.
+	# Needed after running save_elf_sources.
+	# https://bugzilla.redhat.com/show_bug.cgi?id=444310
+	while read -r -d $'\0' emptydir
+	do
+		>> "$emptydir"/.keepdir
+	done < <(find "${D}${prepstrip_sources_dir}/" -type d -empty -print0)
+fi
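
The heart of save_elf_debug() is the standard binutils split-debug sequence.
In isolation, for a single binary (the file name is illustrative):

    ${OBJCOPY} --only-keep-debug mybin mybin.debug    # extract debug sections
    ${OBJCOPY} --add-gnu-debuglink=mybin.debug mybin  # record a link to them
    ${STRIP} --strip-unneeded mybin                   # then strip the original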

diff --git a/portage_with_autodep/bin/ebuild-helpers/sed b/portage_with_autodep/bin/ebuild-helpers/sed
new file mode 100755
index 0000000..b21e856
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/sed
@@ -0,0 +1,27 @@
+#!/bin/bash
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+scriptpath=${BASH_SOURCE[0]}
+scriptname=${scriptpath##*/}
+
+if [[ sed == ${scriptname} ]] && [[ -n ${ESED} ]]; then
+	exec ${ESED} "$@"
+elif type -P g${scriptname} > /dev/null ; then
+	exec g${scriptname} "$@"
+else
+	old_IFS="${IFS}"
+	IFS=":"
+ 
+	for path in $PATH; do
+		[[ ${path}/${scriptname} == ${scriptpath} ]] && continue
+		if [[ -x ${path}/${scriptname} ]]; then
+			exec ${path}/${scriptname} "$@"
+			exit 0
+		fi
+	done
+	
+	IFS="${old_IFS}"
+fi
+ 
+exit 1
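
The wrapper lets the environment redirect sed via ESED, falls back to gsed,
and finally re-execs the first sed in PATH that is not the wrapper itself.
For example (the path is illustrative):

    ESED=/opt/gnu/bin/sed sed -e 's/foo/bar/' somefile   # wrapper execs ${ESED}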

diff --git a/portage_with_autodep/bin/ebuild-ipc b/portage_with_autodep/bin/ebuild-ipc
new file mode 100755
index 0000000..43e4a02
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-ipc
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
+PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
+PYTHONPATH=$PORTAGE_PYM_PATH${PYTHONPATH:+:}$PYTHONPATH \
+	exec "${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH/ebuild-ipc.py" "$@"

diff --git a/portage_with_autodep/bin/ebuild-ipc.py b/portage_with_autodep/bin/ebuild-ipc.py
new file mode 100755
index 0000000..68ad985
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-ipc.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# This is a helper which ebuild processes can use
+# to communicate with portage's main python process.
+
+import errno
+import logging
+import os
+import pickle
+import select
+import signal
+import sys
+import time
+
+def debug_signal(signum, frame):
+	import pdb
+	pdb.set_trace()
+signal.signal(signal.SIGUSR1, debug_signal)
+
+# Avoid sandbox violations after python upgrade.
+pym_path = os.path.join(os.path.dirname(
+	os.path.dirname(os.path.realpath(__file__))), "pym")
+if os.environ.get("SANDBOX_ON") == "1":
+	sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
+	if pym_path not in sandbox_write:
+		sandbox_write.append(pym_path)
+		os.environ["SANDBOX_WRITE"] = \
+			":".join(filter(None, sandbox_write))
+
+import portage
+portage._disable_legacy_globals()
+
+class EbuildIpc(object):
+
+	# Timeout for each individual communication attempt (we retry
+	# as long as the daemon process appears to be alive).
+	_COMMUNICATE_RETRY_TIMEOUT_SECONDS = 15
+	_BUFSIZE = 4096
+
+	def __init__(self):
+		self.fifo_dir = os.environ['PORTAGE_BUILDDIR']
+		self.ipc_in_fifo = os.path.join(self.fifo_dir, '.ipc_in')
+		self.ipc_out_fifo = os.path.join(self.fifo_dir, '.ipc_out')
+		self.ipc_lock_file = os.path.join(self.fifo_dir, '.ipc_lock')
+
+	def _daemon_is_alive(self):
+		try:
+			builddir_lock = portage.locks.lockfile(self.fifo_dir,
+				wantnewlockfile=True, flags=os.O_NONBLOCK)
+		except portage.exception.TryAgain:
+			return True
+		else:
+			portage.locks.unlockfile(builddir_lock)
+			return False
+
+	def communicate(self, args):
+
+		# Make locks quiet since unintended locking messages displayed on
+		# stdout could corrupt the intended output of this program.
+		portage.locks._quiet = True
+		lock_obj = portage.locks.lockfile(self.ipc_lock_file, unlinkfile=True)
+
+		try:
+			return self._communicate(args)
+		finally:
+			portage.locks.unlockfile(lock_obj)
+
+	def _timeout_retry_msg(self, start_time, when):
+		time_elapsed = time.time() - start_time
+		portage.util.writemsg_level(
+			portage.localization._(
+			'ebuild-ipc timed out %s after %d seconds,' + \
+			' retrying...\n') % (when, time_elapsed),
+			level=logging.ERROR, noiselevel=-1)
+
+	def _no_daemon_msg(self):
+		portage.util.writemsg_level(
+			portage.localization._(
+			'ebuild-ipc: daemon process not detected\n'),
+			level=logging.ERROR, noiselevel=-1)
+
+	def _wait(self, pid, pr, msg):
+		"""
+		Wait on pid and return an appropriate exit code. This
+		may return unsuccessfully due to timeout if the daemon
+		process does not appear to be alive.
+		"""
+
+		start_time = time.time()
+
+		while True:
+			try:
+				events = select.select([pr], [], [],
+					self._COMMUNICATE_RETRY_TIMEOUT_SECONDS)
+			except select.error as e:
+				portage.util.writemsg_level(
+					"ebuild-ipc: %s: %s\n" % \
+					(portage.localization._('during select'), e),
+					level=logging.ERROR, noiselevel=-1)
+				continue
+
+			if events[0]:
+				break
+
+			if self._daemon_is_alive():
+				self._timeout_retry_msg(start_time, msg)
+			else:
+				self._no_daemon_msg()
+				try:
+					os.kill(pid, signal.SIGKILL)
+					os.waitpid(pid, 0)
+				except OSError as e:
+					portage.util.writemsg_level(
+						"ebuild-ipc: %s\n" % (e,),
+						level=logging.ERROR, noiselevel=-1)
+				return 2
+
+		try:
+			wait_retval = os.waitpid(pid, 0)
+		except OSError as e:
+			portage.util.writemsg_level(
+				"ebuild-ipc: %s: %s\n" % (msg, e),
+				level=logging.ERROR, noiselevel=-1)
+			return 2
+
+		if not os.WIFEXITED(wait_retval[1]):
+			portage.util.writemsg_level(
+				"ebuild-ipc: %s: %s\n" % (msg,
+				portage.localization._('subprocess failure: %s') % \
+				wait_retval[1]),
+				level=logging.ERROR, noiselevel=-1)
+			return 2
+
+		return os.WEXITSTATUS(wait_retval[1])
+
+	def _receive_reply(self, input_fd):
+
+		# Timeouts are handled by the parent process, so just
+		# block until input is available. For maximum portability,
+		# use a single atomic read.
+		buf = None
+		while True:
+			try:
+				events = select.select([input_fd], [], [])
+			except select.error as e:
+				portage.util.writemsg_level(
+					"ebuild-ipc: %s: %s\n" % \
+					(portage.localization._('during select for read'), e),
+					level=logging.ERROR, noiselevel=-1)
+				continue
+
+			if events[0]:
+				# For maximum portability, use os.read() here since
+				# array.fromfile() and file.read() are both known to
+				# erroneously return an empty string from this
+				# non-blocking fifo stream on FreeBSD (bug #337465).
+				try:
+					buf = os.read(input_fd, self._BUFSIZE)
+				except OSError as e:
+					if e.errno != errno.EAGAIN:
+						portage.util.writemsg_level(
+							"ebuild-ipc: %s: %s\n" % \
+							(portage.localization._('read error'), e),
+							level=logging.ERROR, noiselevel=-1)
+						break
+					# Assume that another event will be generated
+					# if there's any relevant data.
+					continue
+
+				# Only one (atomic) read should be necessary.
+				if buf:
+					break
+
+		retval = 2
+
+		if not buf:
+
+			portage.util.writemsg_level(
+				"ebuild-ipc: %s\n" % \
+				(portage.localization._('read failed'),),
+				level=logging.ERROR, noiselevel=-1)
+
+		else:
+
+			try:
+				reply = pickle.loads(buf)
+			except SystemExit:
+				raise
+			except Exception as e:
+				# The pickle module can raise practically
+				# any exception when given corrupt data.
+				portage.util.writemsg_level(
+					"ebuild-ipc: %s\n" % (e,),
+					level=logging.ERROR, noiselevel=-1)
+
+			else:
+
+				(out, err, retval) = reply
+
+				if out:
+					portage.util.writemsg_stdout(out, noiselevel=-1)
+
+				if err:
+					portage.util.writemsg(err, noiselevel=-1)
+
+		return retval
+
+	def _communicate(self, args):
+
+		if not self._daemon_is_alive():
+			self._no_daemon_msg()
+			return 2
+
+		# Open the input fifo before the output fifo, in order to make it
+		# possible for the daemon to send a reply without blocking. This
+		# improves performance, and also makes it possible for the daemon
+		# to do a non-blocking write without a race condition.
+		input_fd = os.open(self.ipc_out_fifo,
+			os.O_RDONLY|os.O_NONBLOCK)
+
+		# Use forks so that the child process can handle blocking IO
+		# un-interrupted, while the parent handles all timeout
+		# considerations. This helps to avoid possible race conditions
+		# from interference between timeouts and blocking IO operations.
+		pr, pw = os.pipe()
+		pid = os.fork()
+
+		if pid == 0:
+			os.close(pr)
+
+			# File streams are in unbuffered mode since we do atomic
+			# read and write of whole pickles.
+			output_file = open(self.ipc_in_fifo, 'wb', 0)
+			output_file.write(pickle.dumps(args))
+			output_file.close()
+			os._exit(os.EX_OK)
+
+		os.close(pw)
+
+		msg = portage.localization._('during write')
+		retval = self._wait(pid, pr, msg)
+		os.close(pr)
+
+		if retval != os.EX_OK:
+			portage.util.writemsg_level(
+				"ebuild-ipc: %s: %s\n" % (msg,
+				portage.localization._('subprocess failure: %s') % \
+				retval), level=logging.ERROR, noiselevel=-1)
+			return retval
+
+		if not self._daemon_is_alive():
+			self._no_daemon_msg()
+			return 2
+
+		pr, pw = os.pipe()
+		pid = os.fork()
+
+		if pid == 0:
+			os.close(pr)
+			retval = self._receive_reply(input_fd)
+			os._exit(retval)
+
+		os.close(pw)
+		retval = self._wait(pid, pr, portage.localization._('during read'))
+		os.close(pr)
+		os.close(input_fd)
+		return retval
+
+def ebuild_ipc_main(args):
+	ebuild_ipc = EbuildIpc()
+	return ebuild_ipc.communicate(args)
+
+if __name__ == '__main__':
+	sys.exit(ebuild_ipc_main(sys.argv[1:]))
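
This helper is what has_version()/best_version() in ebuild.sh (below) invoke
when PORTAGE_IPC_DAEMON is set: the arguments are pickled into the .ipc_in
FIFO and the (out, err, retval) reply is read back from .ipc_out. As called
from the ebuild environment (the atom is illustrative):

    "$PORTAGE_BIN_PATH"/ebuild-ipc has_version "$ROOT" ">=dev-lang/python-2.6"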

diff --git a/portage_with_autodep/bin/ebuild.sh b/portage_with_autodep/bin/ebuild.sh
new file mode 100755
index 0000000..d68e54b
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild.sh
@@ -0,0 +1,2424 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+PORTAGE_BIN_PATH="${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"
+PORTAGE_PYM_PATH="${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}"
+
+if [[ $PORTAGE_SANDBOX_COMPAT_LEVEL -lt 22 ]] ; then
+	# Ensure that /dev/std* streams have appropriate sandbox permission for
+	# bug #288863. This can be removed after sandbox is fixed and portage
+	# depends on the fixed version (sandbox-2.2 has the fix but it is
+	# currently unstable).
+	export SANDBOX_WRITE="${SANDBOX_WRITE:+${SANDBOX_WRITE}:}/dev/stdout:/dev/stderr"
+	export SANDBOX_READ="${SANDBOX_READ:+${SANDBOX_READ}:}/dev/stdin"
+fi
+
+# Don't use sandbox's BASH_ENV for new shells because it does
+# 'source /etc/profile' which can interfere with the build
+# environment by modifying our PATH.
+unset BASH_ENV
+
+ROOTPATH=${ROOTPATH##:}
+ROOTPATH=${ROOTPATH%%:}
+PREROOTPATH=${PREROOTPATH##:}
+PREROOTPATH=${PREROOTPATH%%:}
+PATH=$PORTAGE_BIN_PATH/ebuild-helpers:$PREROOTPATH${PREROOTPATH:+:}/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin${ROOTPATH:+:}$ROOTPATH
+export PATH
+
+# This is just a temporary workaround for portage-9999 users, since
+# earlier portage versions do not detect a version change in this case
+# (9999 to 9999) and therefore try to execute an incompatible version of
+# ebuild.sh during the upgrade.
+export PORTAGE_BZIP2_COMMAND=${PORTAGE_BZIP2_COMMAND:-bzip2}
+
+# These two functions wrap sourcing and calling respectively. At present they
+# perform a QA check to make sure eclasses, ebuilds, and profiles don't mess
+# with shell options (shopts). Ebuilds/eclasses that change shopts should
+# reset them when they are done.
+
+qa_source() {
+	local shopts=$(shopt) OLDIFS="$IFS"
+	local retval
+	source "$@"
+	retval=$?
+	set +e
+	[[ $shopts != $(shopt) ]] &&
+		eqawarn "QA Notice: Global shell options changed and were not restored while sourcing '$*'"
+	[[ "$IFS" != "$OLDIFS" ]] &&
+		eqawarn "QA Notice: Global IFS changed and was not restored while sourcing '$*'"
+	return $retval
+}
+
+qa_call() {
+	local shopts=$(shopt) OLDIFS="$IFS"
+	local retval
+	"$@"
+	retval=$?
+	set +e
+	[[ $shopts != $(shopt) ]] &&
+		eqawarn "QA Notice: Global shell options changed and were not restored while calling '$*'"
+	[[ "$IFS" != "$OLDIFS" ]] &&
+		eqawarn "QA Notice: Global IFS changed and was not restored while calling '$*'"
+	return $retval
+}
+
+EBUILD_SH_ARGS="$*"
+
+shift $#
+
+# Prevent aliases from causing portage to act inappropriately.
+# Make sure this comes before everything else so we don't mess up aliases defined later.
+unalias -a
+
+# Unset some variables that break things.
+unset GZIP BZIP BZIP2 CDPATH GREP_OPTIONS GREP_COLOR GLOBIGNORE
+
+source "${PORTAGE_BIN_PATH}/isolated-functions.sh"  &>/dev/null
+
+[[ $PORTAGE_QUIET != "" ]] && export PORTAGE_QUIET
+
+# Sandbox support functions; defined prior to profile.bashrc sourcing, since the profile might need to add a default exception (e.g. /usr/lib64/conftest)
+_sb_append_var() {
+	local _v=$1 ; shift
+	local var="SANDBOX_${_v}"
+	[[ -z $1 || -n $2 ]] && die "Usage: add$(echo ${_v} | \
+		LC_ALL=C tr [:upper:] [:lower:]) <colon-delimited list of paths>"
+	export ${var}="${!var:+${!var}:}$1"
+}
+# bash-4 version:
+# local var="SANDBOX_${1^^}"
+# addread() { _sb_append_var ${0#add} "$@" ; }
+addread()    { _sb_append_var READ    "$@" ; }
+addwrite()   { _sb_append_var WRITE   "$@" ; }
+adddeny()    { _sb_append_var DENY    "$@" ; }
+addpredict() { _sb_append_var PREDICT "$@" ; }
+
+addwrite "${PORTAGE_TMPDIR}"
+addread "/:${PORTAGE_TMPDIR}"
+[[ -n ${PORTAGE_GPG_DIR} ]] && addpredict "${PORTAGE_GPG_DIR}"
+
+# Avoid sandbox violations in temporary directories.
+if [[ -w $T ]] ; then
+	export TEMP=$T
+	export TMP=$T
+	export TMPDIR=$T
+elif [[ $SANDBOX_ON = 1 ]] ; then
+	for x in TEMP TMP TMPDIR ; do
+		[[ -n ${!x} ]] && addwrite "${!x}"
+	done
+	unset x
+fi
+
+# the sandbox is disabled by default except when overridden in the relevant stages
+export SANDBOX_ON=0
+
+lchown() {
+	chown -h "$@"
+}
+
+lchgrp() {
+	chgrp -h "$@"
+}
+
+esyslog() {
+	# Custom version of esyslog() to take care of the "Red Star" bug.
+	# MUST follow functions.sh to override the "" parameter problem.
+	return 0
+}
+
+useq() {
+	has $EBUILD_PHASE prerm postrm || eqawarn \
+		"QA Notice: The 'useq' function is deprecated (replaced by 'use')"
+	use ${1}
+}
+
+usev() {
+	if use ${1}; then
+		echo "${1#!}"
+		return 0
+	fi
+	return 1
+}
+
+use() {
+	local u=$1
+	local found=0
+
+	# if we got something like '!flag', then invert the return value
+	if [[ ${u:0:1} == "!" ]] ; then
+		u=${u:1}
+		found=1
+	fi
+
+	if [[ $EBUILD_PHASE = depend ]] ; then
+		# TODO: Add a registration interface for eclasses to register
+		# any number of phase hooks, so that global scope eclass
+		# initialization can be migrated to phase hooks in new EAPIs.
+		# Example: add_phase_hook before pkg_setup $ECLASS_pre_pkg_setup
+		#if [[ -n $EAPI ]] && ! has "$EAPI" 0 1 2 3 ; then
+		#	die "use() called during invalid phase: $EBUILD_PHASE"
+		#fi
+		true
+
+	# Make sure we have this USE flag in IUSE
+	elif [[ -n $PORTAGE_IUSE && -n $EBUILD_PHASE ]] ; then
+		[[ $u =~ $PORTAGE_IUSE ]] || \
+			eqawarn "QA Notice: USE Flag '${u}' not" \
+				"in IUSE for ${CATEGORY}/${PF}"
+	fi
+
+	if has ${u} ${USE} ; then
+		return ${found}
+	else
+		return $((!found))
+	fi
+}
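+
+# Illustrative behavior (not part of the original helper), assuming
+# USE="ssl" with gtk unset:
+#   use ssl    -> returns 0 (enabled)
+#   use gtk    -> returns 1 (disabled)
+#   use !gtk   -> returns 0 ('!' inverts the result)
+#   usev ssl   -> prints "ssl" and returns 0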
+
+# Return true if given package is installed. Otherwise return false.
+# Takes single depend-type atoms.
+has_version() {
+	if [ "${EBUILD_PHASE}" == "depend" ]; then
+		die "portageq calls (has_version calls portageq) are not allowed in the global scope"
+	fi
+
+	if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
+		"$PORTAGE_BIN_PATH"/ebuild-ipc has_version "$ROOT" "$1"
+	else
+		PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
+		"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" has_version "${ROOT}" "$1"
+	fi
+	local retval=$?
+	case "${retval}" in
+		0|1)
+			return ${retval}
+			;;
+		*)
+			die "unexpected portageq exit code: ${retval}"
+			;;
+	esac
+}
+
+portageq() {
+	if [ "${EBUILD_PHASE}" == "depend" ]; then
+		die "portageq calls are not allowed in the global scope"
+	fi
+
+	PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
+	"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" "$@"
+}
+
+
+# ----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
+
+
+# Returns the best/most-current match.
+# Takes single depend-type atoms.
+best_version() {
+	if [ "${EBUILD_PHASE}" == "depend" ]; then
+		die "portageq calls (best_version calls portageq) are not allowed in the global scope"
+	fi
+
+	if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
+		"$PORTAGE_BIN_PATH"/ebuild-ipc best_version "$ROOT" "$1"
+	else
+		PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
+		"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" 'best_version' "${ROOT}" "$1"
+	fi
+	local retval=$?
+	case "${retval}" in
+		0|1)
+			return ${retval}
+			;;
+		*)
+			die "unexpected portageq exit code: ${retval}"
+			;;
+	esac
+}
+
+use_with() {
+	if [ -z "$1" ]; then
+		echo "!!! use_with() called without a parameter." >&2
+		echo "!!! use_with <USEFLAG> [<flagname> [value]]" >&2
+		return 1
+	fi
+
+	if ! has "${EAPI:-0}" 0 1 2 3 ; then
+		local UW_SUFFIX=${3+=$3}
+	else
+		local UW_SUFFIX=${3:+=$3}
+	fi
+	local UWORD=${2:-$1}
+
+	if use $1; then
+		echo "--with-${UWORD}${UW_SUFFIX}"
+	else
+		echo "--without-${UWORD}"
+	fi
+	return 0
+}
+
+use_enable() {
+	if [ -z "$1" ]; then
+		echo "!!! use_enable() called without a parameter." >&2
+		echo "!!! use_enable <USEFLAG> [<flagname> [value]]" >&2
+		return 1
+	fi
+
+	if ! has "${EAPI:-0}" 0 1 2 3 ; then
+		local UE_SUFFIX=${3+=$3}
+	else
+		local UE_SUFFIX=${3:+=$3}
+	fi
+	local UWORD=${2:-$1}
+
+	if use $1; then
+		echo "--enable-${UWORD}${UE_SUFFIX}"
+	else
+		echo "--disable-${UWORD}"
+	fi
+	return 0
+}
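+
+# Illustrative expansions (not part of the original source), assuming
+# USE="ssl":
+#   use_with ssl openssl   -> --with-openssl
+#   use_enable ssl         -> --enable-ssl
+#   use_enable ssl ssl ""  -> --enable-ssl= in EAPI 4 (${3+=$3} keeps an
+#                             empty value), --enable-ssl in EAPI 0-3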
+
+register_die_hook() {
+	local x
+	for x in $* ; do
+		has $x $EBUILD_DEATH_HOOKS || \
+			export EBUILD_DEATH_HOOKS="$EBUILD_DEATH_HOOKS $x"
+	done
+}
+
+register_success_hook() {
+	local x
+	for x in $* ; do
+		has $x $EBUILD_SUCCESS_HOOKS || \
+			export EBUILD_SUCCESS_HOOKS="$EBUILD_SUCCESS_HOOKS $x"
+	done
+}
+
+# Ensure that $PWD is sane whenever possible, to protect against
+# exploitation of insecure search path for python -c in ebuilds.
+# See bug #239560.
+if ! has "$EBUILD_PHASE" clean cleanrm depend help ; then
+	cd "$PORTAGE_BUILDDIR" || \
+		die "PORTAGE_BUILDDIR does not exist: '$PORTAGE_BUILDDIR'"
+fi
+
+# If no perms are specified, dirs/files will get sane defaults
+# (not secretive, but not stupid).
+umask 022
+export DESTTREE=/usr
+export INSDESTTREE=""
+export _E_EXEDESTTREE_=""
+export _E_DOCDESTTREE_=""
+export INSOPTIONS="-m0644"
+export EXEOPTIONS="-m0755"
+export LIBOPTIONS="-m0644"
+export DIROPTIONS="-m0755"
+export MOPREFIX=${PN}
+declare -a PORTAGE_DOCOMPRESS=( /usr/share/{doc,info,man} )
+declare -a PORTAGE_DOCOMPRESS_SKIP=( /usr/share/doc/${PF}/html )
+
+# adds ".keep" files so that dirs aren't auto-cleaned
+keepdir() {
+	dodir "$@"
+	local x
+	if [ "$1" == "-R" ] || [ "$1" == "-r" ]; then
+		shift
+		find "$@" -type d -printf "${D}%p/.keep_${CATEGORY}_${PN}-${SLOT}\n" \
+			| tr "\n" "\0" | \
+			while read -r -d $'\0' ; do
+				>> "$REPLY" || \
+					die "Failed to recursively create .keep files"
+			done
+	else
+		for x in "$@"; do
+			>> "${D}${x}/.keep_${CATEGORY}_${PN}-${SLOT}" || \
+				die "Failed to create .keep in ${D}${x}"
+		done
+	fi
+}
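+
+# Illustrative usage: 'keepdir /var/lib/mypkg' (a hypothetical path) creates
+# ${D}/var/lib/mypkg/.keep_${CATEGORY}_${PN}-${SLOT}, so the otherwise-empty
+# directory survives merging and unmerging.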
+
+unpack() {
+	local srcdir
+	local x
+	local y
+	local myfail
+	local eapi=${EAPI:-0}
+	[ -z "$*" ] && die "Nothing passed to the 'unpack' command"
+
+	for x in "$@"; do
+		vecho ">>> Unpacking ${x} to ${PWD}"
+		y=${x%.*}
+		y=${y##*.}
+
+		if [[ ${x} == "./"* ]] ; then
+			srcdir=""
+		elif [[ ${x} == ${DISTDIR%/}/* ]] ; then
+			die "Arguments to unpack() cannot begin with \${DISTDIR}."
+		elif [[ ${x} == "/"* ]] ; then
+			die "Arguments to unpack() cannot be absolute"
+		else
+			srcdir="${DISTDIR}/"
+		fi
+		[[ ! -s ${srcdir}${x} ]] && die "${x} does not exist"
+
+		_unpack_tar() {
+			if [ "${y}" == "tar" ]; then
+				$1 -c -- "$srcdir$x" | tar xof -
+				assert_sigpipe_ok "$myfail"
+			else
+				local cwd_dest=${x##*/}
+				cwd_dest=${cwd_dest%.*}
+				$1 -c -- "${srcdir}${x}" > "${cwd_dest}" || die "$myfail"
+			fi
+		}
+
+		myfail="failure unpacking ${x}"
+		case "${x##*.}" in
+			tar)
+				tar xof "$srcdir$x" || die "$myfail"
+				;;
+			tgz)
+				tar xozf "$srcdir$x" || die "$myfail"
+				;;
+			tbz|tbz2)
+				${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -c -- "$srcdir$x" | tar xof -
+				assert_sigpipe_ok "$myfail"
+				;;
+			ZIP|zip|jar)
+				# unzip will interactively prompt under some error conditions,
+				# as reported in bug #336285
+				( while true ; do echo n || break ; done ) | \
+				unzip -qo "${srcdir}${x}" || die "$myfail"
+				;;
+			gz|Z|z)
+				_unpack_tar "gzip -d"
+				;;
+			bz2|bz)
+				_unpack_tar "${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d}"
+				;;
+			7Z|7z)
+				local my_output
+				my_output="$(7z x -y "${srcdir}${x}")"
+				if [ $? -ne 0 ]; then
+					echo "${my_output}" >&2
+					die "$myfail"
+				fi
+				;;
+			RAR|rar)
+				unrar x -idq -o+ "${srcdir}${x}" || die "$myfail"
+				;;
+			LHa|LHA|lha|lzh)
+				lha xfq "${srcdir}${x}" || die "$myfail"
+				;;
+			a)
+				ar x "${srcdir}${x}" || die "$myfail"
+				;;
+			deb)
+				# Unpacking .deb archives cannot always be done with
+				# `ar`; on AIX, for instance, it does not work. If
+				# `deb2targz` is installed, prefer it over `ar` for
+				# that reason. We just make sure `deb2targz` is
+				# installed on AIX.
+				if type -P deb2targz > /dev/null; then
+					y=${x##*/}
+					local created_symlink=0
+					if [ ! "$srcdir$x" -ef "$y" ] ; then
+						# deb2targz always extracts into the same directory as
+						# the source file, so create a symlink in the current
+						# working directory if necessary.
+						ln -sf "$srcdir$x" "$y" || die "$myfail"
+						created_symlink=1
+					fi
+					deb2targz "$y" || die "$myfail"
+					if [ $created_symlink = 1 ] ; then
+						# Clean up the symlink so the ebuild
+						# doesn't inadvertently install it.
+						rm -f "$y"
+					fi
+					mv -f "${y%.deb}".tar.gz data.tar.gz || die "$myfail"
+				else
+					ar x "$srcdir$x" || die "$myfail"
+				fi
+				;;
+			lzma)
+				_unpack_tar "lzma -d"
+				;;
+			xz)
+				if has $eapi 0 1 2 ; then
+					vecho "unpack ${x}: file format not recognized. Ignoring."
+				else
+					_unpack_tar "xz -d"
+				fi
+				;;
+			*)
+				vecho "unpack ${x}: file format not recognized. Ignoring."
+				;;
+		esac
+	done
+	# Do not chmod '.' since it's probably ${WORKDIR} and PORTAGE_WORKDIR_MODE
+	# should be preserved.
+	find . -mindepth 1 -maxdepth 1 ! -type l -print0 | \
+		${XARGS} -0 chmod -fR a+rX,u+w,g-w,o-w
+}
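+
+# Illustrative call: from src_unpack(), 'unpack foo-1.0.tar.bz2' (a
+# hypothetical distfile) pipes the bunzip2 command through 'tar xof -'
+# inside ${WORKDIR}, then normalizes permissions as above.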
+
+strip_duplicate_slashes() {
+	if [[ -n $1 ]] ; then
+		local removed=$1
+		while [[ ${removed} == *//* ]] ; do
+			removed=${removed//\/\///}
+		done
+		echo ${removed}
+	fi
+}
+
+hasg() {
+	local x s=$1
+	shift
+	for x ; do [[ ${x} == ${s} ]] && echo "${x}" && return 0 ; done
+	return 1
+}
+hasgq() { hasg "$@" >/dev/null ; }
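+
+# hasg matches each remaining argument against the glob pattern in $1 and
+# prints the first match, e.g. (illustrative):
+#   hasg --libdir=\* --prefix=/usr --libdir=/usr/lib64
+# prints "--libdir=/usr/lib64" and returns 0. econf below uses hasgq to
+# detect whether the caller already passed --libdir.
+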
+econf() {
+	local x
+
+	local phase_func=$(_ebuild_arg_to_phase "$EAPI" "$EBUILD_PHASE")
+	if [[ -n $phase_func ]] ; then
+		if has "$EAPI" 0 1 ; then
+			[[ $phase_func != src_compile ]] && \
+				eqawarn "QA Notice: econf called in" \
+					"$phase_func instead of src_compile"
+		else
+			[[ $phase_func != src_configure ]] && \
+				eqawarn "QA Notice: econf called in" \
+					"$phase_func instead of src_configure"
+		fi
+	fi
+
+	: ${ECONF_SOURCE:=.}
+	if [ -x "${ECONF_SOURCE}/configure" ]; then
+		if [[ -n $CONFIG_SHELL && \
+			"$(head -n1 "$ECONF_SOURCE/configure")" =~ ^'#!'[[:space:]]*/bin/sh([[:space:]]|$) ]] ; then
+			sed -e "1s:^#![[:space:]]*/bin/sh:#!$CONFIG_SHELL:" -i "$ECONF_SOURCE/configure" || \
+				die "Substitution of shebang in '$ECONF_SOURCE/configure' failed"
+		fi
+		if [ -e /usr/share/gnuconfig/ ]; then
+			find "${WORKDIR}" -type f '(' \
+			-name config.guess -o -name config.sub ')' -print0 | \
+			while read -r -d $'\0' x ; do
+				vecho " * econf: updating ${x/${WORKDIR}\/} with /usr/share/gnuconfig/${x##*/}"
+				cp -f /usr/share/gnuconfig/"${x##*/}" "${x}"
+			done
+		fi
+
+		# EAPI=4 adds --disable-dependency-tracking to econf
+		if ! has "$EAPI" 0 1 2 3 3_pre2 && \
+			"${ECONF_SOURCE}/configure" --help 2>/dev/null | \
+			grep -q disable-dependency-tracking ; then
+			set -- --disable-dependency-tracking "$@"
+		fi
+
+		# if the profile defines a location to install libs to aside from default, pass it on.
+		# if the ebuild passes in --libdir, they're responsible for the conf_libdir fun.
+		local CONF_LIBDIR LIBDIR_VAR="LIBDIR_${ABI}"
+		if [[ -n ${ABI} && -n ${!LIBDIR_VAR} ]] ; then
+			CONF_LIBDIR=${!LIBDIR_VAR}
+		fi
+		if [[ -n ${CONF_LIBDIR} ]] && ! hasgq --libdir=\* "$@" ; then
+			export CONF_PREFIX=$(hasg --exec-prefix=\* "$@")
+			[[ -z ${CONF_PREFIX} ]] && CONF_PREFIX=$(hasg --prefix=\* "$@")
+			: ${CONF_PREFIX:=/usr}
+			CONF_PREFIX=${CONF_PREFIX#*=}
+			[[ ${CONF_PREFIX} != /* ]] && CONF_PREFIX="/${CONF_PREFIX}"
+			[[ ${CONF_LIBDIR} != /* ]] && CONF_LIBDIR="/${CONF_LIBDIR}"
+			set -- --libdir="$(strip_duplicate_slashes ${CONF_PREFIX}${CONF_LIBDIR})" "$@"
+		fi
+
+		set -- \
+			--prefix=/usr \
+			${CBUILD:+--build=${CBUILD}} \
+			--host=${CHOST} \
+			${CTARGET:+--target=${CTARGET}} \
+			--mandir=/usr/share/man \
+			--infodir=/usr/share/info \
+			--datadir=/usr/share \
+			--sysconfdir=/etc \
+			--localstatedir=/var/lib \
+			"$@" \
+			${EXTRA_ECONF}
+		vecho "${ECONF_SOURCE}/configure" "$@"
+
+		if ! "${ECONF_SOURCE}/configure" "$@" ; then
+
+			if [ -s config.log ]; then
+				echo
+				echo "!!! Please attach the following file when seeking support:"
+				echo "!!! ${PWD}/config.log"
+			fi
+			die "econf failed"
+		fi
+	elif [ -f "${ECONF_SOURCE}/configure" ]; then
+		die "configure is not executable"
+	else
+		die "no configure script found"
+	fi
+}
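+
+# Illustrative result: with CHOST=x86_64-pc-linux-gnu (hypothetical) and no
+# extra arguments, econf ends up running roughly
+#   ./configure --prefix=/usr --host=x86_64-pc-linux-gnu \
+#     --mandir=/usr/share/man --infodir=/usr/share/info \
+#     --datadir=/usr/share --sysconfdir=/etc --localstatedir=/var/lib
+# with --disable-dependency-tracking prepended when EAPI >= 4 and the
+# configure script advertises that option.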
+
+einstall() {
+	# CONF_PREFIX is only set if they didn't pass in libdir above.
+	local LOCAL_EXTRA_EINSTALL="${EXTRA_EINSTALL}"
+	LIBDIR_VAR="LIBDIR_${ABI}"
+	if [ -n "${ABI}" -a -n "${!LIBDIR_VAR}" ]; then
+		CONF_LIBDIR="${!LIBDIR_VAR}"
+	fi
+	unset LIBDIR_VAR
+	if [ -n "${CONF_LIBDIR}" ] && [ "${CONF_PREFIX:+set}" = set ]; then
+		EI_DESTLIBDIR="${D}/${CONF_PREFIX}/${CONF_LIBDIR}"
+		EI_DESTLIBDIR="$(strip_duplicate_slashes ${EI_DESTLIBDIR})"
+		LOCAL_EXTRA_EINSTALL="libdir=${EI_DESTLIBDIR} ${LOCAL_EXTRA_EINSTALL}"
+		unset EI_DESTLIBDIR
+	fi
+
+	if [ -f ./[mM]akefile -o -f ./GNUmakefile ] ; then
+		if [ "${PORTAGE_DEBUG}" == "1" ]; then
+			${MAKE:-make} -n prefix="${D}usr" \
+				datadir="${D}usr/share" \
+				infodir="${D}usr/share/info" \
+				localstatedir="${D}var/lib" \
+				mandir="${D}usr/share/man" \
+				sysconfdir="${D}etc" \
+				${LOCAL_EXTRA_EINSTALL} \
+				${MAKEOPTS} ${EXTRA_EMAKE} -j1 \
+				"$@" install
+		fi
+		${MAKE:-make} prefix="${D}usr" \
+			datadir="${D}usr/share" \
+			infodir="${D}usr/share/info" \
+			localstatedir="${D}var/lib" \
+			mandir="${D}usr/share/man" \
+			sysconfdir="${D}etc" \
+			${LOCAL_EXTRA_EINSTALL} \
+			${MAKEOPTS} ${EXTRA_EMAKE} -j1 \
+			"$@" install || die "einstall failed"
+	else
+		die "no Makefile found"
+	fi
+}
+
+_eapi0_pkg_nofetch() {
+	[ -z "${SRC_URI}" ] && return
+
+	elog "The following are listed in SRC_URI for ${PN}:"
+	local x
+	for x in $(echo ${SRC_URI}); do
+		elog "   ${x}"
+	done
+}
+
+_eapi0_src_unpack() {
+	[[ -n ${A} ]] && unpack ${A}
+}
+
+_eapi0_src_compile() {
+	if [ -x ./configure ] ; then
+		econf
+	fi
+	_eapi2_src_compile
+}
+
+_eapi0_src_test() {
+	# Since we don't want emake's automatic die
+	# support (EAPI 4 and later), and we also don't
+	# want the warning messages that it produces if
+	# we call it in 'nonfatal' mode, we use emake_cmd
+	# to emulate the desired parts of emake behavior.
+	local emake_cmd="${MAKE:-make} ${MAKEOPTS} ${EXTRA_EMAKE}"
+	if $emake_cmd -j1 check -n &> /dev/null; then
+		vecho ">>> Test phase [check]: ${CATEGORY}/${PF}"
+		if ! $emake_cmd -j1 check; then
+			has test $FEATURES && die "Make check failed. See above for details."
+			has test $FEATURES || eerror "Make check failed. See above for details."
+		fi
+	elif $emake_cmd -j1 test -n &> /dev/null; then
+		vecho ">>> Test phase [test]: ${CATEGORY}/${PF}"
+		if ! $emake_cmd -j1 test; then
+			has test $FEATURES && die "Make test failed. See above for details."
+			has test $FEATURES || eerror "Make test failed. See above for details."
+		fi
+	else
+		vecho ">>> Test phase [none]: ${CATEGORY}/${PF}"
+	fi
+}
+
+_eapi1_src_compile() {
+	_eapi2_src_configure
+	_eapi2_src_compile
+}
+
+_eapi2_src_configure() {
+	if [[ -x ${ECONF_SOURCE:-.}/configure ]] ; then
+		econf
+	fi
+}
+
+_eapi2_src_compile() {
+	if [ -f Makefile ] || [ -f GNUmakefile ] || [ -f makefile ]; then
+		emake || die "emake failed"
+	fi
+}
+
+_eapi4_src_install() {
+	if [[ -f Makefile || -f GNUmakefile || -f makefile ]] ; then
+		emake DESTDIR="${D}" install
+	fi
+
+	if ! declare -p DOCS &>/dev/null ; then
+		local d
+		for d in README* ChangeLog AUTHORS NEWS TODO CHANGES \
+				THANKS BUGS FAQ CREDITS CHANGELOG ; do
+			[[ -s "${d}" ]] && dodoc "${d}"
+		done
+	elif [[ $(declare -p DOCS) == "declare -a "* ]] ; then
+		dodoc "${DOCS[@]}"
+	else
+		dodoc ${DOCS}
+	fi
+}
+
+ebuild_phase() {
+	declare -F "$1" >/dev/null && qa_call $1
+}
+
+ebuild_phase_with_hooks() {
+	local x phase_name=${1}
+	for x in {pre_,,post_}${phase_name} ; do
+		ebuild_phase ${x}
+	done
+}
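+
+# For example, 'ebuild_phase_with_hooks pkg_preinst' runs pre_pkg_preinst,
+# pkg_preinst, and post_pkg_preinst in that order, silently skipping any
+# that are not defined as functions.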
+
+dyn_pretend() {
+	if [[ -e $PORTAGE_BUILDDIR/.pretended ]] ; then
+		vecho ">>> It appears that '$PF' is already pretended; skipping."
+		vecho ">>> Remove '$PORTAGE_BUILDDIR/.pretended' to force pretend."
+		return 0
+	fi
+	ebuild_phase pre_pkg_pretend
+	ebuild_phase pkg_pretend
+	>> "$PORTAGE_BUILDDIR/.pretended" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.pretended"
+	ebuild_phase post_pkg_pretend
+}
+
+dyn_setup() {
+	if [[ -e $PORTAGE_BUILDDIR/.setuped ]] ; then
+		vecho ">>> It appears that '$PF' is already setup; skipping."
+		vecho ">>> Remove '$PORTAGE_BUILDDIR/.setuped' to force setup."
+		return 0
+	fi
+	ebuild_phase pre_pkg_setup
+	ebuild_phase pkg_setup
+	>> "$PORTAGE_BUILDDIR/.setuped" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.setuped"
+	ebuild_phase post_pkg_setup
+}
+
+dyn_unpack() {
+	local newstuff="no"
+	if [ -e "${WORKDIR}" ]; then
+		local x
+		local checkme
+		for x in $A ; do
+			vecho ">>> Checking ${x}'s mtime..."
+			if [ "${PORTAGE_ACTUAL_DISTDIR:-${DISTDIR}}/${x}" -nt "${WORKDIR}" ]; then
+				vecho ">>> ${x} has been updated; recreating WORKDIR..."
+				newstuff="yes"
+				break
+			fi
+		done
+		if [ ! -f "${PORTAGE_BUILDDIR}/.unpacked" ] ; then
+			vecho ">>> Not marked as unpacked; recreating WORKDIR..."
+			newstuff="yes"
+		fi
+	fi
+	if [ "${newstuff}" == "yes" ]; then
+		# We don't necessarily have privileges to do a full dyn_clean here.
+		rm -rf "${PORTAGE_BUILDDIR}"/{.setuped,.unpacked,.prepared,.configured,.compiled,.tested,.installed,.packaged,build-info}
+		if ! has keepwork $FEATURES ; then
+			rm -rf "${WORKDIR}"
+		fi
+		if [ -d "${T}" ] && \
+			! has keeptemp $FEATURES ; then
+			rm -rf "${T}" && mkdir "${T}"
+		fi
+	fi
+	if [ -e "${WORKDIR}" ]; then
+		if [ "$newstuff" == "no" ]; then
+			vecho ">>> WORKDIR is up-to-date, keeping..."
+			return 0
+		fi
+	fi
+
+	if [ ! -d "${WORKDIR}" ]; then
+		install -m${PORTAGE_WORKDIR_MODE:-0700} -d "${WORKDIR}" || die "Failed to create dir '${WORKDIR}'"
+	fi
+	cd "${WORKDIR}" || die "Directory change failed: \`cd '${WORKDIR}'\`"
+	ebuild_phase pre_src_unpack
+	vecho ">>> Unpacking source..."
+	ebuild_phase src_unpack
+	>> "$PORTAGE_BUILDDIR/.unpacked" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.unpacked"
+	vecho ">>> Source unpacked in ${WORKDIR}"
+	ebuild_phase post_src_unpack
+}
+
+dyn_clean() {
+	if [ -z "${PORTAGE_BUILDDIR}" ]; then
+		echo "Aborting clean phase because PORTAGE_BUILDDIR is unset!"
+		return 1
+	elif [ ! -d "${PORTAGE_BUILDDIR}" ] ; then
+		return 0
+	fi
+	if has chflags $FEATURES ; then
+		chflags -R noschg,nouchg,nosappnd,nouappnd "${PORTAGE_BUILDDIR}"
+		chflags -R nosunlnk,nouunlnk "${PORTAGE_BUILDDIR}" 2>/dev/null
+	fi
+
+	rm -rf "${PORTAGE_BUILDDIR}/image" "${PORTAGE_BUILDDIR}/homedir"
+	rm -f "${PORTAGE_BUILDDIR}/.installed"
+
+	if [[ $EMERGE_FROM = binary ]] || \
+		! has keeptemp $FEATURES && ! has keepwork $FEATURES ; then
+		rm -rf "${T}"
+	fi
+
+	if [[ $EMERGE_FROM = binary ]] || ! has keepwork $FEATURES; then
+		rm -f "$PORTAGE_BUILDDIR"/.{ebuild_changed,logid,pretended,setuped,unpacked,prepared} \
+			"$PORTAGE_BUILDDIR"/.{configured,compiled,tested,packaged} \
+			"$PORTAGE_BUILDDIR"/.die_hooks \
+			"$PORTAGE_BUILDDIR"/.ipc_{in,out,lock} \
+			"$PORTAGE_BUILDDIR"/.exit_status
+
+		rm -rf "${PORTAGE_BUILDDIR}/build-info"
+		rm -rf "${WORKDIR}"
+	fi
+
+	if [ -f "${PORTAGE_BUILDDIR}/.unpacked" ]; then
+		find "${PORTAGE_BUILDDIR}" -type d ! -regex "^${WORKDIR}" | sort -r | tr "\n" "\0" | $XARGS -0 rmdir &>/dev/null
+	fi
+
+	# Do not bind this to the doebuild-defined DISTDIR; don't trust doebuild,
+	# since a mistake there could wipe the user's distfiles directory (bad).
+	rm -rf "${PORTAGE_BUILDDIR}/distdir"
+
+	# Some kernels, such as Solaris, return EINVAL when an attempt
+	# is made to remove the current working directory.
+	cd "$PORTAGE_BUILDDIR"/../..
+	rmdir "$PORTAGE_BUILDDIR" 2>/dev/null
+
+	true
+}
+
+into() {
+	if [ "$1" == "/" ]; then
+		export DESTTREE=""
+	else
+		export DESTTREE=$1
+		if [ ! -d "${D}${DESTTREE}" ]; then
+			install -d "${D}${DESTTREE}"
+			local ret=$?
+			if [[ $ret -ne 0 ]] ; then
+				helpers_die "${FUNCNAME[0]} failed"
+				return $ret
+			fi
+		fi
+	fi
+}
+
+insinto() {
+	if [ "$1" == "/" ]; then
+		export INSDESTTREE=""
+	else
+		export INSDESTTREE=$1
+		if [ ! -d "${D}${INSDESTTREE}" ]; then
+			install -d "${D}${INSDESTTREE}"
+			local ret=$?
+			if [[ $ret -ne 0 ]] ; then
+				helpers_die "${FUNCNAME[0]} failed"
+				return $ret
+			fi
+		fi
+	fi
+}
+
+exeinto() {
+	if [ "$1" == "/" ]; then
+		export _E_EXEDESTTREE_=""
+	else
+		export _E_EXEDESTTREE_="$1"
+		if [ ! -d "${D}${_E_EXEDESTTREE_}" ]; then
+			install -d "${D}${_E_EXEDESTTREE_}"
+			local ret=$?
+			if [[ $ret -ne 0 ]] ; then
+				helpers_die "${FUNCNAME[0]} failed"
+				return $ret
+			fi
+		fi
+	fi
+}
+
+docinto() {
+	if [ "$1" == "/" ]; then
+		export _E_DOCDESTTREE_=""
+	else
+		export _E_DOCDESTTREE_="$1"
+		if [ ! -d "${D}usr/share/doc/${PF}/${_E_DOCDESTTREE_}" ]; then
+			install -d "${D}usr/share/doc/${PF}/${_E_DOCDESTTREE_}"
+			local ret=$?
+			if [[ $ret -ne 0 ]] ; then
+				helpers_die "${FUNCNAME[0]} failed"
+				return $ret
+			fi
+		fi
+	fi
+}
+
+insopts() {
+	export INSOPTIONS="$@"
+
+	# `install` should never be called with '-s' ...
+	has -s ${INSOPTIONS} && die "Never call insopts() with -s"
+}
+
+diropts() {
+	export DIROPTIONS="$@"
+}
+
+exeopts() {
+	export EXEOPTIONS="$@"
+
+	# `install` should never be called with '-s' ...
+	has -s ${EXEOPTIONS} && die "Never call exeopts() with -s"
+}
+
+libopts() {
+	export LIBOPTIONS="$@"
+
+	# `install` should never be called with '-s' ...
+	has -s ${LIBOPTIONS} && die "Never call libopts() with -s"
+}
+
+docompress() {
+	has "${EAPI}" 0 1 2 3 && die "'docompress' not supported in this EAPI"
+
+	local f g
+	if [[ $1 = "-x" ]]; then
+		shift
+		for f; do
+			f=$(strip_duplicate_slashes "${f}"); f=${f%/}
+			[[ ${f:0:1} = / ]] || f="/${f}"
+			for g in "${PORTAGE_DOCOMPRESS_SKIP[@]}"; do
+				[[ ${f} = "${g}" ]] && continue 2
+			done
+			PORTAGE_DOCOMPRESS_SKIP[${#PORTAGE_DOCOMPRESS_SKIP[@]}]=${f}
+		done
+	else
+		for f; do
+			f=$(strip_duplicate_slashes "${f}"); f=${f%/}
+			[[ ${f:0:1} = / ]] || f="/${f}"
+			for g in "${PORTAGE_DOCOMPRESS[@]}"; do
+				[[ ${f} = "${g}" ]] && continue 2
+			done
+			PORTAGE_DOCOMPRESS[${#PORTAGE_DOCOMPRESS[@]}]=${f}
+		done
+	fi
+}
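+
+# Illustrative usage (EAPI >= 4 only; paths are hypothetical):
+#   docompress /usr/share/mypkg/doc               # queue a path for compression
+#   docompress -x /usr/share/doc/${PF}/examples   # exempt a path from it
+# Paths are normalized (duplicate slashes and trailing / stripped, leading /
+# enforced) and duplicates are skipped, as implemented above.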
+
+abort_handler() {
+	local msg
+	if [ "$2" != "fail" ]; then
+		msg="${EBUILD}: ${1} aborted; exiting."
+	else
+		msg="${EBUILD}: ${1} failed; exiting."
+	fi
+	echo
+	echo "$msg"
+	echo
+	eval ${3}
+	#unset signal handler
+	trap - SIGINT SIGQUIT
+}
+
+abort_prepare() {
+	abort_handler src_prepare $1
+	rm -f "$PORTAGE_BUILDDIR/.prepared"
+	exit 1
+}
+
+abort_configure() {
+	abort_handler src_configure $1
+	rm -f "$PORTAGE_BUILDDIR/.configured"
+	exit 1
+}
+
+abort_compile() {
+	abort_handler "src_compile" $1
+	rm -f "${PORTAGE_BUILDDIR}/.compiled"
+	exit 1
+}
+
+abort_test() {
+	abort_handler "dyn_test" $1
+	rm -f "${PORTAGE_BUILDDIR}/.tested"
+	exit 1
+}
+
+abort_install() {
+	abort_handler "src_install" $1
+	rm -rf "${PORTAGE_BUILDDIR}/image"
+	exit 1
+}
+
+has_phase_defined_up_to() {
+	local phase
+	for phase in unpack prepare configure compile install; do
+		has ${phase} ${DEFINED_PHASES} && return 0
+		[[ ${phase} == $1 ]] && return 1
+	done
+	# We shouldn't actually get here
+	return 1
+}
+
+dyn_prepare() {
+
+	if [[ -e $PORTAGE_BUILDDIR/.prepared ]] ; then
+		vecho ">>> It appears that '$PF' is already prepared; skipping."
+		vecho ">>> Remove '$PORTAGE_BUILDDIR/.prepared' to force prepare."
+		return 0
+	fi
+
+	if [[ -d $S ]] ; then
+		cd "${S}"
+	elif has $EAPI 0 1 2 3 3_pre2 ; then
+		cd "${WORKDIR}"
+	elif [[ -z ${A} ]] && ! has_phase_defined_up_to prepare; then
+		cd "${WORKDIR}"
+	else
+		die "The source directory '${S}' doesn't exist"
+	fi
+
+	trap abort_prepare SIGINT SIGQUIT
+
+	ebuild_phase pre_src_prepare
+	vecho ">>> Preparing source in $PWD ..."
+	ebuild_phase src_prepare
+	>> "$PORTAGE_BUILDDIR/.prepared" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.prepared"
+	vecho ">>> Source prepared."
+	ebuild_phase post_src_prepare
+
+	trap - SIGINT SIGQUIT
+}
+
+dyn_configure() {
+
+	if [[ -e $PORTAGE_BUILDDIR/.configured ]] ; then
+		vecho ">>> It appears that '$PF' is already configured; skipping."
+		vecho ">>> Remove '$PORTAGE_BUILDDIR/.configured' to force configuration."
+		return 0
+	fi
+
+	if [[ -d $S ]] ; then
+		cd "${S}"
+	elif has $EAPI 0 1 2 3 3_pre2 ; then
+		cd "${WORKDIR}"
+	elif [[ -z ${A} ]] && ! has_phase_defined_up_to configure; then
+		cd "${WORKDIR}"
+	else
+		die "The source directory '${S}' doesn't exist"
+	fi
+
+	trap abort_configure SIGINT SIGQUIT
+
+	ebuild_phase pre_src_configure
+
+	vecho ">>> Configuring source in $PWD ..."
+	ebuild_phase src_configure
+	>> "$PORTAGE_BUILDDIR/.configured" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.configured"
+	vecho ">>> Source configured."
+
+	ebuild_phase post_src_configure
+
+	trap - SIGINT SIGQUIT
+}
+
+dyn_compile() {
+
+	if [[ -e $PORTAGE_BUILDDIR/.compiled ]] ; then
+		vecho ">>> It appears that '${PF}' is already compiled; skipping."
+		vecho ">>> Remove '$PORTAGE_BUILDDIR/.compiled' to force compilation."
+		return 0
+	fi
+
+	if [[ -d $S ]] ; then
+		cd "${S}"
+	elif has $EAPI 0 1 2 3 3_pre2 ; then
+		cd "${WORKDIR}"
+	elif [[ -z ${A} ]] && ! has_phase_defined_up_to compile; then
+		cd "${WORKDIR}"
+	else
+		die "The source directory '${S}' doesn't exist"
+	fi
+
+	trap abort_compile SIGINT SIGQUIT
+
+	if has distcc $FEATURES && has distcc-pump $FEATURES ; then
+		if [[ -z $INCLUDE_SERVER_PORT ]] || [[ ! -w $INCLUDE_SERVER_PORT ]] ; then
+			eval $(pump --startup)
+			trap "pump --shutdown" EXIT
+		fi
+	fi
+
+	ebuild_phase pre_src_compile
+
+	vecho ">>> Compiling source in $PWD ..."
+	ebuild_phase src_compile
+	>> "$PORTAGE_BUILDDIR/.compiled" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.compiled"
+	vecho ">>> Source compiled."
+
+	ebuild_phase post_src_compile
+
+	trap - SIGINT SIGQUIT
+}
+
+dyn_test() {
+
+	if [[ -e $PORTAGE_BUILDDIR/.tested ]] ; then
+		vecho ">>> It appears that ${PN} has already been tested; skipping."
+		vecho ">>> Remove '${PORTAGE_BUILDDIR}/.tested' to force test."
+		return
+	fi
+
+	if [ "${EBUILD_FORCE_TEST}" == "1" ] ; then
+		# If USE came from ${T}/environment then it might not have USE=test
+		# like it's supposed to here.
+		! has test ${USE} && export USE="${USE} test"
+	fi
+
+	trap "abort_test" SIGINT SIGQUIT
+	if [ -d "${S}" ]; then
+		cd "${S}"
+	else
+		cd "${WORKDIR}"
+	fi
+
+	if ! has test $FEATURES && [ "${EBUILD_FORCE_TEST}" != "1" ]; then
+		vecho ">>> Test phase [not enabled]: ${CATEGORY}/${PF}"
+	elif has test $RESTRICT; then
+		einfo "Skipping make test/check due to ebuild restriction."
+		vecho ">>> Test phase [explicitly disabled]: ${CATEGORY}/${PF}"
+	else
+		local save_sp=${SANDBOX_PREDICT}
+		addpredict /
+		ebuild_phase pre_src_test
+		ebuild_phase src_test
+		>> "$PORTAGE_BUILDDIR/.tested" || \
+			die "Failed to create $PORTAGE_BUILDDIR/.tested"
+		ebuild_phase post_src_test
+		SANDBOX_PREDICT=${save_sp}
+	fi
+
+	trap - SIGINT SIGQUIT
+}
+
+dyn_install() {
+	[ -z "$PORTAGE_BUILDDIR" ] && die "${FUNCNAME}: PORTAGE_BUILDDIR is unset"
+	if has noauto $FEATURES ; then
+		rm -f "${PORTAGE_BUILDDIR}/.installed"
+	elif [[ -e $PORTAGE_BUILDDIR/.installed ]] ; then
+		vecho ">>> It appears that '${PF}' is already installed; skipping."
+		vecho ">>> Remove '${PORTAGE_BUILDDIR}/.installed' to force install."
+		return 0
+	fi
+	trap "abort_install" SIGINT SIGQUIT
+	ebuild_phase pre_src_install
+	rm -rf "${PORTAGE_BUILDDIR}/image"
+	mkdir "${PORTAGE_BUILDDIR}/image"
+	if [[ -d $S ]] ; then
+		cd "${S}"
+	elif has $EAPI 0 1 2 3 3_pre2 ; then
+		cd "${WORKDIR}"
+	elif [[ -z ${A} ]] && ! has_phase_defined_up_to install; then
+		cd "${WORKDIR}"
+	else
+		die "The source directory '${S}' doesn't exist"
+	fi
+
+	vecho
+	vecho ">>> Install ${PF} into ${D} category ${CATEGORY}"
+	#our custom version of libtool uses $S and $D to fix
+	#invalid paths in .la files
+	export S D
+
+	# Reset exeinto(), docinto(), insinto(), and into() state variables
+	# in case the user is running the install phase multiple times
+	# consecutively via the ebuild command.
+	export DESTTREE=/usr
+	export INSDESTTREE=""
+	export _E_EXEDESTTREE_=""
+	export _E_DOCDESTTREE_=""
+
+	ebuild_phase src_install
+	>> "$PORTAGE_BUILDDIR/.installed" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.installed"
+	vecho ">>> Completed installing ${PF} into ${D}"
+	vecho
+	ebuild_phase post_src_install
+
+	cd "${PORTAGE_BUILDDIR}"/build-info
+	set -f
+	local f x
+	IFS=$' \t\n\r'
+	for f in CATEGORY DEFINED_PHASES FEATURES INHERITED IUSE REQUIRED_USE \
+		PF PKGUSE SLOT KEYWORDS HOMEPAGE DESCRIPTION ; do
+		x=$(echo -n ${!f})
+		[[ -n $x ]] && echo "$x" > $f
+	done
+	if [[ $CATEGORY != virtual ]] ; then
+		for f in ASFLAGS CBUILD CC CFLAGS CHOST CTARGET CXX \
+			CXXFLAGS EXTRA_ECONF EXTRA_EINSTALL EXTRA_MAKE \
+			LDFLAGS LIBCFLAGS LIBCXXFLAGS ; do
+			x=$(echo -n ${!f})
+			[[ -n $x ]] && echo "$x" > $f
+		done
+	fi
+	echo "${USE}"       > USE
+	echo "${EAPI:-0}"   > EAPI
+	set +f
+
+	# local variables can leak into the saved environment.
+	unset f
+
+	save_ebuild_env --exclude-init-phases | filter_readonly_variables \
+		--filter-path --filter-sandbox --allow-extra-vars > environment
+	assert "save_ebuild_env failed"
+
+	${PORTAGE_BZIP2_COMMAND} -f9 environment
+
+	cp "${EBUILD}" "${PF}.ebuild"
+	[ -n "${PORTAGE_REPO_NAME}" ]  && echo "${PORTAGE_REPO_NAME}" > repository
+	if has nostrip ${FEATURES} ${RESTRICT} || has strip ${RESTRICT}
+	then
+		>> DEBUGBUILD
+	fi
+	trap - SIGINT SIGQUIT
+}
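+
+# Note on the build-info loop above: ${!f} is bash indirect expansion, and
+# $(echo -n ${!f}) collapses embedded newlines and runs of whitespace. A
+# minimal sketch with hypothetical values, commented out so nothing executes:
+#
+#   f=SLOT; SLOT="0"
+#   x=$(echo -n ${!f})              # x == "0"
+#   [[ -n $x ]] && echo "$x" > $f   # writes "0" into a file named SLOT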
+
+dyn_preinst() {
+	if [ -z "${D}" ]; then
+		eerror "${FUNCNAME}: D is unset"
+		return 1
+	fi
+	ebuild_phase_with_hooks pkg_preinst
+}
+
+dyn_help() {
+	echo
+	echo "Portage"
+	echo "Copyright 1999-2010 Gentoo Foundation"
+	echo
+	echo "How to use the ebuild command:"
+	echo
+	echo "The first argument to ebuild should be an existing .ebuild file."
+	echo
+	echo "One or more of the following options can then be specified.  If more"
+	echo "than one option is specified, each will be executed in order."
+	echo
+	echo "  help        : show this help screen"
+	echo "  pretend     : execute package specific pretend actions"
+	echo "  setup       : execute package specific setup actions"
+	echo "  fetch       : download source archive(s) and patches"
+	echo "  digest      : create a manifest file for the package"
+	echo "  manifest    : create a manifest file for the package"
+	echo "  unpack      : unpack sources (auto-dependencies if needed)"
+	echo "  prepare     : prepare sources (auto-dependencies if needed)"
+	echo "  configure   : configure sources (auto-fetch/unpack if needed)"
+	echo "  compile     : compile sources (auto-fetch/unpack/configure if needed)"
+	echo "  test        : test package (auto-fetch/unpack/configure/compile if needed)"
+	echo "  preinst     : execute pre-install instructions"
+	echo "  postinst    : execute post-install instructions"
+	echo "  install     : install the package to the temporary install directory"
+	echo "  qmerge      : merge image into live filesystem, recording files in db"
+	echo "  merge       : do fetch, unpack, compile, install and qmerge"
+	echo "  prerm       : execute pre-removal instructions"
+	echo "  postrm      : execute post-removal instructions"
+	echo "  unmerge     : remove package from live filesystem"
+	echo "  config      : execute package specific configuration actions"
+	echo "  package     : create a tarball package in ${PKGDIR}/All"
+	echo "  rpm         : build a RedHat RPM package"
+	echo "  clean       : clean up all source and temporary files"
+	echo
+	echo "The following settings will be used for the ebuild process:"
+	echo
+	echo "  package     : ${PF}"
+	echo "  slot        : ${SLOT}"
+	echo "  category    : ${CATEGORY}"
+	echo "  description : ${DESCRIPTION}"
+	echo "  system      : ${CHOST}"
+	echo "  c flags     : ${CFLAGS}"
+	echo "  c++ flags   : ${CXXFLAGS}"
+	echo "  make flags  : ${MAKEOPTS}"
+	echo -n "  build mode  : "
+	if has nostrip ${FEATURES} ${RESTRICT} || has strip ${RESTRICT} ;
+	then
+		echo "debug (large)"
+	else
+		echo "production (stripped)"
+	fi
+	echo "  merge to    : ${ROOT}"
+	echo
+	if [ -n "$USE" ]; then
+		echo "Additionally, support for the following optional features will be enabled:"
+		echo
+		echo "  ${USE}"
+	fi
+	echo
+}
+
+# debug-print() gets called from many places with verbose status information useful
+# for tracking down problems. The output is in $T/eclass-debug.log.
+# You can set ECLASS_DEBUG_OUTPUT to redirect the output somewhere else as well.
+# The special "on" setting echoes the information, mixing it with the rest of the
+# emerge output.
+# You can override the setting by exporting a new one from the console, or you can
+# set a new default in make.*. Here the default is "" or unset.
+
+# in the future this might use e* from /etc/init.d/functions.sh if I feel like it
+debug-print() {
+	# if $T isn't defined, we're in dep calculation mode and
+	# shouldn't do anything
+	[[ $EBUILD_PHASE = depend || ! -d ${T} || ${#} -eq 0 ]] && return 0
+
+	if [[ ${ECLASS_DEBUG_OUTPUT} == on ]]; then
+		printf 'debug: %s\n' "${@}" >&2
+	elif [[ -n ${ECLASS_DEBUG_OUTPUT} ]]; then
+		printf 'debug: %s\n' "${@}" >> "${ECLASS_DEBUG_OUTPUT}"
+	fi
+
+	if [[ -w $T ]] ; then
+		# default target
+		printf '%s\n' "${@}" >> "${T}/eclass-debug.log"
+		# let the portage user own/write to this file
+		chgrp portage "${T}/eclass-debug.log" &>/dev/null
+		chmod g+w "${T}/eclass-debug.log" &>/dev/null
+	fi
+}
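+
+# A minimal usage sketch (hypothetical invocation, kept in comments so that
+# sourcing this file is unaffected):
+#
+#   ECLASS_DEBUG_OUTPUT=on emerge app-misc/foo   # echo debug lines to stderr
+#   debug-print "unpacking ${A} into ${WORKDIR}" # from eclass code
+#
+# With ECLASS_DEBUG_OUTPUT unset, the message only lands in $T/eclass-debug.log.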
+
+# The following 2 functions are debug-print() wrappers
+
+debug-print-function() {
+	debug-print "${1}: entering function, parameters: ${*:2}"
+}
+
+debug-print-section() {
+	debug-print "now in section ${*}"
+}
+
+# Sources all eclasses given as parameters
+declare -ix ECLASS_DEPTH=0
+inherit() {
+	ECLASS_DEPTH=$(($ECLASS_DEPTH + 1))
+	if [[ ${ECLASS_DEPTH} -gt 1 ]]; then
+		debug-print "*** Multiple Inheritance (Level: ${ECLASS_DEPTH})"
+	fi
+
+	if [[ -n $ECLASS && -n ${!__export_funcs_var} ]] ; then
+		echo "QA Notice: EXPORT_FUNCTIONS is called before inherit in" \
+			"$ECLASS.eclass. For compatibility with <=portage-2.1.6.7," \
+			"only call EXPORT_FUNCTIONS after inherit(s)." \
+			| fmt -w 75 | while read -r ; do eqawarn "$REPLY" ; done
+	fi
+
+	local location
+	local olocation
+	local x
+
+	# These variables must be restored before returning.
+	local PECLASS=$ECLASS
+	local prev_export_funcs_var=$__export_funcs_var
+
+	local B_IUSE
+	local B_REQUIRED_USE
+	local B_DEPEND
+	local B_RDEPEND
+	local B_PDEPEND
+	while [ "$1" ]; do
+		location="${ECLASSDIR}/${1}.eclass"
+		olocation=""
+
+		export ECLASS="$1"
+		__export_funcs_var=__export_functions_$ECLASS_DEPTH
+		unset $__export_funcs_var
+
+		if [ "${EBUILD_PHASE}" != "depend" ] && \
+			[[ ${EBUILD_PHASE} != *rm ]] && \
+			[[ ${EMERGE_FROM} != "binary" ]] ; then
+			# This is disabled in the *rm phases because they frequently give
+			# false alarms due to INHERITED in /var/db/pkg being outdated
+			# in comparison to the eclasses from the portage tree.
+			if ! has $ECLASS $INHERITED $__INHERITED_QA_CACHE ; then
+				eqawarn "QA Notice: ECLASS '$ECLASS' inherited illegally in $CATEGORY/$PF $EBUILD_PHASE"
+			fi
+		fi
+
+		# any future resolution code goes here
+		if [ -n "$PORTDIR_OVERLAY" ]; then
+			local overlay
+			for overlay in ${PORTDIR_OVERLAY}; do
+				olocation="${overlay}/eclass/${1}.eclass"
+				if [ -e "$olocation" ]; then
+					location="${olocation}"
+					debug-print "  eclass exists: ${location}"
+				fi
+			done
+		fi
+		debug-print "inherit: $1 -> $location"
+		[ ! -e "$location" ] && die "${1}.eclass could not be found by inherit()"
+
+		if [ "${location}" == "${olocation}" ] && \
+			! has "${location}" ${EBUILD_OVERLAY_ECLASSES} ; then
+				EBUILD_OVERLAY_ECLASSES="${EBUILD_OVERLAY_ECLASSES} ${location}"
+		fi
+
+		#We need to back up the values of the *DEPEND and IUSE variables to the
+		#corresponding B_* variables (if set), and then restore them after the inherit call.
+
+		#turn off glob expansion
+		set -f
+
+		# Retain the old data and restore it later.
+		unset B_IUSE B_REQUIRED_USE B_DEPEND B_RDEPEND B_PDEPEND
+		[ "${IUSE+set}"       = set ] && B_IUSE="${IUSE}"
+		[ "${REQUIRED_USE+set}" = set ] && B_REQUIRED_USE="${REQUIRED_USE}"
+		[ "${DEPEND+set}"     = set ] && B_DEPEND="${DEPEND}"
+		[ "${RDEPEND+set}"    = set ] && B_RDEPEND="${RDEPEND}"
+		[ "${PDEPEND+set}"    = set ] && B_PDEPEND="${PDEPEND}"
+		unset IUSE REQUIRED_USE DEPEND RDEPEND PDEPEND
+		#turn on glob expansion
+		set +f
+
+		qa_source "$location" || die "died sourcing $location in inherit()"
+		
+		#turn off glob expansion
+		set -f
+
+		# If each var has a value, append it to the global variable E_* to
+		# be applied after everything is finished. New incremental behavior.
+		[ "${IUSE+set}"       = set ] && export E_IUSE="${E_IUSE} ${IUSE}"
+		[ "${REQUIRED_USE+set}"       = set ] && export E_REQUIRED_USE="${E_REQUIRED_USE} ${REQUIRED_USE}"
+		[ "${DEPEND+set}"     = set ] && export E_DEPEND="${E_DEPEND} ${DEPEND}"
+		[ "${RDEPEND+set}"    = set ] && export E_RDEPEND="${E_RDEPEND} ${RDEPEND}"
+		[ "${PDEPEND+set}"    = set ] && export E_PDEPEND="${E_PDEPEND} ${PDEPEND}"
+
+		[ "${B_IUSE+set}"     = set ] && IUSE="${B_IUSE}"
+		[ "${B_IUSE+set}"     = set ] || unset IUSE
+		
+		[ "${B_REQUIRED_USE+set}"     = set ] && REQUIRED_USE="${B_REQUIRED_USE}"
+		[ "${B_REQUIRED_USE+set}"     = set ] || unset REQUIRED_USE
+
+		[ "${B_DEPEND+set}"   = set ] && DEPEND="${B_DEPEND}"
+		[ "${B_DEPEND+set}"   = set ] || unset DEPEND
+
+		[ "${B_RDEPEND+set}"  = set ] && RDEPEND="${B_RDEPEND}"
+		[ "${B_RDEPEND+set}"  = set ] || unset RDEPEND
+
+		[ "${B_PDEPEND+set}"  = set ] && PDEPEND="${B_PDEPEND}"
+		[ "${B_PDEPEND+set}"  = set ] || unset PDEPEND
+
+		#turn on glob expansion
+		set +f
+
+		if [[ -n ${!__export_funcs_var} ]] ; then
+			for x in ${!__export_funcs_var} ; do
+				debug-print "EXPORT_FUNCTIONS: $x -> ${ECLASS}_$x"
+				declare -F "${ECLASS}_$x" >/dev/null || \
+					die "EXPORT_FUNCTIONS: ${ECLASS}_$x is not defined"
+				eval "$x() { ${ECLASS}_$x \"\$@\" ; }" > /dev/null
+			done
+		fi
+		unset $__export_funcs_var
+
+		has $1 $INHERITED || export INHERITED="$INHERITED $1"
+
+		shift
+	done
+	((--ECLASS_DEPTH)) # Returns 1 when ECLASS_DEPTH reaches 0.
+	if (( ECLASS_DEPTH > 0 )) ; then
+		export ECLASS=$PECLASS
+		__export_funcs_var=$prev_export_funcs_var
+	else
+		unset ECLASS __export_funcs_var
+	fi
+	return 0
+}
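+
+# Sketch of the incremental *DEPEND behavior implemented above (hypothetical
+# eclass and ebuild, as comments):
+#
+#   foo.eclass:  DEPEND="dev-libs/libfoo"
+#   bar.ebuild:  inherit foo
+#                DEPEND="dev-libs/libbar"
+#
+# After sourcing, the eclass value is accumulated in E_DEPEND and merged back
+# into DEPEND by the main part of this script, yielding both atoms.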
+
+# Exports stub functions that call the eclass's functions, thereby making them default.
+# For example, if ECLASS="base" and you call "EXPORT_FUNCTIONS src_unpack", the following
+# code will be eval'd:
+# src_unpack() { base_src_unpack; }
+EXPORT_FUNCTIONS() {
+	if [ -z "$ECLASS" ]; then
+		die "EXPORT_FUNCTIONS without a defined ECLASS"
+	fi
+	eval $__export_funcs_var+=\" $*\"
+}
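+
+# Example of the stub generation (hypothetical eclass, as comments):
+#
+#   base.eclass:
+#     base_src_unpack() { unpack ${A}; }
+#     EXPORT_FUNCTIONS src_unpack
+#
+# inherit() then eval's:  src_unpack() { base_src_unpack "$@" ; }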
+
+# Removes from PATH any directory entry that matches one of the given patterns.
+remove_path_entry() {
+	save_IFS
+	IFS=":"
+	stripped_path="${PATH}"
+	while [ -n "$1" ]; do
+		cur_path=""
+		for p in ${stripped_path}; do
+			if [ "${p/${1}}" == "${p}" ]; then
+				cur_path="${cur_path}:${p}"
+			fi
+		done
+		stripped_path="${cur_path#:*}"
+		shift
+	done
+	restore_IFS
+	PATH="${stripped_path}"
+}
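+
+# Usage sketch (hypothetical PATH value, as comments):
+#
+#   PATH=/usr/lib/ccache/bin:/usr/bin:/bin
+#   remove_path_entry ccache
+#   # PATH is now /usr/bin:/bin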
+
+# @FUNCTION: _ebuild_arg_to_phase
+# @DESCRIPTION:
+# Translate a known ebuild(1) argument into the precise
+# name of its corresponding ebuild phase.
+_ebuild_arg_to_phase() {
+	[ $# -ne 2 ] && die "expected exactly 2 args, got $#: $*"
+	local eapi=$1
+	local arg=$2
+	local phase_func=""
+
+	case "$arg" in
+		pretend)
+			! has $eapi 0 1 2 3 3_pre2 && \
+				phase_func=pkg_pretend
+			;;
+		setup)
+			phase_func=pkg_setup
+			;;
+		nofetch)
+			phase_func=pkg_nofetch
+			;;
+		unpack)
+			phase_func=src_unpack
+			;;
+		prepare)
+			! has $eapi 0 1 && \
+				phase_func=src_prepare
+			;;
+		configure)
+			! has $eapi 0 1 && \
+				phase_func=src_configure
+			;;
+		compile)
+			phase_func=src_compile
+			;;
+		test)
+			phase_func=src_test
+			;;
+		install)
+			phase_func=src_install
+			;;
+		preinst)
+			phase_func=pkg_preinst
+			;;
+		postinst)
+			phase_func=pkg_postinst
+			;;
+		prerm)
+			phase_func=pkg_prerm
+			;;
+		postrm)
+			phase_func=pkg_postrm
+			;;
+	esac
+
+	[[ -z $phase_func ]] && return 1
+	echo "$phase_func"
+	return 0
+}
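+
+# Usage sketch: map an ebuild(1) argument to its phase function, failing for
+# phases the EAPI does not define (as comments):
+#
+#   phase_func=$(_ebuild_arg_to_phase "$EAPI" prepare) || die "unknown phase"
+#   # EAPI=0 -> returns 1 (no src_prepare); EAPI=2 -> prints "src_prepare"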
+
+_ebuild_phase_funcs() {
+	[ $# -ne 2 ] && die "expected exactly 2 args, got $#: $*"
+	local eapi=$1
+	local phase_func=$2
+	local default_phases="pkg_nofetch src_unpack src_prepare src_configure
+		src_compile src_install src_test"
+	local x y
+
+	for x in pkg_nofetch src_unpack src_test ; do
+		declare -F $x >/dev/null || \
+			eval "$x() { _eapi0_$x \"\$@\" ; }"
+	done
+
+	case $eapi in
+
+		0|1)
+
+			if ! declare -F src_compile >/dev/null ; then
+				case $eapi in
+					0)
+						src_compile() { _eapi0_src_compile "$@" ; }
+						;;
+					*)
+						src_compile() { _eapi1_src_compile "$@" ; }
+						;;
+				esac
+			fi
+
+			for x in $default_phases ; do
+				eval "default_$x() {
+					die \"default_$x() is not supported with EAPI='$eapi' during phase $phase_func\"
+				}"
+			done
+
+			eval "default() {
+				die \"default() is not supported with EAPI='$eapi' during phase $phase_func\"
+			}"
+
+			;;
+
+		*)
+
+			declare -F src_configure >/dev/null || \
+				src_configure() { _eapi2_src_configure "$@" ; }
+
+			declare -F src_compile >/dev/null || \
+				src_compile() { _eapi2_src_compile "$@" ; }
+
+			has $eapi 2 3 3_pre2 || declare -F src_install >/dev/null || \
+				src_install() { _eapi4_src_install "$@" ; }
+
+			if has $phase_func $default_phases ; then
+
+				_eapi2_pkg_nofetch   () { _eapi0_pkg_nofetch          "$@" ; }
+				_eapi2_src_unpack    () { _eapi0_src_unpack           "$@" ; }
+				_eapi2_src_prepare   () { true                             ; }
+				_eapi2_src_test      () { _eapi0_src_test             "$@" ; }
+				_eapi2_src_install   () { die "$FUNCNAME is not supported" ; }
+
+				for x in $default_phases ; do
+					eval "default_$x() { _eapi2_$x \"\$@\" ; }"
+				done
+
+				eval "default() { _eapi2_$phase_func \"\$@\" ; }"
+
+				case $eapi in
+					2|3)
+						;;
+					*)
+						eval "default_src_install() { _eapi4_src_install \"\$@\" ; }"
+						[[ $phase_func = src_install ]] && \
+							eval "default() { _eapi4_$phase_func \"\$@\" ; }"
+						;;
+				esac
+
+			else
+
+				for x in $default_phases ; do
+					eval "default_$x() {
+						die \"default_$x() is not supported in phase $default_func\"
+					}"
+				done
+
+				eval "default() {
+					die \"default() is not supported with EAPI='$eapi' during phase $phase_func\"
+				}"
+
+			fi
+
+			;;
+	esac
+}
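+
+# Resolution sketch for the dispatcher above (hypothetical EAPI=2 compile
+# phase, as comments):
+#
+#   _ebuild_phase_funcs 2 src_compile
+#   # default()           now calls _eapi2_src_compile
+#   # default_src_prepare now calls _eapi2_src_prepare (a no-op)
+#
+# With EAPI 0/1, default() and the default_* helpers simply die, as defined above.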
+
+# Set the given variables unless they have already been set (e.g. during the
+# emerge invocation) to values different from those set in make.conf.
+set_unless_changed() {
+	if [[ $# -lt 1 ]]; then
+		die "${FUNCNAME}() requires at least 1 argument: VARIABLE=VALUE"
+	fi
+
+	local argument value variable
+	for argument in "$@"; do
+		if [[ ${argument} != *=* ]]; then
+			die "${FUNCNAME}(): Argument '${argument}' has incorrect syntax"
+		fi
+		variable="${argument%%=*}"
+		value="${argument#*=}"
+		if eval "[[ \${${variable}} == \$(env -u ${variable} portageq envvar ${variable}) ]]"; then
+			eval "${variable}=\"\${value}\""
+		fi
+	done
+}
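+
+# Usage sketch (hypothetical value): apply a default while keeping any
+# emerge-time override the user made:
+#
+#   set_unless_changed "CFLAGS=-O2 -pipe"
+#   # CFLAGS is reassigned only if it still matches the make.conf value
+#   # reported by 'portageq envvar CFLAGS'.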
+
+# Unset the given variables unless they have been set (e.g. during the emerge
+# invocation) to values different from those set in make.conf.
+unset_unless_changed() {
+	if [[ $# -lt 1 ]]; then
+		die "${FUNCNAME}() requires at least 1 argument: VARIABLE"
+	fi
+
+	local variable
+	for variable in "$@"; do
+		if eval "[[ \${${variable}} == \$(env -u ${variable} portageq envvar ${variable}) ]]"; then
+			unset ${variable}
+		fi
+	done
+}
+
+PORTAGE_BASHRCS_SOURCED=0
+
+# @FUNCTION: source_all_bashrcs
+# @DESCRIPTION:
+# Source all relevant bashrc files and perform other miscellaneous
+# environment initialization when appropriate.
+#
+# If EAPI is set then define functions provided by the current EAPI:
+#
+#  * default_* aliases for the current EAPI phase functions
+#  * A "default" function which is an alias for the default phase
+#    function for the current phase.
+#
+source_all_bashrcs() {
+	[[ $PORTAGE_BASHRCS_SOURCED = 1 ]] && return 0
+	PORTAGE_BASHRCS_SOURCED=1
+	local x
+
+	local OCC="${CC}" OCXX="${CXX}"
+
+	if [[ $EBUILD_PHASE != depend ]] ; then
+		# source the existing profile.bashrcs.
+		save_IFS
+		IFS=$'\n'
+		local path_array=($PROFILE_PATHS)
+		restore_IFS
+		for x in "${path_array[@]}" ; do
+			[ -f "$x/profile.bashrc" ] && qa_source "$x/profile.bashrc"
+		done
+	fi
+
+	# We assume if people are changing shopts in their bashrc they do so at their
+	# own peril.  This is the ONLY non-portage bit of code that can change shopts
+	# without a QA violation.
+	for x in "${PORTAGE_BASHRC}" "${PM_EBUILD_HOOK_DIR}"/${CATEGORY}/{${PN},${PN}:${SLOT},${P},${PF}}; do
+		if [ -r "${x}" ]; then
+			# If $- contains x, then tracing has already been enabled elsewhere
+			# for some reason.  We preserve its state so as not to interfere.
+			if [ "$PORTAGE_DEBUG" != "1" ] || [ "${-/x/}" != "$-" ]; then
+				source "${x}"
+			else
+				set -x
+				source "${x}"
+				set +x
+			fi
+		fi
+	done
+
+	[ ! -z "${OCC}" ] && export CC="${OCC}"
+	[ ! -z "${OCXX}" ] && export CXX="${OCXX}"
+}
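+
+# Lookup sketch for the per-package bashrcs above (hypothetical package): with
+# CATEGORY=app-misc, PN=foo, SLOT=0, P=foo-1.0 and PF=foo-1.0-r1, the hook
+# directory is probed for:
+#
+#   ${PM_EBUILD_HOOK_DIR}/app-misc/{foo,foo:0,foo-1.0,foo-1.0-r1}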
+
+# Hardcoded bash lists are needed for backward compatibility with
+# <portage-2.1.4 since they assume that a newly installed version
+# of ebuild.sh will work for pkg_postinst, pkg_prerm, and pkg_postrm
+# when portage is upgrading itself.
+
+PORTAGE_READONLY_METADATA="DEFINED_PHASES DEPEND DESCRIPTION
+	EAPI HOMEPAGE INHERITED IUSE REQUIRED_USE KEYWORDS LICENSE
+	PDEPEND PROVIDE RDEPEND RESTRICT SLOT SRC_URI"
+
+PORTAGE_READONLY_VARS="D EBUILD EBUILD_PHASE \
+	EBUILD_SH_ARGS ECLASSDIR EMERGE_FROM FILESDIR MERGE_TYPE \
+	PM_EBUILD_HOOK_DIR \
+	PORTAGE_ACTUAL_DISTDIR PORTAGE_ARCHLIST PORTAGE_BASHRC  \
+	PORTAGE_BINPKG_FILE PORTAGE_BINPKG_TAR_OPTS PORTAGE_BINPKG_TMPFILE \
+	PORTAGE_BIN_PATH PORTAGE_BUILDDIR PORTAGE_BUNZIP2_COMMAND \
+	PORTAGE_BZIP2_COMMAND PORTAGE_COLORMAP PORTAGE_CONFIGROOT \
+	PORTAGE_DEBUG PORTAGE_DEPCACHEDIR PORTAGE_EBUILD_EXIT_FILE \
+	PORTAGE_GID PORTAGE_GRPNAME PORTAGE_INST_GID PORTAGE_INST_UID \
+	PORTAGE_IPC_DAEMON PORTAGE_IUSE PORTAGE_LOG_FILE \
+	PORTAGE_MUTABLE_FILTERED_VARS PORTAGE_PYM_PATH PORTAGE_PYTHON \
+	PORTAGE_READONLY_METADATA PORTAGE_READONLY_VARS \
+	PORTAGE_REPO_NAME PORTAGE_RESTRICT PORTAGE_SANDBOX_COMPAT_LEVEL \
+	PORTAGE_SAVED_READONLY_VARS PORTAGE_SIGPIPE_STATUS \
+	PORTAGE_TMPDIR PORTAGE_UPDATE_ENV PORTAGE_USERNAME \
+	PORTAGE_VERBOSE PORTAGE_WORKDIR_MODE PORTDIR PORTDIR_OVERLAY \
+	PROFILE_PATHS REPLACING_VERSIONS REPLACED_BY_VERSION T WORKDIR"
+
+PORTAGE_SAVED_READONLY_VARS="A CATEGORY P PF PN PR PV PVR"
+
+# Variables that portage sets but doesn't mark readonly.
+# In order to prevent changed values from causing unexpected
+# interference, they are filtered out of the environment when
+# it is saved or loaded (any mutations do not persist).
+PORTAGE_MUTABLE_FILTERED_VARS="AA HOSTNAME"
+
+# @FUNCTION: filter_readonly_variables
+# @USAGE: [--filter-sandbox] [--allow-extra-vars]
+# @DESCRIPTION:
+# Read an environment from stdin and echo to stdout while filtering variables
+# with names that are known to cause interference:
+#
+#   * some specific variables for which bash does not allow assignment
+#   * some specific variables that affect portage or sandbox behavior
+#   * variable names that begin with a digit or that contain any
+#     non-alphanumeric characters that are not supported by bash
+#
+# --filter-sandbox causes all SANDBOX_* variables to be filtered, which
+# is only desired in certain cases, such as during preprocessing or when
+# saving environment.bz2 for a binary or installed package.
+#
+# --filter-features causes the special FEATURES variable to be filtered.
+# Generally, we want it to persist between phases since the user might
+# want to modify it via bashrc to enable things like splitdebug and
+# installsources for specific packages. They should be able to modify it
+# in pre_pkg_setup() and have it persist all the way through the install
+# phase. However, if FEATURES exists inside environment.bz2 then it
+# should be overridden by current settings.
+#
+# --filter-locale causes locale related variables such as LANG and LC_*
+# variables to be filtered. These variables should persist between phases,
+# in case they are modified by the ebuild. However, the current user
+# settings should be used when loading the environment from a binary or
+# installed package.
+#
+# --filter-path causes the PATH variable to be filtered. This variable
+# should persist between phases, in case it is modified by the ebuild.
+# However, old settings should be overridden when loading the
+# environment from a binary or installed package.
+#
+# --allow-extra-vars causes some extra vars to be allowed through, such
+# as ${PORTAGE_SAVED_READONLY_VARS} and ${PORTAGE_MUTABLE_FILTERED_VARS}.
+#
+# In bash-3.2_p20+ an attempt to assign BASH_*, FUNCNAME, GROUPS or any
+# readonly variable causes the shell to exit while executing the "source"
+# builtin command. To avoid this problem, this function filters those
+# variables out and discards them. See bug #190128.
+filter_readonly_variables() {
+	local x filtered_vars
+	local readonly_bash_vars="BASHOPTS BASHPID DIRSTACK EUID
+		FUNCNAME GROUPS PIPESTATUS PPID SHELLOPTS UID"
+	local bash_misc_vars="BASH BASH_.* COMP_WORDBREAKS HISTCMD
+		HISTFILE HOSTNAME HOSTTYPE IFS LINENO MACHTYPE OLDPWD
+		OPTERR OPTIND OSTYPE POSIXLY_CORRECT PS4 PWD RANDOM
+		SECONDS SHELL SHLVL"
+	local filtered_sandbox_vars="SANDBOX_ACTIVE SANDBOX_BASHRC
+		SANDBOX_DEBUG_LOG SANDBOX_DISABLED SANDBOX_LIB
+		SANDBOX_LOG SANDBOX_ON"
+	local misc_garbage_vars="_portage_filter_opts"
+	filtered_vars="$readonly_bash_vars $bash_misc_vars
+		$PORTAGE_READONLY_VARS $misc_garbage_vars"
+
+	# Don't filter/interfere with prefix variables unless they are
+	# supported by the current EAPI.
+	case "${EAPI:-0}" in
+		0|1|2)
+			;;
+		*)
+			filtered_vars+=" ED EPREFIX EROOT"
+			;;
+	esac
+
+	if has --filter-sandbox $* ; then
+		filtered_vars="${filtered_vars} SANDBOX_.*"
+	else
+		filtered_vars="${filtered_vars} ${filtered_sandbox_vars}"
+	fi
+	if has --filter-features $* ; then
+		filtered_vars="${filtered_vars} FEATURES PORTAGE_FEATURES"
+	fi
+	if has --filter-path $* ; then
+		filtered_vars+=" PATH"
+	fi
+	if has --filter-locale $* ; then
+		filtered_vars+=" LANG LC_ALL LC_COLLATE
+			LC_CTYPE LC_MESSAGES LC_MONETARY
+			LC_NUMERIC LC_PAPER LC_TIME"
+	fi
+	if ! has --allow-extra-vars $* ; then
+		filtered_vars="
+			${filtered_vars}
+			${PORTAGE_SAVED_READONLY_VARS}
+			${PORTAGE_MUTABLE_FILTERED_VARS}
+		"
+	fi
+
+	"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}"/filter-bash-environment.py "${filtered_vars}" || die "filter-bash-environment.py failed"
+}
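+
+# Usage sketch, mirroring the call made near the end of this file:
+#
+#   save_ebuild_env | filter_readonly_variables \
+#       --filter-features > "$T/environment"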
+
+# @FUNCTION: preprocess_ebuild_env
+# @DESCRIPTION:
+# Filter any readonly variables from ${T}/environment, source it, and then
+# save it via save_ebuild_env(). This process should be sufficient to prevent
+# any stale variables or functions from an arbitrary environment from
+# interfering with the current environment. This is useful when an existing
+# environment needs to be loaded from a binary or installed package.
+preprocess_ebuild_env() {
+	local _portage_filter_opts="--filter-features --filter-locale --filter-path --filter-sandbox"
+
+	# If environment.raw is present, this is a signal from the python side,
+	# indicating that the environment may contain stale FEATURES and
+	# SANDBOX_{DENY,PREDICT,READ,WRITE} variables that should be filtered out.
+	# Otherwise, we don't need to filter the environment.
+	[ -f "${T}/environment.raw" ] || return 0
+
+	filter_readonly_variables $_portage_filter_opts < "${T}"/environment \
+		>> "$T/environment.filtered" || return $?
+	unset _portage_filter_opts
+	mv "${T}"/environment.filtered "${T}"/environment || return $?
+	rm -f "${T}/environment.success" || return $?
+	# WARNING: Code inside this subshell should avoid making assumptions
+	# about variables or functions after source "${T}"/environment has been
+	# called. Any variables that need to be relied upon should already be
+	# filtered out above.
+	(
+		export SANDBOX_ON=1
+		source "${T}/environment" || exit $?
+		# We have to temporarily disable sandbox since the
+		# SANDBOX_{DENY,READ,PREDICT,WRITE} values we've just loaded
+		# may be unusable (triggering spurious sandbox violations)
+		# until we've merged them with our current values.
+		export SANDBOX_ON=0
+
+		# It's remotely possible that save_ebuild_env() has been overridden
+		# by the above source command. To protect ourselves, we override it
+		# here with our own version. ${PORTAGE_BIN_PATH} is safe to use here
+		# because it's already filtered above.
+		source "${PORTAGE_BIN_PATH}/isolated-functions.sh" || exit $?
+
+		# Rely on save_ebuild_env() to filter out any remaining variables
+		# and functions that could interfere with the current environment.
+		save_ebuild_env || exit $?
+		>> "$T/environment.success" || exit $?
+	) > "${T}/environment.filtered"
+	local retval
+	if [ -e "${T}/environment.success" ] ; then
+		filter_readonly_variables --filter-features < \
+			"${T}/environment.filtered" > "${T}/environment"
+		retval=$?
+	else
+		retval=1
+	fi
+	rm -f "${T}"/environment.{filtered,raw,success}
+	return ${retval}
+}
+
+# === === === === === === === === === === === === === === === === === ===
+# === === === === === functions end, main part begins === === === === ===
+# === === === === === functions end, main part begins === === === === ===
+# === === === === === functions end, main part begins === === === === ===
+# === === === === === === === === === === === === === === === === === ===
+
+export SANDBOX_ON="1"
+export S=${WORKDIR}/${P}
+
+unset E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND
+
+# Turn off extended glob matching so that g++ doesn't get incorrectly matched.
+shopt -u extglob
+
+if [[ ${EBUILD_PHASE} == depend ]] ; then
+	QA_INTERCEPTORS="awk bash cc egrep equery fgrep g++
+		gawk gcc grep javac java-config nawk perl
+		pkg-config python python-config sed"
+elif [[ ${EBUILD_PHASE} == clean* ]] ; then
+	unset QA_INTERCEPTORS
+else
+	QA_INTERCEPTORS="autoconf automake aclocal libtoolize"
+fi
+# Install the QA interceptors; the warning they emit depends on the phase.
+if [[ -n ${QA_INTERCEPTORS} ]] ; then
+	for BIN in ${QA_INTERCEPTORS}; do
+		BIN_PATH=$(type -Pf ${BIN})
+		if [ "$?" != "0" ]; then
+			BODY="echo \"*** missing command: ${BIN}\" >&2; return 127"
+		else
+			BODY="${BIN_PATH} \"\$@\"; return \$?"
+		fi
+		if [[ ${EBUILD_PHASE} == depend ]] ; then
+			FUNC_SRC="${BIN}() {
+				if [ \$ECLASS_DEPTH -gt 0 ]; then
+					eqawarn \"QA Notice: '${BIN}' called in global scope: eclass \${ECLASS}\"
+				else
+					eqawarn \"QA Notice: '${BIN}' called in global scope: \${CATEGORY}/\${PF}\"
+				fi
+			${BODY}
+			}"
+		elif has ${BIN} autoconf automake aclocal libtoolize ; then
+			FUNC_SRC="${BIN}() {
+				if ! has \${FUNCNAME[1]} eautoreconf eaclocal _elibtoolize \\
+					eautoheader eautoconf eautomake autotools_run_tool \\
+					autotools_check_macro autotools_get_subdirs \\
+					autotools_get_auxdir ; then
+					eqawarn \"QA Notice: '${BIN}' called by \${FUNCNAME[1]}: \${CATEGORY}/\${PF}\"
+					eqawarn \"Use autotools.eclass instead of calling '${BIN}' directly.\"
+				fi
+			${BODY}
+			}"
+		else
+			FUNC_SRC="${BIN}() {
+				eqawarn \"QA Notice: '${BIN}' called by \${FUNCNAME[1]}: \${CATEGORY}/\${PF}\"
+			${BODY}
+			}"
+		fi
+		eval "$FUNC_SRC" || echo "error creating QA interceptor ${BIN}" >&2
+	done
+	unset BIN_PATH BIN BODY FUNC_SRC
+fi
+
+# Subshell/helper die support (must export for the die helper).
+export EBUILD_MASTER_PID=$BASHPID
+trap 'exit 1' SIGTERM
+
+if ! has "$EBUILD_PHASE" clean cleanrm depend && \
+	[ -f "${T}"/environment ] ; then
+	# The environment may have been extracted from environment.bz2 or
+	# may have come from another version of ebuild.sh or something.
+	# In any case, preprocess it to prevent any potential interference.
+	preprocess_ebuild_env || \
+		die "error processing environment"
+	# Colon separated SANDBOX_* variables need to be cumulative.
+	for x in SANDBOX_DENY SANDBOX_READ SANDBOX_PREDICT SANDBOX_WRITE ; do
+		export PORTAGE_${x}=${!x}
+	done
+	PORTAGE_SANDBOX_ON=${SANDBOX_ON}
+	export SANDBOX_ON=1
+	source "${T}"/environment || \
+		die "error sourcing environment"
+	# We have to temporarily disable sandbox since the
+	# SANDBOX_{DENY,READ,PREDICT,WRITE} values we've just loaded
+	# may be unusable (triggering spurious sandbox violations)
+	# until we've merged them with our current values.
+	export SANDBOX_ON=0
+	for x in SANDBOX_DENY SANDBOX_PREDICT SANDBOX_READ SANDBOX_WRITE ; do
+		y="PORTAGE_${x}"
+		if [ -z "${!x}" ] ; then
+			export ${x}=${!y}
+		elif [ -n "${!y}" ] && [ "${!y}" != "${!x}" ] ; then
+			# filter out dupes
+			export ${x}=$(printf "${!y}:${!x}" | tr ":" "\0" | \
+				sort -z -u | tr "\0" ":")
+		fi
+		export ${x}=${!x%:}
+		unset PORTAGE_${x}
+	done
+	unset x y
+	export SANDBOX_ON=${PORTAGE_SANDBOX_ON}
+	unset PORTAGE_SANDBOX_ON
+	[[ -n $EAPI ]] || EAPI=0
+fi
+
+if ! has "$EBUILD_PHASE" clean cleanrm ; then
+	if [[ $EBUILD_PHASE = depend || ! -f $T/environment || \
+		-f $PORTAGE_BUILDDIR/.ebuild_changed ]] || \
+		has noauto $FEATURES ; then
+		# The bashrcs get an opportunity here to set aliases that will be expanded
+		# during sourcing of ebuilds and eclasses.
+		source_all_bashrcs
+
+		# When EBUILD_PHASE != depend, INHERITED comes pre-initialized
+		# from cache. In order to make INHERITED content independent of
+		# EBUILD_PHASE during inherit() calls, we unset INHERITED after
+		# we make a backup copy for QA checks.
+		__INHERITED_QA_CACHE=$INHERITED
+
+		# *DEPEND and IUSE will be set during the sourcing of the ebuild.
+		# In order to ensure correct interaction between ebuilds and
+		# eclasses, they need to be unset before this process of
+		# interaction begins.
+		unset DEPEND RDEPEND PDEPEND INHERITED IUSE REQUIRED_USE \
+			ECLASS E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND
+
+		if [[ $PORTAGE_DEBUG != 1 || ${-/x/} != $- ]] ; then
+			source "$EBUILD" || die "error sourcing ebuild"
+		else
+			set -x
+			source "$EBUILD" || die "error sourcing ebuild"
+			set +x
+		fi
+
+		if [[ "${EBUILD_PHASE}" != "depend" ]] ; then
+			RESTRICT=${PORTAGE_RESTRICT}
+			[[ -e $PORTAGE_BUILDDIR/.ebuild_changed ]] && \
+			rm "$PORTAGE_BUILDDIR/.ebuild_changed"
+		fi
+
+		[[ -n $EAPI ]] || EAPI=0
+
+		if has "$EAPI" 0 1 2 3 3_pre2 ; then
+			export RDEPEND=${RDEPEND-${DEPEND}}
+			debug-print "RDEPEND: not set... Setting to: ${DEPEND}"
+		fi
+
+		# add in dependency info from eclasses
+		IUSE="${IUSE} ${E_IUSE}"
+		DEPEND="${DEPEND} ${E_DEPEND}"
+		RDEPEND="${RDEPEND} ${E_RDEPEND}"
+		PDEPEND="${PDEPEND} ${E_PDEPEND}"
+		REQUIRED_USE="${REQUIRED_USE} ${E_REQUIRED_USE}"
+		
+		unset ECLASS E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND \
+			__INHERITED_QA_CACHE
+
+		# alphabetically ordered by $EBUILD_PHASE value
+		case "$EAPI" in
+			0|1)
+				_valid_phases="src_compile pkg_config pkg_info src_install
+					pkg_nofetch pkg_postinst pkg_postrm pkg_preinst pkg_prerm
+					pkg_setup src_test src_unpack"
+				;;
+			2|3|3_pre2)
+				_valid_phases="src_compile pkg_config src_configure pkg_info
+					src_install pkg_nofetch pkg_postinst pkg_postrm pkg_preinst
+					src_prepare pkg_prerm pkg_setup src_test src_unpack"
+				;;
+			*)
+				_valid_phases="src_compile pkg_config src_configure pkg_info
+					src_install pkg_nofetch pkg_postinst pkg_postrm pkg_preinst
+					src_prepare pkg_prerm pkg_pretend pkg_setup src_test src_unpack"
+				;;
+		esac
+
+		DEFINED_PHASES=
+		for _f in $_valid_phases ; do
+			if declare -F $_f >/dev/null ; then
+				_f=${_f#pkg_}
+				DEFINED_PHASES+=" ${_f#src_}"
+			fi
+		done
+		[[ -n $DEFINED_PHASES ]] || DEFINED_PHASES=-
+
+		unset _f _valid_phases
+
+		if [[ $EBUILD_PHASE != depend ]] ; then
+
+			case "$EAPI" in
+				0|1|2|3)
+					_ebuild_helpers_path="$PORTAGE_BIN_PATH/ebuild-helpers"
+					;;
+				*)
+					_ebuild_helpers_path="$PORTAGE_BIN_PATH/ebuild-helpers/4:$PORTAGE_BIN_PATH/ebuild-helpers"
+					;;
+			esac
+
+			PATH=$_ebuild_helpers_path:$PREROOTPATH${PREROOTPATH:+:}/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin${ROOTPATH:+:}$ROOTPATH
+			unset _ebuild_helpers_path
+
+			# Use default ABI libdir in accordance with bug #355283.
+			x=LIBDIR_${DEFAULT_ABI}
+			[[ -n $DEFAULT_ABI && -n ${!x} ]] && x=${!x} || x=lib
+
+			if has distcc $FEATURES ; then
+				PATH="/usr/$x/distcc/bin:$PATH"
+				[[ -n $DISTCC_LOG ]] && addwrite "${DISTCC_LOG%/*}"
+			fi
+
+			if has ccache $FEATURES ; then
+				PATH="/usr/$x/ccache/bin:$PATH"
+
+				if [[ -n $CCACHE_DIR ]] ; then
+					addread "$CCACHE_DIR"
+					addwrite "$CCACHE_DIR"
+				fi
+
+				[[ -n $CCACHE_SIZE ]] && ccache -M $CCACHE_SIZE &> /dev/null
+			fi
+
+			unset x
+
+			if [[ -n $QA_PREBUILT ]] ; then
+
+				# these ones support fnmatch patterns
+				QA_EXECSTACK+=" $QA_PREBUILT"
+				QA_TEXTRELS+=" $QA_PREBUILT"
+				QA_WX_LOAD+=" $QA_PREBUILT"
+
+				# these ones support regular expressions, so translate
+				# fnmatch patterns to regular expressions
+				for x in QA_DT_HASH QA_DT_NEEDED QA_PRESTRIPPED QA_SONAME ; do
+					if [[ $(declare -p $x 2>/dev/null) = declare\ -a* ]] ; then
+						eval "$x=(\"\${$x[@]}\" ${QA_PREBUILT//\*/.*})"
+					else
+						eval "$x+=\" ${QA_PREBUILT//\*/.*}\""
+					fi
+				done
+
+				unset x
+			fi
+
+			# This needs to be exported since prepstrip is a separate shell script.
+			[[ -n $QA_PRESTRIPPED ]] && export QA_PRESTRIPPED
+			eval "[[ -n \$QA_PRESTRIPPED_${ARCH/-/_} ]] && \
+				export QA_PRESTRIPPED_${ARCH/-/_}"
+		fi
+	fi
+fi
+
+# unset USE_EXPAND variables that contain only the special "*" token
+for x in ${USE_EXPAND} ; do
+	[ "${!x}" == "*" ] && unset ${x}
+done
+unset x
+
+if has nostrip ${FEATURES} ${RESTRICT} || has strip ${RESTRICT}
+then
+	export DEBUGBUILD=1
+fi
+
+#a reasonable default for $S
+[[ -z ${S} ]] && export S=${WORKDIR}/${P}
+
+# Note: readonly variables interfere with preprocess_ebuild_env(), so
+# declare them only after it has already run.
+if [ "${EBUILD_PHASE}" != "depend" ] ; then
+	declare -r $PORTAGE_READONLY_METADATA $PORTAGE_READONLY_VARS
+	case "$EAPI" in
+		0|1|2)
+			;;
+		*)
+			declare -r ED EPREFIX EROOT
+			;;
+	esac
+fi
+
+ebuild_main() {
+
+	# Subshell/helper die support (must export for the die helper).
+	# Since this function is typically executed in a subshell,
+	# setup EBUILD_MASTER_PID to refer to the current $BASHPID,
+	# which seems to give the best results when further
+	# nested subshells call die.
+	export EBUILD_MASTER_PID=$BASHPID
+	trap 'exit 1' SIGTERM
+
+	if [[ $EBUILD_PHASE != depend ]] ; then
+		# Force configure scripts that automatically detect ccache to
+		# respect FEATURES="-ccache".
+		has ccache $FEATURES || export CCACHE_DISABLE=1
+
+		local phase_func=$(_ebuild_arg_to_phase "$EAPI" "$EBUILD_PHASE")
+		[[ -n $phase_func ]] && _ebuild_phase_funcs "$EAPI" "$phase_func"
+		unset phase_func
+	fi
+
+	source_all_bashrcs
+
+	case ${EBUILD_SH_ARGS} in
+	nofetch)
+		ebuild_phase_with_hooks pkg_nofetch
+		;;
+	prerm|postrm|postinst|config|info)
+		if has "$EBUILD_SH_ARGS" config info && \
+			! declare -F "pkg_$EBUILD_SH_ARGS" >/dev/null ; then
+			ewarn  "pkg_${EBUILD_SH_ARGS}() is not defined: '${EBUILD##*/}'"
+		fi
+		export SANDBOX_ON="0"
+		if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
+			ebuild_phase_with_hooks pkg_${EBUILD_SH_ARGS}
+		else
+			set -x
+			ebuild_phase_with_hooks pkg_${EBUILD_SH_ARGS}
+			set +x
+		fi
+		if [[ $EBUILD_PHASE == postinst ]] && [[ -n $PORTAGE_UPDATE_ENV ]]; then
+			# Update environment.bz2 in case installation phases
+			# need to pass some variables to uninstallation phases.
+			save_ebuild_env --exclude-init-phases | \
+				filter_readonly_variables --filter-path \
+				--filter-sandbox --allow-extra-vars \
+				| ${PORTAGE_BZIP2_COMMAND} -c -f9 > "$PORTAGE_UPDATE_ENV"
+			assert "save_ebuild_env failed"
+		fi
+		;;
+	unpack|prepare|configure|compile|test|clean|install)
+		if [[ ${SANDBOX_DISABLED:-0} = 0 ]] ; then
+			export SANDBOX_ON="1"
+		else
+			export SANDBOX_ON="0"
+		fi
+
+		case "$EBUILD_SH_ARGS" in
+		configure|compile)
+
+			local x
+			for x in ASFLAGS CCACHE_DIR CCACHE_SIZE \
+				CFLAGS CXXFLAGS LDFLAGS LIBCFLAGS LIBCXXFLAGS ; do
+				[[ ${!x+set} = set ]] && export $x
+			done
+			unset x
+
+			has distcc $FEATURES && [[ -n $DISTCC_DIR ]] && \
+				[[ ${SANDBOX_WRITE/$DISTCC_DIR} = $SANDBOX_WRITE ]] && \
+				addwrite "$DISTCC_DIR"
+
+			x=LIBDIR_$ABI
+			[ -z "$PKG_CONFIG_PATH" -a -n "$ABI" -a -n "${!x}" ] && \
+				export PKG_CONFIG_PATH=/usr/${!x}/pkgconfig
+
+			if has noauto $FEATURES && \
+				[[ ! -f $PORTAGE_BUILDDIR/.unpacked ]] ; then
+				echo
+				echo "!!! We apparently haven't unpacked..." \
+					"This is probably not what you"
+				echo "!!! want to be doing... You are using" \
+					"FEATURES=noauto so I'll assume"
+				echo "!!! that you know what you are doing..." \
+					"You have 5 seconds to abort..."
+				echo
+
+				local x
+				for x in 1 2 3 4 5 6 7 8; do
+					LC_ALL=C sleep 0.25
+				done
+
+				sleep 3
+			fi
+
+			cd "$PORTAGE_BUILDDIR"
+			if [ ! -d build-info ] ; then
+				mkdir build-info
+				cp "$EBUILD" "build-info/$PF.ebuild"
+			fi
+
+			#our custom version of libtool uses $S and $D to fix
+			#invalid paths in .la files
+			export S D
+
+			;;
+		esac
+
+		if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
+			dyn_${EBUILD_SH_ARGS}
+		else
+			set -x
+			dyn_${EBUILD_SH_ARGS}
+			set +x
+		fi
+		export SANDBOX_ON="0"
+		;;
+	help|pretend|setup|preinst)
+		#pkg_setup needs to be out of the sandbox for tmp file creation;
+		#for example, awking and piping a file in /tmp requires a temp file to be created
+		#in /etc.  If pkg_setup is in the sandbox, both our lilo and apache ebuilds break.
+		export SANDBOX_ON="0"
+		if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
+			dyn_${EBUILD_SH_ARGS}
+		else
+			set -x
+			dyn_${EBUILD_SH_ARGS}
+			set +x
+		fi
+		;;
+	depend)
+		export SANDBOX_ON="0"
+		set -f
+
+		if [ -n "${dbkey}" ] ; then
+			if [ ! -d "${dbkey%/*}" ]; then
+				install -d -g ${PORTAGE_GID} -m2775 "${dbkey%/*}"
+			fi
+			# Make it group writable. 666&~002==664
+			umask 002
+		fi
+
+		auxdbkeys="DEPEND RDEPEND SLOT SRC_URI RESTRICT HOMEPAGE LICENSE
+			DESCRIPTION KEYWORDS INHERITED IUSE REQUIRED_USE PDEPEND PROVIDE EAPI
+			PROPERTIES DEFINED_PHASES UNUSED_05 UNUSED_04
+			UNUSED_03 UNUSED_02 UNUSED_01"
+
+		#the extra $(echo) commands remove newlines
+		[ -n "${EAPI}" ] || EAPI=0
+
+		if [ -n "${dbkey}" ] ; then
+			> "${dbkey}"
+			for f in ${auxdbkeys} ; do
+				echo $(echo ${!f}) >> "${dbkey}" || exit $?
+			done
+		else
+			for f in ${auxdbkeys} ; do
+				echo $(echo ${!f}) 1>&9 || exit $?
+			done
+			exec 9>&-
+		fi
+		set +f
+		;;
+	_internal_test)
+		;;
+	*)
+		export SANDBOX_ON="1"
+		echo "Unrecognized EBUILD_SH_ARGS: '${EBUILD_SH_ARGS}'"
+		echo
+		dyn_help
+		exit 1
+		;;
+	esac
+}
+
+if [[ -s $SANDBOX_LOG ]] ; then
+	# We use SANDBOX_LOG to check for sandbox violations,
+	# so we ensure that there can't be a stale log to
+	# interfere with our logic.
+	x=
+	if [[ -n ${SANDBOX_ON} ]] ; then
+		x=$SANDBOX_ON
+		export SANDBOX_ON=0
+	fi
+
+	rm -f "$SANDBOX_LOG" || \
+		die "failed to remove stale sandbox log: '$SANDBOX_LOG'"
+
+	if [[ -n $x ]] ; then
+		export SANDBOX_ON=$x
+	fi
+	unset x
+fi
+
+if [[ $EBUILD_PHASE = depend ]] ; then
+	ebuild_main
+elif [[ -n $EBUILD_SH_ARGS ]] ; then
+	(
+		# Don't allow subprocesses to inherit the pipe which
+		# emerge uses to monitor ebuild.sh.
+		exec 9>&-
+
+		ebuild_main
+
+		# Save the env only for relevant phases.
+		if ! has "$EBUILD_SH_ARGS" clean help info nofetch ; then
+			umask 002
+			save_ebuild_env | filter_readonly_variables \
+				--filter-features > "$T/environment"
+			assert "save_ebuild_env failed"
+			chown portage:portage "$T/environment" &>/dev/null
+			chmod g+w "$T/environment" &>/dev/null
+		fi
+		[[ -n $PORTAGE_EBUILD_EXIT_FILE ]] && > "$PORTAGE_EBUILD_EXIT_FILE"
+		if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
+			[[ ! -s $SANDBOX_LOG ]]
+			"$PORTAGE_BIN_PATH"/ebuild-ipc exit $?
+		fi
+		exit 0
+	)
+	exit $?
+fi
+
+# Do not exit when ebuild.sh is sourced by other scripts.
+true

diff --git a/portage_with_autodep/bin/egencache b/portage_with_autodep/bin/egencache
new file mode 100755
index 0000000..1b4265d
--- /dev/null
+++ b/portage_with_autodep/bin/egencache
@@ -0,0 +1,851 @@
+#!/usr/bin/python
+# Copyright 2009-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import signal
+import sys
+# This block ensures that ^C interrupts are handled quietly.
+try:
+
+	def exithandler(signum,frame):
+		signal.signal(signal.SIGINT, signal.SIG_IGN)
+		signal.signal(signal.SIGTERM, signal.SIG_IGN)
+		sys.exit(128 + signum)
+
+	signal.signal(signal.SIGINT, exithandler)
+	signal.signal(signal.SIGTERM, exithandler)
+
+except KeyboardInterrupt:
+	sys.exit(128 + signal.SIGINT)
+
+import io
+import logging
+import optparse
+import subprocess
+import time
+import textwrap
+import re
+
+try:
+	import portage
+except ImportError:
+	from os import path as osp
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+	import portage
+
+from portage import os, _encodings, _unicode_encode, _unicode_decode
+from _emerge.MetadataRegen import MetadataRegen
+from portage.cache.cache_errors import CacheError, StatCollision
+from portage.manifest import guessManifestFileType
+from portage.util import cmp_sort_key, writemsg_level
+from portage import cpv_getkey
+from portage.dep import Atom, isjustname
+from portage.versions import pkgcmp, pkgsplit, vercmp
+
+try:
+	from xml.etree import ElementTree
+except ImportError:
+	pass
+else:
+	try:
+		from xml.parsers.expat import ExpatError
+	except ImportError:
+		pass
+	else:
+		from repoman.utilities import parse_metadata_use
+
+from repoman.utilities import FindVCS
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+def parse_args(args):
+	usage = "egencache [options] <action> ... [atom] ..."
+	parser = optparse.OptionParser(usage=usage)
+
+	actions = optparse.OptionGroup(parser, 'Actions')
+	actions.add_option("--update",
+		action="store_true",
+		help="update metadata/cache/ (generate as necessary)")
+	actions.add_option("--update-use-local-desc",
+		action="store_true",
+		help="update the use.local.desc file from metadata.xml")
+	actions.add_option("--update-changelogs",
+		action="store_true",
+		help="update the ChangeLog files from SCM logs")
+	parser.add_option_group(actions)
+
+	common = optparse.OptionGroup(parser, 'Common options')
+	common.add_option("--repo",
+		action="store",
+		help="name of repo to operate on (default repo is located at $PORTDIR)")
+	common.add_option("--config-root",
+		help="location of portage config files",
+		dest="portage_configroot")
+	common.add_option("--portdir",
+		help="override the portage tree location",
+		dest="portdir")
+	common.add_option("--tolerant",
+		action="store_true",
+		help="exit successfully if only minor errors occurred")
+	common.add_option("--ignore-default-opts",
+		action="store_true",
+		help="do not use the EGENCACHE_DEFAULT_OPTS environment variable")
+	parser.add_option_group(common)
+
+	update = optparse.OptionGroup(parser, '--update options')
+	update.add_option("--cache-dir",
+		help="location of the metadata cache",
+		dest="cache_dir")
+	update.add_option("--jobs",
+		action="store",
+		help="max ebuild processes to spawn")
+	update.add_option("--load-average",
+		action="store",
+		help="max load allowed when spawning multiple jobs",
+		dest="load_average")
+	update.add_option("--rsync",
+		action="store_true",
+		help="enable rsync stat collision workaround " + \
+			"for bug 139134 (use with --update)")
+	parser.add_option_group(update)
+
+	uld = optparse.OptionGroup(parser, '--update-use-local-desc options')
+	uld.add_option("--preserve-comments",
+		action="store_true",
+		help="preserve the comments from the existing use.local.desc file")
+	uld.add_option("--use-local-desc-output",
+		help="output file for use.local.desc data (or '-' for stdout)",
+		dest="uld_output")
+	parser.add_option_group(uld)
+
+	options, args = parser.parse_args(args)
+
+	if options.jobs:
+		jobs = None
+		try:
+			jobs = int(options.jobs)
+		except ValueError:
+			jobs = -1
+
+		if jobs < 1:
+			parser.error("Invalid: --jobs='%s'" % \
+				(options.jobs,))
+
+		options.jobs = jobs
+
+	else:
+		options.jobs = None
+
+	if options.load_average:
+		try:
+			load_average = float(options.load_average)
+		except ValueError:
+			load_average = 0.0
+
+		if load_average <= 0.0:
+			parser.error("Invalid: --load-average='%s'" % \
+				(options.load_average,))
+
+		options.load_average = load_average
+
+	else:
+		options.load_average = None
+
+	options.config_root = options.portage_configroot
+	if options.config_root is not None and \
+		not os.path.isdir(options.config_root):
+		parser.error("Not a directory: --config-root='%s'" % \
+			(options.config_root,))
+
+	if options.cache_dir is not None and not os.path.isdir(options.cache_dir):
+		parser.error("Not a directory: --cache-dir='%s'" % \
+			(options.cache_dir,))
+
+	for atom in args:
+		try:
+			atom = portage.dep.Atom(atom)
+		except portage.exception.InvalidAtom:
+			parser.error('Invalid atom: %s' % (atom,))
+
+		if not isjustname(atom):
+			parser.error('Atom is too specific: %s' % (atom,))
+
+	if options.update_use_local_desc:
+		try:
+			ElementTree
+			ExpatError
+		except NameError:
+			parser.error('--update-use-local-desc requires python with USE=xml!')
+
+	if options.uld_output == '-' and options.preserve_comments:
+		parser.error('--preserve-comments can not be used when outputting to stdout')
+
+	return parser, options, args
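+
+# Usage sketch (hypothetical command line), kept in a comment so module
+# behavior is unchanged:
+#
+#   parser, options, atoms = parse_args(
+#       ["--update", "--jobs", "4", "sys-apps/portage"])
+#   # options.jobs == 4, options.update is True, atoms == ["sys-apps/portage"]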
+
+class GenCache(object):
+	def __init__(self, portdb, cp_iter=None, max_jobs=None, max_load=None,
+		rsync=False):
+		self._portdb = portdb
+		# We can globally cleanse stale cache only if we
+		# iterate over every single cp.
+		self._global_cleanse = cp_iter is None
+		if cp_iter is not None:
+			self._cp_set = set(cp_iter)
+			cp_iter = iter(self._cp_set)
+			self._cp_missing = self._cp_set.copy()
+		else:
+			self._cp_set = None
+			self._cp_missing = set()
+		self._regen = MetadataRegen(portdb, cp_iter=cp_iter,
+			consumer=self._metadata_callback,
+			max_jobs=max_jobs, max_load=max_load)
+		self.returncode = os.EX_OK
+		metadbmodule = portdb.settings.load_best_module("portdbapi.metadbmodule")
+		self._trg_cache = metadbmodule(portdb.porttrees[0],
+			"metadata/cache", portage.auxdbkeys[:])
+		if rsync:
+			self._trg_cache.raise_stat_collision = True
+		try:
+			self._trg_cache.ec = \
+				portdb._repo_info[portdb.porttrees[0]].eclass_db
+		except AttributeError:
+			pass
+		self._existing_nodes = set()
+
+	def _metadata_callback(self, cpv, ebuild_path, repo_path, metadata):
+		self._existing_nodes.add(cpv)
+		self._cp_missing.discard(cpv_getkey(cpv))
+		if metadata is not None:
+			if metadata.get('EAPI') == '0':
+				del metadata['EAPI']
+			try:
+				try:
+					self._trg_cache[cpv] = metadata
+				except StatCollision as sc:
+					# If the content of a cache entry changes and neither the
+					# file mtime nor size changes, it will prevent rsync from
+					# detecting changes. Cache backends may raise this
+					# exception from _setitem() if they detect this type of stat
+					# collision. These exceptions are handled by bumping the
+					# mtime on the ebuild (and the corresponding cache entry).
+					# See bug #139134.
+					max_mtime = sc.mtime
+					for ec, (loc, ec_mtime) in metadata['_eclasses_'].items():
+						if max_mtime < ec_mtime:
+							max_mtime = ec_mtime
+					if max_mtime == sc.mtime:
+						max_mtime += 1
+					max_mtime = long(max_mtime)
+					try:
+						os.utime(ebuild_path, (max_mtime, max_mtime))
+					except OSError as e:
+						self.returncode |= 1
+						writemsg_level(
+							"%s writing target: %s\n" % (cpv, e),
+							level=logging.ERROR, noiselevel=-1)
+					else:
+						metadata['_mtime_'] = max_mtime
+						self._trg_cache[cpv] = metadata
+						self._portdb.auxdb[repo_path][cpv] = metadata
+
+			except CacheError as ce:
+				self.returncode |= 1
+				writemsg_level(
+					"%s writing target: %s\n" % (cpv, ce),
+					level=logging.ERROR, noiselevel=-1)
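+
+	# Sketch of the rsync workaround above (hypothetical numbers): when a cache
+	# entry's content changes while the ebuild's mtime and size stay identical,
+	# the backend raises StatCollision; bumping the ebuild mtime past the newest
+	# eclass mtime (e.g. 1000 -> 1001) lets rsync notice the change.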
+
+	def run(self):
+
+		received_signal = []
+
+		def sighandler(signum, frame):
+			signal.signal(signal.SIGINT, signal.SIG_IGN)
+			signal.signal(signal.SIGTERM, signal.SIG_IGN)
+			self._regen.terminate()
+			received_signal.append(128 + signum)
+
+		earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+		earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+
+		try:
+			self._regen.run()
+		finally:
+			# Restore previous handlers
+			if earlier_sigint_handler is not None:
+				signal.signal(signal.SIGINT, earlier_sigint_handler)
+			else:
+				signal.signal(signal.SIGINT, signal.SIG_DFL)
+			if earlier_sigterm_handler is not None:
+				signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+			else:
+				signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+		if received_signal:
+			sys.exit(received_signal[0])
+
+		self.returncode |= self._regen.returncode
+		cp_missing = self._cp_missing
+
+		trg_cache = self._trg_cache
+		dead_nodes = set()
+		if self._global_cleanse:
+			try:
+				for cpv in trg_cache:
+					cp = cpv_getkey(cpv)
+					if cp is None:
+						self.returncode |= 1
+						writemsg_level(
+							"Unable to parse cp for '%s'\n"  % (cpv,),
+							level=logging.ERROR, noiselevel=-1)
+					else:
+						dead_nodes.add(cpv)
+			except CacheError as ce:
+				self.returncode |= 1
+				writemsg_level(
+					"Error listing cache entries for " + \
+					"'%s/metadata/cache': %s, continuing...\n" % \
+					(self._portdb.porttree_root, ce),
+					level=logging.ERROR, noiselevel=-1)
+
+		else:
+			cp_set = self._cp_set
+			try:
+				for cpv in trg_cache:
+					cp = cpv_getkey(cpv)
+					if cp is None:
+						self.returncode |= 1
+						writemsg_level(
+							"Unable to parse cp for '%s'\n"  % (cpv,),
+							level=logging.ERROR, noiselevel=-1)
+					else:
+						cp_missing.discard(cp)
+						if cp in cp_set:
+							dead_nodes.add(cpv)
+			except CacheError as ce:
+				self.returncode |= 1
+				writemsg_level(
+					"Error listing cache entries for " + \
+					"'%s/metadata/cache': %s, continuing...\n" % \
+					(self._portdb.porttree_root, ce),
+					level=logging.ERROR, noiselevel=-1)
+
+		if cp_missing:
+			self.returncode |= 1
+			for cp in sorted(cp_missing):
+				writemsg_level(
+					"No ebuilds or cache entries found for '%s'\n"  % (cp,),
+					level=logging.ERROR, noiselevel=-1)
+
+		if dead_nodes:
+			dead_nodes.difference_update(self._existing_nodes)
+			for k in dead_nodes:
+				try:
+					del trg_cache[k]
+				except KeyError:
+					pass
+				except CacheError as ce:
+					self.returncode |= 1
+					writemsg_level(
+						"%s deleting stale cache: %s\n" % (k, ce),
+						level=logging.ERROR, noiselevel=-1)
+
+		if not trg_cache.autocommits:
+			try:
+				trg_cache.commit()
+			except CacheError as ce:
+				self.returncode |= 1
+				writemsg_level(
+					"committing target: %s\n" % (ce,),
+					level=logging.ERROR, noiselevel=-1)
+
+class GenUseLocalDesc(object):
+	def __init__(self, portdb, output=None,
+			preserve_comments=False):
+		self.returncode = os.EX_OK
+		self._portdb = portdb
+		self._output = output
+		self._preserve_comments = preserve_comments
+	
+	def run(self):
+		repo_path = self._portdb.porttrees[0]
+		ops = {'<':0, '<=':1, '=':2, '>=':3, '>':4}
+
+		# None (write to the default path) also satisfies != '-'.
+		if self._output != '-':
+			if self._output is None:
+				prof_path = os.path.join(repo_path, 'profiles')
+				desc_path = os.path.join(prof_path, 'use.local.desc')
+				try:
+					os.mkdir(prof_path)
+				except OSError:
+					pass
+			else:
+				desc_path = self._output
+
+			try:
+				if self._preserve_comments:
+					# Probe in binary mode, in order to avoid
+					# potential character encoding issues.
+					output = open(_unicode_encode(desc_path,
+						encoding=_encodings['fs'], errors='strict'), 'r+b')
+				else:
+					output = io.open(_unicode_encode(desc_path,
+						encoding=_encodings['fs'], errors='strict'),
+						mode='w', encoding=_encodings['repo.content'],
+						errors='backslashreplace')
+			except IOError as e:
+				if not self._preserve_comments or \
+					os.path.isfile(desc_path):
+					writemsg_level(
+						"ERROR: failed to open output file %s: %s\n" \
+						% (desc_path, e), level=logging.ERROR, noiselevel=-1)
+					self.returncode |= 2
+					return
+
+				# Open in r+b mode failed because the file doesn't
+				# exist yet. We can probably recover if we disable
+				# preserve_comments mode now.
+				writemsg_level(
+					"WARNING: --preserve-comments enabled, but " + \
+					"output file not found: %s\n" % (desc_path,),
+					level=logging.WARNING, noiselevel=-1)
+				self._preserve_comments = False
+				try:
+					output = io.open(_unicode_encode(desc_path,
+						encoding=_encodings['fs'], errors='strict'),
+						mode='w', encoding=_encodings['repo.content'],
+						errors='backslashreplace')
+				except IOError as e:
+					writemsg_level(
+						"ERROR: failed to open output file %s: %s\n" \
+						% (desc_path, e), level=logging.ERROR, noiselevel=-1)
+					self.returncode |= 2
+					return
+		else:
+			output = sys.stdout
+
+		if self._preserve_comments:
+			while True:
+				pos = output.tell()
+				if not output.readline().startswith(b'#'):
+					break
+			output.seek(pos)
+			output.truncate()
+			output.close()
+
+			# Finished probing comments in binary mode, now append
+			# in text mode.
+			output = io.open(_unicode_encode(desc_path,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='a', encoding=_encodings['repo.content'],
+				errors='backslashreplace')
+			output.write(_unicode_decode('\n'))
+		else:
+			output.write(_unicode_decode('''
+# This file is deprecated as per GLEP 56 in favor of metadata.xml. Please add
+# your descriptions to your package's metadata.xml ONLY.
+# * generated automatically using egencache *
+
+'''.lstrip()))
+
+		# The cmp function no longer exists in python3, so we'll
+		# implement our own here under a slightly different name
+		# since we don't want any confusion given that we never
+		# want to rely on the builtin cmp function.
+		def cmp_func(a, b):
+			if a is None or b is None:
+				# None can't be compared with other types in python3.
+				if a is None and b is None:
+					return 0
+				elif a is None:
+					return -1
+				else:
+					return 1
+			return (a > b) - (a < b)
+
+		for cp in self._portdb.cp_all():
+			metadata_path = os.path.join(repo_path, cp, 'metadata.xml')
+			try:
+				metadata = ElementTree.parse(metadata_path)
+			except IOError:
+				pass
+			except (ExpatError, EnvironmentError) as e:
+				writemsg_level(
+					"ERROR: failed parsing %s/metadata.xml: %s\n" % (cp, e),
+					level=logging.ERROR, noiselevel=-1)
+				self.returncode |= 1
+			else:
+				try:
+					usedict = parse_metadata_use(metadata)
+				except portage.exception.ParseError as e:
+					writemsg_level(
+						"ERROR: failed parsing %s/metadata.xml: %s\n" % (cp, e),
+						level=logging.ERROR, noiselevel=-1)
+					self.returncode |= 1
+				else:
+					for flag in sorted(usedict):
+						def atomcmp(atoma, atomb):
+							# None is better than an atom, that's why we reverse the args
+							if atoma is None or atomb is None:
+								return cmp_func(atomb, atoma)
+							# Same for plain PNs (.operator is None then)
+							elif atoma.operator is None or atomb.operator is None:
+								return cmp_func(atomb.operator, atoma.operator)
+							# Version matching
+							elif atoma.cpv != atomb.cpv:
+								return pkgcmp(pkgsplit(atoma.cpv), pkgsplit(atomb.cpv))
+							# Versions match, let's fallback to operator matching
+							else:
+								return cmp_func(ops.get(atoma.operator, -1),
+									ops.get(atomb.operator, -1))
+
+						def _Atom(key):
+							if key is not None:
+								return Atom(key)
+							return None
+
+						resdict = usedict[flag]
+						if len(resdict) == 1:
+							resdesc = next(iter(resdict.items()))[1]
+						else:
+							try:
+								reskeys = dict((_Atom(k), k) for k in resdict)
+							except portage.exception.InvalidAtom as e:
+								writemsg_level(
+									"ERROR: failed parsing %s/metadata.xml: %s\n" % (cp, e),
+									level=logging.ERROR, noiselevel=-1)
+								self.returncode |= 1
+								resdesc = next(iter(resdict.items()))[1]
+							else:
+								resatoms = sorted(reskeys, key=cmp_sort_key(atomcmp))
+								resdesc = resdict[reskeys[resatoms[-1]]]
+
+						output.write(_unicode_decode(
+							'%s:%s - %s\n' % (cp, flag, resdesc)))
+
+		output.close()
+
+if sys.hexversion < 0x3000000:
+	_filename_base = unicode
+else:
+	_filename_base = str
+
+class _special_filename(_filename_base):
+	"""
+	Helps to sort file names by file type and other criteria.
+	"""
+	def __new__(cls, status_change, file_name):
+		return _filename_base.__new__(cls, status_change + file_name)
+
+	def __init__(self, status_change, file_name):
+		# str/unicode initialization is a no-op; the value was set in __new__().
+		_filename_base.__init__(self)
+		self.status_change = status_change
+		self.file_name = file_name
+		self.file_type = guessManifestFileType(file_name)
+
+	def file_type_lt(self, a, b):
+		"""
+		Defines an ordering between file types.
+		"""
+		first = a.file_type
+		second = b.file_type
+		if first == second:
+			return False
+
+		if first == "EBUILD":
+			return True
+		elif first == "MISC":
+			return second in ("EBUILD",)
+		elif first == "AUX":
+			return second in ("EBUILD", "MISC")
+		elif first == "DIST":
+			return second in ("EBUILD", "MISC", "AUX")
+		elif first is None:
+			return False
+		else:
+			raise ValueError("Unknown file type '%s'" % first)
+
+	def __lt__(self, other):
+		"""
+		Compare file names: first by file type, then by version for
+		ebuilds and lexicographically for everything else.
+		EBUILD < MISC < AUX < DIST < None
+		"""
+		if self.__class__ != other.__class__:
+			raise NotImplementedError
+
+		# Sort by file type as defined by file_type_lt().
+		if self.file_type_lt(self, other):
+			return True
+		elif self.file_type_lt(other, self):
+			return False
+
+		# Files have the same type.
+		if self.file_type == "EBUILD":
+			# Sort by version. Lowest first.
+			ver = "-".join(pkgsplit(self.file_name[:-7])[1:3])
+			other_ver = "-".join(pkgsplit(other.file_name[:-7])[1:3])
+			return vercmp(ver, other_ver) < 0
+		else:
+			# Sort lexicographically.
+			return self.file_name < other.file_name
+
+class GenChangeLogs(object):
+	def __init__(self, portdb):
+		self.returncode = os.EX_OK
+		self._portdb = portdb
+		self._wrapper = textwrap.TextWrapper(
+				width = 78,
+				initial_indent = '  ',
+				subsequent_indent = '  '
+			)
+
+	@staticmethod
+	def grab(cmd):
+		p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+		return _unicode_decode(p.communicate()[0],
+				encoding=_encodings['stdio'], errors='strict')
+
+	def generate_changelog(self, cp):
+		try:
+			output = io.open('ChangeLog',
+				mode='w', encoding=_encodings['repo.content'],
+				errors='backslashreplace')
+		except IOError as e:
+			writemsg_level(
+				"ERROR: failed to open ChangeLog for %s: %s\n" % (cp,e,),
+				level=logging.ERROR, noiselevel=-1)
+			self.returncode |= 2
+			return
+
+		output.write(_unicode_decode('''
+# ChangeLog for %s
+# Copyright 1999-%s Gentoo Foundation; Distributed under the GPL v2
+# $Header: $
+
+''' % (cp, time.strftime('%Y'))).lstrip())
+
+		# now grab all the commits
+		commits = self.grab(['git', 'rev-list', 'HEAD', '--', '.']).split()
+
+		for c in commits:
+			# Explaining the arguments:
+			# --name-status to get a list of added/removed files
+			# --no-renames to avoid getting more complex records on the list
+			# --format to get the timestamp, author and commit description
+			# --root to make it work fine even with the initial commit
+			# --relative to get paths relative to ebuilddir
+			# -r (recursive) to get per-file changes
+			# then the commit-id and path.
+
+			cinfo = self.grab(['git', 'diff-tree', '--name-status', '--no-renames',
+					'--format=%ct %cN <%cE>%n%B', '--root', '--relative', '-r',
+					c, '--', '.']).rstrip('\n').split('\n')
+
+			# Expected output:
+			# timestamp Author Name <author@email>
+			# commit message l1
+			# ...
+			# commit message ln
+			#
+			# status1	filename1
+			# ...
+			# statusn	filenamen
+
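+			# Walk the lines from the end: the first empty line marks the
+			# boundary between the commit message body and the file list.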
+			changed = []
+			for n, l in enumerate(reversed(cinfo)):
+				if not l:
+					body = cinfo[1:-n-1]
+					break
+				else:
+					f = l.split()
+					if f[1] == 'Manifest':
+						pass # XXX: remanifest commits?
+					elif f[1] == 'ChangeLog':
+						pass
+					elif f[0].startswith('A'):
+						changed.append(_special_filename("+", f[1]))
+					elif f[0].startswith('D'):
+						changed.append(_special_filename("-", f[1]))
+					elif f[0].startswith('M'):
+						changed.append(_special_filename("", f[1]))
+					else:
+						writemsg_level(
+							"ERROR: unexpected git file status for %s: %s\n" % (cp,f,),
+							level=logging.ERROR, noiselevel=-1)
+						self.returncode |= 1
+
+			if not changed:
+				continue
+
+			(ts, author) = cinfo[0].split(' ', 1)
+			date = time.strftime('%d %b %Y', time.gmtime(float(ts)))
+
+			changed = [str(x) for x in sorted(changed)]
+
+			wroteheader = False
+			# Reverse the sort order for headers.
+			for c in reversed(changed):
+				if c.startswith('+') and c.endswith('.ebuild'):
+					output.write(_unicode_decode(
+						'*%s (%s)\n' % (c[1:-7], date)))
+					wroteheader = True
+			if wroteheader:
+				output.write(_unicode_decode('\n'))
+
+			# strip '<cp>: ', '[<cp>] ', and similar
+			body[0] = re.sub(r'^\W*' + re.escape(cp) + r'\W+', '', body[0])
+			# strip trailing newline
+			if not body[-1]:
+				body = body[:-1]
+			# strip git-svn id
+			if body[-1].startswith('git-svn-id:') and not body[-2]:
+				body = body[:-2]
+			# strip the repoman version/manifest note
+			if body[-1] in (' (Signed Manifest commit)', ' (Unsigned Manifest commit)'):
+				body = body[:-1]
+			if body[-1].startswith('(Portage version:') and body[-1].endswith(')'):
+				body = body[:-1]
+				if not body[-1]:
+					body = body[:-1]
+
+			# don't break filenames on hyphens
+			self._wrapper.break_on_hyphens = False
+			output.write(_unicode_decode(
+				self._wrapper.fill(
+				'%s; %s %s:' % (date, author, ', '.join(changed)))))
+			# but feel free to break commit messages there
+			self._wrapper.break_on_hyphens = True
+			output.write(_unicode_decode(
+				'\n%s\n\n' % '\n'.join(self._wrapper.fill(x) for x in body)))
+
+		output.close()
+
+	def run(self):
+		repo_path = self._portdb.porttrees[0]
+		os.chdir(repo_path)
+
+		if 'git' not in FindVCS():
+			writemsg_level(
+				"ERROR: --update-changelogs supported only in git repos\n",
+				level=logging.ERROR, noiselevel=-1)
+			self.returncode = 127
+			return
+
+		for cp in self._portdb.cp_all():
+			os.chdir(os.path.join(repo_path, cp))
+			# Determine whether ChangeLog is up-to-date by comparing
+			# the newest commit timestamp with the ChangeLog timestamp.
+			lmod = self.grab(['git', 'log', '--format=%ct', '-1', '.'])
+			if not lmod:
+				# This cp has not been added to the repo.
+				continue
+
+			try:
+				cmod = os.stat('ChangeLog').st_mtime
+			except OSError:
+				cmod = 0
+
+			if float(cmod) < float(lmod):
+				self.generate_changelog(cp)
+
+def egencache_main(args):
+	parser, options, atoms = parse_args(args)
+
+	config_root = options.config_root
+	if config_root is None:
+		config_root = '/'
+
+	# The calling environment is ignored, so the program is
+	# completely controlled by commandline arguments.
+	env = {}
+
+	if options.repo is None:
+		env['PORTDIR_OVERLAY'] = ''
+
+	if options.cache_dir is not None:
+		env['PORTAGE_DEPCACHEDIR'] = options.cache_dir
+
+	if options.portdir is not None:
+		env['PORTDIR'] = options.portdir
+
+	settings = portage.config(config_root=config_root,
+		target_root='/', local_config=False, env=env)
+
+	default_opts = None
+	if not options.ignore_default_opts:
+		default_opts = settings.get('EGENCACHE_DEFAULT_OPTS', '').split()
+
+	if default_opts:
+		parser, options, args = parse_args(default_opts + args)
+
+		if options.config_root is not None:
+			config_root = options.config_root
+
+		if options.cache_dir is not None:
+			env['PORTAGE_DEPCACHEDIR'] = options.cache_dir
+
+		settings = portage.config(config_root=config_root,
+			target_root='/', local_config=False, env=env)
+
+	if not options.update and not options.update_use_local_desc \
+			and not options.update_changelogs:
+		parser.error('No action specified')
+		return 1
+
+	if options.update and 'metadata-transfer' not in settings.features:
+		writemsg_level("egencache: warning: " + \
+			"automatically enabling FEATURES=metadata-transfer\n",
+			level=logging.WARNING, noiselevel=-1)
+		settings.features.add('metadata-transfer')
+
+	settings.lock()
+
+	portdb = portage.portdbapi(mysettings=settings)
+	if options.repo is not None:
+		repo_path = portdb.getRepositoryPath(options.repo)
+		if repo_path is None:
+			parser.error("Unable to locate repository named '%s'" % \
+				(options.repo,))
+			return 1
+
+		# Limit ebuilds to the specified repo.
+		portdb.porttrees = [repo_path]
+
+	ret = [os.EX_OK]
+
+	if options.update:
+		cp_iter = None
+		if atoms:
+			cp_iter = iter(atoms)
+
+		gen_cache = GenCache(portdb, cp_iter=cp_iter,
+			max_jobs=options.jobs,
+			max_load=options.load_average,
+			rsync=options.rsync)
+		gen_cache.run()
+		if options.tolerant:
+			ret.append(os.EX_OK)
+		else:
+			ret.append(gen_cache.returncode)
+
+	if options.update_use_local_desc:
+		gen_desc = GenUseLocalDesc(portdb,
+			output=options.uld_output,
+			preserve_comments=options.preserve_comments)
+		gen_desc.run()
+		ret.append(gen_desc.returncode)
+
+	if options.update_changelogs:
+		gen_clogs = GenChangeLogs(portdb)
+		gen_clogs.run()
+		ret.append(gen_clogs.returncode)
+
+	return max(ret)
+
+if __name__ == "__main__":
+	portage._disable_legacy_globals()
+	portage.util.noiselimit = -1
+	sys.exit(egencache_main(sys.argv[1:]))

diff --git a/portage_with_autodep/bin/emaint b/portage_with_autodep/bin/emaint
new file mode 100755
index 0000000..fdd01ed
--- /dev/null
+++ b/portage_with_autodep/bin/emaint
@@ -0,0 +1,654 @@
+#!/usr/bin/python -O
+# vim: noet :
+
+from __future__ import print_function
+
+import errno
+import re
+import signal
+import stat
+import sys
+import textwrap
+import time
+from optparse import OptionParser, OptionValueError
+
+try:
+	import portage
+except ImportError:
+	from os import path as osp
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+	import portage
+
+from portage import os
+from portage.util import writemsg
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+class WorldHandler(object):
+
+	short_desc = "Fix problems in the world file"
+
+	def name():
+		return "world"
+	name = staticmethod(name)
+
+	def __init__(self):
+		self.invalid = []
+		self.not_installed = []
+		self.invalid_category = []
+		self.okay = []
+		from portage._sets import load_default_config
+		setconfig = load_default_config(portage.settings,
+			portage.db[portage.settings["ROOT"]])
+		self._sets = setconfig.getSets()
+
+	def _check_world(self, onProgress):
+		categories = set(portage.settings.categories)
+		myroot = portage.settings["ROOT"]
+		self.world_file = os.path.join(portage.settings["EROOT"], portage.const.WORLD_FILE)
+		self.found = os.access(self.world_file, os.R_OK)
+		vardb = portage.db[myroot]["vartree"].dbapi
+
+		from portage._sets import SETPREFIX
+		sets = self._sets
+		world_atoms = list(sets["selected"])
+		maxval = len(world_atoms)
+		if onProgress:
+			onProgress(maxval, 0)
+		for i, atom in enumerate(world_atoms):
+			if not isinstance(atom, portage.dep.Atom):
+				if atom.startswith(SETPREFIX):
+					s = atom[len(SETPREFIX):]
+					if s in sets:
+						self.okay.append(atom)
+					else:
+						self.not_installed.append(atom)
+				else:
+					self.invalid.append(atom)
+				if onProgress:
+					onProgress(maxval, i+1)
+				continue
+			okay = True
+			if not vardb.match(atom):
+				self.not_installed.append(atom)
+				okay = False
+			if portage.catsplit(atom.cp)[0] not in categories:
+				self.invalid_category.append(atom)
+				okay = False
+			if okay:
+				self.okay.append(atom)
+			if onProgress:
+				onProgress(maxval, i+1)
+
+	def check(self, onProgress=None):
+		self._check_world(onProgress)
+		errors = []
+		if self.found:
+			errors += ["'%s' is not a valid atom" % x for x in self.invalid]
+			errors += ["'%s' is not installed" % x for x in self.not_installed]
+			errors += ["'%s' has a category that is not listed in /etc/portage/categories" % x for x in self.invalid_category]
+		else:
+			errors.append(self.world_file + " could not be opened for reading")
+		return errors
+
+	def fix(self, onProgress=None):
+		world_set = self._sets["selected"]
+		world_set.lock()
+		try:
+			world_set.load() # maybe it's changed on disk
+			before = set(world_set)
+			self._check_world(onProgress)
+			after = set(self.okay)
+			errors = []
+			if before != after:
+				try:
+					world_set.replace(self.okay)
+				except portage.exception.PortageException:
+					errors.append("%s could not be opened for writing" % \
+						self.world_file)
+			return errors
+		finally:
+			world_set.unlock()
+
+class BinhostHandler(object):
+
+	short_desc = "Generate a metadata index for binary packages"
+
+	def name():
+		return "binhost"
+	name = staticmethod(name)
+
+	def __init__(self):
+		myroot = portage.settings["ROOT"]
+		self._bintree = portage.db[myroot]["bintree"]
+		self._bintree.populate()
+		self._pkgindex_file = self._bintree._pkgindex_file
+		self._pkgindex = self._bintree._load_pkgindex()
+
+	def _need_update(self, cpv, data):
+
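+		# An entry needs updating when MD5, SIZE or MTIME is missing,
+		# or when the recorded size/mtime no longer matches the file.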
+		if "MD5" not in data:
+			return True
+
+		size = data.get("SIZE")
+		if size is None:
+			return True
+
+		mtime = data.get("MTIME")
+		if mtime is None:
+			return True
+
+		pkg_path = self._bintree.getname(cpv)
+		try:
+			s = os.lstat(pkg_path)
+		except OSError as e:
+			if e.errno not in (errno.ENOENT, errno.ESTALE):
+				raise
+			# We can't update the index for this one because
+			# it disappeared.
+			return False
+
+		try:
+			if long(mtime) != s[stat.ST_MTIME]:
+				return True
+			if long(size) != long(s.st_size):
+				return True
+		except ValueError:
+			return True
+
+		return False
+
+	def check(self, onProgress=None):
+		missing = []
+		cpv_all = self._bintree.dbapi.cpv_all()
+		cpv_all.sort()
+		maxval = len(cpv_all)
+		if onProgress:
+			onProgress(maxval, 0)
+		pkgindex = self._pkgindex
+		missing = []
+		metadata = {}
+		for d in pkgindex.packages:
+			metadata[d["CPV"]] = d
+		for i, cpv in enumerate(cpv_all):
+			d = metadata.get(cpv)
+			if not d or self._need_update(cpv, d):
+				missing.append(cpv)
+			if onProgress:
+				onProgress(maxval, i+1)
+		errors = ["'%s' is not in Packages" % cpv for cpv in missing]
+		stale = set(metadata).difference(cpv_all)
+		for cpv in stale:
+			errors.append("'%s' is not in the repository" % cpv)
+		return errors
+
+	def fix(self, onProgress=None):
+		bintree = self._bintree
+		cpv_all = self._bintree.dbapi.cpv_all()
+		cpv_all.sort()
+		missing = []
+		maxval = 0
+		if onProgress:
+			onProgress(maxval, 0)
+		pkgindex = self._pkgindex
+		missing = []
+		metadata = {}
+		for d in pkgindex.packages:
+			metadata[d["CPV"]] = d
+
+		for i, cpv in enumerate(cpv_all):
+			d = metadata.get(cpv)
+			if not d or self._need_update(cpv, d):
+				missing.append(cpv)
+
+		stale = set(metadata).difference(cpv_all)
+		if missing or stale:
+			from portage import locks
+			pkgindex_lock = locks.lockfile(
+				self._pkgindex_file, wantnewlockfile=1)
+			try:
+				# Repopulate with lock held.
+				bintree._populate()
+				cpv_all = self._bintree.dbapi.cpv_all()
+				cpv_all.sort()
+
+				pkgindex = bintree._load_pkgindex()
+				self._pkgindex = pkgindex
+
+				metadata = {}
+				for d in pkgindex.packages:
+					metadata[d["CPV"]] = d
+
+				# Recount missing packages, with lock held.
+				del missing[:]
+				for i, cpv in enumerate(cpv_all):
+					d = metadata.get(cpv)
+					if not d or self._need_update(cpv, d):
+						missing.append(cpv)
+
+				maxval = len(missing)
+				for i, cpv in enumerate(missing):
+					try:
+						metadata[cpv] = bintree._pkgindex_entry(cpv)
+					except portage.exception.InvalidDependString:
+						writemsg("!!! Invalid binary package: '%s'\n" % \
+							bintree.getname(cpv), noiselevel=-1)
+
+					if onProgress:
+						onProgress(maxval, i+1)
+
+				for cpv in set(metadata).difference(
+					self._bintree.dbapi.cpv_all()):
+					del metadata[cpv]
+
+				# We've updated the pkgindex, so set it to
+				# repopulate when necessary.
+				bintree.populated = False
+
+				del pkgindex.packages[:]
+				pkgindex.packages.extend(metadata.values())
+				from portage.util import atomic_ofstream
+				f = atomic_ofstream(self._pkgindex_file)
+				try:
+					self._pkgindex.write(f)
+				finally:
+					f.close()
+			finally:
+				locks.unlockfile(pkgindex_lock)
+
+		if onProgress:
+			if maxval == 0:
+				maxval = 1
+			onProgress(maxval, maxval)
+		return None
+
+class MoveHandler(object):
+
+	def __init__(self, tree, porttree):
+		self._tree = tree
+		self._portdb = porttree.dbapi
+		self._update_keys = ["DEPEND", "RDEPEND", "PDEPEND", "PROVIDE"]
+		self._master_repo = \
+			self._portdb.getRepositoryName(self._portdb.porttree_root)
+
+	def _grab_global_updates(self):
+		from portage.update import grab_updates, parse_updates
+		retupdates = {}
+		errors = []
+
+		for repo_name in self._portdb.getRepositories():
+			repo = self._portdb.getRepositoryPath(repo_name)
+			updpath = os.path.join(repo, "profiles", "updates")
+			if not os.path.isdir(updpath):
+				continue
+
+			try:
+				rawupdates = grab_updates(updpath)
+			except portage.exception.DirectoryNotFound:
+				rawupdates = []
+			upd_commands = []
+			for mykey, mystat, mycontent in rawupdates:
+				commands, myerrors = parse_updates(mycontent)
+				upd_commands.extend(commands)
+				errors.extend(myerrors)
+			retupdates[repo_name] = upd_commands
+
+		if self._master_repo in retupdates:
+			retupdates['DEFAULT'] = retupdates[self._master_repo]
+
+		return retupdates, errors
+
+	def check(self, onProgress=None):
+		allupdates, errors = self._grab_global_updates()
+		# Matching packages and moving them is relatively fast, so the
+		# progress bar is updated in indeterminate mode.
+		match = self._tree.dbapi.match
+		aux_get = self._tree.dbapi.aux_get
+		if onProgress:
+			onProgress(0, 0)
+		for repo, updates in allupdates.items():
+			if repo == 'DEFAULT':
+				continue
+			if not updates:
+				continue
+
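+			# Updates from the master repository also apply to packages
+			# from repositories that ship no update entries of their own.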
+			def repo_match(repository):
+				return repository == repo or \
+					(repo == self._master_repo and \
+					repository not in allupdates)
+
+			for i, update_cmd in enumerate(updates):
+				if update_cmd[0] == "move":
+					origcp, newcp = update_cmd[1:]
+					for cpv in match(origcp):
+						if repo_match(aux_get(cpv, ["repository"])[0]):
+							errors.append("'%s' moved to '%s'" % (cpv, newcp))
+				elif update_cmd[0] == "slotmove":
+					pkg, origslot, newslot = update_cmd[1:]
+					for cpv in match(pkg):
+						slot, prepo = aux_get(cpv, ["SLOT", "repository"])
+						if slot == origslot and repo_match(prepo):
+							errors.append("'%s' slot moved from '%s' to '%s'" % \
+								(cpv, origslot, newslot))
+				if onProgress:
+					onProgress(0, 0)
+
+		# Searching for updates in all the metadata is relatively slow, so this
+		# is where the progress bar comes out of indeterminate mode.
+		cpv_all = self._tree.dbapi.cpv_all()
+		cpv_all.sort()
+		maxval = len(cpv_all)
+		aux_update = self._tree.dbapi.aux_update
+		meta_keys = self._update_keys + ['repository']
+		from portage.update import update_dbentries
+		if onProgress:
+			onProgress(maxval, 0)
+		for i, cpv in enumerate(cpv_all):
+			metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+			repository = metadata.pop('repository')
+			try:
+				updates = allupdates[repository]
+			except KeyError:
+				try:
+					updates = allupdates['DEFAULT']
+				except KeyError:
+					continue
+			if not updates:
+				continue
+			metadata_updates = update_dbentries(updates, metadata)
+			if metadata_updates:
+				errors.append("'%s' has outdated metadata" % cpv)
+			if onProgress:
+				onProgress(maxval, i+1)
+		return errors
+
+	def fix(self, onProgress=None):
+		allupdates, errors = self._grab_global_updates()
+		# Matching packages and moving them is relatively fast, so the
+		# progress bar is updated in indeterminate mode.
+		move = self._tree.dbapi.move_ent
+		slotmove = self._tree.dbapi.move_slot_ent
+		if onProgress:
+			onProgress(0, 0)
+		for repo, updates in allupdates.items():
+			if repo == 'DEFAULT':
+				continue
+			if not updates:
+				continue
+
+			def repo_match(repository):
+				return repository == repo or \
+					(repo == self._master_repo and \
+					repository not in allupdates)
+
+			for i, update_cmd in enumerate(updates):
+				if update_cmd[0] == "move":
+					move(update_cmd, repo_match=repo_match)
+				elif update_cmd[0] == "slotmove":
+					slotmove(update_cmd, repo_match=repo_match)
+				if onProgress:
+					onProgress(0, 0)
+
+		# Searching for updates in all the metadata is relatively slow, so this
+		# is where the progress bar comes out of indeterminate mode.
+		self._tree.dbapi.update_ents(allupdates, onProgress=onProgress)
+		return errors
+
+class MoveInstalled(MoveHandler):
+
+	short_desc = "Perform package move updates for installed packages"
+
+	def name():
+		return "moveinst"
+	name = staticmethod(name)
+	def __init__(self):
+		myroot = portage.settings["ROOT"]
+		MoveHandler.__init__(self, portage.db[myroot]["vartree"], portage.db[myroot]["porttree"])
+
+class MoveBinary(MoveHandler):
+
+	short_desc = "Perform package move updates for binary packages"
+
+	def name():
+		return "movebin"
+	name = staticmethod(name)
+	def __init__(self):
+		myroot = portage.settings["ROOT"]
+		MoveHandler.__init__(self, portage.db[myroot]["bintree"], portage.db[myroot]["porttree"])
+
+class VdbKeyHandler(object):
+	def name():
+		return "vdbkeys"
+	name = staticmethod(name)
+
+	def __init__(self):
+		self.list = portage.db["/"]["vartree"].dbapi.cpv_all()
+		self.missing = []
+		self.keys = ["HOMEPAGE", "SRC_URI", "KEYWORDS", "DESCRIPTION"]
+		
+		for p in self.list:
+			mydir = os.path.join(portage.settings["EROOT"], portage.const.VDB_PATH, p)+os.sep
+			ismissing = True
+			for k in self.keys:
+				if os.path.exists(mydir+k):
+					ismissing = False
+					break
+			if ismissing:
+				self.missing.append(p)
+		
+	def check(self):
+		return ["%s has missing keys" % x for x in self.missing]
+	
+	def fix(self):
+	
+		errors = []
+	
+		for p in self.missing:
+			mydir = os.path.join(portage.settings["EROOT"], portage.const.VDB_PATH, p)+os.sep
+			if not os.access(mydir+"environment.bz2", os.R_OK):
+				errors.append("Can't access %s" % (mydir+"environment.bz2"))
+			elif not os.access(mydir, os.W_OK):
+				errors.append("Can't create files in %s" % mydir)
+			else:
+				env = os.popen("bzip2 -dcq "+mydir+"environment.bz2", "r")
+				envlines = env.read().split("\n")
+				env.close()
+				for k in self.keys:
+					s = [l for l in envlines if l.startswith(k+"=")]
+					if len(s) > 1:
+						errors.append("multiple matches for %s found in %senvironment.bz2" % (k, mydir))
+					elif len(s) == 0:
+						s = ""
+					else:
+						s = s[0].split("=",1)[1]
+						s = s.lstrip("$").strip("\'\"")
+						s = re.sub("(\\\\[nrt])+", " ", s)
+						s = " ".join(s.split()).strip()
+						if s != "":
+							try:
+								keyfile = open(mydir+os.sep+k, "w")
+								keyfile.write(s+"\n")
+								keyfile.close()
+							except (IOError, OSError) as e:
+								errors.append("Could not write %s, reason was: %s" % (mydir+k, e))
+		
+		return errors
+
+class ProgressHandler(object):
+	def __init__(self):
+		self.curval = 0
+		self.maxval = 0
+		self.last_update = 0
+		self.min_display_latency = 0.2
+
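+	# Rate-limit redraws: display() runs at most once per
+	# min_display_latency seconds.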
+	def onProgress(self, maxval, curval):
+		self.maxval = maxval
+		self.curval = curval
+		cur_time = time.time()
+		if cur_time - self.last_update >= self.min_display_latency:
+			self.last_update = cur_time
+			self.display()
+
+	def display(self):
+		raise NotImplementedError(self)
+
+class CleanResume(object):
+
+	short_desc = "Discard emerge --resume merge lists"
+
+	def name():
+		return "cleanresume"
+	name = staticmethod(name)
+
+	def check(self, onProgress=None):
+		messages = []
+		mtimedb = portage.mtimedb
+		resume_keys = ("resume", "resume_backup")
+		maxval = len(resume_keys)
+		if onProgress:
+			onProgress(maxval, 0)
+		for i, k in enumerate(resume_keys):
+			try:
+				d = mtimedb.get(k)
+				if d is None:
+					continue
+				if not isinstance(d, dict):
+					messages.append("unrecognized resume list: '%s'" % k)
+					continue
+				mergelist = d.get("mergelist")
+				if mergelist is None or not hasattr(mergelist, "__len__"):
+					messages.append("unrecognized resume list: '%s'" % k)
+					continue
+				messages.append("resume list '%s' contains %d packages" % \
+					(k, len(mergelist)))
+			finally:
+				if onProgress:
+					onProgress(maxval, i+1)
+		return messages
+
+	def fix(self, onProgress=None):
+		delete_count = 0
+		mtimedb = portage.mtimedb
+		resume_keys = ("resume", "resume_backup")
+		maxval = len(resume_keys)
+		if onProgress:
+			onProgress(maxval, 0)
+		for i, k in enumerate(resume_keys):
+			try:
+				if mtimedb.pop(k, None) is not None:
+					delete_count += 1
+			finally:
+				if onProgress:
+					onProgress(maxval, i+1)
+		if delete_count:
+			mtimedb.commit()
+
+def emaint_main(myargv):
+
+	# Similar to emerge, emaint needs a default umask so that created
+	# files (such as the world file) have sane permissions.
+	os.umask(0o22)
+
+	# TODO: Create a system that allows external modules to be added without
+	#       the need for hard coding.
+	modules = {
+		"world" : WorldHandler,
+		"binhost":BinhostHandler,
+		"moveinst":MoveInstalled,
+		"movebin":MoveBinary,
+		"cleanresume":CleanResume
+	}
+
+	module_names = list(modules)
+	module_names.sort()
+	module_names.insert(0, "all")
+
+	def exclusive(option, *args, **kw):
+		var = kw.get("var", None)
+		if var is None:
+			raise ValueError("var not specified to exclusive()")
+		if getattr(parser, var, ""):
+			raise OptionValueError("%s and %s are mutually exclusive options" % (getattr(parser, var), option))
+		setattr(parser, var, str(option))
+
+
+	usage = "usage: emaint [options] COMMAND"
+
+	desc = "The emaint program provides an interface to system health " + \
+		"checks and maintenance. See the emaint(1) man page " + \
+		"for additional information about the following commands:"
+
+	usage += "\n\n"
+	for line in textwrap.wrap(desc, 65):
+		usage += "%s\n" % line
+	usage += "\n"
+	usage += "  %s" % "all".ljust(15) + \
+		"Perform all supported commands\n"
+	for m in module_names[1:]:
+		usage += "  %s%s\n" % (m.ljust(15), modules[m].short_desc)
+
+	parser = OptionParser(usage=usage, version=portage.VERSION)
+	parser.add_option("-c", "--check", help="check for problems",
+		action="callback", callback=exclusive, callback_kwargs={"var":"action"})
+	parser.add_option("-f", "--fix", help="attempt to fix problems",
+		action="callback", callback=exclusive, callback_kwargs={"var":"action"})
+	parser.action = None
+
+
+	(options, args) = parser.parse_args(args=myargv)
+	if len(args) != 1:
+		parser.error("Incorrect number of arguments")
+	if args[0] not in module_names:
+		parser.error("'%s' is not a known target" % args[0])
+
+	if parser.action:
+		action = parser.action
+	else:
+		print("Defaulting to --check")
+		action = "-c/--check"
+
+	if args[0] == "all":
+		tasks = modules.values()
+	else:
+		tasks = [modules[args[0]]]
+
+
+	if action == "-c/--check":
+		status = "Checking %s for problems"
+		func = "check"
+	else:
+		status = "Attempting to fix %s"
+		func = "fix"
+
+	isatty = os.environ.get('TERM') != 'dumb' and sys.stdout.isatty()
+	for task in tasks:
+		print(status % task.name())
+		inst = task()
+		onProgress = None
+		if isatty:
+			progressBar = portage.output.TermProgressBar()
+			progressHandler = ProgressHandler()
+			onProgress = progressHandler.onProgress
+			def display():
+				progressBar.set(progressHandler.curval, progressHandler.maxval)
+			progressHandler.display = display
+			def sigwinch_handler(signum, frame):
+				lines, progressBar.term_columns = \
+					portage.output.get_term_size()
+			signal.signal(signal.SIGWINCH, sigwinch_handler)
+		result = getattr(inst, func)(onProgress=onProgress)
+		if isatty:
+			# make sure the final progress is displayed
+			progressHandler.display()
+			print()
+			signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+		if result:
+			print()
+			print("\n".join(result))
+			print("\n")
+
+	print("Finished")
+
+if __name__ == "__main__":
+	emaint_main(sys.argv[1:])

diff --git a/portage_with_autodep/bin/emerge b/portage_with_autodep/bin/emerge
new file mode 100755
index 0000000..6f69244
--- /dev/null
+++ b/portage_with_autodep/bin/emerge
@@ -0,0 +1,66 @@
+#!/usr/bin/python
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import signal
+import sys
+# This block ensures that ^C interrupts are handled quietly.
+try:
+
+	def exithandler(signum,frame):
+		signal.signal(signal.SIGINT, signal.SIG_IGN)
+		signal.signal(signal.SIGTERM, signal.SIG_IGN)
+		sys.exit(128 + signum)
+
+	signal.signal(signal.SIGINT, exithandler)
+	signal.signal(signal.SIGTERM, exithandler)
+	# Prevent "[Errno 32] Broken pipe" exceptions when
+	# writing to a pipe.
+	signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+except KeyboardInterrupt:
+	sys.exit(128 + signal.SIGINT)
+
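+# Sending SIGUSR1 drops the process into pdb, which helps when
+# inspecting an emerge process that appears to be hung.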
+def debug_signal(signum, frame):
+	import pdb
+	pdb.set_trace()
+signal.signal(signal.SIGUSR1, debug_signal)
+
+try:
+	from _emerge.main import emerge_main
+except ImportError:
+	from os import path as osp
+	import sys
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+	from _emerge.main import emerge_main
+
+if __name__ == "__main__":
+	import sys
+	from portage.exception import ParseError, PermissionDenied
+	try:
+		retval = emerge_main()
+	except PermissionDenied as e:
+		sys.stderr.write("Permission denied: '%s'\n" % str(e))
+		sys.exit(e.errno)
+	except ParseError as e:
+		sys.stderr.write("%s\n" % str(e))
+		sys.exit(1)
+	except SystemExit:
+		raise
+	except Exception:
+		# If an unexpected exception occurs then we don't want the mod_echo
+		# output to obscure the traceback, so dump the mod_echo output before
+		# showing the traceback.
+		import traceback
+		tb_str = traceback.format_exc()
+		try:
+			from portage.elog import mod_echo
+		except ImportError:
+			pass
+		else:
+			mod_echo.finalize()
+		sys.stderr.write(tb_str)
+		sys.exit(1)
+	sys.exit(retval)

diff --git a/portage_with_autodep/bin/emerge-webrsync b/portage_with_autodep/bin/emerge-webrsync
new file mode 100755
index 0000000..d933871
--- /dev/null
+++ b/portage_with_autodep/bin/emerge-webrsync
@@ -0,0 +1,457 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author: Karl Trygve Kalleberg <karltk@gentoo.org>
+# Rewritten from the old, Perl-based emerge-webrsync script
+# Author: Alon Bar-Lev <alon.barlev@gmail.com>
+# Major rewrite from Karl's scripts.
+
+# TODO:
+#  - all output should prob be converted to e* funcs
+#  - add support for ROOT
+
+#
+# gpg key import
+# KEY_ID=0x239C75C4
+# gpg --homedir /etc/portage/gnupg --keyserver subkeys.pgp.net --recv-keys $KEY_ID
+# gpg --homedir /etc/portage/gnupg --edit-key $KEY_ID trust
+#
+
+# Only echo if in verbose mode
+vvecho() { [[ ${do_verbose} -eq 1 ]] && echo "$@" ; }
+# Only echo if not in verbose mode
+nvecho() { [[ ${do_verbose} -eq 0 ]] && echo "$@" ; }
+# warning echos
+wecho() { echo "${argv0}: warning: $*" 1>&2 ; }
+# error echos
+eecho() { echo "${argv0}: error: $*" 1>&2 ; }
+
+argv0=$0
+if ! type -P portageq > /dev/null ; then
+	eecho "could not find 'portageq'; aborting"
+	exit 1
+fi
+eval $(portageq envvar -v FEATURES FETCHCOMMAND GENTOO_MIRRORS \
+	PORTAGE_BIN_PATH PORTAGE_GPG_DIR \
+	PORTAGE_NICENESS PORTAGE_RSYNC_EXTRA_OPTS PORTAGE_TMPDIR PORTDIR \
+	SYNC http_proxy ftp_proxy)
+DISTDIR="${PORTAGE_TMPDIR}/emerge-webrsync"
+export http_proxy ftp_proxy
+
+# If PORTAGE_NICENESS is overridden via the env then it will
+# still pass through the portageq call and override properly.
+if [ -n "${PORTAGE_NICENESS}" ]; then
+	renice $PORTAGE_NICENESS $$ > /dev/null
+fi
+
+source "${PORTAGE_BIN_PATH}"/isolated-functions.sh || exit 1
+
+do_verbose=0
+do_debug=0
+
+if has webrsync-gpg ${FEATURES} ; then
+	WEBSYNC_VERIFY_SIGNATURE=1
+else
+	WEBSYNC_VERIFY_SIGNATURE=0
+fi
+if [ ${WEBSYNC_VERIFY_SIGNATURE} != 0 -a -z "${PORTAGE_GPG_DIR}" ]; then
+	eecho "please set PORTAGE_GPG_DIR in make.conf"
+	exit 1
+fi
+
+do_tar() {
+	local file=$1; shift
+	local decompressor
+	case ${file} in
+		*.xz)   decompressor="xzcat" ;;
+		*.bz2)  decompressor="bzcat" ;;
+		*.gz)   decompressor="zcat"  ;;
+		*)      decompressor="cat"   ;;
+	esac
+	${decompressor} "${file}" | tar "$@"
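+	# Succeed only if every stage of the pipeline exited 0: with the
+	# spaces removed, a string of zeros is numerically equal to 0.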
+	_pipestatus=${PIPESTATUS[*]}
+	[[ ${_pipestatus// /} -eq 0 ]]
+}
+
+get_utc_date_in_seconds() {
+	date -u +"%s"
+}
+
+get_date_part() {
+	local utc_time_in_secs="$1"
+	local part="$2"
+
+	if	[[ ${USERLAND} == BSD ]] ; then
+		date -r ${utc_time_in_secs} -u +"${part}"
+	else
+		date -d @${utc_time_in_secs} -u +"${part}"
+	fi
+}
+
+get_utc_second_from_string() {
+	local s="$1"
+	if [[ ${USERLAND} == BSD ]] ; then
+		date -juf "%Y%m%d" "$s" +"%s"
+	else
+		date -d "${s:0:4}-${s:4:2}-${s:6:2}" -u +"%s"
+	fi
+}
+
+get_portage_timestamp() {
+	local portage_current_timestamp=0
+
+	if [ -f "${PORTDIR}/metadata/timestamp.x" ]; then
+		portage_current_timestamp=$(cut -f 1 -d " " "${PORTDIR}/metadata/timestamp.x" )
+	fi
+
+	echo "${portage_current_timestamp}"
+}
+
+fetch_file() {
+	local URI="$1"
+	local FILE="$2"
+	local opts
+
+	if [ "${FETCHCOMMAND/wget/}" != "${FETCHCOMMAND}" ]; then
+		opts="--continue $(nvecho -q)"
+	elif [ "${FETCHCOMMAND/curl/}" != "${FETCHCOMMAND}" ]; then
+		opts="--continue-at - $(nvecho -s -f)"
+	else
+		rm -f "${FILE}"
+	fi
+
+	vecho "Fetching file ${FILE} ..."
+	# DISTDIR is already set above; FETCHCOMMAND expands ${DISTDIR}, ${FILE} and ${URI} itself.
+	eval "${FETCHCOMMAND}" ${opts}
+	[ -s "${FILE}" ]
+}
+
+check_file_digest() {
+	local digest="$1"
+	local file="$2"
+	local r=1
+
+	vecho "Checking digest ..."
+
+	if type -P md5sum > /dev/null; then
+		md5sum -c $digest && r=0
+	elif type -P md5 > /dev/null; then
+		[ "$(md5 -q "${file}")" == "$(cut -d ' ' -f 1 "${digest}")" ] && r=0
+	else
+		eecho "cannot check digest: no suitable md5/md5sum binaries found"
+	fi
+
+	return "${r}"
+}
+
+check_file_signature() {
+	local signature="$1"
+	local file="$2"
+	local r=1
+
+	if [ ${WEBSYNC_VERIFY_SIGNATURE} != 0 ]; then
+
+		vecho "Checking signature ..."
+
+		if type -P gpg > /dev/null; then
+			gpg --homedir "${PORTAGE_GPG_DIR}" --verify "$signature" "$file" && r=0
+		else
+			eecho "cannot check signature: gpg binary not found"
+		fi
+	else
+		r=0
+	fi
+
+	return "${r}"
+}
+
+get_snapshot_timestamp() {
+	local file="$1"
+
+	do_tar "${file}" --to-stdout -xf - portage/metadata/timestamp.x | cut -f 1 -d " "
+}
+
+sync_local() {
+	local file="$1"
+
+	vecho "Syncing local tree ..."
+
+	if type -P tarsync > /dev/null ; then
+		local chown_opts="-o portage -g portage"
+		chown portage:portage portage > /dev/null 2>&1 || chown_opts=""
+		if ! tarsync $(vvecho -v) -s 1 ${chown_opts} \
+			-e /distfiles -e /packages -e /local "${file}" "${PORTDIR}"; then
+			eecho "tarsync failed; tarball is corrupt? (${file})"
+			return 1
+		fi
+	else
+		if ! do_tar "${file}" xf -; then
+			eecho "tar failed to extract the image; tarball is corrupt? (${file})"
+			rm -fr portage
+			return 1
+		fi
+
+		# Free disk space
+		rm -f "${file}"
+
+		chown portage:portage portage > /dev/null 2>&1 && \
+			chown -R portage:portage portage
+		cd portage
+		rsync -av --progress --stats --delete --delete-after \
+			--exclude='/distfiles' --exclude='/packages' \
+			--exclude='/local' ${PORTAGE_RSYNC_EXTRA_OPTS} . "${PORTDIR%%/}"
+		cd ..
+
+		vecho "Cleaning up ..."
+		rm -fr portage
+	fi
+
+	if has metadata-transfer ${FEATURES} ; then
+		vecho "Updating cache ..."
+		emerge --metadata
+	fi
+	[ -x /etc/portage/bin/post_sync ] && /etc/portage/bin/post_sync
+	return 0
+}
+
+do_snapshot() {
+	local ignore_timestamp="$1"
+	local date="$2"
+
+	local r=1
+
+	local base_file="portage-${date}.tar"
+
+	local have_files=0
+	local mirror
+
+	local compressions=""
+	# xz is not supported in app-arch/tarsync, so use
+	# bz2 format if we have tarsync.
+	if ! type -P tarsync > /dev/null ; then
+		type -P xzcat > /dev/null && compressions="${compressions} xz"
+	fi
+	type -P bzcat > /dev/null && compressions="${compressions} bz2"
+	type -P  zcat > /dev/null && compressions="${compressions} gz"
+	if [[ -z ${compressions} ]] ; then
+		eecho "unable to locate any decompressors (xzcat or bzcat or zcat)"
+		exit 1
+	fi
+
+	for mirror in ${GENTOO_MIRRORS} ; do
+
+		vecho "Trying to retrieve ${date} snapshot from ${mirror} ..."
+
+		for compression in ${compressions} ; do
+			local file="portage-${date}.tar.${compression}"
+			local digest="${file}.md5sum"
+			local signature="${file}.gpgsig"
+
+			if [ -s "${file}" -a -s "${digest}" -a -s "${signature}" ] ; then
+				check_file_digest "${DISTDIR}/${digest}" "${DISTDIR}/${file}" && \
+				check_file_signature "${DISTDIR}/${signature}" "${DISTDIR}/${file}" && \
+				have_files=1
+			fi
+
+			if [ ${have_files} -eq 0 ] ; then
+				fetch_file "${mirror}/snapshots/${digest}" "${digest}" && \
+				fetch_file "${mirror}/snapshots/${signature}" "${signature}" && \
+				fetch_file "${mirror}/snapshots/${file}" "${file}" && \
+				check_file_digest "${DISTDIR}/${digest}" "${DISTDIR}/${file}" && \
+				check_file_signature "${DISTDIR}/${signature}" "${DISTDIR}/${file}" && \
+				have_files=1
+			fi
+
+			#
+			# If timestamp is invalid
+			# we want to try and retrieve
+			# from a different mirror
+			#
+			if [ ${have_files} -eq 1 ]; then
+
+				vecho "Getting snapshot timestamp ..."
+				local snapshot_timestamp=$(get_snapshot_timestamp "${file}")
+
+				if [ ${ignore_timestamp} == 0 ]; then
+					if [ ${snapshot_timestamp} -lt $(get_portage_timestamp) ]; then
+						wecho "portage is newer than snapshot"
+						have_files=0
+					fi
+				else
+					local utc_seconds=$(get_utc_second_from_string "${date}")
+
+					#
+					# Check that this snapshot
+					# is what it claims to be ...
+					#
+					if [ ${snapshot_timestamp} -lt ${utc_seconds} ] || \
+						[ ${snapshot_timestamp} -gt $((${utc_seconds} + 2*86400)) ]; then
+
+						wecho "snapshot timestamp is not in acceptable period"
+						have_files=0
+					fi
+				fi
+			fi
+
+			if [ ${have_files} -eq 1 ]; then
+				break
+			else
+				#
+				# Remove files and use a different mirror
+				#
+				rm -f "${file}" "${digest}" "${signature}"
+			fi
+		done
+
+		[ ${have_files} -eq 1 ] && break
+	done
+
+	if [ ${have_files} -eq 1 ]; then
+		sync_local "${file}" && r=0
+	else
+		vecho "${date} snapshot was not found"
+	fi
+	
+	rm -f "${file}" "${digest}" "${signature}"
+	return "${r}"
+}
+
+do_latest_snapshot() {
+	local attempts=0
+	local r=1
+
+	vecho "Fetching most recent snapshot ..."
+
+	# The snapshot for a given day is generated at 01:45 UTC on the following
+	# day, so the current day's snapshot (going by UTC time) hasn't been
+	# generated yet.  Therefore, always start by looking for the previous day's
+	# snapshot (for attempts=1, subtract 1 day from the current UTC time).
+
+	# Timestamps that differ by less than 2 hours
+	# are considered to be approximately equal.
+	local min_time_diff=$(( 2 * 60 * 60 ))
+
+	local existing_timestamp=$(get_portage_timestamp)
+	local timestamp_difference
+	local timestamp_problem
+	local approx_snapshot_time
+	local start_time=$(get_utc_date_in_seconds)
+	local start_hour=$(get_date_part ${start_time} "%H")
+
+	# Daily snapshots are created at 1:45 AM and are not
+	# available until after 2 AM. Don't waste time trying
+	# to fetch a snapshot before it's been created.
+	if [ ${start_hour} -lt 2 ] ; then
+		(( start_time -= 86400 ))
+	fi
+	local snapshot_date=$(get_date_part ${start_time} "%Y%m%d")
+	local snapshot_date_seconds=$(get_utc_second_from_string ${snapshot_date})
+
+	while (( ${attempts} <  40 )) ; do
+		(( attempts++ ))
+		(( snapshot_date_seconds -= 86400 ))
+		# snapshots are created at 1:45 AM
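+		# (86400 + 6300 seconds = 01:45 UTC on the following day)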
+		(( approx_snapshot_time = snapshot_date_seconds + 86400 + 6300 ))
+		(( timestamp_difference = existing_timestamp - approx_snapshot_time ))
+		[ ${timestamp_difference} -lt 0 ] && (( timestamp_difference = -1 * timestamp_difference ))
+		snapshot_date=$(get_date_part ${snapshot_date_seconds} "%Y%m%d")
+
+		timestamp_problem=""
+		if [ ${timestamp_difference} -eq 0 ]; then
+			timestamp_problem="is identical to"
+		elif [ ${timestamp_difference} -lt ${min_time_diff} ]; then
+			timestamp_problem="is possibly identical to"
+		elif [ ${approx_snapshot_time} -lt ${existing_timestamp} ] ; then
+			timestamp_problem="is newer than"
+		fi
+
+		if [ -n "${timestamp_problem}" ]; then
+			ewarn "Latest snapshot date: ${snapshot_date}"
+			ewarn
+			ewarn "Approximate snapshot timestamp: ${approx_snapshot_time}"
+			ewarn "       Current local timestamp: ${existing_timestamp}"
+			ewarn
+			echo -e "The current local timestamp" \
+				"${timestamp_problem} the" \
+				"timestamp of the latest" \
+				"snapshot. In order to force sync," \
+				"use the --revert option or remove" \
+				"the timestamp file located at" \
+				"'${PORTDIR}/metadata/timestamp.x'." | fmt -w 70 | \
+				while read -r line ; do
+					ewarn "${line}"
+				done
+			r=0
+			break
+		fi
+
+		if do_snapshot 0 "${snapshot_date}"; then
+			r=0
+			break;
+		fi
+	done
+
+	return "${r}"
+}
+
+usage() {
+	cat <<-EOF
+	Usage: $0 [options]
+	
+	Options:
+	  --revert=yyyymmdd   Revert to snapshot
+	  -q, --quiet         Only output errors
+	  -v, --verbose       Enable verbose output
+	  -x, --debug         Enable debug output
+	  -h, --help          This help screen (duh!)
+	EOF
+	if [[ -n $* ]] ; then
+		printf "\nError: %s\n" "$*" 1>&2
+		exit 1
+	else
+		exit 0
+	fi
+}
+
+main() {
+	local arg
+	local revert_date
+	
+	[ ! -d "${DISTDIR}" ] && mkdir -p "${DISTDIR}"
+	cd "${DISTDIR}"
+
+	for arg in "$@" ; do
+		local v=${arg#*=}
+		case ${arg} in
+			-h|--help)    usage ;;
+			-q|--quiet)   PORTAGE_QUIET=1 ;;
+			-v|--verbose) do_verbose=1 ;;
+			-x|--debug)   do_debug=1 ;;
+			--revert=*)   revert_date=${v} ;;
+			*)            usage "Invalid option '${arg}'" ;;
+		esac
+	done
+
+	# This is a sanity check to help prevent people like funtoo users
+	# from accidentally wiping out their git tree.
+	if [[ -n $SYNC && ${SYNC#rsync:} = $SYNC ]] ; then
+		echo "The current SYNC variable setting does not refer to an rsync URI:" >&2
+		echo >&2
+		echo "  SYNC=$SYNC" >&2
+		echo >&2
+		echo "If you intend to use emerge-webrsync then please" >&2
+		echo "adjust SYNC to refer to an rsync URI." >&2
+		echo "emerge-webrsync exiting due to abnormal SYNC setting." >&2
+		exit 1
+	fi
+
+	[[ ${do_debug} -eq 1 ]] && set -x
+
+	if [[ -n ${revert_date} ]] ; then
+		do_snapshot 1 "${revert_date}"
+	else
+		do_latest_snapshot
+	fi
+}
+
+main "$@"

diff --git a/portage_with_autodep/bin/env-update b/portage_with_autodep/bin/env-update
new file mode 100755
index 0000000..8a69f2b
--- /dev/null
+++ b/portage_with_autodep/bin/env-update
@@ -0,0 +1,41 @@
+#!/usr/bin/python -O
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import errno
+import sys
+
+def usage(status):
+	print("Usage: env-update [--no-ldconfig]")
+	print("")
+	print("See the env-update(1) man page for more info")
+	sys.exit(status)
+
+if "-h" in sys.argv or "--help" in sys.argv:
+	usage(0)
+
+makelinks=1
+if "--no-ldconfig" in sys.argv:
+	makelinks=0
+	sys.argv.pop(sys.argv.index("--no-ldconfig"))
+
+if len(sys.argv) > 1:
+	print("!!! Invalid command line options!\n")
+	usage(1)
+
+try:
+	import portage
+except ImportError:
+	from os import path as osp
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+	import portage
+try:
+	portage.env_update(makelinks)
+except IOError as e:
+	if e.errno == errno.EACCES:
+		print("env-update: Need superuser access")
+		sys.exit(1)
+	else:
+		raise

diff --git a/portage_with_autodep/bin/etc-update b/portage_with_autodep/bin/etc-update
new file mode 100755
index 0000000..42518ad
--- /dev/null
+++ b/portage_with_autodep/bin/etc-update
@@ -0,0 +1,616 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Author Brandon Low <lostlogic@gentoo.org>
+#
+# Previous version (from which I've borrowed a few bits) by:
+# Jochem Kossen <j.kossen@home.nl>
+# Leo Lipelis <aeoo@gentoo.org>
+# Karl Trygve Kalleberg <karltk@gentoo.org>
+
+cd /
+
+if type -P gsed >/dev/null ; then
+	sed() { gsed "$@"; }
+fi
+
+get_config() {
+	# the sed here does:
+	#  - strip off comments
+	#  - match lines that set item in question
+	#    - delete the "item =" part
+	#    - store the actual value into the hold space
+	#  - on the last line, restore the hold space and print it
+	# If there's more than one of the same configuration item, then
+	# the store to the hold space clobbers previous value so the last
+	# setting takes precedence.
+	local item=$1
+	eval echo $(sed -n \
+		-e 's:[[:space:]]*#.*$::' \
+		-e "/^[[:space:]]*$item[[:space:]]*=/{s:[^=]*=[[:space:]]*\([\"']\{0,1\}\)\(.*\)\1:\2:;h}" \
+		-e '${g;p}' \
+		"${PORTAGE_CONFIGROOT}"etc/etc-update.conf)
+}
+
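+# Run the user-configured diff command, substituting the %file1 and
+# %file2 placeholders with the two paths passed in.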
+diff_command() {
+	local cmd=${diff_command//%file1/$1}
+	${cmd//%file2/$2}
+}
+
+scan() {
+	echo "Scanning Configuration files..."
+	rm -rf ${TMP}/files > /dev/null 2>&1
+	mkdir ${TMP}/files || die "Failed mkdir command!" 1
+	count=0
+	input=0
+	local find_opts
+	local my_basename
+
+	for path in ${CONFIG_PROTECT} ; do
+		path="${ROOT}${path}"
+		# Do not traverse hidden directories such as .svn or .git.
+		find_opts="-name .* -type d -prune -o -name ._cfg????_*"
+		if [ ! -d "${path}" ]; then
+			[ ! -f "${path}" ] && continue
+			my_basename="${path##*/}"
+			path="${path%/*}"
+			find_opts="-maxdepth 1 -name ._cfg????_${my_basename}"
+		fi
+
+		ofile=""
+		# The below set -f turns off file name globbing in the ${find_opts} expansion.
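+		# The sed/sort pipeline groups the ._cfgNNNN_* update files by
+		# directory and target file name, ordered by their number.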
+		for file in $(set -f ; find ${path}/ ${find_opts} \
+		       ! -name '.*~' ! -iname '.*.bak' -print |
+			   sed -e "s:\(^.*/\)\(\._cfg[0-9]*_\)\(.*$\):\1\2\3\%\1%\2\%\3:" |
+			   sort -t'%' -k2,2 -k4,4 -k3,3 | LANG=POSIX LC_ALL=POSIX cut -f1 -d'%'); do
+
+			rpath=$(echo "${file/\/\///}" | sed -e "s:/[^/]*$::")
+			rfile=$(echo "${file/\/\///}" | sed -e "s:^.*/::")
+			for mpath in ${CONFIG_PROTECT_MASK}; do
+				mpath="${ROOT}${mpath}"
+				mpath=$(echo "${mpath/\/\///}")
+				if [[ "${rpath}" == "${mpath}"* ]]; then
+					mv ${rpath}/${rfile} ${rpath}/${rfile:10}
+					break
+				fi
+			done
+			if [[ ! -f ${file} ]] ; then
+				echo "Skipping non-file ${file} ..."
+				continue
+			fi
+
+			if [[ "${ofile:10}" != "${rfile:10}" ]] ||
+			   [[ ${opath} != ${rpath} ]]; then
+				MATCHES=0
+				if [[ "${EU_AUTOMERGE}" == "yes" ]]; then
+					if [ ! -e "${rpath}/${rfile}" ] || [ ! -e "${rpath}/${rfile:10}" ]; then
+						MATCHES=0
+					else
+						diff -Bbua ${rpath}/${rfile} ${rpath}/${rfile:10} | egrep '^[+-]' | egrep -v '^[+-][\t ]*#|^--- |^\+\+\+ ' | egrep -qv '^[-+][\t ]*$'
+						MATCHES=$?
+					fi
+				elif [[ -z $(diff -Nua ${rpath}/${rfile} ${rpath}/${rfile:10}|
+							  grep "^[+-][^+-]"|grep -v '# .Header:.*') ]]; then
+					MATCHES=1
+				fi
+				if [[ "${MATCHES}" == "1" ]]; then
+					echo "Automerging trivial changes in: ${rpath}/${rfile:10}"
+					mv ${rpath}/${rfile} ${rpath}/${rfile:10}
+					continue
+				else
+					count=$((count + 1))
+					echo "${rpath}/${rfile:10}" > ${TMP}/files/${count}
+					echo "${rpath}/${rfile}" >> ${TMP}/files/${count}
+					ofile="${rfile}"
+					opath="${rpath}"
+					continue
+				fi
+			fi
+
+			if [[ -z $(diff -Nua ${rpath}/${rfile} ${rpath}/${ofile}|
+					  grep "^[+-][^+-]"|grep -v '# .Header:.*') ]]; then
+				mv ${rpath}/${rfile} ${rpath}/${ofile}
+				continue
+			else
+				echo "${rpath}/${rfile}" >> ${TMP}/files/${count}
+				ofile="${rfile}"
+				opath="${rpath}"
+			fi
+		done
+	done
+
+}
+
+sel_file() {
+	local -i isfirst=0
+	until [[ -f ${TMP}/files/${input} ]] || \
+	      [[ ${input} == -1 ]] || \
+	      [[ ${input} == -3 ]]
+	do
+		local numfiles=$(ls ${TMP}/files|wc -l)
+		local numwidth=${#numfiles}
+		for file in $(ls ${TMP}/files|sort -n); do
+			if [[ ${isfirst} == 0 ]] ; then
+				isfirst=${file}
+			fi
+			numshow=$(printf "%${numwidth}i${PAR} " ${file})
+			numupdates=$(( $(wc -l <${TMP}/files/${file}) - 1 ))
+			echo -n "${numshow}"
+			if [[ ${mode} == 0 ]] ; then
+				echo "$(head -n1 ${TMP}/files/${file}) (${numupdates})"
+			else
+				head -n1 ${TMP}/files/${file}
+			fi
+		done > ${TMP}/menuitems
+
+		if [ "${OVERWRITE_ALL}" == "yes" ]; then
+			input=0
+		elif [ "${DELETE_ALL}" == "yes" ]; then
+			input=0
+		else
+			[[ $CLEAR_TERM == yes ]] && clear
+			if [[ ${mode} == 0 ]] ; then
+				echo "The following is the list of files which need updating; each
+configuration file is followed by a list of possible replacement files."
+			else
+				local my_title="Please select a file to update"
+			fi
+
+			if [[ ${mode} == 0 ]] ; then
+				cat ${TMP}/menuitems
+				echo    "Please select a file to edit by entering the corresponding number."
+				echo    "              (don't use -3, -5, -7 or -9 if you're unsure what to do)"
+				echo    "              (-1 to exit) (-3 to auto merge all remaining files)"
+				echo    "                           (-5 to auto-merge AND not use 'mv -i')"
+				echo    "                           (-7 to discard all updates)"
+				echo -n "                           (-9 to discard all updates AND not use 'rm -i'): "
+				input=$(read_int)
+			else
+				dialog --title "${title}" --menu "${my_title}" \
+					0 0 0 $(echo -e "-1 Exit\n$(<${TMP}/menuitems)") \
+					2> ${TMP}/input || die "User termination!" 0
+				input=$(<${TMP}/input)
+			fi
+			if [[ ${input} == -9 ]]; then
+				read -p "Are you sure that you want to delete all updates (type YES):" reply
+				if [[ ${reply} != "YES" ]]; then
+					continue
+				else
+					input=-7
+					export rm_opts=""
+				fi
+			fi
+			if [[ ${input} == -7 ]]; then
+				input=0
+				export DELETE_ALL="yes"
+			fi
+			if [[ ${input} == -5 ]] ; then
+				input=-3
+				export mv_opts=" ${mv_opts} "
+				mv_opts="${mv_opts// -i / }"
+			fi
+			if [[ ${input} == -3 ]] ; then
+				input=0
+				export OVERWRITE_ALL="yes"
+			fi
+		fi # -3 automerge
+		if [[ -z ${input} ]] || [[ ${input} == 0 ]] ; then
+			input=${isfirst}
+		fi
+	done
+}
+
+user_special() {
+	if [ -r ${PORTAGE_CONFIGROOT}etc/etc-update.special ]; then
+		if [ -z "$1" ]; then
+			echo "ERROR: user_special() called without arguments"
+			return 1
+		fi
+		while read -r pat; do
+			echo ${1} | grep "${pat}" > /dev/null && return 0
+		done < ${PORTAGE_CONFIGROOT}etc/etc-update.special
+	fi
+	return 1
+}
+
+read_int() {
+	# Read an integer from stdin.  Continuously loops until a valid integer is
+	# read.  This is a workaround for odd behavior of bash when an attempt is
+	# made to store a value such as "1y" into an integer-only variable.
+	local my_input
+	while true; do
+		read my_input
+		# failed integer conversions will break a loop unless they're enclosed
+		# in a subshell.
+		echo "${my_input}" | ( declare -i x; read x) 2>/dev/null && break
+		echo -n "Value '$my_input' is not valid. Please enter an integer value:" >&2
+	done
+	echo ${my_input}
+}
+
+do_file() {
+	interactive_echo() { [ "${OVERWRITE_ALL}" != "yes" ] && [ "${DELETE_ALL}" != "yes" ] && echo; }
+	interactive_echo
+	local -i my_input
+	local -i fcount=0
+	until (( $(wc -l < ${TMP}/files/${input}) < 2 )); do
+		my_input=0
+		if (( $(wc -l < ${TMP}/files/${input}) == 2 )); then
+			my_input=1
+		fi
+		until (( ${my_input} > 0 )) && (( ${my_input} < $(wc -l < ${TMP}/files/${input}) )); do
+			fcount=0
+
+			if [ "${OVERWRITE_ALL}" == "yes" ]; then
+				my_input=0
+			elif [ "${DELETE_ALL}" == "yes" ]; then
+				my_input=0
+			else
+				for line in $(<${TMP}/files/${input}); do
+					if (( ${fcount} > 0 )); then
+						echo -n "${fcount}${PAR} "
+						echo "${line}"
+					else
+						if [[ ${mode} == 0 ]] ; then
+							echo "Below are the new config files for ${line}:"
+						else
+							local my_title="Please select a file to process for ${line}"
+						fi
+					fi
+					fcount=${fcount}+1
+				done > ${TMP}/menuitems
+
+				if [[ ${mode} == 0 ]] ; then
+					cat ${TMP}/menuitems
+					echo -n "Please select a file to process (-1 to exit this file): "
+					my_input=$(read_int)
+				else
+					dialog --title "${title}" --menu "${my_title}" \
+						0 0 0 $(echo -e "$(<${TMP}/menuitems)\n${fcount} Exit") \
+						2> ${TMP}/input || die "User termination!" 0
+					my_input=$(<${TMP}/input)
+				fi
+			fi # OVERWRITE_ALL
+
+			if [[ ${my_input} == 0 ]] ; then
+				my_input=1
+			elif [[ ${my_input} == -1 ]] ; then
+				input=0
+				return
+			elif [[ ${my_input} == ${fcount} ]] ; then
+				break
+			fi
+		done
+		if [[ ${my_input} == ${fcount} ]] ; then
+			break
+		fi
+
+		fcount=${my_input}+1
+
+		file=$(sed -e "${fcount}p;d" ${TMP}/files/${input})
+		ofile=$(head -n1 ${TMP}/files/${input})
+
+		do_cfg "${file}" "${ofile}"
+
+		sed -e "${fcount}!p;d" ${TMP}/files/${input} > ${TMP}/files/sed
+		mv ${TMP}/files/sed ${TMP}/files/${input}
+
+		if [[ ${my_input} == -1 ]] ; then
+			break
+		fi
+	done
+	interactive_echo
+	rm ${TMP}/files/${input}
+	count=$((count - 1))
+}
+
+do_cfg() {
+
+	local file="${1}"
+	local ofile="${2}"
+	local -i my_input=0
+
+	until (( ${my_input} == -1 )) || [ ! -f ${file} ]; do
+		if [[ "${OVERWRITE_ALL}" == "yes" ]] && ! user_special "${ofile}"; then
+			my_input=1
+		elif [[ "${DELETE_ALL}" == "yes" ]] && ! user_special "${ofile}"; then
+			my_input=2
+		else
+			[[ $CLEAR_TERM == yes ]] && clear
+			if [ "${using_editor}" == 0 ]; then
+				(
+					echo "Showing differences between ${ofile} and ${file}"
+					diff_command "${ofile}" "${file}"
+				) | ${pager}
+			else
+				echo "Beginning of differences between ${ofile} and ${file}"
+				diff_command "${ofile}" "${file}"
+				echo "End of differences between ${ofile} and ${file}"
+			fi
+			if [ -L "${file}" ]; then
+				echo
+				echo "-------------------------------------------------------------"
+				echo "NOTE: File is a symlink to another file. REPLACE recommended."
+				echo "      The original file may simply have moved. Please review."
+				echo "-------------------------------------------------------------"
+				echo
+			fi
+			echo -n "File: ${file}
+1) Replace original with update
+2) Delete update, keeping original as is
+3) Interactively merge original with update
+4) Show differences again
+5) Save update as example config
+Please select from the menu above (-1 to ignore this update): "
+			my_input=$(read_int)
+		fi
+
+		case ${my_input} in
+			1) echo "Replacing ${ofile} with ${file}"
+			   mv ${mv_opts} ${file} ${ofile}
+			   [ -n "${OVERWRITE_ALL}" ] && my_input=-1
+			   continue
+			   ;;
+			2) echo "Deleting ${file}"
+			   rm ${rm_opts} ${file}
+			   [ -n "${DELETE_ALL}" ] && my_input=-1
+			   continue
+			   ;;
+			3) do_merge "${file}" "${ofile}"
+			   my_input=${?}
+#			   [ ${my_input} == 255 ] && my_input=-1
+			   continue
+			   ;;
+			4) continue
+			   ;;
+			5) do_distconf "${file}" "${ofile}"
+			   ;;
+			*) continue
+			   ;;
+		esac
+	done
+}
+
+do_merge() {
+	# make sure we keep the merged file in the secure tempdir
+	# so we don't leak any information contained in said file
+	# (think of the case where the file has 0600 perms; during the
+	# merging process, the temp file gets umask perms!)
+
+	local file="${1}"
+	local ofile="${2}"
+	local mfile="${TMP}/${2}.merged"
+	local -i my_input=0
+	echo "${file} ${ofile} ${mfile}"
+
+	if [[ -e ${mfile} ]] ; then
+		echo "A previous version of the merged file exists, cleaning..."
+		rm ${rm_opts} "${mfile}"
+	fi
+
+	# since mfile will be like $TMP/path/to/original-file.merged, we
+	# need to make sure the full /path/to/ exists ahead of time
+	mkdir -p "${mfile%/*}"
+
+	until (( ${my_input} == -1 )); do
+		echo "Merging ${file} and ${ofile}"
+		$(echo "${merge_command}" |
+		 sed -e "s:%merged:${mfile}:g" \
+		 	 -e "s:%orig:${ofile}:g" \
+			 -e "s:%new:${file}:g")
+		until (( ${my_input} == -1 )); do
+			echo -n "1) Replace ${ofile} with merged file
+2) Show differences between merged file and original
+3) Remerge original with update
+4) Edit merged file
+5) Return to the previous menu
+Please select from the menu above (-1 to exit, losing this merge): "
+			my_input=$(read_int)
+			case ${my_input} in
+				1) echo "Replacing ${ofile} with ${mfile}"
+				   if  [[ ${USERLAND} == BSD ]] ; then
+				       chown "$(stat -f %Su:%Sg "${ofile}")" "${mfile}"
+				       chmod $(stat -f %Mp%Lp "${ofile}") "${mfile}"
+				   else
+				       chown --reference="${ofile}" "${mfile}"
+				       chmod --reference="${ofile}" "${mfile}"
+				   fi
+				   mv ${mv_opts} "${mfile}" "${ofile}"
+				   rm ${rm_opts} "${file}"
+				   return 255
+				   ;;
+				2)
+					[[ $CLEAR_TERM == yes ]] && clear
+					if [ "${using_editor}" == 0 ]; then
+						(
+							echo "Showing differences between ${ofile} and ${mfile}"
+							diff_command "${ofile}" "${mfile}"
+						) | ${pager}
+					else
+						echo "Beginning of differences between ${ofile} and ${mfile}"
+						diff_command "${ofile}" "${mfile}"
+						echo "End of differences between ${ofile} and ${mfile}"
+					fi
+				   continue
+				   ;;
+				3) break
+				   ;;
+				4) ${EDITOR:-nano -w} "${mfile}"
+				   continue
+					 ;;
+				5) rm ${rm_opts} "${mfile}"
+				   return 0
+				   ;;
+				*) continue
+				   ;;
+			esac
+		done
+	done
+	rm ${rm_opts} "${mfile}"
+	return 255
+}
+
+do_distconf() {
+	# search for any previously saved distribution config
+	# files and number the current one accordingly
+
+	local file="${1}"
+	local ofile="${2}"
+	local -i count
+	local -i fill
+	local suffix
+	local efile
+
+	for ((count = 0; count <= 9999; count++)); do
+		suffix=".dist_"
+		for ((fill = 4 - ${#count}; fill > 0; fill--)); do
+			suffix+="0"
+		done
+		suffix+="${count}"
+		efile="${ofile}${suffix}"
+		if [[ ! -f ${efile} ]]; then
+			mv ${mv_opts} "${file}" "${efile}"
+			break
+		elif diff_command "${file}" "${efile}" &> /dev/null; then
+			# replace identical copy
+			mv "${file}" "${efile}"
+			break
+		fi
+	done
+}
+
+die() {
+	trap SIGTERM
+	trap SIGINT
+
+	if [ "$2" -eq 0 ]; then
+		echo "Exiting: ${1}"
+		scan > /dev/null
+		[ ${count} -gt 0 ] && echo "NOTE: ${count} updates remaining"
+	else
+		echo "ERROR: ${1}"
+	fi
+
+	rm -rf "${TMP}"
+	exit ${2}
+}
+
+usage() {
+	cat <<-EOF
+	etc-update: Handle configuration file updates
+
+	Usage: etc-update [options]
+
+	Options:
+	  -d, --debug    Enable shell debugging
+	  -h, --help     Show help and run away
+	  -V, --version  Show version and trundle away
+	EOF
+
+	[[ -n ${*:2} ]] && printf "\nError: %s\n" "${*:2}" 1>&2
+
+	exit ${1:-0}
+}
+
+#
+# Run the script
+#
+
+SET_X=false
+while [[ -n $1 ]] ; do
+	case $1 in
+		-d|--debug)   SET_X=true;;
+		-h|--help)    usage;;
+		-V|--version) emerge --version ; exit 0;;
+		*)            usage 1 "Invalid option '$1'";;
+	esac
+	shift
+done
+${SET_X} && set -x
+
+type portageq > /dev/null || exit $?
+eval $(portageq envvar -v CONFIG_PROTECT \
+	CONFIG_PROTECT_MASK PORTAGE_CONFIGROOT PORTAGE_TMPDIR ROOT USERLAND)
+export PORTAGE_TMPDIR
+
+TMP="${PORTAGE_TMPDIR}/etc-update-$$"
+trap "die terminated 1" SIGTERM
+trap "die interrupted 1" SIGINT
+
+[ -w ${PORTAGE_CONFIGROOT}etc ] || die "Need write access to ${PORTAGE_CONFIGROOT}etc" 1
+#echo $PORTAGE_TMPDIR
+#echo $CONFIG_PROTECT
+#echo $CONFIG_PROTECT_MASK
+#export PORTAGE_TMPDIR=$(/usr/lib/portage/bin/portageq envvar PORTAGE_TMPDIR)
+
+rm -rf "${TMP}" 2> /dev/null
+mkdir "${TMP}" || die "failed to create temp dir" 1
+# make sure we have a secure directory to work in
+chmod 0700 "${TMP}" || die "failed to set perms on temp dir" 1
+chown ${UID:-0}:${GID:-0} "${TMP}" || die "failed to set ownership on temp dir" 1
+
+# I need the CONFIG_PROTECT value
+#CONFIG_PROTECT=$(/usr/lib/portage/bin/portageq envvar CONFIG_PROTECT)
+#CONFIG_PROTECT_MASK=$(/usr/lib/portage/bin/portageq envvar CONFIG_PROTECT_MASK)
+
+# load etc-config's configuration
+CLEAR_TERM=$(get_config clear_term)
+EU_AUTOMERGE=$(get_config eu_automerge)
+rm_opts=$(get_config rm_opts)
+mv_opts=$(get_config mv_opts)
+cp_opts=$(get_config cp_opts)
+pager=$(get_config pager)
+diff_command=$(get_config diff_command)
+using_editor=$(get_config using_editor)
+merge_command=$(get_config merge_command)
+declare -i mode=$(get_config mode)
+[[ -z ${mode} ]] && mode=0
+[[ -z ${pager} ]] && pager="cat"
+
+if [ "${using_editor}" == 0 ]; then
+	# Sanity check to make sure diff exists and works
+	echo > "${TMP}"/.diff-test-1
+	echo > "${TMP}"/.diff-test-2
+	
+	if ! diff_command "${TMP}"/.diff-test-1 "${TMP}"/.diff-test-2 ; then
+		die "'${diff_command}' does not seem to work, aborting" 1
+	fi
+else
+	if ! type ${diff_command%% *} >/dev/null; then
+		die "'${diff_command}' does not seem to work, aborting" 1
+	fi
+fi
+
+if [[ ${mode} == "1" ]] ; then
+	if ! type dialog >/dev/null || ! dialog --help >/dev/null ; then
+		die "mode=1 and 'dialog' not found or not executable, aborting" 1
+	fi
+fi
+
+#echo "rm_opts: $rm_opts, mv_opts: $mv_opts, cp_opts: $cp_opts"
+#echo "pager: $pager, diff_command: $diff_command, merge_command: $merge_command"
+
+if (( ${mode} == 0 )); then
+	PAR=")"
+else
+	PAR=""
+fi
+
+declare -i count=0
+declare input=0
+declare title="Gentoo's etc-update tool!"
+
+scan
+
+until (( ${input} == -1 )); do
+	if (( ${count} == 0 )); then
+		die "Nothing left to do; exiting. :)" 0
+	fi
+	sel_file
+	if (( ${input} != -1 )); then
+		do_file
+	fi
+done
+
+die "User termination!" 0

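For reference, a sketch of how do_merge() above expands the %merged, %orig
and %new placeholders. The sdiff-based template and the file names are
illustrative assumptions (the real merge_command comes from etc-update's
configuration), as is the ._cfg0000_ update name:

    # hypothetical template value
    merge_command="sdiff --suppress-common-lines --output=%merged %orig %new"

    # the same sed substitution do_merge() performs before executing the result
    echo "${merge_command}" |
        sed -e "s:%merged:${TMP}/etc/foo.conf.merged:g" \
            -e "s:%orig:/etc/foo.conf:g" \
            -e "s:%new:/etc/._cfg0000_foo.conf:g"
    # -> sdiff --suppress-common-lines --output=${TMP}/etc/foo.conf.merged \
    #    /etc/foo.conf /etc/._cfg0000_foo.conf

Likewise, the zero-padding loop in do_distconf() amounts to
suffix=$(printf ".dist_%04d" "${count}"), producing foo.conf.dist_0000,
foo.conf.dist_0001, and so on next to the protected file.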
diff --git a/portage_with_autodep/bin/filter-bash-environment.py b/portage_with_autodep/bin/filter-bash-environment.py
new file mode 100755
index 0000000..b9aec96
--- /dev/null
+++ b/portage_with_autodep/bin/filter-bash-environment.py
@@ -0,0 +1,150 @@
+#!/usr/bin/python
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import codecs
+import io
+import optparse
+import os
+import re
+import sys
+
+here_doc_re = re.compile(r'.*\s<<[-]?(\w+)$')
+func_start_re = re.compile(r'^[-\w]+\s*\(\)\s*$')
+func_end_re = re.compile(r'^\}$')
+
+var_assign_re = re.compile(r'(^|^declare\s+-\S+\s+|^declare\s+|^export\s+)([^=\s]+)=("|\')?.*$')
+close_quote_re = re.compile(r'(\\"|"|\')\s*$')
+readonly_re = re.compile(r'^declare\s+-(\S*)r(\S*)\s+')
+# declare without assignment
+var_declare_re = re.compile(r'^declare(\s+-\S+)?\s+([^=\s]+)\s*$')
+
+def have_end_quote(quote, line):
+	"""
+	Check if the line has an end quote (useful for handling multi-line
+	quotes). This handles escaped double quotes that may occur at the
+	end of a line. The POSIX spec does not allow escaping of single
+	quotes inside of single quotes, so that case is not handled.
+	"""
+	close_quote_match = close_quote_re.search(line)
+	return close_quote_match is not None and \
+		close_quote_match.group(1) == quote
+
+def filter_declare_readonly_opt(line):
+	readonly_match = readonly_re.match(line)
+	if readonly_match is not None:
+		declare_opts = ''
+		for i in (1, 2):
+			group = readonly_match.group(i)
+			if group is not None:
+				declare_opts += group
+		if declare_opts:
+			line = 'declare -%s %s' % \
+				(declare_opts, line[readonly_match.end():])
+		else:
+			line = 'declare ' + line[readonly_match.end():]
+	return line
+
+def filter_bash_environment(pattern, file_in, file_out):
+	# Filter out any instances of the \1 character from variable values
+	# since this character multiplies each time that the environment
+	# is saved (strange bash behavior). This can eventually result in
+	# mysterious 'Argument list too long' errors from programs that have
+	# huge strings of \1 characters in their environment. See bug #222091.
+	here_doc_delim = None
+	in_func = None
+	multi_line_quote = None
+	multi_line_quote_filter = None
+	for line in file_in:
+		if multi_line_quote is not None:
+			if not multi_line_quote_filter:
+				file_out.write(line.replace("\1", ""))
+			if have_end_quote(multi_line_quote, line):
+				multi_line_quote = None
+				multi_line_quote_filter = None
+			continue
+		if here_doc_delim is None and in_func is None:
+			var_assign_match = var_assign_re.match(line)
+			if var_assign_match is not None:
+				quote = var_assign_match.group(3)
+				filter_this = pattern.match(var_assign_match.group(2)) \
+					is not None
+				# Exclude the start quote when searching for the end quote,
+				# to ensure that the start quote is not misidentified as the
+				# end quote (happens if there is a newline immediately after
+				# the start quote).
+				if quote is not None and not \
+					have_end_quote(quote, line[var_assign_match.end(2)+2:]):
+					multi_line_quote = quote
+					multi_line_quote_filter = filter_this
+				if not filter_this:
+					line = filter_declare_readonly_opt(line)
+					file_out.write(line.replace("\1", ""))
+				continue
+			else:
+				declare_match = var_declare_re.match(line)
+				if declare_match is not None:
+					# declare without assignment
+					filter_this = pattern.match(declare_match.group(2)) \
+						is not None
+					if not filter_this:
+						line = filter_declare_readonly_opt(line)
+						file_out.write(line)
+					continue
+
+		if here_doc_delim is not None:
+			if here_doc_delim.match(line):
+				here_doc_delim = None
+			file_out.write(line)
+			continue
+		here_doc = here_doc_re.match(line)
+		if here_doc is not None:
+			here_doc_delim = re.compile("^%s$" % here_doc.group(1))
+			file_out.write(line)
+			continue
+		# Note: here-documents are handled before functions since otherwise
+		# it would be possible for the content of a here-document to be
+		# mistaken for the end of a function.
+		if in_func:
+			if func_end_re.match(line) is not None:
+				in_func = None
+			file_out.write(line)
+			continue
+		in_func = func_start_re.match(line)
+		if in_func is not None:
+			file_out.write(line)
+			continue
+		# This line is not recognized as part of a variable assignment,
+		# function definition, or here document, so just allow it to
+		# pass through.
+		file_out.write(line)
+
+if __name__ == "__main__":
+	description = "Filter out variable assignments for variable " + \
+		"names matching a given PATTERN " + \
+		"while leaving bash function definitions and here-documents " + \
+		"intact. The PATTERN is a space separated list of variable names" + \
+		" and it supports python regular expression syntax."
+	usage = "usage: %s PATTERN" % os.path.basename(sys.argv[0])
+	parser = optparse.OptionParser(description=description, usage=usage)
+	options, args = parser.parse_args(sys.argv[1:])
+	if len(args) != 1:
+		parser.error("Missing required PATTERN argument.")
+	file_in = sys.stdin
+	file_out = sys.stdout
+	if sys.hexversion >= 0x3000000:
+		file_in = codecs.iterdecode(sys.stdin.buffer.raw,
+			'utf_8', errors='replace')
+		file_out = io.TextIOWrapper(sys.stdout.buffer,
+			'utf_8', errors='backslashreplace')
+
+	var_pattern = args[0].split()
+
+	# Filter invalid variable names that are not supported by bash.
+	var_pattern.append(r'\d.*')
+	var_pattern.append(r'.*\W.*')
+
+	var_pattern = "^(%s)$" % "|".join(var_pattern)
+	filter_bash_environment(
+		re.compile(var_pattern), file_in, file_out)
+	file_out.flush()

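A hedged usage sketch for the filter above; the pattern list and output
file are made up for illustration (portage itself drives this script when
it post-processes saved ebuild environments):

    # dump the current shell state, then drop the named variables;
    # PATTERN is space-separated and may use Python regex syntax
    set | python filter-bash-environment.py 'PATH HOME SANDBOX_.*' \
        > environment.filtered

Function bodies and here-documents pass through untouched; only matching
variable assignments, including multi-line quoted values, are removed.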
diff --git a/portage_with_autodep/bin/fixpackages b/portage_with_autodep/bin/fixpackages
new file mode 100755
index 0000000..5e1df70
--- /dev/null
+++ b/portage_with_autodep/bin/fixpackages
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import os,sys
+os.environ["PORTAGE_CALLER"]="fixpackages"
+try:
+	import portage
+except ImportError:
+	from os import path as osp
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+	import portage
+
+from portage import os
+from portage.output import EOutput
+from textwrap import wrap
+from portage._global_updates import _global_updates
+mysettings = portage.settings
+mytrees = portage.db
+mtimedb = portage.mtimedb
+
+if mysettings['ROOT'] != "/":
+	out = EOutput()
+	msg = "The fixpackages program is not intended for use with " + \
+		"ROOT != \"/\". Instead use `emaint --fix movebin` and/or " + \
+		"`emaint --fix moveinst."
+	for line in wrap(msg, 72):
+		out.eerror(line)
+	sys.exit(1)
+
+try:
+	os.nice(int(mysettings.get("PORTAGE_NICENESS", "0")))
+except (OSError, ValueError) as e:
+	portage.writemsg("!!! Failed to change nice value to '%s'\n" % \
+		mysettings["PORTAGE_NICENESS"])
+	portage.writemsg("!!! %s\n" % str(e))
+	del e
+
+_global_updates(mytrees, mtimedb["updates"])
+
+print()
+print("Done.")
+print()

diff --git a/portage_with_autodep/bin/glsa-check b/portage_with_autodep/bin/glsa-check
new file mode 100755
index 0000000..2f2d555
--- /dev/null
+++ b/portage_with_autodep/bin/glsa-check
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+# Copyright 2008-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+
+try:
+	import portage
+except ImportError:
+	from os import path as osp
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+	import portage
+
+from portage import os
+from portage.output import *
+
+from optparse import OptionGroup, OptionParser
+
+__program__ = "glsa-check"
+__author__ = "Marius Mauch <genone@gentoo.org>"
+__version__ = "1.0"
+
+def cb_version(*args, **kwargs):
+	"""Callback for --version"""
+	sys.stderr.write("\n"+ __program__ + ", version " + __version__ + "\n")
+	sys.stderr.write("Author: " + __author__ + "\n")
+	sys.stderr.write("This program is licensed under the GPL, version 2\n\n")
+	sys.exit(0)
+
+# option parsing
+parser = OptionParser(usage="%prog <option> [glsa-list]",
+		version="%prog "+ __version__)
+parser.epilog = "glsa-list can contain an arbitrary number of GLSA ids," \
+		" filenames containing GLSAs or the special identifiers" \
+		" 'all', 'new' and 'affected'"
+
+modes = OptionGroup(parser, "Modes")
+modes.add_option("-l", "--list", action="store_const",
+		const="list", dest="mode",
+		help="List all unapplied GLSA")
+modes.add_option("-d", "--dump", action="store_const",
+		const="dump", dest="mode",
+		help="Show all information about the given GLSA")
+modes.add_option("", "--print", action="store_const",
+		const="dump", dest="mode",
+		help="Alias for --dump")
+modes.add_option("-t", "--test", action="store_const",
+		const="test", dest="mode",
+		help="Test if this system is affected by the given GLSA")
+modes.add_option("-p", "--pretend", action="store_const",
+		const="pretend", dest="mode",
+		help="Show the necessary commands to apply this GLSA")
+modes.add_option("-f", "--fix", action="store_const",
+		const="fix", dest="mode",
+		help="Try to auto-apply this GLSA (experimental)")
+modes.add_option("-i", "--inject", action="store_const", dest="mode",
+		help="Inject the given GLSA into the checkfile")
+modes.add_option("-m", "--mail", action="store_const",
+		const="mail", dest="mode",
+		help="Send a mail with the given GLSAs to the administrator")
+parser.add_option_group(modes)
+
+parser.remove_option("--version")
+parser.add_option("-V", "--version", action="callback",
+		callback=cb_version, help="Some information about this tool")
+parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
+		help="Print more information")
+parser.add_option("-n", "--nocolor", action="callback",
+		callback=lambda *args, **kwargs: nocolor(),
+		help="Disable colors")
+parser.add_option("-e", "--emergelike", action="store_false", dest="least_change",
+		help="Do not use a least-change algorithm")
+parser.add_option("-c", "--cve", action="store_true", dest="list_cve",
+		help="Show CAN ids in listing mode")
+
+options, params = parser.parse_args()
+
+mode = options.mode
+least_change = options.least_change
+list_cve = options.list_cve
+verbose = options.verbose
+
+# Sanity checking
+if mode is None:
+	sys.stderr.write("No mode given: what should I do?\n")
+	parser.print_help()
+	sys.exit(1)
+elif mode != "list" and not params:
+	sys.stderr.write("\nno GLSA given, so we'll do nothing for now. \n")
+	sys.stderr.write("If you want to run on all GLSA please tell me so \n")
+	sys.stderr.write("(specify \"all\" as parameter)\n\n")
+	parser.print_help()
+	sys.exit(1)
+elif mode in ["fix", "inject"] and os.geteuid() != 0:
+	# we need root privileges for write access
+	sys.stderr.write("\nThis tool needs root access to "+options.mode+" this GLSA\n\n")
+	sys.exit(2)
+elif mode == "list" and not params:
+	params.append("new")
+
+# delay this for speed increase
+from portage.glsa import *
+
+vardb = portage.db[portage.settings["ROOT"]]["vartree"].dbapi
+portdb = portage.db["/"]["porttree"].dbapi
+
+# build glsa lists
+completelist = get_glsa_list(portage.settings)
+
+checklist = get_applied_glsas(portage.settings)
+todolist = [e for e in completelist if e not in checklist]
+
+glsalist = []
+if "new" in params:
+	glsalist = todolist
+	params.remove("new")
+	
+if "all" in params:
+	glsalist = completelist
+	params.remove("all")
+if "affected" in params:
+	# replaced completelist with todolist on request of wschlich
+	for x in todolist:
+		try:
+			myglsa = Glsa(x, portage.settings, vardb, portdb)
+		except (GlsaTypeException, GlsaFormatException) as e:
+			if verbose:
+				sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (x, e)))
+			continue
+		if myglsa.isVulnerable():
+			glsalist.append(x)
+	params.remove("affected")
+
+# remove invalid parameters
+for p in params[:]:
+	if not (p in completelist or os.path.exists(p)):
+		sys.stderr.write(("(removing %s from parameter list as it isn't a valid GLSA specification)\n" % p))
+		params.remove(p)
+
+glsalist.extend([g for g in params if g not in glsalist])
+
+def summarylist(myglsalist, fd1=sys.stdout, fd2=sys.stderr):
+	fd2.write(white("[A]")+" means this GLSA was already applied,\n")
+	fd2.write(green("[U]")+" means the system is not affected and\n")
+	fd2.write(red("[N]")+" indicates that the system might be affected.\n\n")
+
+	myglsalist.sort()
+	for myid in myglsalist:
+		try:
+			myglsa = Glsa(myid, portage.settings, vardb, portdb)
+		except (GlsaTypeException, GlsaFormatException) as e:
+			if verbose:
+				fd2.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
+			continue
+		if myglsa.isApplied():
+			status = "[A]"
+			color = white
+		elif myglsa.isVulnerable():
+			status = "[N]"
+			color = red
+		else:
+			status = "[U]"
+			color = green
+
+		if verbose:
+			access = ("[%-8s] " % myglsa.access)
+		else:
+			access=""
+
+		fd1.write(color(myglsa.nr) + " " + color(status) + " " + color(access) + myglsa.title + " (")
+		if not verbose:
+			for pkg in list(myglsa.packages)[:3]:
+				fd1.write(" " + pkg + " ")
+			if len(myglsa.packages) > 3:
+				fd1.write("... ")
+		else:
+			for pkg in myglsa.packages:
+				mylist = vardb.match(pkg)
+				if len(mylist) > 0:
+					pkg = color(" ".join(mylist))
+				fd1.write(" " + pkg + " ")
+
+		fd1.write(")")
+		if list_cve:
+			fd1.write(" "+(",".join([r[:13] for r in myglsa.references if r[:4] in ["CAN-", "CVE-"]])))
+		fd1.write("\n")		
+	return 0
+
+if mode == "list":
+	sys.exit(summarylist(glsalist))
+
+# dump, fix, inject and pretend are nearly the same code, only the glsa method call differs
+if mode in ["dump", "fix", "inject", "pretend"]:
+	for myid in glsalist:
+		try:
+			myglsa = Glsa(myid, portage.settings, vardb, portdb)
+		except (GlsaTypeException, GlsaFormatException) as e:
+			if verbose:
+				sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
+			continue
+		if mode == "dump":
+			myglsa.dump()
+		elif mode == "fix":
+			sys.stdout.write("fixing "+myid+"\n")
+			mergelist = myglsa.getMergeList(least_change=least_change)
+			for pkg in mergelist:
+				sys.stdout.write(">>> merging "+pkg+"\n")
+				# using emerge for the actual merging as it contains the dependency
+				# code and we want to be consistent in behaviour. Also this functionality
+				# will be integrated in emerge later, so it shouldn't hurt much.
+				emergecmd = "emerge --oneshot " + portage.settings["EMERGE_OPTS"] + " =" + pkg
+				if verbose:
+					sys.stderr.write(emergecmd+"\n")
+				exitcode = os.system(emergecmd)
+				# system() returns the exitcode in the high byte of a 16bit integer
+				if exitcode >= 1<<8:
+					exitcode >>= 8
+				if exitcode:
+					sys.exit(exitcode)
+			myglsa.inject()
+		elif mode == "pretend":
+			sys.stdout.write("Checking GLSA "+myid+"\n")
+			mergelist = myglsa.getMergeList(least_change=least_change)
+			if mergelist:
+				sys.stdout.write("The following updates will be performed for this GLSA:\n")
+				for pkg in mergelist:
+					oldver = None
+					for x in vardb.match(portage.cpv_getkey(pkg)):
+						if vardb.aux_get(x, ["SLOT"]) == portdb.aux_get(pkg, ["SLOT"]):
+							oldver = x
+					if oldver is None:
+						raise ValueError("could not find old version for package %s" % pkg)
+					oldver = oldver[len(portage.cpv_getkey(oldver))+1:]
+					sys.stdout.write("     " + pkg + " (" + oldver + ")\n")
+			else:
+				sys.stdout.write("Nothing to do for this GLSA\n")
+		elif mode == "inject":
+			sys.stdout.write("injecting " + myid + "\n")
+			myglsa.inject()
+		sys.stdout.write("\n")
+	sys.exit(0)
+
+# test is a bit different as Glsa.test() produces no output
+if mode == "test":
+	outputlist = []
+	for myid in glsalist:
+		try:
+			myglsa = Glsa(myid, portage.settings, vardb, portdb)
+		except (GlsaTypeException, GlsaFormatException) as e:
+			if verbose:
+				sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
+			continue
+		if myglsa.isVulnerable():
+			outputlist.append(str(myglsa.nr))
+	if len(outputlist) > 0:
+		sys.stderr.write("This system is affected by the following GLSAs:\n")
+		if verbose:
+			summarylist(outputlist)
+		else:
+			sys.stdout.write("\n".join(outputlist)+"\n")
+	else:
+		sys.stderr.write("This system is not affected by any of the listed GLSAs\n")
+	sys.exit(0)
+
+# mail mode as requested by solar
+if mode == "mail":
+	import portage.mail, socket
+	from io import StringIO
+	from email.mime.text import MIMEText
+	
+	# color doesn't make any sense for mail
+	nocolor()
+
+	if "PORTAGE_ELOG_MAILURI" in portage.settings:
+		myrecipient = portage.settings["PORTAGE_ELOG_MAILURI"].split()[0]
+	else:
+		myrecipient = "root@localhost"
+	
+	if "PORTAGE_ELOG_MAILFROM" in portage.settings:
+		myfrom = portage.settings["PORTAGE_ELOG_MAILFROM"]
+	else:
+		myfrom = "glsa-check"
+
+	mysubject = "[glsa-check] Summary for %s" % socket.getfqdn()
+
+	# need a file object for summarylist()
+	myfd = StringIO()
+	myfd.write("GLSA Summary report for host %s\n" % socket.getfqdn())
+	myfd.write("(Command was: %s)\n\n" % " ".join(sys.argv))
+	summarylist(glsalist, fd1=myfd, fd2=myfd)
+	summary = str(myfd.getvalue())
+	myfd.close()
+
+	myattachments = []
+	for myid in glsalist:
+		try:
+			myglsa = Glsa(myid, portage.settings, vardb, portdb)
+		except (GlsaTypeException, GlsaFormatException) as e:
+			if verbose:
+				sys.stderr.write(("invalid GLSA: %s (error message was: %s)\n" % (myid, e)))
+			continue
+		myfd = StringIO()
+		myglsa.dump(outstream=myfd)
+		myattachments.append(MIMEText(str(myfd.getvalue()), _charset="utf8"))
+		myfd.close()
+		
+	mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject, summary, myattachments)
+	portage.mail.send_mail(portage.settings, mymessage)
+		
+	sys.exit(0)
+	
+# something wrong here, all valid paths are covered with sys.exit()
+sys.stderr.write("nothing more to do\n")
+sys.exit(2)

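Typical invocations of the modes wired up above (the GLSA id is a made-up
example; output shapes are illustrative):

    glsa-check -l affected    # list GLSAs this system appears affected by
    glsa-check -t all         # print only the ids of applicable GLSAs
    glsa-check -p 201108-01   # show the updates a fix would emerge
    glsa-check -f affected    # apply fixes via emerge --oneshot (needs root)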
diff --git a/portage_with_autodep/bin/isolated-functions.sh b/portage_with_autodep/bin/isolated-functions.sh
new file mode 100644
index 0000000..65bb1d5
--- /dev/null
+++ b/portage_with_autodep/bin/isolated-functions.sh
@@ -0,0 +1,630 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# We need this next line for "die" and "assert". It expands aliases.
+# It _must_ precede all the calls to die and assert.
+shopt -s expand_aliases
+alias save_IFS='[ "${IFS:-unset}" != "unset" ] && old_IFS="${IFS}"'
+alias restore_IFS='if [ "${old_IFS:-unset}" != "unset" ]; then IFS="${old_IFS}"; unset old_IFS; else unset IFS; fi'
+
+assert() {
+	local x pipestatus=${PIPESTATUS[*]}
+	for x in $pipestatus ; do
+		[[ $x -eq 0 ]] || die "$@"
+	done
+}
+
+assert_sigpipe_ok() {
+	# When extracting a tar file like this:
+	#
+	#     bzip2 -dc foo.tar.bz2 | tar xof -
+	#
+	# For some tar files (see bug #309001), tar will
+	# close its stdin pipe when the decompressor still has
+	# remaining data to be written to its stdout pipe. This
+	# causes the decompressor to be killed by SIGPIPE. In
+	# this case, we want to ignore pipe writers killed by
+	# SIGPIPE, and trust the exit status of tar. We refer
+	# to the bash manual section "3.7.5 Exit Status"
+	# which says, "When a command terminates on a fatal
+	# signal whose number is N, Bash uses the value 128+N
+	# as the exit status."
+
+	local x pipestatus=${PIPESTATUS[*]}
+	for x in $pipestatus ; do
+		# Allow SIGPIPE through (128 + 13)
+		[[ $x -ne 0 && $x -ne ${PORTAGE_SIGPIPE_STATUS:-141} ]] && die "$@"
+	done
+
+	# Require normal success for the last process (tar).
+	[[ $x -eq 0 ]] || die "$@"
+}
+
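# Illustrative call pattern for assert_sigpipe_ok (annotation; the
# tarball name mirrors the bug #309001 scenario and is an assumption):
#
#     bzip2 -dc "${A}" | tar xof -
#     assert_sigpipe_ok "$FUNCNAME: uncompress or tar failed"
#
# A decompressor killed by SIGPIPE exits with 128 + 13 = 141 and is
# tolerated; any other non-zero status, or a failure of the final tar
# process itself, still triggers die.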
+shopt -s extdebug
+
+# dump_trace([number of funcs on stack to skip],
+#            [whitespacing for filenames],
+#            [whitespacing for line numbers])
+dump_trace() {
+	local funcname="" sourcefile="" lineno="" s="yes" n p
+	declare -i strip=${1:-1}
+	local filespacing=$2 linespacing=$3
+
+	# The qa_call() function and anything before it are portage internals
+	# that the user will not be interested in. Therefore, the stack trace
+	# should only show calls that come after qa_call().
+	(( n = ${#FUNCNAME[@]} - 1 ))
+	(( p = ${#BASH_ARGV[@]} ))
+	while (( n > 0 )) ; do
+		[ "${FUNCNAME[${n}]}" == "qa_call" ] && break
+		(( p -= ${BASH_ARGC[${n}]} ))
+		(( n-- ))
+	done
+	if (( n == 0 )) ; then
+		(( n = ${#FUNCNAME[@]} - 1 ))
+		(( p = ${#BASH_ARGV[@]} ))
+	fi
+
+	eerror "Call stack:"
+	while (( n > ${strip} )) ; do
+		funcname=${FUNCNAME[${n} - 1]}
+		sourcefile=$(basename "${BASH_SOURCE[${n}]}")
+		lineno=${BASH_LINENO[${n} - 1]}
+		# Display function arguments
+		args=
+		if [[ -n "${BASH_ARGV[@]}" ]]; then
+			for (( j = 1 ; j <= ${BASH_ARGC[${n} - 1]} ; ++j )); do
+				newarg=${BASH_ARGV[$(( p - j - 1 ))]}
+				args="${args:+${args} }'${newarg}'"
+			done
+			(( p -= ${BASH_ARGC[${n} - 1]} ))
+		fi
+		eerror "  $(printf "%${filespacing}s" "${sourcefile}"), line $(printf "%${linespacing}s" "${lineno}"):  Called ${funcname}${args:+ ${args}}"
+		(( n-- ))
+	done
+}
+
+nonfatal() {
+	if has "${EAPI:-0}" 0 1 2 3 3_pre2 ; then
+		die "$FUNCNAME() not supported in this EAPI"
+	fi
+	if [[ $# -lt 1 ]]; then
+		die "$FUNCNAME(): Missing argument"
+	fi
+
+	PORTAGE_NONFATAL=1 "$@"
+}
+
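# Illustrative use of nonfatal (annotation): in EAPI 4, where helpers
# die on failure by themselves, this downgrades die() to a warning plus
# a non-zero return that the ebuild can handle:
#
#     nonfatal dobin optional-tool || ewarn "optional-tool not installed"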
+helpers_die() {
+	case "${EAPI:-0}" in
+		0|1|2|3)
+			echo -e "$@" >&2
+			;;
+		*)
+			die "$@"
+			;;
+	esac
+}
+
+die() {
+	if [[ $PORTAGE_NONFATAL -eq 1 ]]; then
+		echo -e " $WARN*$NORMAL ${FUNCNAME[1]}: WARNING: $@" >&2
+		return 1
+	fi
+
+	set +e
+	if [ -n "${QA_INTERCEPTORS}" ] ; then
+		# die was called from inside inherit. We need to clean up
+		# QA_INTERCEPTORS since sed is called below.
+		unset -f ${QA_INTERCEPTORS}
+		unset QA_INTERCEPTORS
+	fi
+	local n filespacing=0 linespacing=0
+	# setup spacing to make output easier to read
+	(( n = ${#FUNCNAME[@]} - 1 ))
+	while (( n > 0 )) ; do
+		[ "${FUNCNAME[${n}]}" == "qa_call" ] && break
+		(( n-- ))
+	done
+	(( n == 0 )) && (( n = ${#FUNCNAME[@]} - 1 ))
+	while (( n > 0 )); do
+		sourcefile=${BASH_SOURCE[${n}]} sourcefile=${sourcefile##*/}
+		lineno=${BASH_LINENO[${n}]}
+		((filespacing < ${#sourcefile})) && filespacing=${#sourcefile}
+		((linespacing < ${#lineno}))     && linespacing=${#lineno}
+		(( n-- ))
+	done
+
+	# When a helper binary dies automatically in EAPI 4 and later, we don't
+	# get a stack trace, so at least report the phase that failed.
+	local phase_str=
+	[[ -n $EBUILD_PHASE ]] && phase_str=" ($EBUILD_PHASE phase)"
+	eerror "ERROR: $CATEGORY/$PF failed${phase_str}:"
+	eerror "  ${*:-(no error message)}"
+	eerror
+	# dump_trace is useless when the main script is a helper binary
+	local main_index
+	(( main_index = ${#BASH_SOURCE[@]} - 1 ))
+	if has ${BASH_SOURCE[$main_index]##*/} ebuild.sh misc-functions.sh ; then
+	dump_trace 2 ${filespacing} ${linespacing}
+	eerror "  $(printf "%${filespacing}s" "${BASH_SOURCE[1]##*/}"), line $(printf "%${linespacing}s" "${BASH_LINENO[0]}"):  Called die"
+	eerror "The specific snippet of code:"
+	# This scans the file that called die and prints out the logic that
+	# ended in the call to die.  This really only handles lines that end
+	# with '|| die' and any preceding lines with line continuations (\).
+	# This tends to be the most common usage though, so let's do it.
+	# Due to the usage of appending to the hold space (even when empty),
+	# we always end up with the first line being a blank (thus the 2nd sed).
+	sed -n \
+		-e "# When we get to the line that failed, append it to the
+		    # hold space, move the hold space to the pattern space,
+		    # then print out the pattern space and quit immediately
+		    ${BASH_LINENO[0]}{H;g;p;q}" \
+		-e '# If this line ends with a line continuation, append it
+		    # to the hold space
+		    /\\$/H' \
+		-e '# If this line does not end with a line continuation,
+		    # erase the line and set the hold buffer to it (thus
+		    # erasing the hold buffer in the process)
+		    /[^\]$/{s:^.*$::;h}' \
+		"${BASH_SOURCE[1]}" \
+		| sed -e '1d' -e 's:^:RETAIN-LEADING-SPACE:' \
+		| while read -r n ; do eerror "  ${n#RETAIN-LEADING-SPACE}" ; done
+	eerror
+	fi
+	eerror "If you need support, post the output of 'emerge --info =$CATEGORY/$PF',"
+	eerror "the complete build log and the output of 'emerge -pqv =$CATEGORY/$PF'."
+	if [[ -n ${EBUILD_OVERLAY_ECLASSES} ]] ; then
+		eerror "This ebuild used the following eclasses from overlays:"
+		local x
+		for x in ${EBUILD_OVERLAY_ECLASSES} ; do
+			eerror "  ${x}"
+		done
+	fi
+	if [ "${EMERGE_FROM}" != "binary" ] && \
+		! has ${EBUILD_PHASE} prerm postrm && \
+		[ "${EBUILD#${PORTDIR}/}" == "${EBUILD}" ] ; then
+		local overlay=${EBUILD%/*}
+		overlay=${overlay%/*}
+		overlay=${overlay%/*}
+		if [[ -n $PORTAGE_REPO_NAME ]] ; then
+			eerror "This ebuild is from an overlay named" \
+				"'$PORTAGE_REPO_NAME': '${overlay}/'"
+		else
+			eerror "This ebuild is from an overlay: '${overlay}/'"
+		fi
+	elif [[ -n $PORTAGE_REPO_NAME && -f "$PORTDIR"/profiles/repo_name ]] ; then
+		local portdir_repo_name=$(<"$PORTDIR"/profiles/repo_name)
+		if [[ -n $portdir_repo_name && \
+			$portdir_repo_name != $PORTAGE_REPO_NAME ]] ; then
+			eerror "This ebuild is from a repository" \
+				"named '$PORTAGE_REPO_NAME'"
+		fi
+	fi
+
+	if [[ "${EBUILD_PHASE/depend}" == "${EBUILD_PHASE}" ]] ; then
+		local x
+		for x in $EBUILD_DEATH_HOOKS; do
+			${x} "$@" >&2 1>&2
+		done
+		> "$PORTAGE_BUILDDIR/.die_hooks"
+	fi
+
+	[[ -n ${PORTAGE_LOG_FILE} ]] \
+		&& eerror "The complete build log is located at '${PORTAGE_LOG_FILE}'."
+	if [ -f "${T}/environment" ] ; then
+		eerror "The ebuild environment file is located at '${T}/environment'."
+	elif [ -d "${T}" ] ; then
+		{
+			set
+			export
+		} > "${T}/die.env"
+		eerror "The ebuild environment file is located at '${T}/die.env'."
+	fi
+	eerror "S: '${S}'"
+
+	[[ -n $PORTAGE_EBUILD_EXIT_FILE ]] && > "$PORTAGE_EBUILD_EXIT_FILE"
+	[[ -n $PORTAGE_IPC_DAEMON ]] && "$PORTAGE_BIN_PATH"/ebuild-ipc exit 1
+
+	# subshell die support
+	[[ $BASHPID = $EBUILD_MASTER_PID ]] || kill -s SIGTERM $EBUILD_MASTER_PID
+	exit 1
+}
+
+# We need to implement diefunc() since environment.bz2 files contain
+# calls to it (due to alias expansion).
+diefunc() {
+	die "${@}"
+}
+
+quiet_mode() {
+	[[ ${PORTAGE_QUIET} -eq 1 ]]
+}
+
+vecho() {
+	quiet_mode || echo "$@"
+}
+
+# Internal logging function, don't use this in ebuilds
+elog_base() {
+	local messagetype
+	[ -z "${1}" -o -z "${T}" -o ! -d "${T}/logging" ] && return 1
+	case "${1}" in
+		INFO|WARN|ERROR|LOG|QA)
+			messagetype="${1}"
+			shift
+			;;
+		*)
+			vecho -e " ${BAD}*${NORMAL} Invalid use of internal function elog_base(), next message will not be logged"
+			return 1
+			;;
+	esac
+	echo -e "$@" | while read -r ; do
+		echo "$messagetype $REPLY" >> \
+			"${T}/logging/${EBUILD_PHASE:-other}"
+	done
+	return 0
+}
+
+eqawarn() {
+	elog_base QA "$*"
+	[[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+	echo -e "$@" | while read -r ; do
+		vecho " $WARN*$NORMAL $REPLY" >&2
+	done
+	LAST_E_CMD="eqawarn"
+	return 0
+}
+
+elog() {
+	elog_base LOG "$*"
+	[[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+	echo -e "$@" | while read -r ; do
+		echo " $GOOD*$NORMAL $REPLY"
+	done
+	LAST_E_CMD="elog"
+	return 0
+}
+
+esyslog() {
+	local pri=
+	local tag=
+
+	if [ -x /usr/bin/logger ]
+	then
+		pri="$1"
+		tag="$2"
+
+		shift 2
+		[ -z "$*" ] && return 0
+
+		/usr/bin/logger -p "${pri}" -t "${tag}" -- "$*"
+	fi
+
+	return 0
+}
+
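# Illustrative esyslog call (annotation; priority and tag follow
# logger(1) conventions, the message is made up):
#
#     esyslog "user.warn" "${PN}" "phase failed, see the build log"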
+einfo() {
+	elog_base INFO "$*"
+	[[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+	echo -e "$@" | while read -r ; do
+		echo " $GOOD*$NORMAL $REPLY"
+	done
+	LAST_E_CMD="einfo"
+	return 0
+}
+
+einfon() {
+	elog_base INFO "$*"
+	[[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+	echo -ne " ${GOOD}*${NORMAL} $*"
+	LAST_E_CMD="einfon"
+	return 0
+}
+
+ewarn() {
+	elog_base WARN "$*"
+	[[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+	echo -e "$@" | while read -r ; do
+		echo " $WARN*$NORMAL $RC_INDENTATION$REPLY" >&2
+	done
+	LAST_E_CMD="ewarn"
+	return 0
+}
+
+eerror() {
+	elog_base ERROR "$*"
+	[[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo
+	echo -e "$@" | while read -r ; do
+		echo " $BAD*$NORMAL $RC_INDENTATION$REPLY" >&2
+	done
+	LAST_E_CMD="eerror"
+	return 0
+}
+
+ebegin() {
+	local msg="$*" dots spaces=${RC_DOT_PATTERN//?/ }
+	if [[ -n ${RC_DOT_PATTERN} ]] ; then
+		dots=$(printf "%$(( COLS - 3 - ${#RC_INDENTATION} - ${#msg} - 7 ))s" '')
+		dots=${dots//${spaces}/${RC_DOT_PATTERN}}
+		msg="${msg}${dots}"
+	else
+		msg="${msg} ..."
+	fi
+	einfon "${msg}"
+	[[ ${RC_ENDCOL} == "yes" ]] && echo
+	LAST_E_LEN=$(( 3 + ${#RC_INDENTATION} + ${#msg} ))
+	LAST_E_CMD="ebegin"
+	return 0
+}
+
+_eend() {
+	local retval=${1:-0} efunc=${2:-eerror} msg
+	shift 2
+
+	if [[ ${retval} == "0" ]] ; then
+		msg="${BRACKET}[ ${GOOD}ok${BRACKET} ]${NORMAL}"
+	else
+		if [[ -n $* ]] ; then
+			${efunc} "$*"
+		fi
+		msg="${BRACKET}[ ${BAD}!!${BRACKET} ]${NORMAL}"
+	fi
+
+	if [[ ${RC_ENDCOL} == "yes" ]] ; then
+		echo -e "${ENDCOL} ${msg}"
+	else
+		[[ ${LAST_E_CMD} == ebegin ]] || LAST_E_LEN=0
+		printf "%$(( COLS - LAST_E_LEN - 7 ))s%b\n" '' "${msg}"
+	fi
+
+	return ${retval}
+}
+
+eend() {
+	local retval=${1:-0}
+	shift
+
+	_eend ${retval} eerror "$*"
+
+	LAST_E_CMD="eend"
+	return ${retval}
+}
+
+KV_major() {
+	[[ -z $1 ]] && return 1
+
+	local KV=$@
+	echo "${KV%%.*}"
+}
+
+KV_minor() {
+	[[ -z $1 ]] && return 1
+
+	local KV=$@
+	KV=${KV#*.}
+	echo "${KV%%.*}"
+}
+
+KV_micro() {
+	[[ -z $1 ]] && return 1
+
+	local KV=$@
+	KV=${KV#*.*.}
+	echo "${KV%%[^[:digit:]]*}"
+}
+
+KV_to_int() {
+	[[ -z $1 ]] && return 1
+
+	local KV_MAJOR=$(KV_major "$1")
+	local KV_MINOR=$(KV_minor "$1")
+	local KV_MICRO=$(KV_micro "$1")
+	local KV_int=$(( KV_MAJOR * 65536 + KV_MINOR * 256 + KV_MICRO ))
+
+	# We make version 2.2.0 the minimum version we will handle as
+	# a sanity check ... if it's less, we fail ...
+	if [[ ${KV_int} -ge 131584 ]] ; then
+		echo "${KV_int}"
+		return 0
+	fi
+
+	return 1
+}
+
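# Worked examples for KV_to_int (annotation):
#
#     KV_to_int "2.6.38-r1"  # 2*65536 + 6*256 + 38 = 132646, succeeds
#     KV_to_int "2.0.36"     # 2*65536 + 0*256 + 36 = 131108, which is
#                            # below the 2.2.0 floor of 131584 -> fails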
+_RC_GET_KV_CACHE=""
+get_KV() {
+	[[ -z ${_RC_GET_KV_CACHE} ]] \
+		&& _RC_GET_KV_CACHE=$(uname -r)
+
+	echo $(KV_to_int "${_RC_GET_KV_CACHE}")
+
+	return $?
+}
+
+unset_colors() {
+	COLS=80
+	ENDCOL=
+
+	GOOD=
+	WARN=
+	BAD=
+	NORMAL=
+	HILITE=
+	BRACKET=
+}
+
+set_colors() {
+	COLS=${COLUMNS:-0}      # bash's internal COLUMNS variable
+	(( COLS == 0 )) && COLS=$(set -- $(stty size 2>/dev/null) ; echo $2)
+	(( COLS > 0 )) || (( COLS = 80 ))
+
+	# Now, ${ENDCOL} will move us to the end of the
+	# column, regardless of character width
+	ENDCOL=$'\e[A\e['$(( COLS - 8 ))'C'
+	if [ -n "${PORTAGE_COLORMAP}" ] ; then
+		eval ${PORTAGE_COLORMAP}
+	else
+		GOOD=$'\e[32;01m'
+		WARN=$'\e[33;01m'
+		BAD=$'\e[31;01m'
+		HILITE=$'\e[36;01m'
+		BRACKET=$'\e[34;01m'
+	fi
+	NORMAL=$'\e[0m'
+}
+
+RC_ENDCOL="yes"
+RC_INDENTATION=''
+RC_DEFAULT_INDENT=2
+RC_DOT_PATTERN=''
+
+case "${NOCOLOR:-false}" in
+	yes|true)
+		unset_colors
+		;;
+	no|false)
+		set_colors
+		;;
+esac
+
+if [[ -z ${USERLAND} ]] ; then
+	case $(uname -s) in
+	*BSD|DragonFly)
+		export USERLAND="BSD"
+		;;
+	*)
+		export USERLAND="GNU"
+		;;
+	esac
+fi
+
+if [[ -z ${XARGS} ]] ; then
+	case ${USERLAND} in
+	BSD)
+		export XARGS="xargs"
+		;;
+	*)
+		export XARGS="xargs -r"
+		;;
+	esac
+fi
+
+hasq() {
+	has $EBUILD_PHASE prerm postrm || eqawarn \
+		"QA Notice: The 'hasq' function is deprecated (replaced by 'has')"
+	has "$@"
+}
+
+hasv() {
+	if has "$@" ; then
+		echo "$1"
+		return 0
+	fi
+	return 1
+}
+
+has() {
+	local needle=$1
+	shift
+
+	local x
+	for x in "$@"; do
+		[ "${x}" = "${needle}" ] && return 0
+	done
+	return 1
+}
+
+# @FUNCTION: save_ebuild_env
+# @DESCRIPTION:
+# echo the current environment to stdout, filtering out redundant info.
+#
+# --exclude-init-phases causes pkg_nofetch and src_* phase functions to
+# be excluded from the output. These functions are not needed for installation
+# or removal of the packages, and can therefore be safely excluded.
+#
+save_ebuild_env() {
+	(
+
+		if has --exclude-init-phases $* ; then
+			unset S _E_DOCDESTTREE_ _E_EXEDESTTREE_
+			if [[ -n $PYTHONPATH ]] ; then
+				export PYTHONPATH=${PYTHONPATH/${PORTAGE_PYM_PATH}:}
+				[[ -z $PYTHONPATH ]] && unset PYTHONPATH
+			fi
+		fi
+
+		# misc variables inherited from the calling environment
+		unset COLORTERM DISPLAY EDITOR LESS LESSOPEN LOGNAME LS_COLORS PAGER \
+			TERM TERMCAP USER ftp_proxy http_proxy no_proxy
+
+		# other variables inherited from the calling environment
+		unset CVS_RSH ECHANGELOG_USER GPG_AGENT_INFO \
+		SSH_AGENT_PID SSH_AUTH_SOCK STY WINDOW XAUTHORITY
+
+		# CCACHE and DISTCC config
+		unset ${!CCACHE_*} ${!DISTCC_*}
+
+		# There's no need to bloat environment.bz2 with internally defined
+		# functions and variables, so filter them out if possible.
+
+		for x in pkg_setup pkg_nofetch src_unpack src_prepare src_configure \
+			src_compile src_test src_install pkg_preinst pkg_postinst \
+			pkg_prerm pkg_postrm ; do
+			unset -f default_$x _eapi{0,1,2,3,4}_$x
+		done
+		unset x
+
+		unset -f assert assert_sigpipe_ok dump_trace die diefunc \
+			quiet_mode vecho elog_base eqawarn elog \
+			esyslog einfo einfon ewarn eerror ebegin _eend eend KV_major \
+			KV_minor KV_micro KV_to_int get_KV unset_colors set_colors has \
+			has_phase_defined_up_to \
+			hasg hasgq hasv hasq qa_source qa_call \
+			addread addwrite adddeny addpredict _sb_append_var \
+			lchown lchgrp esyslog use usev useq has_version portageq \
+			best_version use_with use_enable register_die_hook \
+			keepdir unpack strip_duplicate_slashes econf einstall \
+			dyn_setup dyn_unpack dyn_clean into insinto exeinto docinto \
+			insopts diropts exeopts libopts docompress \
+			abort_handler abort_prepare abort_configure abort_compile \
+			abort_test abort_install dyn_prepare dyn_configure \
+			dyn_compile dyn_test dyn_install \
+			dyn_preinst dyn_help debug-print debug-print-function \
+			debug-print-section inherit EXPORT_FUNCTIONS remove_path_entry \
+			save_ebuild_env filter_readonly_variables preprocess_ebuild_env \
+			set_unless_changed unset_unless_changed source_all_bashrcs \
+			ebuild_main ebuild_phase ebuild_phase_with_hooks \
+			_ebuild_arg_to_phase _ebuild_phase_funcs default \
+			_pipestatus \
+			${QA_INTERCEPTORS}
+
+		# portage config variables and variables set directly by portage
+		unset ACCEPT_LICENSE BAD BRACKET BUILD_PREFIX COLS \
+			DISTCC_DIR DISTDIR DOC_SYMLINKS_DIR \
+			EBUILD_FORCE_TEST EBUILD_MASTER_PID \
+			ECLASS_DEPTH ENDCOL FAKEROOTKEY \
+			GOOD HILITE HOME \
+			LAST_E_CMD LAST_E_LEN LD_PRELOAD MISC_FUNCTIONS_ARGS MOPREFIX \
+			NOCOLOR NORMAL PKGDIR PKGUSE PKG_LOGDIR PKG_TMPDIR \
+			PORTAGE_BASHRCS_SOURCED PORTAGE_NONFATAL PORTAGE_QUIET \
+			PORTAGE_SANDBOX_DENY PORTAGE_SANDBOX_PREDICT \
+			PORTAGE_SANDBOX_READ PORTAGE_SANDBOX_WRITE PREROOTPATH \
+			QA_INTERCEPTORS \
+			RC_DEFAULT_INDENT RC_DOT_PATTERN RC_ENDCOL RC_INDENTATION  \
+			ROOT ROOTPATH RPMDIR TEMP TMP TMPDIR USE_EXPAND \
+			WARN XARGS _RC_GET_KV_CACHE
+
+		# user config variables
+		unset DOC_SYMLINKS_DIR INSTALL_MASK PKG_INSTALL_MASK
+
+		declare -p
+		declare -fp
+		if [[ ${BASH_VERSINFO[0]} == 3 ]]; then
+			export
+		fi
+	)
+}
+
+true

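A short sketch of how the logging family defined above composes inside an
ebuild phase (the messages are placeholders; the status bracket is aligned
to COLS as computed by set_colors):

    ebegin "Compiling sources"
    emake
    eend $? "emake failed"   # aligned [ ok ] / [ !! ], eerror on failure

    einfo "also recorded in \${T}/logging/<phase> via elog_base"
    ewarn "printed to stderr with the same logging side effect"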
diff --git a/portage_with_autodep/bin/lock-helper.py b/portage_with_autodep/bin/lock-helper.py
new file mode 100755
index 0000000..5f3ea9f
--- /dev/null
+++ b/portage_with_autodep/bin/lock-helper.py
@@ -0,0 +1,28 @@
+#!/usr/bin/python
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import sys
+sys.path.insert(0, os.environ['PORTAGE_PYM_PATH'])
+import portage
+
+def main(args):
+
+	if args and sys.hexversion < 0x3000000 and not isinstance(args[0], unicode):
+		for i, x in enumerate(args):
+			args[i] = portage._unicode_decode(x, errors='strict')
+
+	# Make locks quiet since unintended locking messages displayed on
+	# stdout would corrupt the intended output of this program.
+	portage.locks._quiet = True
+	lock_obj = portage.locks.lockfile(args[0], wantnewlockfile=True)
+	sys.stdout.write('\0')
+	sys.stdout.flush()
+	sys.stdin.read(1)
+	portage.locks.unlockfile(lock_obj)
+	return portage.os.EX_OK
+
+if __name__ == "__main__":
+	rval = main(sys.argv[1:])
+	sys.exit(rval)

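A sketch of the handshake this helper implements; the coproc wiring and
the paths are assumptions for illustration (portage drives the helper
itself, and PORTAGE_PYM_PATH must point at its python modules):

    export PORTAGE_PYM_PATH=/usr/lib/portage/pym
    coproc LOCK { python lock-helper.py /var/lock/demo.lock; }
    head -c1 <&"${LOCK[0]}" >/dev/null  # block until '\0' says lock held
    echo "critical section runs here"
    echo >&"${LOCK[1]}"                 # any byte on stdin releases the lock
    wait "${LOCK_PID}"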
diff --git a/portage_with_autodep/bin/misc-functions.sh b/portage_with_autodep/bin/misc-functions.sh
new file mode 100755
index 0000000..8c191ff
--- /dev/null
+++ b/portage_with_autodep/bin/misc-functions.sh
@@ -0,0 +1,1002 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# Miscellaneous shell functions that make use of the ebuild env but don't need
+# to be included directly in ebuild.sh.
+#
+# We're sourcing ebuild.sh here so that we inherit all of its goodness,
+# including bashrc trickery.  This approach allows us to do our miscellaneous
+# shell work within the same env that ebuild.sh has, but without polluting
+# ebuild.sh itself with unneeded logic and shell code.
+#
+# XXX hack: clear the args so ebuild.sh doesn't see them
+MISC_FUNCTIONS_ARGS="$@"
+shift $#
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}/ebuild.sh"
+
+install_symlink_html_docs() {
+	cd "${D}" || die "cd failed"
+	#symlink the html documentation (if DOC_SYMLINKS_DIR is set in make.conf)
+	if [ -n "${DOC_SYMLINKS_DIR}" ] ; then
+		local mydocdir docdir
+		for docdir in "${HTMLDOC_DIR:-does/not/exist}" "${PF}/html" "${PF}/HTML" "${P}/html" "${P}/HTML" ; do
+			if [ -d "usr/share/doc/${docdir}" ] ; then
+				mydocdir="/usr/share/doc/${docdir}"
+			fi
+		done
+		if [ -n "${mydocdir}" ] ; then
+			local mysympath
+			if [ -z "${SLOT}" -o "${SLOT}" = "0" ] ; then
+				mysympath="${DOC_SYMLINKS_DIR}/${CATEGORY}/${PN}"
+			else
+				mysympath="${DOC_SYMLINKS_DIR}/${CATEGORY}/${PN}-${SLOT}"
+			fi
+			einfo "Symlinking ${mysympath} to the HTML documentation"
+			dodir "${DOC_SYMLINKS_DIR}/${CATEGORY}"
+			dosym "${mydocdir}" "${mysympath}"
+		fi
+	fi
+}
+
+# replacement for "readlink -f" or "realpath"
+canonicalize() {
+	local f=$1 b n=10 wd=$(pwd)
+	while (( n-- > 0 )); do
+		while [[ ${f: -1} = / && ${#f} -gt 1 ]]; do
+			f=${f%/}
+		done
+		b=${f##*/}
+		cd "${f%"${b}"}" 2>/dev/null || break
+		if [[ ! -L ${b} ]]; then
+			f=$(pwd -P)
+			echo "${f%/}/${b}"
+			cd "${wd}"
+			return 0
+		fi
+		f=$(readlink "${b}")
+	done
+	cd "${wd}"
+	return 1
+}
+
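# Worked example (annotation): canonicalize is a pure-bash stand-in for
# "readlink -f"; it normalizes ".." and trailing slashes and follows up
# to 10 levels of symlinks:
#
#     canonicalize /etc/../usr/lib/   # -> /usr/lib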
+prepcompress() {
+	local -a include exclude incl_d incl_f
+	local f g i real_f real_d
+
+	# Canonicalize path names and check for their existence.
+	real_d=$(canonicalize "${D}")
+	for (( i = 0; i < ${#PORTAGE_DOCOMPRESS[@]}; i++ )); do
+		real_f=$(canonicalize "${D}${PORTAGE_DOCOMPRESS[i]}")
+		f=${real_f#"${real_d}"}
+		if [[ ${real_f} != "${f}" ]] && [[ -d ${real_f} || -f ${real_f} ]]
+		then
+			include[${#include[@]}]=${f:-/}
+		elif [[ ${i} -ge 3 ]]; then
+			ewarn "prepcompress:" \
+				"ignoring nonexistent path '${PORTAGE_DOCOMPRESS[i]}'"
+		fi
+	done
+	for (( i = 0; i < ${#PORTAGE_DOCOMPRESS_SKIP[@]}; i++ )); do
+		real_f=$(canonicalize "${D}${PORTAGE_DOCOMPRESS_SKIP[i]}")
+		f=${real_f#"${real_d}"}
+		if [[ ${real_f} != "${f}" ]] && [[ -d ${real_f} || -f ${real_f} ]]
+		then
+			exclude[${#exclude[@]}]=${f:-/}
+		elif [[ ${i} -ge 1 ]]; then
+			ewarn "prepcompress:" \
+				"ignoring nonexistent path '${PORTAGE_DOCOMPRESS_SKIP[i]}'"
+		fi
+	done
+
+	# Remove redundant entries from lists.
+	# For the include list, remove any entries that are:
+	# a) contained in a directory in the include or exclude lists, or
+	# b) identical with an entry in the exclude list.
+	for (( i = ${#include[@]} - 1; i >= 0; i-- )); do
+		f=${include[i]}
+		for g in "${include[@]}"; do
+			if [[ ${f} == "${g%/}"/* ]]; then
+				unset include[i]
+				continue 2
+			fi
+		done
+		for g in "${exclude[@]}"; do
+			if [[ ${f} = "${g}" || ${f} == "${g%/}"/* ]]; then
+				unset include[i]
+				continue 2
+			fi
+		done
+	done
+	# For the exclude list, remove any entries that are:
+	# a) contained in a directory in the exclude list, or
+	# b) _not_ contained in a directory in the include list.
+	for (( i = ${#exclude[@]} - 1; i >= 0; i-- )); do
+		f=${exclude[i]}
+		for g in "${exclude[@]}"; do
+			if [[ ${f} == "${g%/}"/* ]]; then
+				unset exclude[i]
+				continue 2
+			fi
+		done
+		for g in "${include[@]}"; do
+			[[ ${f} == "${g%/}"/* ]] && continue 2
+		done
+		unset exclude[i]
+	done
+
+	# Split the include list into directories and files
+	for f in "${include[@]}"; do
+		if [[ -d ${D}${f} ]]; then
+			incl_d[${#incl_d[@]}]=${f}
+		else
+			incl_f[${#incl_f[@]}]=${f}
+		fi
+	done
+
+	# Queue up for compression.
+	# ecompress{,dir} doesn't like to be called with empty argument lists.
+	[[ ${#incl_d[@]} -gt 0 ]] && ecompressdir --queue "${incl_d[@]}"
+	[[ ${#incl_f[@]} -gt 0 ]] && ecompress --queue "${incl_f[@]/#/${D}}"
+	[[ ${#exclude[@]} -gt 0 ]] && ecompressdir --ignore "${exclude[@]}"
+	return 0
+}
+
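# Annotation: the PORTAGE_DOCOMPRESS{,_SKIP} arrays consumed above are
# populated by the EAPI 4 docompress helper; a typical ebuild fragment
# (paths are illustrative):
#
#     docompress /usr/share/doc                 # queue a tree
#     docompress -x /usr/share/doc/${PF}/html   # carve out an exception
#
# prepcompress then prunes nested and excluded entries so each path is
# handed to ecompressdir/ecompress at most once.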
+install_qa_check() {
+	local f x
+
+	cd "${D}" || die "cd failed"
+
+	export STRIP_MASK
+	prepall
+	has "${EAPI}" 0 1 2 3 || prepcompress
+	ecompressdir --dequeue
+	ecompress --dequeue
+
+	f=
+	for x in etc/app-defaults usr/man usr/info usr/X11R6 usr/doc usr/locale ; do
+		[[ -d $D/$x ]] && f+="  $x\n"
+	done
+
+	if [[ -n $f ]] ; then
+		eqawarn "QA Notice: This ebuild installs into the following deprecated directories:"
+		eqawarn
+		eqawarn "$f"
+	fi
+
+	# Now we look for all world writable files.
+	local i
+	for i in $(find "${D}/" -type f -perm -2); do
+		vecho "QA Security Notice:"
+		vecho "- ${i:${#D}:${#i}} will be a world writable file."
+		vecho "- This may or may not be a security problem, most of the time it is one."
+		vecho "- Please double check that $PF really needs a world writeable bit and file bugs accordingly."
+		sleep 1
+	done
+
+	if type -P scanelf > /dev/null && ! has binchecks ${RESTRICT}; then
+		local qa_var insecure_rpath=0 tmp_quiet=${PORTAGE_QUIET}
+		local x
+
+		# display warnings when using stricter because we die afterwards
+		if has stricter ${FEATURES} ; then
+			unset PORTAGE_QUIET
+		fi
+
+		# Make sure we disallow insecure RUNPATH/RPATHs.
+		#   1) References to PORTAGE_BUILDDIR are banned because it's a
+		#      security risk. We don't want to load files from a
+		#      temporary directory.
+		#   2) If ROOT != "/", references to ROOT are banned because
+		#      that directory won't exist on the target system.
+		#   3) Null paths are banned because the loader will search $PWD when
+		#      it finds null paths.
+		local forbidden_dirs="${PORTAGE_BUILDDIR}"
+		if [[ -n "${ROOT}" && "${ROOT}" != "/" ]]; then
+			forbidden_dirs+=" ${ROOT}"
+		fi
+		local dir l rpath_files=$(scanelf -F '%F:%r' -qBR "${D}")
+		f=""
+		for dir in ${forbidden_dirs}; do
+			for l in $(echo "${rpath_files}" | grep -E ":${dir}|::|: "); do
+				f+="  ${l%%:*}\n"
+				if ! has stricter ${FEATURES}; then
+					vecho "Auto fixing rpaths for ${l%%:*}"
+					TMPDIR="${dir}" scanelf -BXr "${l%%:*}" -o /dev/null
+				fi
+			done
+		done
+
+		# Reject set*id binaries with $ORIGIN in RPATH #260331
+		x=$(
+			find "${D}" -type f \( -perm -u+s -o -perm -g+s \) -print0 | \
+			xargs -0 scanelf -qyRF '%r %p' | grep '$ORIGIN'
+		)
+
+		# Print QA notice.
+		if [[ -n ${f}${x} ]] ; then
+			vecho -ne '\n'
+			eqawarn "QA Notice: The following files contain insecure RUNPATHs"
+			eqawarn " Please file a bug about this at http://bugs.gentoo.org/"
+			eqawarn " with the maintaining herd of the package."
+			eqawarn "${f}${f:+${x:+\n}}${x}"
+			vecho -ne '\n'
+			if [[ -n ${x} ]] || has stricter ${FEATURES} ; then
+				insecure_rpath=1
+			fi
+		fi
+
+		# TEXTRELs are baaaaaaaad
+		# Allow devs to mark things as ignorable ... e.g. things that are
+		# binary-only and upstream isn't cooperating (nvidia-glx) ... we
+		# allow ebuild authors to set QA_TEXTRELS_arch and QA_TEXTRELS ...
+		# the former overrides the latter ... regexes allowed ! :)
+		qa_var="QA_TEXTRELS_${ARCH/-/_}"
+		[[ -n ${!qa_var} ]] && QA_TEXTRELS=${!qa_var}
+		[[ -n ${QA_STRICT_TEXTRELS} ]] && QA_TEXTRELS=""
+		export QA_TEXTRELS="${QA_TEXTRELS} lib*/modules/*.ko"
+		f=$(scanelf -qyRF '%t %p' "${D}" | grep -v 'usr/lib/debug/')
+		if [[ -n ${f} ]] ; then
+			scanelf -qyRAF '%T %p' "${PORTAGE_BUILDDIR}"/ &> "${T}"/scanelf-textrel.log
+			vecho -ne '\n'
+			eqawarn "QA Notice: The following files contain runtime text relocations"
+			eqawarn " Text relocations force the dynamic linker to perform extra"
+			eqawarn " work at startup, waste system resources, and may pose a security"
+			eqawarn " risk.  On some architectures, the code may not even function"
+			eqawarn " properly, if at all."
+			eqawarn " For more information, see http://hardened.gentoo.org/pic-fix-guide.xml"
+			eqawarn " Please include the following list of files in your report:"
+			eqawarn "${f}"
+			vecho -ne '\n'
+			die_msg="${die_msg} textrels,"
+			sleep 1
+		fi
+
+		# Also, executable stacks only matter on linux (and just glibc atm ...)
+		f=""
+		case ${CTARGET:-${CHOST}} in
+			*-linux-gnu*)
+			# Check for files with executable stacks, but only on arches which
+			# are supported at the moment.  Keep this list in sync with
+			# http://hardened.gentoo.org/gnu-stack.xml (Arch Status)
+			case ${CTARGET:-${CHOST}} in
+				arm*|i?86*|ia64*|m68k*|s390*|sh*|x86_64*)
+					# Allow devs to mark things as ignorable ... e.g. things
+					# that are binary-only and upstream isn't cooperating ...
+					# we allow ebuild authors to set QA_EXECSTACK_arch and
+					# QA_EXECSTACK ... the former overrides the latter ...
+					# regexes allowed ! :)
+
+					qa_var="QA_EXECSTACK_${ARCH/-/_}"
+					[[ -n ${!qa_var} ]] && QA_EXECSTACK=${!qa_var}
+					[[ -n ${QA_STRICT_EXECSTACK} ]] && QA_EXECSTACK=""
+					qa_var="QA_WX_LOAD_${ARCH/-/_}"
+					[[ -n ${!qa_var} ]] && QA_WX_LOAD=${!qa_var}
+					[[ -n ${QA_STRICT_WX_LOAD} ]] && QA_WX_LOAD=""
+					export QA_EXECSTACK="${QA_EXECSTACK} lib*/modules/*.ko"
+					export QA_WX_LOAD="${QA_WX_LOAD} lib*/modules/*.ko"
+					f=$(scanelf -qyRAF '%e %p' "${D}" | grep -v 'usr/lib/debug/')
+					;;
+			esac
+			;;
+		esac
+		if [[ -n ${f} ]] ; then
+			# One more pass to help devs track down the source
+			scanelf -qyRAF '%e %p' "${PORTAGE_BUILDDIR}"/ &> "${T}"/scanelf-execstack.log
+			vecho -ne '\n'
+			eqawarn "QA Notice: The following files contain writable and executable sections"
+			eqawarn " Files with such sections will not work properly (or at all!) on some"
+			eqawarn " architectures/operating systems.  A bug should be filed at"
+			eqawarn " http://bugs.gentoo.org/ to make sure the issue is fixed."
+			eqawarn " For more information, see http://hardened.gentoo.org/gnu-stack.xml"
+			eqawarn " Please include the following list of files in your report:"
+			eqawarn " Note: Bugs should be filed for the respective maintainers"
+			eqawarn " of the package in question and not hardened@g.o."
+			eqawarn "${f}"
+			vecho -ne '\n'
+			die_msg="${die_msg} execstacks"
+			sleep 1
+		fi
+
+		# Check for files built without respecting LDFLAGS
+		if [[ "${LDFLAGS}" == *,--hash-style=gnu* ]] && [[ "${PN}" != *-bin ]] ; then
+			qa_var="QA_DT_HASH_${ARCH/-/_}"
+			eval "[[ -n \${!qa_var} ]] && QA_DT_HASH=(\"\${${qa_var}[@]}\")"
+			f=$(scanelf -qyRF '%k %p' -k .hash "${D}" | sed -e "s:\.hash ::")
+			if [[ -n ${f} ]] ; then
+				echo "${f}" > "${T}"/scanelf-ignored-LDFLAGS.log
+				if [ "${QA_STRICT_DT_HASH-unset}" == unset ] ; then
+					if [[ ${#QA_DT_HASH[@]} -gt 1 ]] ; then
+						for x in "${QA_DT_HASH[@]}" ; do
+							sed -e "s#^${x#/}\$##" -i "${T}"/scanelf-ignored-LDFLAGS.log
+						done
+					else
+						local shopts=$-
+						set -o noglob
+						for x in ${QA_DT_HASH} ; do
+							sed -e "s#^${x#/}\$##" -i "${T}"/scanelf-ignored-LDFLAGS.log
+						done
+						set +o noglob
+						set -${shopts}
+					fi
+				fi
+				# Filter anything under /usr/lib/debug/ in order to avoid
+				# duplicate warnings for splitdebug files.
+				sed -e "s#^usr/lib/debug/.*##" -e "/^\$/d" -e "s#^#/#" \
+					-i "${T}"/scanelf-ignored-LDFLAGS.log
+				f=$(<"${T}"/scanelf-ignored-LDFLAGS.log)
+				if [[ -n ${f} ]] ; then
+					vecho -ne '\n'
+					eqawarn "${BAD}QA Notice: Files built without respecting LDFLAGS have been detected${NORMAL}"
+					eqawarn " Please include the following list of files in your report:"
+					eqawarn "${f}"
+					vecho -ne '\n'
+					sleep 1
+				else
+					rm -f "${T}"/scanelf-ignored-LDFLAGS.log
+				fi
+			fi
+		fi
+
+		# Save NEEDED information after removing self-contained providers
+		rm -f "$PORTAGE_BUILDDIR"/build-info/NEEDED{,.ELF.2}
+		scanelf -qyRF '%a;%p;%S;%r;%n' "${D}" | { while IFS= read -r l; do
+			arch=${l%%;*}; l=${l#*;}
+			obj="/${l%%;*}"; l=${l#*;}
+			soname=${l%%;*}; l=${l#*;}
+			rpath=${l%%;*}; l=${l#*;}; [ "${rpath}" = "  -  " ] && rpath=""
+			needed=${l%%;*}; l=${l#*;}
+			if [ -z "${rpath}" -o -n "${rpath//*ORIGIN*}" ]; then
+				# object doesn't contain $ORIGIN in its runpath attribute
+				echo "${obj} ${needed}"	>> "${PORTAGE_BUILDDIR}"/build-info/NEEDED
+				echo "${arch:3};${obj};${soname};${rpath};${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.ELF.2
+			else
+				dir=${obj%/*}
+				# replace $ORIGIN with the dirname of the current object for the lookup
+				opath=$(echo :${rpath}: | sed -e "s#.*:\(.*\)\$ORIGIN\(.*\):.*#\1${dir}\2#")
+				sneeded=$(echo ${needed} | tr , ' ')
+				rneeded=""
+				for lib in ${sneeded}; do
+					found=0
+					for path in ${opath//:/ }; do
+						[ -e "${D}/${path}/${lib}" ] && found=1 && break
+					done
+					[ "${found}" -eq 0 ] && rneeded="${rneeded},${lib}"
+				done
+				rneeded=${rneeded:1}
+				if [ -n "${rneeded}" ]; then
+					echo "${obj} ${rneeded}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED
+					echo "${arch:3};${obj};${soname};${rpath};${rneeded}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.ELF.2
+				fi
+			fi
+		done }
+
+		if [[ ${insecure_rpath} -eq 1 ]] ; then
+			die "Aborting due to serious QA concerns with RUNPATH/RPATH"
+		elif [[ -n ${die_msg} ]] && has stricter ${FEATURES} ; then
+			die "Aborting due to QA concerns: ${die_msg}"
+		fi
+
+		# Check for shared libraries lacking SONAMEs
+		qa_var="QA_SONAME_${ARCH/-/_}"
+		eval "[[ -n \${!qa_var} ]] && QA_SONAME=(\"\${${qa_var}[@]}\")"
+		f=$(scanelf -ByF '%S %p' "${D}"{,usr/}lib*/lib*.so* | gawk '$2 == "" { print }' | sed -e "s:^[[:space:]]${D}:/:")
+		if [[ -n ${f} ]] ; then
+			echo "${f}" > "${T}"/scanelf-missing-SONAME.log
+			if [[ "${QA_STRICT_SONAME-unset}" == unset ]] ; then
+				if [[ ${#QA_SONAME[@]} -gt 1 ]] ; then
+					for x in "${QA_SONAME[@]}" ; do
+						sed -e "s#^/${x#/}\$##" -i "${T}"/scanelf-missing-SONAME.log
+					done
+				else
+					local shopts=$-
+					set -o noglob
+					for x in ${QA_SONAME} ; do
+						sed -e "s#^/${x#/}\$##" -i "${T}"/scanelf-missing-SONAME.log
+					done
+					set +o noglob
+					set -${shopts}
+				fi
+			fi
+			sed -e "/^\$/d" -i "${T}"/scanelf-missing-SONAME.log
+			f=$(<"${T}"/scanelf-missing-SONAME.log)
+			if [[ -n ${f} ]] ; then
+				vecho -ne '\n'
+				eqawarn "QA Notice: The following shared libraries lack a SONAME"
+				eqawarn "${f}"
+				vecho -ne '\n'
+				sleep 1
+			else
+				rm -f "${T}"/scanelf-missing-SONAME.log
+			fi
+		fi
+
+		# Check for shared libraries lacking NEEDED entries
+		qa_var="QA_DT_NEEDED_${ARCH/-/_}"
+		eval "[[ -n \${!qa_var} ]] && QA_DT_NEEDED=(\"\${${qa_var}[@]}\")"
+		f=$(scanelf -ByF '%n %p' "${D}"{,usr/}lib*/lib*.so* | gawk '$2 == "" { print }' | sed -e "s:^[[:space:]]${D}:/:")
+		if [[ -n ${f} ]] ; then
+			echo "${f}" > "${T}"/scanelf-missing-NEEDED.log
+			if [[ "${QA_STRICT_DT_NEEDED-unset}" == unset ]] ; then
+				if [[ ${#QA_DT_NEEDED[@]} -gt 1 ]] ; then
+					for x in "${QA_DT_NEEDED[@]}" ; do
+						sed -e "s#^/${x#/}\$##" -i "${T}"/scanelf-missing-NEEDED.log
+					done
+				else
+					local shopts=$-
+					set -o noglob
+					for x in ${QA_DT_NEEDED} ; do
+						sed -e "s#^/${x#/}\$##" -i "${T}"/scanelf-missing-NEEDED.log
+					done
+					set +o noglob
+					set -${shopts}
+				fi
+			fi
+			sed -e "/^\$/d" -i "${T}"/scanelf-missing-NEEDED.log
+			f=$(<"${T}"/scanelf-missing-NEEDED.log)
+			if [[ -n ${f} ]] ; then
+				vecho -ne '\n'
+				eqawarn "QA Notice: The following shared libraries lack NEEDED entries"
+				eqawarn "${f}"
+				vecho -ne '\n'
+				sleep 1
+			else
+				rm -f "${T}"/scanelf-missing-NEEDED.log
+			fi
+		fi
+
+		PORTAGE_QUIET=${tmp_quiet}
+	fi
+
+	local unsafe_files=$(find "${D}" -type f '(' -perm -2002 -o -perm -4002 ')')
+	if [[ -n ${unsafe_files} ]] ; then
+		eqawarn "QA Notice: Unsafe files detected (set*id and world writable)"
+		eqawarn "${unsafe_files}"
+		die "Unsafe files found in \${D}.  Portage will not install them."
+	fi
+
+	if [[ -d ${D}/${D} ]] ; then
+		declare -i INSTALLTOD=0
+		for i in $(find "${D}/${D}/"); do
+			eqawarn "QA Notice: /${i##${D}/${D}} installed in \${D}/\${D}"
+			((INSTALLTOD++))
+		done
+		die "Aborting due to QA concerns: ${INSTALLTOD} files installed in ${D}/${D}"
+		unset INSTALLTOD
+	fi
+
+	# Sanity check syntax errors in init.d scripts
+	local d
+	for d in /etc/conf.d /etc/init.d ; do
+		[[ -d ${D}/${d} ]] || continue
+		for i in "${D}"/${d}/* ; do
+			[[ -L ${i} ]] && continue
+			# if empty conf.d/init.d dir exists (baselayout), then i will be "/etc/conf.d/*" and not exist
+			[[ ! -e ${i} ]] && continue
+			bash -n "${i}" || die "The init.d file has syntax errors: ${i}"
+		done
+	done
+
+	# this should help to ensure that all (most?) shared libraries are executable
+	# and that all libtool scripts / static libraries are not executable
+	local j
+	for i in "${D}"opt/*/lib{,32,64} \
+	         "${D}"lib{,32,64}       \
+	         "${D}"usr/lib{,32,64}   \
+	         "${D}"usr/X11R6/lib{,32,64} ; do
+		[[ ! -d ${i} ]] && continue
+
+		for j in "${i}"/*.so.* "${i}"/*.so ; do
+			[[ ! -e ${j} ]] && continue
+			[[ -L ${j} ]] && continue
+			[[ -x ${j} ]] && continue
+			vecho "making executable: ${j#${D}}"
+			chmod +x "${j}"
+		done
+
+		for j in "${i}"/*.a "${i}"/*.la ; do
+			[[ ! -e ${j} ]] && continue
+			[[ -L ${j} ]] && continue
+			[[ ! -x ${j} ]] && continue
+			vecho "removing executable bit: ${j#${D}}"
+			chmod -x "${j}"
+		done
+
+		for j in "${i}"/*.{a,dll,dylib,sl,so}.* "${i}"/*.{a,dll,dylib,sl,so} ; do
+			[[ ! -e ${j} ]] && continue
+			[[ ! -L ${j} ]] && continue
+			linkdest=$(readlink "${j}")
+			if [[ ${linkdest} == /* ]] ; then
+				vecho -ne '\n'
+				eqawarn "QA Notice: Found an absolute symlink in a library directory:"
+				eqawarn "           ${j#${D}} -> ${linkdest}"
+				eqawarn "           It should be a relative symlink if in the same directory"
+				eqawarn "           or a linker script if it crosses the /usr boundary."
+			fi
+		done
+	done
+
+	# When installing static libraries into /usr/lib and shared libraries into
+	# /lib, we have to make sure we have a linker script in /usr/lib alongside
+	# the static library, or gcc will utilize the static lib when linking :(.
+	# http://bugs.gentoo.org/4411
+	abort="no"
+	local a s
+	for a in "${D}"usr/lib*/*.a ; do
+		s=${a%.a}.so
+		if [[ ! -e ${s} ]] ; then
+			s=${s%usr/*}${s##*/usr/}
+			if [[ -e ${s} ]] ; then
+				vecho -ne '\n'
+				eqawarn "QA Notice: Missing gen_usr_ldscript for ${s##*/}"
+				abort="yes"
+			fi
+		fi
+	done
+	[[ ${abort} == "yes" ]] && die "add those ldscripts"
+
+	# Make sure people don't store libtool files or static libs in /lib
+	f=$(ls "${D}"lib*/*.{a,la} 2>/dev/null)
+	if [[ -n ${f} ]] ; then
+		vecho -ne '\n'
+		eqawarn "QA Notice: Excessive files found in the / partition"
+		eqawarn "${f}"
+		vecho -ne '\n'
+		die "static archives (*.a) and libtool library files (*.la) do not belong in /"
+	fi
+
+	# Verify that the libtool files don't contain bogus $D entries.
+	local abort=no gentoo_bug=no always_overflow=no
+	for a in "${D}"usr/lib*/*.la ; do
+		s=${a##*/}
+		if grep -qs "${D}" "${a}" ; then
+			vecho -ne '\n'
+			eqawarn "QA Notice: ${s} appears to contain PORTAGE_TMPDIR paths"
+			abort="yes"
+		fi
+	done
+	[[ ${abort} == "yes" ]] && die "soiled libtool library files found"
+
+	# Evaluate misc gcc warnings
+	if [[ -n ${PORTAGE_LOG_FILE} && -r ${PORTAGE_LOG_FILE} ]] ; then
+		# In debug mode, this variable definition and corresponding grep calls
+		# will produce false positives if they're shown in the trace.
+		local reset_debug=0
+		if [[ ${-/x/} != $- ]] ; then
+			set +x
+			reset_debug=1
+		fi
+		local m msgs=(
+			": warning: dereferencing type-punned pointer will break strict-aliasing rules"
+			": warning: dereferencing pointer .* does break strict-aliasing rules"
+			": warning: implicit declaration of function"
+			": warning: incompatible implicit declaration of built-in function"
+			": warning: is used uninitialized in this function" # we'll ignore "may" and "might"
+			": warning: comparisons like X<=Y<=Z do not have their mathematical meaning"
+			": warning: null argument where non-null required"
+			": warning: array subscript is below array bounds"
+			": warning: array subscript is above array bounds"
+			": warning: attempt to free a non-heap object"
+			": warning: .* called with .*bigger.* than .* destination buffer"
+			": warning: call to .* will always overflow destination buffer"
+			": warning: assuming pointer wraparound does not occur when comparing"
+			": warning: hex escape sequence out of range"
+			": warning: [^ ]*-hand operand of comma .*has no effect"
+			": warning: converting to non-pointer type .* from NULL"
+			": warning: NULL used in arithmetic"
+			": warning: passing NULL to non-pointer argument"
+			": warning: the address of [^ ]* will always evaluate as"
+			": warning: the address of [^ ]* will never be NULL"
+			": warning: too few arguments for format"
+			": warning: reference to local variable .* returned"
+			": warning: returning reference to temporary"
+			": warning: function returns address of local variable"
+			# this may be valid code :/
+			#": warning: multi-character character constant"
+			# need to check these two ...
+			#": warning: assuming signed overflow does not occur when"
+			#": warning: comparison with string literal results in unspecified behav"
+			# yacc/lex likes to trigger this one
+			#": warning: extra tokens at end of .* directive"
+			# only gcc itself triggers this ?
+			#": warning: .*noreturn.* function does return"
+			# these throw false positives when 0 is used instead of NULL
+			#": warning: missing sentinel in function call"
+			#": warning: not enough variable arguments to fit a sentinel"
+		)
+		abort="no"
+		i=0
+		local grep_cmd=grep
+		[[ $PORTAGE_LOG_FILE = *.gz ]] && grep_cmd=zgrep
+		while [[ -n ${msgs[${i}]} ]] ; do
+			m=${msgs[$((i++))]}
+			# force C locale to work around slow unicode locales #160234
+			f=$(LC_ALL=C $grep_cmd "${m}" "${PORTAGE_LOG_FILE}")
+			if [[ -n ${f} ]] ; then
+				abort="yes"
+				# for now, don't make this fatal (see bug #337031)
+				#case "$m" in
+				#	": warning: call to .* will always overflow destination buffer") always_overflow=yes ;;
+				#esac
+				if [[ $always_overflow = yes ]] ; then
+					eerror
+					eerror "QA Notice: Package has poor programming practices which may compile"
+					eerror "           fine but exhibit random runtime failures."
+					eerror
+					eerror "${f}"
+					eerror
+					eerror " Please file a bug about this at http://bugs.gentoo.org/"
+					eerror " with the maintaining herd of the package."
+					eerror
+				else
+					vecho -ne '\n'
+					eqawarn "QA Notice: Package has poor programming practices which may compile"
+					eqawarn "           fine but exhibit random runtime failures."
+					eqawarn "${f}"
+					vecho -ne '\n'
+				fi
+			fi
+		done
+		local cat_cmd=cat
+		[[ $PORTAGE_LOG_FILE = *.gz ]] && cat_cmd=zcat
+		[[ $reset_debug = 1 ]] && set -x
+		f=$($cat_cmd "${PORTAGE_LOG_FILE}" | \
+			"${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH"/check-implicit-pointer-usage.py || die "check-implicit-pointer-usage.py failed")
+		if [[ -n ${f} ]] ; then
+
+			# In the future this will be a forced "die". In preparation,
+			# increase the log level from "qa" to "eerror" so that people
+			# are aware this is a problem that must be fixed asap.
+
+			# just warn on 32bit hosts but bail on 64bit hosts
+			case ${CHOST} in
+				alpha*|hppa64*|ia64*|powerpc64*|mips64*|sparc64*|sparcv9*|x86_64*) gentoo_bug=yes ;;
+			esac
+
+			abort=yes
+
+			if [[ $gentoo_bug = yes ]] ; then
+				eerror
+				eerror "QA Notice: Package has poor programming practices which may compile"
+				eerror "           but will almost certainly crash on 64bit architectures."
+				eerror
+				eerror "${f}"
+				eerror
+				eerror " Please file a bug about this at http://bugs.gentoo.org/"
+				eerror " with the maintaining herd of the package."
+				eerror
+			else
+				vecho -ne '\n'
+				eqawarn "QA Notice: Package has poor programming practices which may compile"
+				eqawarn "           but will almost certainly crash on 64bit architectures."
+				eqawarn "${f}"
+				vecho -ne '\n'
+			fi
+
+		fi
+		if [[ ${abort} == "yes" ]] ; then
+			if [[ $gentoo_bug = yes || $always_overflow = yes ]] ; then
+				die "install aborted due to" \
+					"poor programming practices shown above"
+			else
+				echo "Please do not file a Gentoo bug and instead" \
+				"report the above QA issues directly to the upstream" \
+				"developers of this software." | fmt -w 70 | \
+				while read -r line ; do eqawarn "${line}" ; done
+				eqawarn "Homepage: ${HOMEPAGE}"
+				has stricter ${FEATURES} && die "install aborted due to" \
+					"poor programming practices shown above"
+			fi
+		fi
+	fi
+
+	# Portage regenerates this on the installed system.
+	rm -f "${D}"/usr/share/info/dir{,.gz,.bz2}
+
+	if has multilib-strict ${FEATURES} && \
+	   [[ -x /usr/bin/file && -x /usr/bin/find ]] && \
+	   [[ -n ${MULTILIB_STRICT_DIRS} && -n ${MULTILIB_STRICT_DENY} ]]
+	then
+		local abort=no dir file firstrun=yes
+		MULTILIB_STRICT_EXEMPT=$(echo ${MULTILIB_STRICT_EXEMPT} | sed -e 's:\([(|)]\):\\\1:g')
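+		# MULTILIB_STRICT_* come from the profile; for example, a deny
+		# pattern like "64-bit" would flag 64-bit objects installed
+		# under a 32-bit libdir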
+		for dir in ${MULTILIB_STRICT_DIRS} ; do
+			[[ -d ${D}/${dir} ]] || continue
+			for file in $(find ${D}/${dir} -type f | grep -v "^${D}/${dir}/${MULTILIB_STRICT_EXEMPT}"); do
+				if file ${file} | egrep -q "${MULTILIB_STRICT_DENY}" ; then
+					if [[ ${firstrun} == yes ]] ; then
+						echo "Files matching a file type that is not allowed:"
+						firstrun=no
+					fi
+					abort=yes
+					echo "   ${file#${D}//}"
+				fi
+			done
+		done
+		[[ ${abort} == yes ]] && die "multilib-strict check failed!"
+	fi
+
+	# ensure packages don't install systemd units automagically
+	if ! has systemd ${INHERITED} && \
+		[[ -d "${D}"/lib/systemd/system ]]
+	then
+		eqawarn "QA Notice: package installs systemd unit files (/lib/systemd/system)"
+		eqawarn "           but does not inherit systemd.eclass."
+		has stricter ${FEATURES} \
+			&& die "install aborted due to missing inherit of systemd.eclass"
+	fi
+}
+
+
+install_mask() {
+	local root="$1"
+	shift
+	local install_mask="$*"
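+	# e.g. install_mask "${D}" "/usr/share/doc *.la" (a hypothetical mask)
+	# would remove the doc tree and any libtool archives from the image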
+
+	# we don't want globbing for initial expansion, but afterwards, we do
+	local shopts=$-
+	set -o noglob
+	local no_inst
+	for no_inst in ${install_mask}; do
+		set +o noglob
+		quiet_mode || einfo "Removing ${no_inst}"
+		# normal stuff
+		rm -Rf "${root}"/${no_inst} >&/dev/null
+
+		# we also need to handle globs (*.a, *.h, etc)
+		find "${root}" \( -path "${no_inst}" -or -name "${no_inst}" \) \
+			-exec rm -fR {} \; >/dev/null 2>&1
+	done
+	# set everything back the way we found it
+	set +o noglob
+	set -${shopts}
+}
+
+preinst_mask() {
+	if [ -z "${D}" ]; then
+		 eerror "${FUNCNAME}: D is unset"
+		 return 1
+	fi
+
+	# Make sure $PWD is not ${D} so that we don't leave gmon.out files
+	# in there in case any tools were built with -pg in CFLAGS.
+	cd "${T}"
+
+	# remove man pages, info pages, docs if requested
+	local f
+	for f in man info doc; do
+		if has no${f} $FEATURES; then
+			INSTALL_MASK="${INSTALL_MASK} /usr/share/${f}"
+		fi
+	done
+
+	install_mask "${D}" "${INSTALL_MASK}"
+
+	# remove share dir if unnecessary
+	if has nodoc $FEATURES || has noman $FEATURES || has noinfo $FEATURES; then
+		rmdir "${D}usr/share" &> /dev/null
+	fi
+}
+
+preinst_sfperms() {
+	if [ -z "${D}" ]; then
+		 eerror "${FUNCNAME}: D is unset"
+		 return 1
+	fi
+	# Smart FileSystem Permissions
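+	# e.g. a setuid binary installed 4755 (rwsr-xr-x) ends up 4711
+	# (rws--x--x) so unprivileged users cannot read it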
+	if has sfperms $FEATURES; then
+		local i
+		find "${D}" -type f -perm -4000 -print0 | \
+		while read -r -d $'\0' i ; do
+			if [ -n "$(find "$i" -perm -2000)" ] ; then
+				ebegin ">>> SetUID and SetGID: [chmod o-r] /${i#${D}}"
+				chmod o-r "$i"
+				eend $?
+			else
+				ebegin ">>> SetUID: [chmod go-r] /${i#${D}}"
+				chmod go-r "$i"
+				eend $?
+			fi
+		done
+		find "${D}" -type f -perm -2000 -print0 | \
+		while read -r -d $'\0' i ; do
+			if [ -n "$(find "$i" -perm -4000)" ] ; then
+				# This case is already handled
+				# by the SetUID check above.
+				true
+			else
+				ebegin ">>> SetGID: [chmod o-r] /${i#${D}}"
+				chmod o-r "$i"
+				eend $?
+			fi
+		done
+	fi
+}
+
+preinst_suid_scan() {
+	if [ -z "${D}" ]; then
+		 eerror "${FUNCNAME}: D is unset"
+		 return 1
+	fi
+	# total suid control.
+	if has suidctl $FEATURES; then
+		local i sfconf x
+		sfconf=${PORTAGE_CONFIGROOT}etc/portage/suidctl.conf
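+		# suidctl.conf lists one approved set*id path per line; files not
+		# listed get their s-bits stripped below, and a commented-out
+		# template entry is appended for the admin to review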
+		# sandbox prevents us from writing directly
+		# to files outside of the sandbox, but this
+		# can easily be bypassed using the addwrite() function
+		addwrite "${sfconf}"
+		vecho ">>> Performing suid scan in ${D}"
+		for i in $(find "${D}" -type f \( -perm -4000 -o -perm -2000 \) ); do
+			if [ -s "${sfconf}" ]; then
+				install_path=/${i#${D}}
+				if grep -q "^${install_path}\$" "${sfconf}" ; then
+					vecho "- ${install_path} is an approved suid file"
+				else
+					vecho ">>> Removing s-bit on non-registered ${install_path}"
+					for x in 5 4 3 2 1 0; do sleep 0.25 ; done
+					ls_ret=$(ls -ldh "${i}")
+					chmod ugo-s "${i}"
+					grep "^#${install_path}$" "${sfconf}" > /dev/null || {
+						vecho ">>> Appending commented out entry to ${sfconf} for ${PF}"
+						echo "## ${ls_ret%${D}*}${install_path}" >> "${sfconf}"
+						echo "#${install_path}" >> "${sfconf}"
+						# no delwrite() eh?
+						# delwrite ${sconf}
+					}
+				fi
+			else
+				vecho "suidctl feature is set but ${sfconf} is missing"
+			fi
+		done
+	fi
+}
+
+preinst_selinux_labels() {
+	if [ -z "${D}" ]; then
+		 eerror "${FUNCNAME}: D is unset"
+		 return 1
+	fi
+	if has selinux ${FEATURES}; then
+		# SELinux file labeling (needs to always be last in dyn_preinst)
+		# only attempt to label if setfiles is executable
+		# and 'context' is available on selinuxfs.
+		if [ -f /selinux/context -a -x /usr/sbin/setfiles -a -x /usr/sbin/selinuxconfig ]; then
+			vecho ">>> Setting SELinux security labels"
+			(
+				eval "$(/usr/sbin/selinuxconfig)" || \
+					die "Failed to determine SELinux policy paths.";
+
+				addwrite /selinux/context;
+
+				/usr/sbin/setfiles "${file_contexts_path}" -r "${D}" "${D}"
+			) || die "Failed to set SELinux security labels."
+		else
+			# nonfatal, since merging can happen outside a SE kernel
+			# like during a recovery situation
+			vecho "!!! Unable to set SELinux security labels"
+		fi
+	fi
+}
+
+dyn_package() {
+	# Make sure $PWD is not ${D} so that we don't leave gmon.out files
+	# in there in case any tools were built with -pg in CFLAGS.
+	cd "${T}"
+	install_mask "${PORTAGE_BUILDDIR}/image" "${PKG_INSTALL_MASK}"
+	local tar_options=""
+	[[ $PORTAGE_VERBOSE = 1 ]] && tar_options+=" -v"
+	# Sandbox is disabled in case the user wants to use a symlink
+	# for $PKGDIR and/or $PKGDIR/All.
+	export SANDBOX_ON="0"
+	[ -z "${PORTAGE_BINPKG_TMPFILE}" ] && \
+		die "PORTAGE_BINPKG_TMPFILE is unset"
+	mkdir -p "${PORTAGE_BINPKG_TMPFILE%/*}" || die "mkdir failed"
+	tar $tar_options -cf - $PORTAGE_BINPKG_TAR_OPTS -C "${D}" . | \
+		$PORTAGE_BZIP2_COMMAND -c > "$PORTAGE_BINPKG_TMPFILE"
+	assert "failed to pack binary package: '$PORTAGE_BINPKG_TMPFILE'"
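+	# the resulting .tbz2 is a bzip2-compressed tar with an xpak metadata
+	# segment (the build-info files) appended to its end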
+	PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
+		"${PORTAGE_PYTHON:-/usr/bin/python}" "$PORTAGE_BIN_PATH"/xpak-helper.py recompose \
+		"$PORTAGE_BINPKG_TMPFILE" "$PORTAGE_BUILDDIR/build-info"
+	if [ $? -ne 0 ]; then
+		rm -f "${PORTAGE_BINPKG_TMPFILE}"
+		die "Failed to append metadata to the tbz2 file"
+	fi
+	local md5_hash=""
+	if type md5sum &>/dev/null ; then
+		md5_hash=$(md5sum "${PORTAGE_BINPKG_TMPFILE}")
+		md5_hash=${md5_hash%% *}
+	elif type md5 &>/dev/null ; then
+		md5_hash=$(md5 "${PORTAGE_BINPKG_TMPFILE}")
+		md5_hash=${md5_hash##* }
+	fi
+	[ -n "${md5_hash}" ] && \
+		echo ${md5_hash} > "${PORTAGE_BUILDDIR}"/build-info/BINPKGMD5
+	vecho ">>> Done."
+	cd "${PORTAGE_BUILDDIR}"
+	>> "$PORTAGE_BUILDDIR/.packaged" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.packaged"
+}
+
+dyn_spec() {
+	local sources_dir=/usr/src/rpm/SOURCES
+	mkdir -p "${sources_dir}"
+	declare -a tar_args=("${EBUILD}")
+	[[ -d ${FILESDIR} ]] && tar_args=("${EBUILD}" "${FILESDIR}")
+	tar czf "${sources_dir}/${PF}.tar.gz" \
+		"${tar_args[@]}" || \
+		die "Failed to create base rpm tarball."
+
+	cat <<__END1__ > ${PF}.spec
+Summary: ${DESCRIPTION}
+Name: ${PN}
+Version: ${PV}
+Release: ${PR}
+Copyright: GPL
+Group: portage/${CATEGORY}
+Source: ${PF}.tar.gz
+Buildroot: ${D}
+%description
+${DESCRIPTION}
+
+${HOMEPAGE}
+
+%prep
+%setup -c
+
+%build
+
+%install
+
+%clean
+
+%files
+/
+__END1__
+
+}
+
+dyn_rpm() {
+	cd "${T}" || die "cd failed"
+	local machine_name=$(uname -m)
+	local dest_dir=/usr/src/rpm/RPMS/${machine_name}
+	addwrite /usr/src/rpm
+	addwrite "${RPMDIR}"
+	dyn_spec
+	rpmbuild -bb --clean --rmsource "${PF}.spec" || die "Failed to integrate rpm spec file"
+	install -D "${dest_dir}/${PN}-${PV}-${PR}.${machine_name}.rpm" \
+		"${RPMDIR}/${CATEGORY}/${PN}-${PV}-${PR}.rpm" || \
+		die "Failed to move rpm"
+}
+
+die_hooks() {
+	[[ -f $PORTAGE_BUILDDIR/.die_hooks ]] && return
+	local x
+	for x in $EBUILD_DEATH_HOOKS ; do
+		$x >&2
+	done
+	> "$PORTAGE_BUILDDIR/.die_hooks"
+}
+
+success_hooks() {
+	local x
+	for x in $EBUILD_SUCCESS_HOOKS ; do
+		$x
+	done
+}
+
+if [ -n "${MISC_FUNCTIONS_ARGS}" ]; then
+	source_all_bashrcs
+	[ "$PORTAGE_DEBUG" == "1" ] && set -x
+	for x in ${MISC_FUNCTIONS_ARGS}; do
+		${x}
+	done
+	unset x
+	[[ -n $PORTAGE_EBUILD_EXIT_FILE ]] && > "$PORTAGE_EBUILD_EXIT_FILE"
+	if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
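+		# the test below yields exit status 1 for the daemon when
+		# $SANDBOX_LOG is non-empty, i.e. when a sandbox violation occurred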
+		[[ ! -s $SANDBOX_LOG ]]
+		"$PORTAGE_BIN_PATH"/ebuild-ipc exit $?
+	fi
+fi
+
+:

diff --git a/portage_with_autodep/bin/portageq b/portage_with_autodep/bin/portageq
new file mode 100755
index 0000000..57a7c39
--- /dev/null
+++ b/portage_with_autodep/bin/portageq
@@ -0,0 +1,822 @@
+#!/usr/bin/python -O
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import signal
+import sys
+# This block ensures that ^C interrupts are handled quietly.
+try:
+
+	def exithandler(signum, frame):
+		signal.signal(signal.SIGINT, signal.SIG_IGN)
+		signal.signal(signal.SIGTERM, signal.SIG_IGN)
+		sys.exit(128 + signum)
+
+	signal.signal(signal.SIGINT, exithandler)
+	signal.signal(signal.SIGTERM, exithandler)
+
+except KeyboardInterrupt:
+	sys.exit(128 + signal.SIGINT)
+
+import os
+import types
+
+# Avoid sandbox violations after python upgrade.
+pym_path = os.path.join(os.path.dirname(
+	os.path.dirname(os.path.realpath(__file__))), "pym")
+if os.environ.get("SANDBOX_ON") == "1":
+	sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
+	if pym_path not in sandbox_write:
+		sandbox_write.append(pym_path)
+		os.environ["SANDBOX_WRITE"] = \
+			":".join(filter(None, sandbox_write))
+	del sandbox_write
+
+try:
+	import portage
+except ImportError:
+	sys.path.insert(0, pym_path)
+	import portage
+del pym_path
+
+from portage import os
+from portage.util import writemsg, writemsg_stdout
+portage.proxy.lazyimport.lazyimport(globals(),
+	'subprocess',
+	'_emerge.Package:Package',
+	'_emerge.RootConfig:RootConfig',
+	'portage.dbapi._expand_new_virt:expand_new_virt',
+)
+
+def eval_atom_use(atom):
+	if 'USE' in os.environ:
+		use = frozenset(os.environ['USE'].split())
+		atom = atom.evaluate_conditionals(use)
+	return atom
+
+#-----------------------------------------------------------------------------
+#
+# To add functionality to this tool, add a function below.
+#
+# The format for functions is:
+#
+#   def function(argv):
+#       """<list of options for this function>
+#       <description of the function>
+#       """
+#       <code>
+#
+# "argv" is an array of the command line parameters provided after the command.
+#
+# Make sure you document the function in the right format.  The documentation
+# is used to display help on the function.
+#
+# You do not need to add the function to any lists, this tool is introspective,
+# and will automatically add a command by the same name as the function!
+#
+
+def has_version(argv):
+	"""<root> <category/package>
+	Return code 0 if it's available, 1 otherwise.
+	"""
+	if (len(argv) < 2):
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+
+	warnings = []
+
+	try:
+		atom = portage.dep.Atom(argv[1])
+	except portage.exception.InvalidAtom:
+		if atom_validate_strict:
+			portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1],
+				noiselevel=-1)
+			return 2
+		else:
+			atom = argv[1]
+	else:
+		if atom_validate_strict:
+			try:
+				atom = portage.dep.Atom(argv[1], eapi=eapi)
+			except portage.exception.InvalidAtom as e:
+				warnings.append(
+					portage._unicode_decode("QA Notice: %s: %s") % \
+					('has_version', e))
+		atom = eval_atom_use(atom)
+
+	if warnings:
+		elog('eqawarn', warnings)
+
+	try:
+		mylist = portage.db[argv[0]]["vartree"].dbapi.match(atom)
+		if mylist:
+			sys.exit(0)
+		else:
+			sys.exit(1)
+	except KeyError:
+		sys.exit(1)
+	except portage.exception.InvalidAtom:
+		portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1],
+			noiselevel=-1)
+		return 2
+has_version.uses_root = True
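+# Example: `portageq has_version / sys-apps/portage` exits 0 when the atom
+# is installed under the given root, 1 otherwise.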
+
+
+def best_version(argv):
+	"""<root> <category/package>
+	Returns category/package-version (without .ebuild).
+	"""
+	if (len(argv) < 2):
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+
+	warnings = []
+
+	try:
+		atom = portage.dep.Atom(argv[1])
+	except portage.exception.InvalidAtom:
+		if atom_validate_strict:
+			portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1],
+				noiselevel=-1)
+			return 2
+		else:
+			atom = argv[1]
+	else:
+		if atom_validate_strict:
+			try:
+				atom = portage.dep.Atom(argv[1], eapi=eapi)
+			except portage.exception.InvalidAtom as e:
+				warnings.append(
+					portage._unicode_decode("QA Notice: %s: %s") % \
+					('best_version', e))
+		atom = eval_atom_use(atom)
+
+	if warnings:
+		elog('eqawarn', warnings)
+
+	try:
+		mylist = portage.db[argv[0]]["vartree"].dbapi.match(atom)
+		print(portage.best(mylist))
+	except KeyError:
+		sys.exit(1)
+best_version.uses_root = True
+
+
+def mass_best_version(argv):
+	"""<root> [<category/package>]+
+	Returns category/package-version (without .ebuild).
+	"""
+	if (len(argv) < 2):
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+	try:
+		for pack in argv[1:]:
+			mylist=portage.db[argv[0]]["vartree"].dbapi.match(pack)
+			print(pack+":"+portage.best(mylist))
+	except KeyError:
+		sys.exit(1)
+mass_best_version.uses_root = True
+
+def metadata(argv):
+	if (len(argv) < 4):
+		print("ERROR: insufficient parameters!", file=sys.stderr)
+		sys.exit(2)
+
+	root, pkgtype, pkgspec = argv[0:3]
+	metakeys = argv[3:]
+	type_map = {
+		"ebuild":"porttree",
+		"binary":"bintree",
+		"installed":"vartree"}
+	if pkgtype not in type_map:
+		print("Unrecognized package type: '%s'" % pkgtype, file=sys.stderr)
+		sys.exit(1)
+	trees = portage.db
+	if os.path.realpath(root) == os.path.realpath(portage.settings["ROOT"]):
+		root = portage.settings["ROOT"] # contains the normalized $ROOT
+	try:
+		values = trees[root][type_map[pkgtype]].dbapi.aux_get(
+			pkgspec, metakeys)
+		writemsg_stdout(''.join('%s\n' % x for x in values), noiselevel=-1)
+	except KeyError:
+		print("Package not found: '%s'" % pkgspec, file=sys.stderr)
+		sys.exit(1)
+
+metadata.__doc__ = """
+<root> <pkgtype> <category/package> [<key>]+
+Returns metadata values for the specified package.
+Available keys: %s
+""" % ','.join(sorted(x for x in portage.auxdbkeys
+	if not x.startswith('UNUSED_')))
+
+metadata.uses_root = True
+
+def contents(argv):
+	"""<root> <category/package>
+	List the files that are installed for a given package, with
+	one file listed on each line. All file names will begin with
+	<root>.
+	"""
+	if len(argv) != 2:
+		print("ERROR: expected 2 parameters, got %d!" % len(argv))
+		return 2
+
+	root, cpv = argv
+	vartree = portage.db[root]["vartree"]
+	if not vartree.dbapi.cpv_exists(cpv):
+		sys.stderr.write("Package not found: '%s'\n" % cpv)
+		return 1
+	cat, pkg = portage.catsplit(cpv)
+	db = portage.dblink(cat, pkg, root, vartree.settings,
+		treetype="vartree", vartree=vartree)
+	writemsg_stdout(''.join('%s\n' % x for x in sorted(db.getcontents())),
+		noiselevel=-1)
+contents.uses_root = True
+
+def owners(argv):
+	"""<root> [<filename>]+
+	Given a list of files, print the packages that own the files and which
+	files belong to each package. Files owned by a package are listed on
+	the lines below it, indented by a single tab character (\\t). All file
+	paths must either start with <root> or be a basename alone.
+	Returns 1 if no owners could be found, and 0 otherwise.
+	"""
+	if len(argv) < 2:
+		sys.stderr.write("ERROR: insufficient parameters!\n")
+		sys.stderr.flush()
+		return 2
+
+	from portage import catsplit, dblink
+	settings = portage.settings
+	root = settings["ROOT"]
+	vardb = portage.db[root]["vartree"].dbapi
+
+	cwd = None
+	try:
+		cwd = os.getcwd()
+	except OSError:
+		pass
+
+	files = []
+	orphan_abs_paths = set()
+	orphan_basenames = set()
+	for f in argv[1:]:
+		f = portage.normalize_path(f)
+		is_basename = os.sep not in f
+		if not is_basename and f[:1] != os.sep:
+			if cwd is None:
+				sys.stderr.write("ERROR: cwd does not exist!\n")
+				sys.stderr.flush()
+				return 2
+			f = os.path.join(cwd, f)
+			f = portage.normalize_path(f)
+		if not is_basename and not f.startswith(root):
+			sys.stderr.write("ERROR: file paths must begin with <root>!\n")
+			sys.stderr.flush()
+			return 2
+		if is_basename:
+			files.append(f)
+			orphan_basenames.add(f)
+		else:
+			files.append(f[len(root)-1:])
+			orphan_abs_paths.add(f)
+
+	owners = vardb._owners.get_owners(files)
+
+	msg = []
+	for pkg, owned_files in owners.items():
+		cpv = pkg.mycpv
+		msg.append("%s\n" % cpv)
+		for f in sorted(owned_files):
+			f_abs = os.path.join(root, f.lstrip(os.path.sep))
+			msg.append("\t%s\n" % (f_abs,))
+			orphan_abs_paths.discard(f_abs)
+			if orphan_basenames:
+				orphan_basenames.discard(os.path.basename(f_abs))
+
+	writemsg_stdout(''.join(msg), noiselevel=-1)
+
+	if orphan_abs_paths or orphan_basenames:
+		orphans = []
+		orphans.extend(orphan_abs_paths)
+		orphans.extend(orphan_basenames)
+		orphans.sort()
+		msg = []
+		msg.append("None of the installed packages claim these files:\n")
+		for f in orphans:
+			msg.append("\t%s\n" % (f,))
+		sys.stderr.write("".join(msg))
+		sys.stderr.flush()
+
+	if owners:
+		return 0
+	return 1
+
+owners.uses_root = True
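+# Example: `portageq owners / /bin/sh` prints the owning package followed by
+# the matched file path, and returns 1 if no package claims the file.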
+
+def is_protected(argv):
+	"""<root> <filename>
+	Given a single filename, return code 0 if it's protected, 1 otherwise.
+	The filename must begin with <root>.
+	"""
+	if len(argv) != 2:
+		sys.stderr.write("ERROR: expected 2 parameters, got %d!\n" % len(argv))
+		sys.stderr.flush()
+		return 2
+
+	root, filename = argv
+
+	err = sys.stderr
+	cwd = None
+	try:
+		cwd = os.getcwd()
+	except OSError:
+		pass
+
+	f = portage.normalize_path(filename)
+	if not f.startswith(os.path.sep):
+		if cwd is None:
+			err.write("ERROR: cwd does not exist!\n")
+			err.flush()
+			return 2
+		f = os.path.join(cwd, f)
+		f = portage.normalize_path(f)
+
+	if not f.startswith(root):
+		err.write("ERROR: file paths must begin with <root>!\n")
+		err.flush()
+		return 2
+
+	from portage.util import ConfigProtect
+
+	settings = portage.settings
+	protect = portage.util.shlex_split(settings.get("CONFIG_PROTECT", ""))
+	protect_mask = portage.util.shlex_split(
+		settings.get("CONFIG_PROTECT_MASK", ""))
+	protect_obj = ConfigProtect(root, protect, protect_mask)
+
+	if protect_obj.isprotected(f):
+		return 0
+	return 1
+
+is_protected.uses_root = True
+
+def filter_protected(argv):
+	"""<root>
+	Read filenames from stdin and write them to stdout if they are protected.
+	All filenames are delimited by \\n and must begin with <root>.
+	"""
+	if len(argv) != 1:
+		sys.stderr.write("ERROR: expected 1 parameter, got %d!\n" % len(argv))
+		sys.stderr.flush()
+		return 2
+
+	root, = argv
+	out = sys.stdout
+	err = sys.stderr
+	cwd = None
+	try:
+		cwd = os.getcwd()
+	except OSError:
+		pass
+
+	from portage.util import ConfigProtect
+
+	settings = portage.settings
+	protect = portage.util.shlex_split(settings.get("CONFIG_PROTECT", ""))
+	protect_mask = portage.util.shlex_split(
+		settings.get("CONFIG_PROTECT_MASK", ""))
+	protect_obj = ConfigProtect(root, protect, protect_mask)
+
+	protected = 0
+	errors = 0
+
+	for line in sys.stdin:
+		filename = line.rstrip("\n")
+		f = portage.normalize_path(filename)
+		if not f.startswith(os.path.sep):
+			if cwd is None:
+				err.write("ERROR: cwd does not exist!\n")
+				err.flush()
+				errors += 1
+				continue
+			f = os.path.join(cwd, f)
+			f = portage.normalize_path(f)
+
+		if not f.startswith(root):
+			err.write("ERROR: file paths must begin with <root>!\n")
+			err.flush()
+			errors += 1
+			continue
+
+		if protect_obj.isprotected(f):
+			protected += 1
+			out.write("%s\n" % filename)
+	out.flush()
+
+	if errors:
+		return 2
+
+	return 0
+
+filter_protected.uses_root = True
+
+def best_visible(argv):
+	"""<root> [pkgtype] <atom>
+	Returns category/package-version (without .ebuild).
+	The pkgtype argument defaults to "ebuild" if unspecified,
+	otherwise it must be one of ebuild, binary, or installed.
+	"""
+	if (len(argv) < 2):
+		writemsg("ERROR: insufficient parameters!\n", noiselevel=-1)
+		return 2
+
+	pkgtype = "ebuild"
+	if len(argv) > 2:
+		pkgtype = argv[1]
+		atom = argv[2]
+	else:
+		atom = argv[1]
+
+	type_map = {
+		"ebuild":"porttree",
+		"binary":"bintree",
+		"installed":"vartree"}
+
+	if pkgtype not in type_map:
+		writemsg("Unrecognized package type: '%s'\n" % pkgtype,
+			noiselevel=-1)
+		return 2
+
+	db = portage.db[portage.settings["ROOT"]][type_map[pkgtype]].dbapi
+
+	try:
+		atom = portage.dep_expand(atom, mydb=db, settings=portage.settings)
+	except portage.exception.InvalidAtom:
+		writemsg("ERROR: Invalid atom: '%s'\n" % atom,
+			noiselevel=-1)
+		return 2
+
+	root_config = RootConfig(portage.settings,
+		portage.db[portage.settings["ROOT"]], None)
+
+	try:
+			# if an empty conf.d/init.d dir exists (baselayout), then i will be "/etc/conf.d/*" and will not exist
+		for cpv in reversed(db.match(atom)):
+			metadata = dict(zip(Package.metadata_keys,
+				db.aux_get(cpv, Package.metadata_keys, myrepo=atom.repo)))
+			pkg = Package(built=(pkgtype != "ebuild"), cpv=cpv,
+				installed=(pkgtype=="installed"), metadata=metadata,
+				root_config=root_config, type_name=pkgtype)
+			if pkg.visible:
+				writemsg_stdout("%s\n" % (pkg.cpv,), noiselevel=-1)
+				return os.EX_OK
+	except KeyError:
+		pass
+	return 1
+best_visible.uses_root = True
+
+
+def mass_best_visible(argv):
+	"""<root> [<category/package>]+
+	Returns category/package-version (without .ebuild).
+	"""
+	if (len(argv) < 2):
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+	try:
+		for pack in argv[1:]:
+			mylist=portage.db[argv[0]]["porttree"].dbapi.match(pack)
+			print(pack+":"+portage.best(mylist))
+	except KeyError:
+		sys.exit(1)
+mass_best_visible.uses_root = True
+
+
+def all_best_visible(argv):
+	"""<root>
+	Returns all best_visible packages (without .ebuild).
+	"""
+	if len(argv) < 1:
+		sys.stderr.write("ERROR: insufficient parameters!\n")
+		sys.stderr.flush()
+		return 2
+
+	#print portage.db[argv[0]]["porttree"].dbapi.cp_all()
+	for pkg in portage.db[argv[0]]["porttree"].dbapi.cp_all():
+		mybest=portage.best(portage.db[argv[0]]["porttree"].dbapi.match(pkg))
+		if mybest:
+			print(mybest)
+all_best_visible.uses_root = True
+
+
+def match(argv):
+	"""<root> <atom>
+	Returns a \\n separated list of category/package-version.
+	When given an empty string, all installed packages will
+	be listed.
+	"""
+	if len(argv) != 2:
+		print("ERROR: expected 2 parameters, got %d!" % len(argv))
+		sys.exit(2)
+	root, atom = argv
+	if atom:
+		if atom_validate_strict and not portage.isvalidatom(atom):
+			portage.writemsg("ERROR: Invalid atom: '%s'\n" % atom,
+				noiselevel=-1)
+			return 2
+		results = portage.db[root]["vartree"].dbapi.match(atom)
+	else:
+		results = portage.db[root]["vartree"].dbapi.cpv_all()
+		results.sort()
+	for cpv in results:
+		print(cpv)
+match.uses_root = True
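+# Example: `portageq match / 'sys-apps/sed'` prints the installed versions
+# of sed; passing an empty atom lists every installed package.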
+
+def expand_virtual(argv):
+	"""<root> <atom>
+	Returns a \\n separated list of atoms expanded from a
+	given virtual atom (GLEP 37 virtuals only),
+	excluding blocker atoms. Satisfied
+	virtual atoms are not included in the output, since
+	they are expanded to real atoms which are displayed.
+	Unsatisfied virtual atoms are displayed without
+	any expansion. The "match" command can be used to
+	resolve the returned atoms to specific installed
+	packages.
+	"""
+	if len(argv) != 2:
+		writemsg("ERROR: expected 2 parameters, got %d!\n" % len(argv),
+			noiselevel=-1)
+		return 2
+
+	root, atom = argv
+
+	try:
+		results = list(expand_new_virt(
+			portage.db[root]["vartree"].dbapi, atom))
+	except portage.exception.InvalidAtom:
+		writemsg("ERROR: Invalid atom: '%s'\n" % atom,
+			noiselevel=-1)
+		return 2
+
+	results.sort()
+	for x in results:
+		if not x.blocker:
+			writemsg_stdout("%s\n" % (x,))
+
+	return os.EX_OK
+
+expand_virtual.uses_root = True
+
+def vdb_path(argv):
+	"""
+	Returns the path used for the var (installed packages) database for the
+	current environment/configuration options.
+	"""
+	out = sys.stdout
+	out.write(os.path.join(portage.settings["EROOT"], portage.VDB_PATH) + "\n")
+	out.flush()
+	return os.EX_OK
+
+def gentoo_mirrors(argv):
+	"""
+	Returns the mirrors set to use in the portage configuration.
+	"""
+	print(portage.settings["GENTOO_MIRRORS"])
+
+
+def portdir(argv):
+	"""
+	Returns the PORTDIR path.
+	"""
+	print(portage.settings["PORTDIR"])
+
+
+def config_protect(argv):
+	"""
+	Returns the CONFIG_PROTECT paths.
+	"""
+	print(portage.settings["CONFIG_PROTECT"])
+
+
+def config_protect_mask(argv):
+	"""
+	Returns the CONFIG_PROTECT_MASK paths.
+	"""
+	print(portage.settings["CONFIG_PROTECT_MASK"])
+
+
+def portdir_overlay(argv):
+	"""
+	Returns the PORTDIR_OVERLAY path.
+	"""
+	print(portage.settings["PORTDIR_OVERLAY"])
+
+
+def pkgdir(argv):
+	"""
+	Returns the PKGDIR path.
+	"""
+	print(portage.settings["PKGDIR"])
+
+
+def distdir(argv):
+	"""
+	Returns the DISTDIR path.
+	"""
+	print(portage.settings["DISTDIR"])
+
+
+def envvar(argv):
+	"""<variable>+
+	Returns a specific environment variable as exists prior to ebuild.sh.
+	Similar to: emerge --verbose --info | egrep '^<variable>='
+	"""
+	verbose = "-v" in argv
+	if verbose:
+		argv.pop(argv.index("-v"))
+
+	if len(argv) == 0:
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+
+	for arg in argv:
+		if verbose:
+			print(arg +"='"+ portage.settings[arg] +"'")
+		else:
+			print(portage.settings[arg])
+
+def get_repos(argv):
+	"""<root>
+	Returns all repos that have names (a repo_name file); argv[0] = $ROOT.
+	"""
+	if len(argv) < 1:
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+	print(" ".join(portage.db[argv[0]]["porttree"].dbapi.getRepositories()))
+
+def get_repo_path(argv):
+	"""<root> <repo_id>+
+	Returns the path to each repo named in argv[1..]; argv[0] = $ROOT.
+	"""
+	if len(argv) < 2:
+		print("ERROR: insufficient parameters!")
+		sys.exit(2)
+	for arg in argv[1:]:
+		path = portage.db[argv[0]]["porttree"].dbapi.getRepositoryPath(arg)
+		if path is None:
+			path = ""
+		print(path)
+
+def list_preserved_libs(argv):
+	"""<root>
+	Print a list of libraries preserved during a package update in the form
+	package: path. Returns 1 if no preserved libraries could be found,
+	0 otherwise.
+	"""
+
+	if len(argv) != 1:
+		print("ERROR: wrong number of arguments")
+		sys.exit(2)
+	mylibs = portage.db[argv[0]]["vartree"].dbapi._plib_registry.getPreservedLibs()
+	rValue = 1
+	msg = []
+	for cpv in sorted(mylibs):
+		msg.append(cpv)
+		for path in mylibs[cpv]:
+			msg.append(' ' + path)
+			rValue = 0
+		msg.append('\n')
+	writemsg_stdout(''.join(msg), noiselevel=-1)
+	return rValue
+list_preserved_libs.uses_root = True
+
+#-----------------------------------------------------------------------------
+#
+# DO NOT CHANGE CODE BEYOND THIS POINT - IT'S NOT NEEDED!
+#
+
+if not portage.const._ENABLE_PRESERVE_LIBS:
+	del list_preserved_libs
+
+non_commands = frozenset(['elog', 'eval_atom_use',
+	'exithandler', 'expand_new_virt', 'main',
+	'usage', 'writemsg', 'writemsg_stdout'])
+commands = sorted(k for k, v in globals().items() \
+	if k not in non_commands and isinstance(v, types.FunctionType))
+
+def usage(argv):
+	print(">>> Portage information query tool")
+	print(">>> %s" % portage.VERSION)
+	print(">>> Usage: portageq <command> [<option> ...]")
+	print("")
+	print("Available commands:")
+
+	#
+	# Show our commands -- we do this by scanning the functions in this
+	# file, and formatting each functions documentation.
+	#
+	help_mode = '--help' in sys.argv
+	for name in commands:
+		# Drop non-functions
+		obj = globals()[name]
+
+		doc = obj.__doc__
+		if doc is None:
+			print("   " + name)
+			print("      MISSING DOCUMENTATION!")
+			print("")
+			continue
+
+		lines = doc.lstrip("\n").split("\n")
+		print("   " + name + " " + lines[0].strip())
+		if (len(sys.argv) > 1):
+			if (not help_mode):
+				lines = lines[:-1]
+			for line in lines[1:]:
+				print("      " + line.strip())
+	if (len(sys.argv) == 1):
+		print("\nRun portageq with --help for info")
+
+atom_validate_strict = "EBUILD_PHASE" in os.environ
+eapi = None
+if atom_validate_strict:
+	eapi = os.environ.get('EAPI')
+
+	def elog(elog_funcname, lines):
+		cmd = "source '%s/isolated-functions.sh' ; " % \
+			os.environ["PORTAGE_BIN_PATH"]
+		for line in lines:
+			cmd += "%s %s ; " % (elog_funcname, portage._shell_quote(line))
+		subprocess.call([portage.const.BASH_BINARY, "-c", cmd])
+
+else:
+	def elog(elog_funcname, lines):
+		pass
+
+def main():
+
+	nocolor = os.environ.get('NOCOLOR')
+	if nocolor in ('yes', 'true'):
+		portage.output.nocolor()
+
+	if len(sys.argv) < 2:
+		usage(sys.argv)
+		sys.exit(os.EX_USAGE)
+
+	for x in sys.argv:
+		if x in ("-h", "--help"):
+			usage(sys.argv)
+			sys.exit(os.EX_OK)
+		elif x == "--version":
+			print("Portage", portage.VERSION)
+			sys.exit(os.EX_OK)
+
+	cmd = sys.argv[1]
+	function = globals().get(cmd)
+	if function is None or cmd not in commands:
+		usage(sys.argv)
+		sys.exit(os.EX_USAGE)
+	function = globals()[cmd]
+	uses_root = getattr(function, "uses_root", False) and len(sys.argv) > 2
+	if uses_root:
+		if not os.path.isdir(sys.argv[2]):
+			sys.stderr.write("Not a directory: '%s'\n" % sys.argv[2])
+			sys.stderr.write("Run portageq with --help for info\n")
+			sys.stderr.flush()
+			sys.exit(os.EX_USAGE)
+		os.environ["ROOT"] = sys.argv[2]
+
+	args = sys.argv[2:]
+	if args and sys.hexversion < 0x3000000 and not isinstance(args[0], unicode):
+		for i in range(len(args)):
+			args[i] = portage._unicode_decode(args[i])
+
+	try:
+		if uses_root:
+			args[0] = portage.settings["ROOT"]
+		retval = function(args)
+		if retval:
+			sys.exit(retval)
+	except portage.exception.PermissionDenied as e:
+		sys.stderr.write("Permission denied: '%s'\n" % str(e))
+		sys.exit(e.errno)
+	except portage.exception.ParseError as e:
+		sys.stderr.write("%s\n" % str(e))
+		sys.exit(1)
+	except portage.exception.AmbiguousPackageName as e:
+		# Multiple matches thrown from cpv_expand
+		pkgs = e.args[0]
+		# An error has occurred so we writemsg to stderr and exit nonzero.
+		portage.writemsg("You specified an unqualified atom that matched multiple packages:\n", noiselevel=-1)
+		for pkg in pkgs:
+			portage.writemsg("* %s\n" % pkg, noiselevel=-1)
+		portage.writemsg("\nPlease use a more specific atom.\n", noiselevel=-1)
+		sys.exit(1)
+
+main()
+
+#-----------------------------------------------------------------------------

diff --git a/portage_with_autodep/bin/quickpkg b/portage_with_autodep/bin/quickpkg
new file mode 100755
index 0000000..09723f5
--- /dev/null
+++ b/portage_with_autodep/bin/quickpkg
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import errno
+import math
+import optparse
+import signal
+import sys
+import tarfile
+
+try:
+	import portage
+except ImportError:
+	from os import path as osp
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+	import portage
+
+from portage import os
+from portage import xpak
+from portage.dbapi.dep_expand import dep_expand
+from portage.dep import use_reduce
+from portage.exception import InvalidAtom, InvalidData, InvalidDependString, \
+	PackageSetNotFound, PermissionDenied
+from portage.util import ConfigProtect, ensure_dirs, shlex_split
+from portage.dbapi.vartree import dblink, tar_contents
+from portage.checksum import perform_md5
+from portage._sets import load_default_config, SETPREFIX
+
+def quickpkg_atom(options, infos, arg, eout):
+	settings = portage.settings
+	root = portage.settings["ROOT"]
+	trees = portage.db[root]
+	vartree = trees["vartree"]
+	vardb = vartree.dbapi
+	bintree = trees["bintree"]
+
+	include_config = options.include_config == "y"
+	include_unmodified_config = options.include_unmodified_config == "y"
+	fix_metadata_keys = ["PF", "CATEGORY"]
+
+	try:
+		atom = dep_expand(arg, mydb=vardb, settings=vartree.settings)
+	except ValueError as e:
+		# Multiple matches thrown from cpv_expand
+		eout.eerror("Please use a more specific atom: %s" % \
+			" ".join(e.args[0]))
+		del e
+		infos["missing"].append(arg)
+		return
+	except (InvalidAtom, InvalidData):
+		eout.eerror("Invalid atom: %s" % (arg,))
+		infos["missing"].append(arg)
+		return
+	if atom[:1] == '=' and arg[:1] != '=':
+		# dep_expand() allows missing '=' but it's really invalid
+		eout.eerror("Invalid atom: %s" % (arg,))
+		infos["missing"].append(arg)
+		return
+
+	matches = vardb.match(atom)
+	pkgs_for_arg = 0
+	for cpv in matches:
+		excluded_config_files = []
+		bintree.prevent_collision(cpv)
+		cat, pkg = portage.catsplit(cpv)
+		dblnk = dblink(cat, pkg, root,
+			vartree.settings, treetype="vartree",
+			vartree=vartree)
+		have_lock = False
+		try:
+			dblnk.lockdb()
+			have_lock = True
+		except PermissionDenied:
+			pass
+		try:
+			if not dblnk.exists():
+				# unmerged by a concurrent process
+				continue
+			iuse, use, restrict = vardb.aux_get(cpv,
+				["IUSE","USE","RESTRICT"])
+			iuse = [ x.lstrip("+-") for x in iuse.split() ]
+			use = use.split()
+			try:
+				restrict = use_reduce(restrict, uselist=use, flat=True)
+			except InvalidDependString as e:
+				eout.eerror("Invalid RESTRICT metadata " + \
+					"for '%s': %s; skipping" % (cpv, str(e)))
+				del e
+				continue
+			if "bindist" in iuse and "bindist" not in use:
+				eout.ewarn("%s: package was emerged with USE=-bindist!" % cpv)
+				eout.ewarn("%s: it might not be legal to redistribute this." % cpv)
+			elif "bindist" in restrict:
+				eout.ewarn("%s: package has RESTRICT=bindist!" % cpv)
+				eout.ewarn("%s: it might not be legal to redistribute this." % cpv)
+			eout.ebegin("Building package for %s" % cpv)
+			pkgs_for_arg += 1
+			contents = dblnk.getcontents()
+			protect = None
+			if not include_config:
+				confprot = ConfigProtect(root,
+					shlex_split(settings.get("CONFIG_PROTECT", "")),
+					shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))
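+				# protect() tells tar_contents() which files to omit: any
+				# CONFIG_PROTECTed file, unless it is unmodified and
+				# --include-unmodified-config=y was given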
+				def protect(filename):
+					if not confprot.isprotected(filename):
+						return False
+					if include_unmodified_config:
+						file_data = contents[filename]
+						if file_data[0] == "obj":
+							orig_md5 = file_data[2].lower()
+							cur_md5 = perform_md5(filename, calc_prelink=1)
+							if orig_md5 == cur_md5:
+								return False
+					excluded_config_files.append(filename)
+					return True
+			existing_metadata = dict(zip(fix_metadata_keys,
+				vardb.aux_get(cpv, fix_metadata_keys)))
+			category, pf = portage.catsplit(cpv)
+			required_metadata = {}
+			required_metadata["CATEGORY"] = category
+			required_metadata["PF"] = pf
+			update_metadata = {}
+			for k, v in required_metadata.items():
+				if v != existing_metadata[k]:
+					update_metadata[k] = v
+			if update_metadata:
+				vardb.aux_update(cpv, update_metadata)
+			xpdata = xpak.xpak(dblnk.dbdir)
+			binpkg_tmpfile = os.path.join(bintree.pkgdir,
+				cpv + ".tbz2." + str(os.getpid()))
+			ensure_dirs(os.path.dirname(binpkg_tmpfile))
+			tar = tarfile.open(binpkg_tmpfile, "w:bz2")
+			tar_contents(contents, root, tar, protect=protect)
+			tar.close()
+			xpak.tbz2(binpkg_tmpfile).recompose_mem(xpdata)
+		finally:
+			if have_lock:
+				dblnk.unlockdb()
+		bintree.inject(cpv, filename=binpkg_tmpfile)
+		binpkg_path = bintree.getname(cpv)
+		try:
+			s = os.stat(binpkg_path)
+		except OSError as e:
+			# Sanity check, shouldn't happen normally.
+			eout.eend(1)
+			eout.eerror(str(e))
+			del e
+			eout.eerror("Failed to create package: '%s'" % binpkg_path)
+		else:
+			eout.eend(0)
+			infos["successes"].append((cpv, s.st_size))
+			infos["config_files_excluded"] += len(excluded_config_files)
+			for filename in excluded_config_files:
+				eout.ewarn("Excluded config: '%s'" % filename)
+	if not pkgs_for_arg:
+		eout.eerror("Could not find anything " + \
+			"to match '%s'; skipping" % arg)
+		infos["missing"].append(arg)
+
+def quickpkg_set(options, infos, arg, eout):
+	root = portage.settings["ROOT"]
+	trees = portage.db[root]
+	vartree = trees["vartree"]
+
+	settings = vartree.settings
+	settings._init_dirs()
+	setconfig = load_default_config(settings, trees)
+	sets = setconfig.getSets()
+
+	set = arg[1:]
+	if set not in sets:
+		eout.eerror("Package set not found: '%s'; skipping" % (arg,))
+		infos["missing"].append(arg)
+		return
+
+	try:
+		atoms = setconfig.getSetAtoms(set)
+	except PackageSetNotFound as e:
+		eout.eerror("Failed to process package set '%s' because " % set +
+			"it contains the non-existent package set '%s'; skipping" % e)
+		infos["missing"].append(arg)
+		return
+
+	for atom in atoms:
+		quickpkg_atom(options, infos, atom, eout)
+
+def quickpkg_main(options, args, eout):
+	root = portage.settings["ROOT"]
+	trees = portage.db[root]
+	bintree = trees["bintree"]
+
+	try:
+		ensure_dirs(bintree.pkgdir)
+	except portage.exception.PortageException:
+		pass
+	if not os.access(bintree.pkgdir, os.W_OK):
+		eout.eerror("No write access to '%s'" % bintree.pkgdir)
+		return errno.EACCES
+
+	infos = {}
+	infos["successes"] = []
+	infos["missing"] = []
+	infos["config_files_excluded"] = 0
+	for arg in args:
+		if arg[0] == SETPREFIX:
+			quickpkg_set(options, infos, arg, eout)
+		else:
+			quickpkg_atom(options, infos, arg, eout)
+
+	if not infos["successes"]:
+		eout.eerror("No packages found")
+		return 1
+	print()
+	eout.einfo("Packages now in '%s':" % bintree.pkgdir)
+	units = {10:'K', 20:'M', 30:'G', 40:'T',
+		50:'P', 60:'E', 70:'Z', 80:'Y'}
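+	# worked example: a 1536000 byte package has log2(size) ~= 20.5, so
+	# power_of_2 == 20 selects 'M' and it is printed as "1.5M", like `du -h`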
+	for cpv, size in infos["successes"]:
+		if not size:
+			# avoid OverflowError in math.log()
+			size_str = "0"
+		else:
+			power_of_2 = math.log(size, 2)
+			power_of_2 = 10*int(power_of_2/10)
+			unit = units.get(power_of_2)
+			if unit:
+				size = float(size)/(2**power_of_2)
+				size_str = "%.1f" % size
+				if len(size_str) > 4:
+					# emulate `du -h`, don't show too many sig figs
+					size_str = str(int(size))
+				size_str += unit
+			else:
+				size_str = str(size)
+		eout.einfo("%s: %s" % (cpv, size_str))
+	if infos["config_files_excluded"]:
+		print()
+		eout.ewarn("Excluded config files: %d" % infos["config_files_excluded"])
+		eout.ewarn("See --help if you would like to include config files.")
+	if infos["missing"]:
+		print()
+		eout.ewarn("The following packages could not be found:")
+		eout.ewarn(" ".join(infos["missing"]))
+		return 2
+	return os.EX_OK
+
+if __name__ == "__main__":
+	usage = "quickpkg [options] <list of package atoms or package sets>"
+	parser = optparse.OptionParser(usage=usage)
+	parser.add_option("--umask",
+		default="0077",
+		help="umask used during package creation (default is 0077)")
+	parser.add_option("--ignore-default-opts",
+		action="store_true",
+		help="do not use the QUICKPKG_DEFAULT_OPTS environment variable")
+	parser.add_option("--include-config",
+		type="choice",
+		choices=["y","n"],
+		default="n",
+		metavar="<y|n>",
+		help="include all files protected by CONFIG_PROTECT (as a security precaution, default is 'n')")
+	parser.add_option("--include-unmodified-config",
+		type="choice",
+		choices=["y","n"],
+		default="n",
+		metavar="<y|n>",
+		help="include files protected by CONFIG_PROTECT that have not been modified since installation (as a security precaution, default is 'n')")
+	options, args = parser.parse_args(sys.argv[1:])
+	if not options.ignore_default_opts:
+		default_opts = portage.settings.get("QUICKPKG_DEFAULT_OPTS","").split()
+		options, args = parser.parse_args(default_opts + sys.argv[1:])
+	if not args:
+		parser.error("no package atoms given")
+	try:
+		umask = int(options.umask, 8)
+	except ValueError:
+		parser.error("invalid umask: %s" % options.umask)
+	# We need to ensure a sane umask for the packages that will be created.
+	old_umask = os.umask(umask)
+	eout = portage.output.EOutput()
+	def sigwinch_handler(signum, frame):
+		lines, eout.term_columns = portage.output.get_term_size()
+	signal.signal(signal.SIGWINCH, sigwinch_handler)
+	try:
+		retval = quickpkg_main(options, args, eout)
+	finally:
+		os.umask(old_umask)
+		signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+	sys.exit(retval)

diff --git a/portage_with_autodep/bin/regenworld b/portage_with_autodep/bin/regenworld
new file mode 100755
index 0000000..6b5af4c
--- /dev/null
+++ b/portage_with_autodep/bin/regenworld
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+try:
+	import portage
+except ImportError:
+	from os import path as osp
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+	import portage
+
+from portage import os
+from portage._sets.files import StaticFileSet, WorldSelectedSet
+
+import re
+import tempfile
+import textwrap
+
+__candidatematcher__ = re.compile("^[0-9]+: \\*\\*\\* emerge ")
+__noncandidatematcher__ = re.compile(" sync( |$)| clean( |$)| search( |$)|--oneshot|--fetchonly| unmerge( |$)")
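+# e.g. "1234567890: *** emerge app-misc/foo" counts as a candidate, while
+# lines containing " sync", " unmerge", "--oneshot" etc. are filtered out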
+
+def issyspkg(pkgline):
+	return (pkgline[0] == "*")
+
+def iscandidate(logline):
+	return (__candidatematcher__.match(logline) \
+				and not __noncandidatematcher__.search(logline))
+
+def getpkginfo(logline):
+	logline = re.sub("^[0-9]+: \\*\\*\\* emerge ", "", logline)
+	logline = logline.strip()
+	logline = re.sub("(\\S+\\.(ebuild|tbz2))|(--\\S+)|inject ", "", logline)
+	return logline.strip()
+
+__uniqlist__ = []
+def isunwanted(pkgline):
+	if pkgline in ["world", "system", "depclean", "info", "regen", ""]:
+		return False
+	elif pkgline in __uniqlist__:
+		return False
+	elif not re.search("^[a-zA-Z<>=~]", pkgline):
+		return False
+	else:
+		__uniqlist__.append(pkgline)
+		return True
+
+root = portage.settings['ROOT']
+eroot = portage.settings['EROOT']
+world_file = os.path.join(eroot, portage.WORLD_FILE)
+
+# show a little description if we have arguments
+if len(sys.argv) >= 2 and sys.argv[1] in ["-h", "--help"]:
+	print("This script regenerates the portage world file by checking the portage")
+	print("logfile for all actions that you've done in the past. It ignores any")
+	print("arguments except --help. It is recommended that you make a backup of")
+	print("your existing world file (%s) before using this tool." % world_file)
+	sys.exit(0)
+
+worldlist = portage.grabfile(world_file)
+syslist = [x for x in portage.settings.packages if issyspkg(x)]
+
+logfile = portage.grabfile(os.path.join(eroot, "var/log/emerge.log"))
+biglist = [getpkginfo(x) for x in logfile if iscandidate(x)]
+tmplist = []
+for l in biglist:
+	tmplist += l.split()
+biglist = [x for x in tmplist if isunwanted(x)]
+#for p in biglist:
+#	print(p)
+#sys.exit(0)
+
+# resolving virtuals
+realsyslist = []
+for mykey in syslist:
+	# drop the asterix
+	# drop the asterisk
+	#print("candidate:",mykey)
+	mylist = portage.db[root]["vartree"].dbapi.match(mykey)
+	if mylist:
+		mykey=portage.cpv_getkey(mylist[0])
+		if mykey not in realsyslist:
+			realsyslist.append(mykey)
+
+for mykey in biglist:
+	#print("checking:",mykey)
+	try:
+		mylist = portage.db[root]["vartree"].dbapi.match(mykey)
+	except (portage.exception.InvalidAtom, KeyError):
+		if "--debug" in sys.argv:
+			print("* ignoring broken log entry for %s (likely injected)" % mykey)
+	except ValueError as e:
+		try:
+			print("* %s is an ambiguous package name, candidates are:\n%s" % (mykey, e))
+		except AttributeError:
+			# FIXME: Find out what causes this (bug #344845).
+			print("* %s is an ambiguous package name" % (mykey,))
+		continue
+	if mylist:
+		#print "mylist:",mylist
+		myfavkey=portage.cpv_getkey(mylist[0])
+		if (myfavkey not in realsyslist) and (myfavkey not in worldlist):
+			print("add to world:",myfavkey)
+			worldlist.append(myfavkey)
+
+if worldlist:
+	existing_set = WorldSelectedSet(eroot)
+	existing_set.load()
+
+	if not existing_set:
+		existing_set.replace(worldlist)
+	else:
+		old_world = existing_set._filename
+		fd, tmp_filename = tempfile.mkstemp(suffix=".tmp",
+			prefix=os.path.basename(old_world) + ".",
+			dir=os.path.dirname(old_world))
+		os.close(fd)
+
+		new_set = StaticFileSet(tmp_filename)
+		new_set.update(worldlist)
+
+		if existing_set.getAtoms() == new_set.getAtoms():
+			os.unlink(tmp_filename)
+		else:
+			new_set.write()
+
+			msg = "Please review differences between old and new files, " + \
+				"and replace the old file if desired."
+
+			portage.util.writemsg_stdout("\n",
+				noiselevel=-1)
+			for line in textwrap.wrap(msg, 65):
+				portage.util.writemsg_stdout("%s\n" % line,
+					noiselevel=-1)
+			portage.util.writemsg_stdout("\n",
+				noiselevel=-1)
+			portage.util.writemsg_stdout("  old: %s\n\n" % old_world,
+				noiselevel=-1)
+			portage.util.writemsg_stdout("  new: %s\n\n" % tmp_filename,
+				noiselevel=-1)

diff --git a/portage_with_autodep/bin/repoman b/portage_with_autodep/bin/repoman
new file mode 100755
index 0000000..f1fbc24
--- /dev/null
+++ b/portage_with_autodep/bin/repoman
@@ -0,0 +1,2672 @@
+#!/usr/bin/python -O
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Next to do: dep syntax checking in mask files
+# Then, check to make sure deps are satisfiable (to avoid "can't find match for" problems)
+# that last one is tricky because multiple profiles need to be checked.
+
+from __future__ import print_function
+
+import calendar
+import copy
+import errno
+import formatter
+import io
+import logging
+import optparse
+import re
+import signal
+import stat
+import sys
+import tempfile
+import time
+import platform
+
+try:
+	from urllib.request import urlopen as urllib_request_urlopen
+except ImportError:
+	from urllib import urlopen as urllib_request_urlopen
+
+from itertools import chain
+from stat import S_ISDIR
+
+try:
+	import portage
+except ImportError:
+	from os import path as osp
+	sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
+	import portage
+portage._disable_legacy_globals()
+portage.dep._internal_warnings = True
+
+try:
+	import xml.etree.ElementTree
+	from xml.parsers.expat import ExpatError
+except ImportError:
+	msg = ["Please enable python's \"xml\" USE flag in order to use repoman."]
+	from portage.output import EOutput
+	out = EOutput()
+	for line in msg:
+		out.eerror(line)
+	sys.exit(1)
+
+from portage import os
+from portage import subprocess_getstatusoutput
+from portage import _encodings
+from portage import _unicode_encode
+from repoman.checks import run_checks
+from repoman import utilities
+from repoman.herdbase import make_herd_base
+from _emerge.Package import Package
+from _emerge.RootConfig import RootConfig
+from _emerge.userquery import userquery
+import portage.checksum
+import portage.const
+from portage import cvstree, normalize_path
+from portage import util
+from portage.exception import (FileNotFound, MissingParameter,
+	ParseError, PermissionDenied)
+from portage.manifest import Manifest
+from portage.process import find_binary, spawn
+from portage.output import bold, create_color_func, \
+	green, nocolor, red
+from portage.output import ConsoleStyleFile, StyleWriter
+from portage.util import cmp_sort_key, writemsg_level
+from portage.package.ebuild.digestgen import digestgen
+from portage.eapi import eapi_has_slot_deps, \
+	eapi_has_use_deps, eapi_has_strong_blocks, eapi_has_iuse_defaults, \
+	eapi_has_required_use, eapi_has_use_dep_defaults
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+util.initialize_logger()
+
+# Maximum allowed DESCRIPTION length, enforced by the DESCRIPTION.toolong check.
+max_desc_len = 100
+allowed_filename_chars="a-zA-Z0-9._-+:"
+disallowed_filename_chars_re = re.compile(r'[^a-zA-Z0-9._\-+:]')
+pv_toolong_re = re.compile(r'[0-9]{19,}')
+bad = create_color_func("BAD")
+
+# A sane umask is needed for files that portage creates.
+os.umask(0o22)
+# Repoman sets its own ACCEPT_KEYWORDS and we don't want it to
+# behave incrementally.
+repoman_incrementals = tuple(x for x in \
+	portage.const.INCREMENTALS if x != 'ACCEPT_KEYWORDS')
+repoman_settings = portage.config(local_config=False)
+repoman_settings.lock()
+
+if repoman_settings.get("NOCOLOR", "").lower() in ("yes", "true") or \
+	repoman_settings.get('TERM') == 'dumb' or \
+	not sys.stdout.isatty():
+	nocolor()
+
+def warn(txt):
+	print("repoman: " + txt)
+
+def err(txt):
+	warn(txt)
+	sys.exit(1)
+
+def exithandler(signum=None, frame=None):
+	logging.fatal("Interrupted; exiting...")
+	if signum is None:
+		sys.exit(1)
+	else:
+		sys.exit(128 + signum)
+
+signal.signal(signal.SIGINT,exithandler)
+
+class RepomanHelpFormatter(optparse.IndentedHelpFormatter):
+	"""Repoman needs it's own HelpFormatter for now, because the default ones
+	murder the help text."""
+	
+	def __init__(self, indent_increment=1, max_help_position=24, width=150, short_first=1):
+		optparse.HelpFormatter.__init__(self, indent_increment, max_help_position, width, short_first)
+
+	def format_description(self, description):
+		return description
+
+class RepomanOptionParser(optparse.OptionParser):
+	"""Add the on_tail function, ruby has it, optionParser should too
+	"""
+	
+	def __init__(self, *args, **kwargs):
+		optparse.OptionParser.__init__(self, *args, **kwargs)
+		self.tail = ""
+
+	def on_tail(self, description):
+		self.tail += description
+
+	def format_help(self, formatter=None):
+		result = optparse.OptionParser.format_help(self, formatter)
+		result += self.tail
+		return result
+
+
+def ParseArgs(argv, qahelp):
+	"""This function uses a customized optionParser to parse command line arguments for repoman
+	Args:
+	  argv - a sequence of command line arguments
+		qahelp - a dict of qa warning to help message
+	Returns:
+	  (opts, args), just like a call to parser.parse_args()
+	"""
+
+	if argv and sys.hexversion < 0x3000000 and not isinstance(argv[0], unicode):
+		argv = [portage._unicode_decode(x) for x in argv]
+
+	modes = {
+		'commit' : 'Run a scan then commit changes',
+		'ci' : 'Run a scan then commit changes',
+		'fix' : 'Fix simple QA issues (stray digests, missing digests)',
+		'full' : 'Scan directory tree and print all issues (not a summary)',
+		'help' : 'Show this screen',
+		'manifest' : 'Generate a Manifest (fetches files if necessary)',
+		'manifest-check' : 'Check Manifests for missing or incorrect digests',
+		'scan' : 'Scan directory tree for QA issues' 
+	}
+
+	mode_keys = list(modes)
+	mode_keys.sort()
+
+	parser = RepomanOptionParser(formatter=RepomanHelpFormatter(), usage="%prog [options] [mode]")
+	parser.description = green(" ".join((os.path.basename(argv[0]), "1.2")))
+	parser.description += "\nCopyright 1999-2007 Gentoo Foundation"
+	parser.description += "\nDistributed under the terms of the GNU General Public License v2"
+	parser.description += "\nmodes: " + " | ".join(map(green,mode_keys))
+
+	parser.add_option('-a', '--ask', dest='ask', action='store_true', default=False,
+		help='Request a confirmation before committing')
+
+	parser.add_option('-m', '--commitmsg', dest='commitmsg',
+		help='specify a commit message on the command line')
+
+	parser.add_option('-M', '--commitmsgfile', dest='commitmsgfile',
+		help='specify a path to a file that contains a commit message')
+
+	parser.add_option('-p', '--pretend', dest='pretend', default=False,
+		action='store_true', help='don\'t commit or fix anything; just show what would be done')
+	
+	parser.add_option('-q', '--quiet', dest="quiet", action="count", default=0,
+		help='do not print unnecessary messages')
+
+	parser.add_option('-f', '--force', dest='force', default=False, action='store_true',
+		help='Commit with QA violations')
+
+	parser.add_option('--vcs', dest='vcs',
+		help='Force using specific VCS instead of autodetection')
+
+	parser.add_option('-v', '--verbose', dest="verbosity", action='count',
+		help='be very verbose in output', default=0)
+
+	parser.add_option('-V', '--version', dest='version', action='store_true',
+		help='show version info')
+
+	parser.add_option('-x', '--xmlparse', dest='xml_parse', action='store_true',
+		default=False, help='forces the metadata.xml parse check to be carried out')
+
+	parser.add_option('-i', '--ignore-arches', dest='ignore_arches', action='store_true',
+		default=False, help='ignore arch-specific failures (where arch != host)')
+
+	parser.add_option('-I', '--ignore-masked', dest='ignore_masked', action='store_true',
+		default=False, help='ignore masked packages (not allowed with commit mode)')
+
+	parser.add_option('-d', '--include-dev', dest='include_dev', action='store_true',
+		default=False, help='include dev profiles in dependency checks')
+
+	parser.add_option('--unmatched-removal', dest='unmatched_removal', action='store_true',
+		default=False, help='enable strict checking of package.mask and package.unmask files for unmatched removal atoms')
+
+	parser.add_option('--without-mask', dest='without_mask', action='store_true',
+		default=False, help='behave as if no package.mask entries exist (not allowed with commit mode)')
+
+	parser.add_option('--mode', type='choice', dest='mode', choices=list(modes), 
+		help='specify which mode repoman will run in (default=full)')
+
+	parser.on_tail("\n " + green("Modes".ljust(20) + " Description\n"))
+
+	for k in mode_keys:
+		parser.on_tail(" %s %s\n" % (k.ljust(20), modes[k]))
+
+	parser.on_tail("\n " + green("QA keyword".ljust(20) + " Description\n"))
+
+	sorted_qa = list(qahelp)
+	sorted_qa.sort()
+	for k in sorted_qa:
+		parser.on_tail(" %s %s\n" % (k.ljust(20), qahelp[k]))
+
+	opts, args = parser.parse_args(argv[1:])
+
+	if opts.mode == 'help':
+		parser.print_help()
+		sys.exit(0)
+
+	for arg in args:
+		if arg in modes:
+			if not opts.mode:
+				opts.mode = arg
+				break
+		else:
+			parser.error("invalid mode: %s" % arg)
+
+	if not opts.mode:
+		opts.mode = 'full'
+	
+	if opts.mode == 'ci':
+		opts.mode = 'commit'  # backwards compat shortcut
+
+	if opts.mode == 'commit' and not (opts.force or opts.pretend):
+		if opts.ignore_masked:
+			parser.error('Commit mode and --ignore-masked are not compatible')
+		if opts.without_mask:
+			parser.error('Commit mode and --without-mask are not compatible')
+
+	# Use the verbosity and quiet options to fiddle with the loglevel appropriately
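+	# Each -v lowers the logging threshold by one level (10); each -q raises it.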
+	for val in range(opts.verbosity):
+		logger = logging.getLogger()
+		logger.setLevel(logger.getEffectiveLevel() - 10)
+
+	for val in range(opts.quiet):
+		logger = logging.getLogger()
+		logger.setLevel(logger.getEffectiveLevel() + 10)
+
+	return (opts, args)
+
+qahelp={
+	"CVS/Entries.IO_error":"Attempting to commit, and an IO error was encountered access the Entries file",
+	"desktop.invalid":"desktop-file-validate reports errors in a *.desktop file",
+	"ebuild.invalidname":"Ebuild files with a non-parseable or syntactically incorrect name (or using 2.1 versioning extensions)",
+	"ebuild.namenomatch":"Ebuild files that do not have the same name as their parent directory",
+	"changelog.ebuildadded":"An ebuild was added but the ChangeLog was not modified",
+	"changelog.missing":"Missing ChangeLog files",
+	"ebuild.notadded":"Ebuilds that exist but have not been added to cvs",
+	"ebuild.patches":"PATCHES variable should be a bash array to ensure white space safety",
+	"changelog.notadded":"ChangeLogs that exist but have not been added to cvs",
+	"dependency.unknown" : "Ebuild has a dependency that refers to an unknown package (which may be provided by an overlay)",
+	"file.executable":"Ebuilds, digests, metadata.xml, Manifest, and ChangeLog do note need the executable bit",
+	"file.size":"Files in the files directory must be under 20 KiB",
+	"file.size.fatal":"Files in the files directory must be under 60 KiB",
+	"file.name":"File/dir name must be composed of only the following chars: %s " % allowed_filename_chars,
+	"file.UTF8":"File is not UTF8 compliant",
+	"inherit.autotools":"Ebuild inherits autotools but does not call eautomake, eautoconf or eautoreconf",
+	"inherit.deprecated":"Ebuild inherits a deprecated eclass",
+	"java.eclassesnotused":"With virtual/jdk in DEPEND you must inherit a java eclass",
+	"wxwidgets.eclassnotused":"Ebuild DEPENDs on x11-libs/wxGTK without inheriting wxwidgets.eclass",
+	"KEYWORDS.dropped":"Ebuilds that appear to have dropped KEYWORDS for some arch",
+	"KEYWORDS.missing":"Ebuilds that have a missing or empty KEYWORDS variable",
+	"KEYWORDS.stable":"Ebuilds that have been added directly with stable KEYWORDS",
+	"KEYWORDS.stupid":"Ebuilds that use KEYWORDS=-* instead of package.mask", 
+	"LICENSE.missing":"Ebuilds that have a missing or empty LICENSE variable",
+	"LICENSE.virtual":"Virtuals that have a non-empty LICENSE variable",
+	"DESCRIPTION.missing":"Ebuilds that have a missing or empty DESCRIPTION variable",
+	"DESCRIPTION.toolong":"DESCRIPTION is over %d characters" % max_desc_len,
+	"EAPI.definition":"EAPI is defined after an inherit call (must be defined before)",
+	"EAPI.deprecated":"Ebuilds that use features that are deprecated in the current EAPI",
+	"EAPI.incompatible":"Ebuilds that use features that are only available with a different EAPI",
+	"EAPI.unsupported":"Ebuilds that have an unsupported EAPI version (you must upgrade portage)",
+	"SLOT.invalid":"Ebuilds that have a missing or invalid SLOT variable value",
+	"HOMEPAGE.missing":"Ebuilds that have a missing or empty HOMEPAGE variable",
+	"HOMEPAGE.virtual":"Virtuals that have a non-empty HOMEPAGE variable",
+	"DEPEND.bad":"User-visible ebuilds with bad DEPEND settings (matched against *visible* ebuilds)",
+	"RDEPEND.bad":"User-visible ebuilds with bad RDEPEND settings (matched against *visible* ebuilds)",
+	"PDEPEND.bad":"User-visible ebuilds with bad PDEPEND settings (matched against *visible* ebuilds)",
+	"DEPEND.badmasked":"Masked ebuilds with bad DEPEND settings (matched against *all* ebuilds)",
+	"RDEPEND.badmasked":"Masked ebuilds with RDEPEND settings (matched against *all* ebuilds)",
+	"PDEPEND.badmasked":"Masked ebuilds with PDEPEND settings (matched against *all* ebuilds)",
+	"DEPEND.badindev":"User-visible ebuilds with bad DEPEND settings (matched against *visible* ebuilds) in developing arch",
+	"RDEPEND.badindev":"User-visible ebuilds with bad RDEPEND settings (matched against *visible* ebuilds) in developing arch",
+	"PDEPEND.badindev":"User-visible ebuilds with bad PDEPEND settings (matched against *visible* ebuilds) in developing arch",
+	"DEPEND.badmaskedindev":"Masked ebuilds with bad DEPEND settings (matched against *all* ebuilds) in developing arch",
+	"RDEPEND.badmaskedindev":"Masked ebuilds with RDEPEND settings (matched against *all* ebuilds) in developing arch",
+	"PDEPEND.badmaskedindev":"Masked ebuilds with PDEPEND settings (matched against *all* ebuilds) in developing arch",
+	"PDEPEND.suspect":"PDEPEND contains a package that usually only belongs in DEPEND.",
+	"DEPEND.syntax":"Syntax error in DEPEND (usually an extra/missing space/parenthesis)",
+	"RDEPEND.syntax":"Syntax error in RDEPEND (usually an extra/missing space/parenthesis)",
+	"PDEPEND.syntax":"Syntax error in PDEPEND (usually an extra/missing space/parenthesis)",
+	"DEPEND.badtilde":"DEPEND uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)",
+	"RDEPEND.badtilde":"RDEPEND uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)",
+	"PDEPEND.badtilde":"PDEPEND uses the ~ dep operator with a non-zero revision part, which is useless (the revision is ignored)",
+	"LICENSE.syntax":"Syntax error in LICENSE (usually an extra/missing space/parenthesis)",
+	"PROVIDE.syntax":"Syntax error in PROVIDE (usually an extra/missing space/parenthesis)",
+	"PROPERTIES.syntax":"Syntax error in PROPERTIES (usually an extra/missing space/parenthesis)",
+	"RESTRICT.syntax":"Syntax error in RESTRICT (usually an extra/missing space/parenthesis)",
+	"REQUIRED_USE.syntax":"Syntax error in REQUIRED_USE (usually an extra/missing space/parenthesis)",
+	"SRC_URI.syntax":"Syntax error in SRC_URI (usually an extra/missing space/parenthesis)",
+	"SRC_URI.mirror":"A uri listed in profiles/thirdpartymirrors is found in SRC_URI",
+	"ebuild.syntax":"Error generating cache entry for ebuild; typically caused by ebuild syntax error or digest verification failure",
+	"ebuild.output":"A simple sourcing of the ebuild produces output; this breaks ebuild policy.",
+	"ebuild.nesteddie":"Placing 'die' inside ( ) prints an error, but doesn't stop the ebuild.",
+	"variable.invalidchar":"A variable contains an invalid character that is not part of the ASCII character set",
+	"variable.readonly":"Assigning a readonly variable",
+	"variable.usedwithhelpers":"Ebuild uses D, ROOT, ED, EROOT or EPREFIX with helpers",
+	"LIVEVCS.stable":"This ebuild is a live checkout from a VCS but has stable keywords.",
+	"LIVEVCS.unmasked":"This ebuild is a live checkout from a VCS but has keywords and is not masked in the global package.mask.",
+	"IUSE.invalid":"This ebuild has a variable in IUSE that is not in the use.desc or its metadata.xml file",
+	"IUSE.missing":"This ebuild has a USE conditional which references a flag that is not listed in IUSE",
+	"IUSE.undefined":"This ebuild does not define IUSE (style guideline says to define IUSE even when empty)",
+	"LICENSE.invalid":"This ebuild is listing a license that doesnt exist in portages license/ dir.",
+	"KEYWORDS.invalid":"This ebuild contains KEYWORDS that are not listed in profiles/arch.list or for which no valid profile was found",
+	"RDEPEND.implicit":"RDEPEND is unset in the ebuild which triggers implicit RDEPEND=$DEPEND assignment (prior to EAPI 4)",
+	"RDEPEND.suspect":"RDEPEND contains a package that usually only belongs in DEPEND.",
+	"RESTRICT.invalid":"This ebuild contains invalid RESTRICT values.",
+	"digest.assumed":"Existing digest must be assumed correct (Package level only)",
+	"digest.missing":"Some files listed in SRC_URI aren't referenced in the Manifest",
+	"digest.unused":"Some files listed in the Manifest aren't referenced in SRC_URI",
+	"ebuild.nostable":"There are no ebuilds that are marked as stable for your ARCH",
+	"ebuild.allmasked":"All ebuilds are masked for this package (Package level only)",
+	"ebuild.majorsyn":"This ebuild has a major syntax error that may cause the ebuild to fail partially or fully",
+	"ebuild.minorsyn":"This ebuild has a minor syntax error that contravenes gentoo coding style",
+	"ebuild.badheader":"This ebuild has a malformed header",
+	"eprefixify.defined":"The ebuild uses eprefixify, but does not inherit the prefix eclass",
+	"manifest.bad":"Manifest has missing or incorrect digests",
+	"metadata.missing":"Missing metadata.xml files",
+	"metadata.bad":"Bad metadata.xml files",
+	"metadata.warning":"Warnings in metadata.xml files",
+	"portage.internal":"The ebuild uses an internal Portage function",
+	"virtual.oldstyle":"The ebuild PROVIDEs an old-style virtual (see GLEP 37)",
+	"usage.obsolete":"The ebuild makes use of an obsolete construct",
+	"upstream.workaround":"The ebuild works around an upstream bug, an upstream bug should be filed and tracked in bugs.gentoo.org"
+}
+
+qacats = list(qahelp)
+qacats.sort()
+
+qawarnings = set((
+"changelog.missing",
+"changelog.notadded",
+"dependency.unknown",
+"digest.assumed",
+"digest.unused",
+"ebuild.notadded",
+"ebuild.nostable",
+"ebuild.allmasked",
+"ebuild.nesteddie",
+"desktop.invalid",
+"DEPEND.badmasked","RDEPEND.badmasked","PDEPEND.badmasked",
+"DEPEND.badindev","RDEPEND.badindev","PDEPEND.badindev",
+"DEPEND.badmaskedindev","RDEPEND.badmaskedindev","PDEPEND.badmaskedindev",
+"DEPEND.badtilde", "RDEPEND.badtilde", "PDEPEND.badtilde",
+"DESCRIPTION.toolong",
+"EAPI.deprecated",
+"HOMEPAGE.virtual",
+"LICENSE.virtual",
+"KEYWORDS.dropped",
+"KEYWORDS.stupid",
+"KEYWORDS.missing",
+"IUSE.undefined",
+"PDEPEND.suspect",
+"RDEPEND.implicit",
+"RDEPEND.suspect",
+"RESTRICT.invalid",
+"SRC_URI.mirror",
+"ebuild.minorsyn",
+"ebuild.badheader",
+"ebuild.patches",
+"file.size",
+"inherit.autotools",
+"inherit.deprecated",
+"java.eclassesnotused",
+"wxwidgets.eclassnotused",
+"metadata.warning",
+"portage.internal",
+"usage.obsolete",
+"upstream.workaround",
+"virtual.oldstyle",
+"LIVEVCS.stable",
+"LIVEVCS.unmasked",
+))
+
+non_ascii_re = re.compile(r'[^\x00-\x7f]')
+
+missingvars = ["KEYWORDS", "LICENSE", "DESCRIPTION", "HOMEPAGE"]
+allvars = set(x for x in portage.auxdbkeys if not x.startswith("UNUSED_"))
+allvars.update(Package.metadata_keys)
+allvars = sorted(allvars)
+commitmessage=None
+for x in missingvars:
+	x += ".missing"
+	if x not in qacats:
+		logging.warn('* missingvars values need to be added to qahelp ("%s")' % x)
+		qacats.append(x)
+		qawarnings.add(x)
+
+valid_restrict = frozenset(["binchecks", "bindist",
+	"fetch", "installsources", "mirror",
+	"primaryuri", "strip", "test", "userpriv"])
+
+live_eclasses = frozenset([
+	"bzr",
+	"cvs",
+	"darcs",
+	"git",
+	"git-2",
+	"mercurial",
+	"subversion",
+	"tla",
+])
+
+suspect_rdepend = frozenset([
+	"app-arch/cabextract",
+	"app-arch/rpm2targz",
+	"app-doc/doxygen",
+	"dev-lang/nasm",
+	"dev-lang/swig",
+	"dev-lang/yasm",
+	"dev-perl/extutils-pkgconfig",
+	"dev-util/byacc",
+	"dev-util/cmake",
+	"dev-util/ftjam",
+	"dev-util/gperf",
+	"dev-util/gtk-doc",
+	"dev-util/gtk-doc-am",
+	"dev-util/intltool",
+	"dev-util/jam",
+	"dev-util/pkgconfig",
+	"dev-util/scons",
+	"dev-util/unifdef",
+	"dev-util/yacc",
+	"media-gfx/ebdftopcf",
+	"sys-apps/help2man",
+	"sys-devel/autoconf",
+	"sys-devel/automake",
+	"sys-devel/bin86",
+	"sys-devel/bison",
+	"sys-devel/dev86",
+	"sys-devel/flex",
+	"sys-devel/m4",
+	"sys-devel/pmake",
+	"virtual/linux-sources",
+	"x11-misc/bdftopcf",
+	"x11-misc/imake",
+])
+
+metadata_dtd_uri = 'http://www.gentoo.org/dtd/metadata.dtd'
+# force refetch if the local copy creation time is older than this
+metadata_dtd_ctime_interval = 60 * 60 * 24 * 7 # 7 days
+
+# file.executable
+no_exec = frozenset(["Manifest","ChangeLog","metadata.xml"])
+
+options, arguments = ParseArgs(sys.argv, qahelp)
+
+if options.version:
+	print("Portage", portage.VERSION)
+	sys.exit(0)
+
+# Set this to False when an extraordinary issue (generally
+# something other than a QA issue) makes it impossible to
+# commit (like if Manifest generation fails).
+can_force = True
+
+portdir, portdir_overlay, mydir = utilities.FindPortdir(repoman_settings)
+if portdir is None:
+	sys.exit(1)
+
+myreporoot = os.path.basename(portdir_overlay)
+myreporoot += mydir[len(portdir_overlay):]
+
+if options.vcs:
+	if options.vcs in ('cvs', 'svn', 'git', 'bzr', 'hg'):
+		vcs = options.vcs
+	else:
+		vcs = None
+else:
+	vcses = utilities.FindVCS()
+	if len(vcses) > 1:
+		print(red('*** Ambiguous workdir -- more than one VCS found at the same depth: %s.' % ', '.join(vcses)))
+		print(red('*** Please either clean up your workdir or specify --vcs option.'))
+		sys.exit(1)
+	elif vcses:
+		vcs = vcses[0]
+	else:
+		vcs = None
+
+# Note: We don't use ChangeLogs in distributed SCMs.
+# They will be generated server side from the scm log
+# before the package moves to the rsync server.
+# This avoids merge collisions on ChangeLog files.
+check_changelog = vcs in ('cvs', 'svn')
+
+# Disable copyright/mtime check if vcs does not preserve mtime (bug #324075).
+vcs_preserves_mtime = vcs not in ('git',)
+
+vcs_local_opts = repoman_settings.get("REPOMAN_VCS_LOCAL_OPTS", "").split()
+vcs_global_opts = repoman_settings.get("REPOMAN_VCS_GLOBAL_OPTS")
+if vcs_global_opts is None:
+	if vcs in ('cvs', 'svn'):
+		vcs_global_opts = "-q"
+	else:
+		vcs_global_opts = ""
+vcs_global_opts = vcs_global_opts.split()
+
+if vcs == "cvs" and \
+	"commit" == options.mode and \
+	"RMD160" not in portage.checksum.hashorigin_map:
+	from portage.util import grablines
+	repo_lines = grablines("./CVS/Repository")
+	if repo_lines and \
+		"gentoo-x86" == repo_lines[0].strip().split(os.path.sep)[0]:
+		msg = "Please install " \
+		"pycrypto or enable python's ssl USE flag in order " \
+		"to enable RMD160 hash support. See bug #198398 for " \
+		"more information."
+		prefix = bad(" * ")
+		from textwrap import wrap
+		for line in wrap(msg, 70):
+			print(prefix + line)
+		sys.exit(1)
+	del repo_lines
+
+if options.mode == 'commit' and not options.pretend and not vcs:
+	logging.info("Not in a version controlled repository; enabling pretend mode.")
+	options.pretend = True
+
+# Ensure that PORTDIR_OVERLAY contains the repository corresponding to $PWD.
+repoman_settings = portage.config(local_config=False)
+repoman_settings['PORTDIR_OVERLAY'] = "%s %s" % \
+	(repoman_settings.get('PORTDIR_OVERLAY', ''), portdir_overlay)
+# We have to call the config constructor again so
+# that config.repositories is initialized correctly.
+repoman_settings = portage.config(local_config=False, env=dict(os.environ,
+	PORTDIR_OVERLAY=repoman_settings['PORTDIR_OVERLAY']))
+
+root = '/'
+trees = {
+	root : {'porttree' : portage.portagetree(root, settings=repoman_settings)}
+}
+portdb = trees[root]['porttree'].dbapi
+
+# Constrain dependency resolution to the master(s)
+# that are specified in layout.conf.
+portdir_overlay = os.path.realpath(portdir_overlay)
+repo_info = portdb._repo_info[portdir_overlay]
+portdb.porttrees = list(repo_info.eclass_db.porttrees)
+portdir = portdb.porttrees[0]
+
+# Generate an appropriate PORTDIR_OVERLAY value for passing into the
+# profile-specific config constructor calls.
+env = os.environ.copy()
+env['PORTDIR'] = portdir
+env['PORTDIR_OVERLAY'] = ' '.join(portdb.porttrees[1:])
+
+logging.info('Setting paths:')
+logging.info('PORTDIR = "' + portdir + '"')
+logging.info('PORTDIR_OVERLAY = "%s"' % env['PORTDIR_OVERLAY'])
+
+# It's confusing if these warnings are displayed without the user
+# being told which profile they come from, so disable them.
+env['FEATURES'] = env.get('FEATURES', '') + ' -unknown-features-warn'
+
+categories = []
+for path in set([portdir, portdir_overlay]):
+	categories.extend(portage.util.grabfile(
+		os.path.join(path, 'profiles', 'categories')))
+repoman_settings.categories = tuple(sorted(
+	portage.util.stack_lists([categories], incremental=1)))
+del categories
+
+portdb.settings = repoman_settings
+root_config = RootConfig(repoman_settings, trees[root], None)
+# We really only need to cache the metadata that's necessary for visibility
+# filtering. Anything else can be discarded to reduce memory consumption.
+portdb._aux_cache_keys.clear()
+portdb._aux_cache_keys.update(["EAPI", "KEYWORDS", "SLOT"])
+
+reposplit = myreporoot.split(os.path.sep)
+repolevel = len(reposplit)
+
+# Check if we are in $PORTDIR/$CATEGORY/$PN; otherwise bail if committing.
+# The reason for this is that if someone tries to commit from just $FILESDIR/*, the Manifest needs updating.
+# This check ensures that repoman knows where it is, and that the manifest recommit is at least possible.
+if options.mode == 'commit' and repolevel not in [1,2,3]:
+	print(red("***")+" Commit attempts *must* be from within a vcs co, category, or package directory.")
+	print(red("***")+" Attempting to commit from a packages files directory will be blocked for instance.")
+	print(red("***")+" This is intended behaviour, to ensure the manifest is recommitted for a package.")
+	print(red("***"))
+	err("Unable to identify level we're commiting from for %s" % '/'.join(reposplit))
+
+startdir = normalize_path(mydir)
+repodir = startdir
+for x in range(0, repolevel - 1):
+	repodir = os.path.dirname(repodir)
+repodir = os.path.realpath(repodir)
+
+def caterror(mycat):
+	err(mycat+" is not an official category. Skipping QA checks in this directory.\nPlease ensure that you add "+mycat+" to "+repodir+"/profiles/categories\nif it is a new category.")
+
+class ProfileDesc(object):
+	__slots__ = ('abs_path', 'arch', 'status', 'sub_path', 'tree_path',)
+	def __init__(self, arch, status, sub_path, tree_path):
+		self.arch = arch
+		self.status = status
+		if sub_path:
+			sub_path = normalize_path(sub_path.lstrip(os.sep))
+		self.sub_path = sub_path
+		self.tree_path = tree_path
+		if tree_path:
+			self.abs_path = os.path.join(tree_path, 'profiles', self.sub_path)
+		else:
+			self.abs_path = tree_path
+
+	def __str__(self):
+		if self.sub_path:
+			return self.sub_path
+		return 'empty profile'
+
+profile_list = []
+valid_profile_types = frozenset(['dev', 'exp', 'stable'])
+
+# get lists of valid keywords, licenses, and use
+kwlist = set()
+liclist = set()
+uselist = set()
+global_pmasklines = []
+
+for path in portdb.porttrees:
+	try:
+		liclist.update(os.listdir(os.path.join(path, "licenses")))
+	except OSError:
+		pass
+	kwlist.update(portage.grabfile(os.path.join(path,
+		"profiles", "arch.list")))
+
+	use_desc = portage.grabfile(os.path.join(path, 'profiles', 'use.desc'))
+	for x in use_desc:
+		x = x.split()
+		if x:
+			uselist.add(x[0])
+
+	expand_desc_dir = os.path.join(path, 'profiles', 'desc')
+	try:
+		expand_list = os.listdir(expand_desc_dir)
+	except OSError:
+		pass
+	else:
+		for fn in expand_list:
+			if not fn[-5:] == '.desc':
+				continue
+			use_prefix = fn[:-5].lower() + '_'
+			for x in portage.grabfile(os.path.join(expand_desc_dir, fn)):
+				x = x.split()
+				if x:
+					uselist.add(use_prefix + x[0])
+
+	global_pmasklines.append(portage.util.grabfile_package(
+		os.path.join(path, 'profiles', 'package.mask'), recursive=1, verify_eapi=True))
+
+	desc_path = os.path.join(path, 'profiles', 'profiles.desc')
+	try:
+		desc_file = io.open(_unicode_encode(desc_path,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['repo.content'], errors='replace')
+	except EnvironmentError:
+		pass
+	else:
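+		# Each non-comment line of profiles.desc has the form:
+		#   <arch> <profile path> <status>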
+		for i, x in enumerate(desc_file):
+			if x[0] == "#":
+				continue
+			arch = x.split()
+			if len(arch) == 0:
+				continue
+			if len(arch) != 3:
+				err("wrong format: \"" + bad(x.strip()) + "\" in " + \
+					desc_path + " line %d" % (i+1, ))
+			elif arch[0] not in kwlist:
+				err("invalid arch: \"" + bad(arch[0]) + "\" in " + \
+					desc_path + " line %d" % (i+1, ))
+			elif arch[2] not in valid_profile_types:
+				err("invalid profile type: \"" + bad(arch[2]) + "\" in " + \
+					desc_path + " line %d" % (i+1, ))
+			profile_desc = ProfileDesc(arch[0], arch[2], arch[1], path)
+			if not os.path.isdir(profile_desc.abs_path):
+				logging.error(
+					"Invalid %s profile (%s) for arch %s in %s line %d",
+					arch[2], arch[1], arch[0], desc_path, i+1)
+				continue
+			if os.path.exists(
+				os.path.join(profile_desc.abs_path, 'deprecated')):
+				continue
+			profile_list.append(profile_desc)
+		desc_file.close()
+
+repoman_settings['PORTAGE_ARCHLIST'] = ' '.join(sorted(kwlist))
+repoman_settings.backup_changes('PORTAGE_ARCHLIST')
+
+global_pmasklines = portage.util.stack_lists(global_pmasklines, incremental=1)
+global_pmaskdict = {}
+for x in global_pmasklines:
+	global_pmaskdict.setdefault(x.cp, []).append(x)
+del global_pmasklines
+
+def has_global_mask(pkg):
+	mask_atoms = global_pmaskdict.get(pkg.cp)
+	if mask_atoms:
+		pkg_list = [pkg]
+		for x in mask_atoms:
+			if portage.dep.match_from_list(x, pkg_list):
+				return x
+	return None
+
+# Ensure that profile sub_path attributes are unique. Process in reverse order
+# so that profiles with duplicate sub_path from overlays will override
+# profiles with the same sub_path from parent repos.
+profiles = {}
+profile_list.reverse()
+profile_sub_paths = set()
+for prof in profile_list:
+	if prof.sub_path in profile_sub_paths:
+		continue
+	profile_sub_paths.add(prof.sub_path)
+	profiles.setdefault(prof.arch, []).append(prof)
+
+# Use an empty profile for checking dependencies of
+# packages that have empty KEYWORDS.
+prof = ProfileDesc('**', 'stable', '', '')
+profiles.setdefault(prof.arch, []).append(prof)
+
+for x in repoman_settings.archlist():
+	if x[0] == "~":
+		continue
+	if x not in profiles:
+		print(red("\""+x+"\" doesn't have a valid profile listed in profiles.desc."))
+		print(red("You need to either \"cvs update\" your profiles dir or follow this"))
+		print(red("up with the "+x+" team."))
+		print()
+
+if not liclist:
+	logging.fatal("Couldn't find licenses?")
+	sys.exit(1)
+
+if not kwlist:
+	logging.fatal("Couldn't read KEYWORDS from arch.list")
+	sys.exit(1)
+
+if not uselist:
+	logging.fatal("Couldn't find use.desc?")
+	sys.exit(1)
+
+scanlist=[]
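+# repolevel 1 = repository root, 2 = category directory, 3 = package directory.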
+if repolevel==2:
+	#we are inside a category directory
+	catdir=reposplit[-1]
+	if catdir not in repoman_settings.categories:
+		caterror(catdir)
+	mydirlist=os.listdir(startdir)
+	for x in mydirlist:
+		if x == "CVS" or x.startswith("."):
+			continue
+		if os.path.isdir(startdir+"/"+x):
+			scanlist.append(catdir+"/"+x)
+	repo_subdir = catdir + os.sep
+elif repolevel==1:
+	for x in repoman_settings.categories:
+		if not os.path.isdir(startdir+"/"+x):
+			continue
+		for y in os.listdir(startdir+"/"+x):
+			if y == "CVS" or y.startswith("."):
+				continue
+			if os.path.isdir(startdir+"/"+x+"/"+y):
+				scanlist.append(x+"/"+y)
+	repo_subdir = ""
+elif repolevel==3:
+	catdir = reposplit[-2]
+	if catdir not in repoman_settings.categories:
+		caterror(catdir)
+	scanlist.append(catdir+"/"+reposplit[-1])
+	repo_subdir = scanlist[-1] + os.sep
+else:
+	msg = 'Repoman is unable to determine PORTDIR or PORTDIR_OVERLAY' + \
+		' from the current working directory'
+	logging.critical(msg)
+	sys.exit(1)
+
+repo_subdir_len = len(repo_subdir)
+scanlist.sort()
+
+logging.debug("Found the following packages to scan:\n%s" % '\n'.join(scanlist))
+
+def dev_keywords(profiles):
+	"""
+	Create a set of KEYWORDS values that exist in 'dev'
+	profiles. These are used
+	to trigger a message notifying the user when they might
+	want to add the --include-dev option.
+	"""
+	type_arch_map = {}
+	for arch, arch_profiles in profiles.items():
+		for prof in arch_profiles:
+			arch_set = type_arch_map.get(prof.status)
+			if arch_set is None:
+				arch_set = set()
+				type_arch_map[prof.status] = arch_set
+			arch_set.add(arch)
+
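+	# Include the testing (~arch) variants so that both stable and testing
+	# keywords trigger the notice.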
+	dev_keywords = type_arch_map.get('dev', set())
+	dev_keywords.update(['~' + arch for arch in dev_keywords])
+	return frozenset(dev_keywords)
+
+dev_keywords = dev_keywords(profiles)
+
+stats={}
+fails={}
+
+# provided by the desktop-file-utils package
+desktop_file_validate = find_binary("desktop-file-validate")
+desktop_pattern = re.compile(r'.*\.desktop$')
+
+for x in qacats:
+	stats[x]=0
+	fails[x]=[]
+
+xmllint_capable = False
+metadata_dtd = os.path.join(repoman_settings["DISTDIR"], 'metadata.dtd')
+
+def parsedate(s):
+	"""Parse an RFC 822 date and time string.
+	This is required for python3 compatibility, since the
+	rfc822.parsedate() function is not available."""
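+	# For example, parsedate("Tue, 15 Nov 1994 12:45:26 GMT") returns a
+	# time.struct_time for 1994-11-15 12:45:26 (the timezone field is ignored).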
+
+	s_split = []
+	for x in s.upper().split():
+		for y in x.split(','):
+			if y:
+				s_split.append(y)
+
+	if len(s_split) != 6:
+		return None
+
+	# %a, %d %b %Y %H:%M:%S %Z
+	a, d, b, Y, H_M_S, Z = s_split
+
+	# Convert month to integer, since strptime %b is locale-dependent.
+	month_map = {'JAN':1, 'FEB':2, 'MAR':3, 'APR':4, 'MAY':5, 'JUN':6,
+		'JUL':7, 'AUG':8, 'SEP':9, 'OCT':10, 'NOV':11, 'DEC':12}
+	m = month_map.get(b)
+	if m is None:
+		return None
+	m = str(m).rjust(2, '0')
+
+	return time.strptime(':'.join((Y, m, d, H_M_S)), '%Y:%m:%d:%H:%M:%S')
+
+def fetch_metadata_dtd():
+	"""
+	Fetch metadata.dtd if it doesn't exist or the ctime is older than
+	metadata_dtd_ctime_interval.
+	@rtype: bool
+	@returns: True if successful, otherwise False
+	"""
+
+	must_fetch = True
+	metadata_dtd_st = None
+	current_time = int(time.time())
+	try:
+		metadata_dtd_st = os.stat(metadata_dtd)
+	except EnvironmentError as e:
+		if e.errno not in (errno.ENOENT, errno.ESTALE):
+			raise
+		del e
+	else:
+		# Trigger a fetch if the metadata.dtd ctime is old or the clock is wrong.
+		if abs(current_time - metadata_dtd_st.st_ctime) \
+			< metadata_dtd_ctime_interval:
+			must_fetch = False
+
+	if must_fetch:
+		print()
+		print(green("***") + " the local copy of metadata.dtd " + \
+			"needs to be refetched, doing that now")
+		print()
+		try:
+			url_f = urllib_request_urlopen(metadata_dtd_uri)
+			msg_info = url_f.info()
+			last_modified = msg_info.get('last-modified')
+			if last_modified is not None:
+				last_modified = parsedate(last_modified)
+				if last_modified is not None:
+					last_modified = calendar.timegm(last_modified)
+
+			metadata_dtd_tmp = "%s.%s" % (metadata_dtd, os.getpid())
+			try:
+				local_f = open(metadata_dtd_tmp, mode='wb')
+				local_f.write(url_f.read())
+				local_f.close()
+				if last_modified is not None:
+					try:
+						os.utime(metadata_dtd_tmp,
+							(int(last_modified), int(last_modified)))
+					except OSError:
+						# This fails on some odd non-unix-like filesystems.
+						# We don't really need the mtime to be preserved
+						# anyway here (currently we use ctime to trigger
+						# fetch), so just ignore it.
+						pass
+				os.rename(metadata_dtd_tmp, metadata_dtd)
+			finally:
+				try:
+					os.unlink(metadata_dtd_tmp)
+				except OSError:
+					pass
+
+			url_f.close()
+
+		except EnvironmentError as e:
+			print()
+			print(red("!!!")+" attempting to fetch '%s', caught" % metadata_dtd_uri)
+			print(red("!!!")+" exception '%s' though." % (e,))
+			print(red("!!!")+" fetching new metadata.dtd failed, aborting")
+			return False
+
+	return True
+
+if options.mode == "manifest":
+	pass
+elif not find_binary('xmllint'):
+	print(red("!!! xmllint not found. Can't check metadata.xml.\n"))
+	if options.xml_parse or repolevel==3:
+		print(red("!!!")+" sorry, xmllint is needed.  failing\n")
+		sys.exit(1)
+else:
+	if not fetch_metadata_dtd():
+		sys.exit(1)
+	# This can be problematic if xmllint changes its output.
+	xmllint_capable=True
+
+if options.mode == 'commit' and vcs:
+	utilities.detect_vcs_conflicts(options, vcs)
+
+if options.mode == "manifest":
+	pass
+elif options.pretend:
+	print(green("\nRepoMan does a once-over of the neighborhood..."))
+else:
+	print(green("\nRepoMan scours the neighborhood..."))
+
+new_ebuilds = set()
+modified_ebuilds = set()
+modified_changelogs = set()
+mychanged = []
+mynew = []
+myremoved = []
+
+if vcs == "cvs":
+	mycvstree = cvstree.getentries("./", recursive=1)
+	mychanged = cvstree.findchanged(mycvstree, recursive=1, basedir="./")
+	mynew = cvstree.findnew(mycvstree, recursive=1, basedir="./")
+if vcs == "svn":
+	svnstatus = os.popen("svn status").readlines()
+	mychanged = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem and elem[:1] in "MR" ]
+	mynew     = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("A") ]
+elif vcs == "git":
+	mychanged = os.popen("git diff-index --name-only --relative --diff-filter=M HEAD").readlines()
+	mychanged = ["./" + elem[:-1] for elem in mychanged]
+
+	mynew = os.popen("git diff-index --name-only --relative --diff-filter=A HEAD").readlines()
+	mynew = ["./" + elem[:-1] for elem in mynew]
+elif vcs == "bzr":
+	bzrstatus = os.popen("bzr status -S .").readlines()
+	mychanged = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and elem[1:2] == "M" ]
+	mynew     = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and ( elem[1:2] == "NK" or elem[0:1] == "R" ) ]
+elif vcs == "hg":
+	mychanged = os.popen("hg status --no-status --modified .").readlines()
+	mychanged = ["./" + elem.rstrip() for elem in mychanged]
+	mynew = os.popen("hg status --no-status --added .").readlines()
+	mynew = ["./" + elem.rstrip() for elem in mynew]
+
+if vcs:
+	new_ebuilds.update(x for x in mynew if x.endswith(".ebuild"))
+	modified_ebuilds.update(x for x in mychanged if x.endswith(".ebuild"))
+	modified_changelogs.update(x for x in chain(mychanged, mynew) \
+		if os.path.basename(x) == "ChangeLog")
+
+have_pmasked = False
+have_dev_keywords = False
+dofail = 0
+arch_caches={}
+arch_xmatch_caches = {}
+shared_xmatch_caches = {"cp-list":{}}
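+# Metadata and match caches, keyed by arch and shared across profiles to
+# speed up the per-profile dependency checks.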
+
+# Disable the "ebuild.notadded" check when not in commit mode and
+# running `svn status` in every package dir will be too expensive.
+
+check_ebuild_notadded = not \
+	(vcs == "svn" and repolevel < 3 and options.mode != "commit")
+
+# Build a list of mirror URLs from thirdpartymirrors for the SRC_URI.mirror check.
+thirdpartymirrors = []
+for v in repoman_settings.thirdpartymirrors().values():
+	thirdpartymirrors.extend(v)
+
+class _MetadataTreeBuilder(xml.etree.ElementTree.TreeBuilder):
+	"""
+	Implements doctype() as required to avoid deprecation warnings with
+	>=python-2.7.
+	"""
+	def doctype(self, name, pubid, system):
+		pass
+
+try:
+	herd_base = make_herd_base(os.path.join(repoman_settings["PORTDIR"], "metadata/herds.xml"))
+except (EnvironmentError, ParseError, PermissionDenied) as e:
+	err(str(e))
+except FileNotFound:
+	# TODO: Download as we do for metadata.dtd, but add a way to
+	# disable for non-gentoo repoman users who may not have herds.
+	herd_base = None
+
+for x in scanlist:
+	# ebuilds added to the VCS are collected in eadded, for the ebuild.notadded check.
+	logging.info("checking package %s" % x)
+	eadded=[]
+	catdir,pkgdir=x.split("/")
+	checkdir=repodir+"/"+x
+	checkdir_relative = ""
+	if repolevel < 3:
+		checkdir_relative = os.path.join(pkgdir, checkdir_relative)
+	if repolevel < 2:
+		checkdir_relative = os.path.join(catdir, checkdir_relative)
+	checkdir_relative = os.path.join(".", checkdir_relative)
+	generated_manifest = False
+
+	if options.mode == "manifest" or \
+	  (options.mode != 'manifest-check' and \
+	  'digest' in repoman_settings.features) or \
+	  options.mode in ('commit', 'fix') and not options.pretend:
+		auto_assumed = set()
+		fetchlist_dict = portage.FetchlistDict(checkdir,
+			repoman_settings, portdb)
+		if options.mode == 'manifest' and options.force:
+			portage._doebuild_manifest_exempt_depend += 1
+			try:
+				distdir = repoman_settings['DISTDIR']
+				mf = portage.manifest.Manifest(checkdir, distdir,
+					fetchlist_dict=fetchlist_dict)
+				mf.create(requiredDistfiles=None,
+					assumeDistHashesAlways=True)
+				for distfiles in fetchlist_dict.values():
+					for distfile in distfiles:
+						if os.path.isfile(os.path.join(distdir, distfile)):
+							mf.fhashdict['DIST'].pop(distfile, None)
+						else:
+							auto_assumed.add(distfile)
+				mf.write()
+			finally:
+				portage._doebuild_manifest_exempt_depend -= 1
+
+		repoman_settings["O"] = checkdir
+		try:
+			generated_manifest = digestgen(
+				mysettings=repoman_settings, myportdb=portdb)
+		except portage.exception.PermissionDenied as e:
+			generated_manifest = False
+			writemsg_level("!!! Permission denied: '%s'\n" % (e,),
+				level=logging.ERROR, noiselevel=-1)
+
+		if not generated_manifest:
+			print("Unable to generate manifest.")
+			dofail = 1
+
+		if options.mode == "manifest":
+			if not dofail and options.force and auto_assumed and \
+				'assume-digests' in repoman_settings.features:
+				# Show which digests were assumed despite the --force option
+				# being given. This output will already have been shown by
+				# digestgen() if assume-digests is not enabled, so only show
+				# it here if assume-digests is enabled.
+				pkgs = list(fetchlist_dict)
+				pkgs.sort()
+				portage.writemsg_stdout("  digest.assumed" + \
+					portage.output.colorize("WARN",
+					str(len(auto_assumed)).rjust(18)) + "\n")
+				for cpv in pkgs:
+					fetchmap = fetchlist_dict[cpv]
+					pf = portage.catsplit(cpv)[1]
+					for distfile in sorted(fetchmap):
+						if distfile in auto_assumed:
+							portage.writemsg_stdout(
+								"   %s::%s\n" % (pf, distfile))
+			continue
+		elif dofail:
+			sys.exit(1)
+
+	if not generated_manifest:
+		repoman_settings['O'] = checkdir
+		repoman_settings['PORTAGE_QUIET'] = '1'
+		if not portage.digestcheck([], repoman_settings, strict=1):
+			stats["manifest.bad"] += 1
+			fails["manifest.bad"].append(os.path.join(x, 'Manifest'))
+		repoman_settings.pop('PORTAGE_QUIET', None)
+
+	if options.mode == 'manifest-check':
+		continue
+
+	checkdirlist=os.listdir(checkdir)
+	ebuildlist=[]
+	pkgs = {}
+	allvalid = True
+	for y in checkdirlist:
+		if (y in no_exec or y.endswith(".ebuild")) and \
+			stat.S_IMODE(os.stat(os.path.join(checkdir, y)).st_mode) & 0o111:
+				stats["file.executable"] += 1
+				fails["file.executable"].append(os.path.join(checkdir, y))
+		if y.endswith(".ebuild"):
+			pf = y[:-7]
+			ebuildlist.append(pf)
+			cpv = "%s/%s" % (catdir, pf)
+			try:
+				myaux = dict(zip(allvars, portdb.aux_get(cpv, allvars)))
+			except KeyError:
+				allvalid = False
+				stats["ebuild.syntax"] += 1
+				fails["ebuild.syntax"].append(os.path.join(x, y))
+				continue
+			except IOError:
+				allvalid = False
+				stats["ebuild.output"] += 1
+				fails["ebuild.output"].append(os.path.join(x, y))
+				continue
+			if not portage.eapi_is_supported(myaux["EAPI"]):
+				allvalid = False
+				stats["EAPI.unsupported"] += 1
+				fails["EAPI.unsupported"].append(os.path.join(x, y))
+				continue
+			pkgs[pf] = Package(cpv=cpv, metadata=myaux,
+				root_config=root_config, type_name="ebuild")
+
+	# Sort ebuilds in ascending order for the KEYWORDS.dropped check.
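+	# portage.pkgcmp() compares pkgsplit() results, so split each name first,
+	# sort on the splits, then map back to the original names.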
+	pkgsplits = {}
+	for i in range(len(ebuildlist)):
+		ebuild_split = portage.pkgsplit(ebuildlist[i])
+		pkgsplits[ebuild_split] = ebuildlist[i]
+		ebuildlist[i] = ebuild_split
+	ebuildlist.sort(key=cmp_sort_key(portage.pkgcmp))
+	for i in range(len(ebuildlist)):
+		ebuildlist[i] = pkgsplits[ebuildlist[i]]
+	del pkgsplits
+
+	slot_keywords = {}
+
+	if len(pkgs) != len(ebuildlist):
+		# If we can't access all the metadata then it's totally unsafe to
+		# commit since there's no way to generate a correct Manifest.
+		# Do not try to do any more QA checks on this package since missing
+		# metadata leads to false positives for several checks, and false
+		# positives confuse users.
+		can_force = False
+		continue
+
+	for y in checkdirlist:
+		m = disallowed_filename_chars_re.search(y.strip(os.sep))
+		if m is not None:
+			stats["file.name"] += 1
+			fails["file.name"].append("%s/%s: char '%s'" % \
+				(checkdir, y, m.group(0)))
+
+		if not (y in ("ChangeLog", "metadata.xml") or y.endswith(".ebuild")):
+			continue
+		try:
+			line = 1
+			for l in io.open(_unicode_encode(os.path.join(checkdir, y),
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['repo.content']):
+				line += 1
+		except UnicodeDecodeError as ue:
+			stats["file.UTF8"] += 1
+			s = ue.object[:ue.start]
+			l2 = s.count("\n")
+			line += l2
+			if l2 != 0:
+				s = s[s.rfind("\n") + 1:]
+			fails["file.UTF8"].append("%s/%s: line %i, just after: '%s'" % (checkdir, y, line, s))
+
+	if vcs in ("git", "hg") and check_ebuild_notadded:
+		if vcs == "git":
+			myf = os.popen("git ls-files --others %s" % \
+				(portage._shell_quote(checkdir_relative),))
+		if vcs == "hg":
+			myf = os.popen("hg status --no-status --unknown %s" % \
+				(portage._shell_quote(checkdir_relative),))
+		for l in myf:
+			if l[:-1][-7:] == ".ebuild":
+				stats["ebuild.notadded"] += 1
+				fails["ebuild.notadded"].append(
+					os.path.join(x, os.path.basename(l[:-1])))
+		myf.close()
+
+	if vcs in ("cvs", "svn", "bzr") and check_ebuild_notadded:
+		try:
+			if vcs == "cvs":
+				myf=open(checkdir+"/CVS/Entries","r")
+			if vcs == "svn":
+				myf = os.popen("svn status --depth=files --verbose " + checkdir)
+			if vcs == "bzr":
+				myf = os.popen("bzr ls -v --kind=file " + checkdir)
+			myl = myf.readlines()
+			myf.close()
+			for l in myl:
+				if vcs == "cvs":
+					if l[0]!="/":
+						continue
+					splitl=l[1:].split("/")
+					if not len(splitl):
+						continue
+					if splitl[0][-7:]==".ebuild":
+						eadded.append(splitl[0][:-7])
+				if vcs == "svn":
+					if l[:1] == "?":
+						continue
+					if l[:7] == '      >':
+						# tree conflict, new in subversion 1.6
+						continue
+					l = l.split()[-1]
+					if l[-7:] == ".ebuild":
+						eadded.append(os.path.basename(l[:-7]))
+				if vcs == "bzr":
+					if l[1:2] == "?":
+						continue
+					l = l.split()[-1]
+					if l[-7:] == ".ebuild":
+						eadded.append(os.path.basename(l[:-7]))
+			if vcs == "svn":
+				myf = os.popen("svn status " + checkdir)
+				myl=myf.readlines()
+				myf.close()
+				for l in myl:
+					if l[0] == "A":
+						l = l.rstrip().split(' ')[-1]
+						if l[-7:] == ".ebuild":
+							eadded.append(os.path.basename(l[:-7]))
+		except IOError:
+			if vcs == "cvs":
+				stats["CVS/Entries.IO_error"] += 1
+				fails["CVS/Entries.IO_error"].append(checkdir+"/CVS/Entries")
+			else:
+				raise
+			continue
+
+	mf = Manifest(checkdir, repoman_settings["DISTDIR"])
+	mydigests=mf.getTypeDigests("DIST")
+
+	fetchlist_dict = portage.FetchlistDict(checkdir, repoman_settings, portdb)
+	myfiles_all = []
+	src_uri_error = False
+	for mykey in fetchlist_dict:
+		try:
+			myfiles_all.extend(fetchlist_dict[mykey])
+		except portage.exception.InvalidDependString as e:
+			src_uri_error = True
+			try:
+				portdb.aux_get(mykey, ["SRC_URI"])
+			except KeyError:
+				# This will be reported as an "ebuild.syntax" error.
+				pass
+			else:
+				stats["SRC_URI.syntax"] = stats["SRC_URI.syntax"] + 1
+				fails["SRC_URI.syntax"].append(
+					"%s.ebuild SRC_URI: %s" % (mykey, e))
+	del fetchlist_dict
+	if not src_uri_error:
+		# This test can produce false positives if SRC_URI could not
+		# be parsed for one or more ebuilds. There's no point in
+		# producing a false error here since the root cause will
+		# produce a valid error elsewhere, such as "SRC_URI.syntax"
+		# or "ebuild.sytax".
+		myfiles_all = set(myfiles_all)
+		for entry in mydigests:
+			if entry not in myfiles_all:
+				stats["digest.unused"] += 1
+				fails["digest.unused"].append(checkdir+"::"+entry)
+		for entry in myfiles_all:
+			if entry not in mydigests:
+				stats["digest.missing"] += 1
+				fails["digest.missing"].append(checkdir+"::"+entry)
+	del myfiles_all
+
+	if os.path.exists(checkdir+"/files"):
+		filesdirlist=os.listdir(checkdir+"/files")
+
+		# recurse through files directory
+		# use filesdirlist as a stack, appending directories as needed so people can't hide > 20k files in a subdirectory.
+		while filesdirlist:
+			y = filesdirlist.pop(0)
+			relative_path = os.path.join(x, "files", y)
+			full_path = os.path.join(repodir, relative_path)
+			try:
+				mystat = os.stat(full_path)
+			except OSError as oe:
+				if oe.errno == errno.ENOENT:
+					# Don't worry about it; the file was likely removed by the fix mode above.
+					continue
+				else:
+					raise
+			if S_ISDIR(mystat.st_mode):
+				# !!! VCS "portability" alert!  Need some function isVcsDir() or the like !!!
+				if y == "CVS" or y == ".svn":
+					continue
+				for z in os.listdir(checkdir+"/files/"+y):
+					if z == "CVS" or z == ".svn":
+						continue
+					filesdirlist.append(y+"/"+z)
+			# Current policy is no files over 20 KiB, these are the checks. File size between
+			# 20 KiB and 60 KiB causes a warning, while file size over 60 KiB causes an error.
+			elif mystat.st_size > 61440:
+				stats["file.size.fatal"] += 1
+				fails["file.size.fatal"].append("("+ str(mystat.st_size//1024) + " KiB) "+x+"/files/"+y)
+			elif mystat.st_size > 20480:
+				stats["file.size"] += 1
+				fails["file.size"].append("("+ str(mystat.st_size//1024) + " KiB) "+x+"/files/"+y)
+
+			m = disallowed_filename_chars_re.search(
+				os.path.basename(y.rstrip(os.sep)))
+			if m is not None:
+				stats["file.name"] += 1
+				fails["file.name"].append("%s/files/%s: char '%s'" % \
+					(checkdir, y, m.group(0)))
+
+			if desktop_file_validate and desktop_pattern.match(y):
+				status, cmd_output = subprocess_getstatusoutput(
+					"'%s' '%s'" % (desktop_file_validate, full_path))
+				if os.WIFEXITED(status) and os.WEXITSTATUS(status) != os.EX_OK:
+					# Note: in the future we may want to grab the
+					# warnings in addition to the errors. We're
+					# just doing errors now since we don't want
+					# to generate too much noise at first.
+					error_re = re.compile(r'.*\s*error:\s*(.*)')
+					for line in cmd_output.splitlines():
+						error_match = error_re.match(line)
+						if error_match is None:
+							continue
+						stats["desktop.invalid"] += 1
+						fails["desktop.invalid"].append(
+							relative_path + ': %s' % error_match.group(1))
+
+	del mydigests
+
+	if check_changelog and "ChangeLog" not in checkdirlist:
+		stats["changelog.missing"]+=1
+		fails["changelog.missing"].append(x+"/ChangeLog")
+	
+	musedict = {}
+	#metadata.xml file check
+	if "metadata.xml" not in checkdirlist:
+		stats["metadata.missing"]+=1
+		fails["metadata.missing"].append(x+"/metadata.xml")
+	#metadata.xml parse check
+	else:
+		metadata_bad = False
+
+		# read metadata.xml into memory
+		try:
+			_metadata_xml = xml.etree.ElementTree.parse(
+				os.path.join(checkdir, "metadata.xml"),
+				parser=xml.etree.ElementTree.XMLParser(
+					target=_MetadataTreeBuilder()))
+		except (ExpatError, SyntaxError, EnvironmentError) as e:
+			metadata_bad = True
+			stats["metadata.bad"] += 1
+			fails["metadata.bad"].append("%s/metadata.xml: %s" % (x, e))
+			del e
+		else:
+			# load USE flags from metadata.xml
+			try:
+				musedict = utilities.parse_metadata_use(_metadata_xml)
+			except portage.exception.ParseError as e:
+				metadata_bad = True
+				stats["metadata.bad"] += 1
+				fails["metadata.bad"].append("%s/metadata.xml: %s" % (x, e))
+
+			# Run other metadata.xml checkers
+			try:
+				utilities.check_metadata(_metadata_xml, herd_base)
+			except (utilities.UnknownHerdsError, ) as e:
+				metadata_bad = True
+				stats["metadata.bad"] += 1
+				fails["metadata.bad"].append("%s/metadata.xml: %s" % (x, e))
+				del e
+
+		# Only run the xmllint check when xmllint is available and metadata.xml
+		# parsed cleanly above.
+		if xmllint_capable and not metadata_bad:
+			# xmllint can produce garbage output even on success, so only dump
+			# the output when it fails.
+			st, out = subprocess_getstatusoutput(
+				"xmllint --nonet --noout --dtdvalid '%s' '%s'" % \
+				 (metadata_dtd, os.path.join(checkdir, "metadata.xml")))
+			if st != os.EX_OK:
+				print(red("!!!") + " metadata.xml is invalid:")
+				for z in out.splitlines():
+					print(red("!!! ")+z)
+				stats["metadata.bad"]+=1
+				fails["metadata.bad"].append(x+"/metadata.xml")
+
+		del metadata_bad
+	muselist = frozenset(musedict)
+
+	changelog_path = os.path.join(checkdir_relative, "ChangeLog")
+	changelog_modified = changelog_path in modified_changelogs
+
+	allmasked = True
+	# detect unused local USE-descriptions
+	used_useflags = set()
+
+	for y in ebuildlist:
+		relative_path = os.path.join(x, y + ".ebuild")
+		full_path = os.path.join(repodir, relative_path)
+		ebuild_path = y + ".ebuild"
+		if repolevel < 3:
+			ebuild_path = os.path.join(pkgdir, ebuild_path)
+		if repolevel < 2:
+			ebuild_path = os.path.join(catdir, ebuild_path)
+		ebuild_path = os.path.join(".", ebuild_path)
+		if check_changelog and not changelog_modified \
+			and ebuild_path in new_ebuilds:
+			stats['changelog.ebuildadded'] += 1
+			fails['changelog.ebuildadded'].append(relative_path)
+
+		if vcs in ("cvs", "svn", "bzr") and check_ebuild_notadded and y not in eadded:
+			#ebuild not added to vcs
+			stats["ebuild.notadded"]=stats["ebuild.notadded"]+1
+			fails["ebuild.notadded"].append(x+"/"+y+".ebuild")
+		myesplit=portage.pkgsplit(y)
+		if myesplit is None or myesplit[0] != x.split("/")[-1] \
+			    or pv_toolong_re.search(myesplit[1]) \
+			    or pv_toolong_re.search(myesplit[2]):
+			stats["ebuild.invalidname"]=stats["ebuild.invalidname"]+1
+			fails["ebuild.invalidname"].append(x+"/"+y+".ebuild")
+			continue
+		elif myesplit[0]!=pkgdir:
+			print(pkgdir,myesplit[0])
+			stats["ebuild.namenomatch"]=stats["ebuild.namenomatch"]+1
+			fails["ebuild.namenomatch"].append(x+"/"+y+".ebuild")
+			continue
+
+		pkg = pkgs[y]
+
+		if pkg.invalid:
+			allvalid = False
+			for k, msgs in pkg.invalid.items():
+				for msg in msgs:
+					stats[k] = stats[k] + 1
+					fails[k].append("%s %s" % (relative_path, msg))
+			continue
+
+		myaux = pkg.metadata
+		eapi = myaux["EAPI"]
+		inherited = pkg.inherited
+		live_ebuild = live_eclasses.intersection(inherited)
+
+		for k, v in myaux.items():
+			if not isinstance(v, basestring):
+				continue
+			m = non_ascii_re.search(v)
+			if m is not None:
+				stats["variable.invalidchar"] += 1
+				fails["variable.invalidchar"].append(
+					("%s: %s variable contains non-ASCII " + \
+					"character at position %s") % \
+					(relative_path, k, m.start() + 1))
+
+		if not src_uri_error:
+			# Check that URIs don't reference a server from thirdpartymirrors.
+			for uri in portage.dep.use_reduce( \
+				myaux["SRC_URI"], matchall=True, is_src_uri=True, eapi=eapi, flat=True):
+				contains_mirror = False
+				for mirror in thirdpartymirrors:
+					if uri.startswith(mirror):
+						contains_mirror = True
+						break
+				if not contains_mirror:
+					continue
+
+				stats["SRC_URI.mirror"] += 1
+				fails["SRC_URI.mirror"].append(
+					"%s: '%s' found in thirdpartymirrors" % \
+					(relative_path, mirror))
+
+		if myaux.get("PROVIDE"):
+			stats["virtual.oldstyle"]+=1
+			fails["virtual.oldstyle"].append(relative_path)
+
+		for pos, missing_var in enumerate(missingvars):
+			if not myaux.get(missing_var):
+				if catdir == "virtual" and \
+					missing_var in ("HOMEPAGE", "LICENSE"):
+					continue
+				if live_ebuild and missing_var == "KEYWORDS":
+					continue
+				myqakey=missingvars[pos]+".missing"
+				stats[myqakey]=stats[myqakey]+1
+				fails[myqakey].append(x+"/"+y+".ebuild")
+
+		if catdir == "virtual":
+			for var in ("HOMEPAGE", "LICENSE"):
+				if myaux.get(var):
+					myqakey = var + ".virtual"
+					stats[myqakey] = stats[myqakey] + 1
+					fails[myqakey].append(relative_path)
+
+		# DESCRIPTION values longer than max_desc_len trigger DESCRIPTION.toolong.
+		if len(myaux['DESCRIPTION']) > max_desc_len:
+			stats['DESCRIPTION.toolong'] += 1
+			fails['DESCRIPTION.toolong'].append(
+				"%s: DESCRIPTION is %d characters (max %d)" % \
+				(relative_path, len(myaux['DESCRIPTION']), max_desc_len))
+
+		keywords = myaux["KEYWORDS"].split()
+		stable_keywords = []
+		for keyword in keywords:
+			if not keyword.startswith("~") and \
+				not keyword.startswith("-"):
+				stable_keywords.append(keyword)
+		if stable_keywords:
+			if ebuild_path in new_ebuilds:
+				stable_keywords.sort()
+				stats["KEYWORDS.stable"] += 1
+				fails["KEYWORDS.stable"].append(
+					x + "/" + y + ".ebuild added with stable keywords: %s" % \
+						" ".join(stable_keywords))
+
+		ebuild_archs = set(kw.lstrip("~") for kw in keywords \
+			if not kw.startswith("-"))
+
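+		# Compare against keywords seen in earlier (lower) versions of the same
+		# SLOT; any arch that disappeared is reported as KEYWORDS.dropped.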
+		previous_keywords = slot_keywords.get(myaux["SLOT"])
+		if previous_keywords is None:
+			slot_keywords[myaux["SLOT"]] = set()
+		elif ebuild_archs and not live_ebuild:
+			dropped_keywords = previous_keywords.difference(ebuild_archs)
+			if dropped_keywords:
+				stats["KEYWORDS.dropped"] += 1
+				fails["KEYWORDS.dropped"].append(
+					relative_path + ": %s" % \
+					" ".join(sorted(dropped_keywords)))
+
+		slot_keywords[myaux["SLOT"]].update(ebuild_archs)
+
+		# KEYWORDS="-*" is a stupid replacement for package.mask and screws general KEYWORDS semantics
+		if "-*" in keywords:
+			haskeyword = False
+			for kw in keywords:
+				if kw[0] == "~":
+					kw = kw[1:]
+				if kw in kwlist:
+					haskeyword = True
+			if not haskeyword:
+				stats["KEYWORDS.stupid"] += 1
+				fails["KEYWORDS.stupid"].append(x+"/"+y+".ebuild")
+
+		"""
+		Ebuilds that inherit a "Live" eclass (darcs,subversion,git,cvs,etc..) should
+		not be allowed to be marked stable
+		"""
+		if live_ebuild:
+			bad_stable_keywords = []
+			for keyword in keywords:
+				if not keyword.startswith("~") and \
+					not keyword.startswith("-"):
+					bad_stable_keywords.append(keyword)
+				del keyword
+			if bad_stable_keywords:
+				stats["LIVEVCS.stable"] += 1
+				fails["LIVEVCS.stable"].append(
+					x + "/" + y + ".ebuild with stable keywords:%s " % \
+						bad_stable_keywords)
+			del bad_stable_keywords
+
+			if keywords and not has_global_mask(pkg):
+				stats["LIVEVCS.unmasked"] += 1
+				fails["LIVEVCS.unmasked"].append(relative_path)
+
+		if options.ignore_arches:
+			arches = [[repoman_settings["ARCH"], repoman_settings["ARCH"],
+				repoman_settings["ACCEPT_KEYWORDS"].split()]]
+		else:
+			arches=[]
+			for keyword in myaux["KEYWORDS"].split():
+				if (keyword[0]=="-"):
+					continue
+				elif (keyword[0]=="~"):
+					arches.append([keyword, keyword[1:], [keyword[1:], keyword]])
+				else:
+					arches.append([keyword, keyword, [keyword]])
+					allmasked = False
+			if not arches:
+				# Use an empty profile for checking dependencies of
+				# packages that have empty KEYWORDS.
+				arches.append(['**', '**', ['**']])
+
+		unknown_pkgs = {}
+		baddepsyntax = False
+		badlicsyntax = False
+		badprovsyntax = False
+		catpkg = catdir+"/"+y
+
+		inherited_java_eclass = "java-pkg-2" in inherited or \
+			"java-pkg-opt-2" in inherited
+		inherited_wxwidgets_eclass = "wxwidgets" in inherited
+		operator_tokens = set(["||", "(", ")"])
+		type_list, badsyntax = [], []
+		for mytype in ("DEPEND", "RDEPEND", "PDEPEND",
+			"LICENSE", "PROPERTIES", "PROVIDE"):
+			mydepstr = myaux[mytype]
+
+			token_class = None
+			if mytype in ("DEPEND", "RDEPEND", "PDEPEND"):
+				token_class=portage.dep.Atom
+
+			try:
+				atoms = portage.dep.use_reduce(mydepstr, matchall=1, flat=True, \
+					is_valid_flag=pkg.iuse.is_valid_flag, token_class=token_class)
+			except portage.exception.InvalidDependString as e:
+				atoms = None
+				badsyntax.append(str(e))
+
+			if atoms and mytype in ("DEPEND", "RDEPEND", "PDEPEND"):
+				if mytype in ("RDEPEND", "PDEPEND") and \
+					"test?" in mydepstr.split():
+					stats[mytype + '.suspect'] += 1
+					fails[mytype + '.suspect'].append(relative_path + \
+						": 'test?' USE conditional in %s" % mytype)
+
+				for atom in atoms:
+					if atom == "||":
+						continue
+
+					if not atom.blocker and \
+						not portdb.cp_list(atom.cp) and \
+						not atom.cp.startswith("virtual/"):
+						unknown_pkgs.setdefault(atom.cp, set()).add(
+							(mytype, atom.unevaluated_atom))
+
+					is_blocker = atom.blocker
+
+					if mytype == "DEPEND" and \
+						not is_blocker and \
+						not inherited_java_eclass and \
+						atom.cp == "virtual/jdk":
+						stats['java.eclassesnotused'] += 1
+						fails['java.eclassesnotused'].append(relative_path)
+					elif mytype == "DEPEND" and \
+						not is_blocker and \
+						not inherited_wxwidgets_eclass and \
+						atom.cp == "x11-libs/wxGTK":
+						stats['wxwidgets.eclassnotused'] += 1
+						fails['wxwidgets.eclassnotused'].append(
+							relative_path + ": DEPENDs on x11-libs/wxGTK"
+							" without inheriting wxwidgets.eclass")
+					elif mytype in ("PDEPEND", "RDEPEND"):
+						if not is_blocker and \
+							atom.cp in suspect_rdepend:
+							stats[mytype + '.suspect'] += 1
+							fails[mytype + '.suspect'].append(
+								relative_path + ": '%s'" % atom)
+
+					if atom.operator == "~" and \
+						portage.versions.catpkgsplit(atom.cpv)[3] != "r0":
+						stats[mytype + '.badtilde'] += 1
+						fails[mytype + '.badtilde'].append(
+							(relative_path + ": %s uses the ~ operator"
+							 " with a non-zero revision:" + \
+							 " '%s'") % (mytype, atom))
+
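+			# Pad type_list so that each entry accumulated in badsyntax is
+			# paired with the variable (DEPEND, LICENSE, ...) that produced it.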
+			type_list.extend([mytype] * (len(badsyntax) - len(type_list)))
+
+		for m,b in zip(type_list, badsyntax):
+			stats[m+".syntax"] += 1
+			fails[m+".syntax"].append(catpkg+".ebuild "+m+": "+b)
+
+		badlicsyntax = len([z for z in type_list if z == "LICENSE"])
+		badprovsyntax = len([z for z in type_list if z == "PROVIDE"])
+		baddepsyntax = len(type_list) != badlicsyntax + badprovsyntax 
+		badlicsyntax = badlicsyntax > 0
+		badprovsyntax = badprovsyntax > 0
+
+		# uselist checks - global
+		myuse = []
+		default_use = []
+		for myflag in myaux["IUSE"].split():
+			flag_name = myflag.lstrip("+-")
+			used_useflags.add(flag_name)
+			if myflag != flag_name:
+				default_use.append(myflag)
+			if flag_name not in uselist:
+				myuse.append(flag_name)
+
+		# uselist checks - metadata
+		for mypos in range(len(myuse)-1,-1,-1):
+			if myuse[mypos] and (myuse[mypos] in muselist):
+				del myuse[mypos]
+
+		if default_use and not eapi_has_iuse_defaults(eapi):
+			for myflag in default_use:
+				stats['EAPI.incompatible'] += 1
+				fails['EAPI.incompatible'].append(
+					(relative_path + ": IUSE defaults" + \
+					" not supported with EAPI='%s':" + \
+					" '%s'") % (eapi, myflag))
+
+		for mypos in range(len(myuse)):
+			stats["IUSE.invalid"]=stats["IUSE.invalid"]+1
+			fails["IUSE.invalid"].append(x+"/"+y+".ebuild: %s" % myuse[mypos])	
+
+		# license checks
+		if not badlicsyntax:
+			# Parse the LICENSE variable, remove USE conditions and
+			# flatten it.
+			licenses = portage.dep.use_reduce(myaux["LICENSE"], matchall=1, flat=True)
+			# Check each entry to ensure that it exists in PORTDIR's
+			# license directory.
+			for lic in licenses:
+				# Need to check for "||" manually as no portage
+				# function will remove it without removing values.
+				if lic not in liclist and lic != "||":
+					stats["LICENSE.invalid"]=stats["LICENSE.invalid"]+1
+					fails["LICENSE.invalid"].append(x+"/"+y+".ebuild: %s" % lic)
+
+		#keyword checks
+		myuse = myaux["KEYWORDS"].split()
+		for mykey in myuse:
+			myskey=mykey[:]
+			if myskey[0]=="-":
+				myskey=myskey[1:]
+			if myskey[0]=="~":
+				myskey=myskey[1:]
+			if mykey!="-*":
+				if myskey not in kwlist:
+					stats["KEYWORDS.invalid"] += 1
+					fails["KEYWORDS.invalid"].append(x+"/"+y+".ebuild: %s" % mykey)
+				elif myskey not in profiles:
+					stats["KEYWORDS.invalid"] += 1
+					fails["KEYWORDS.invalid"].append(x+"/"+y+".ebuild: %s (profile invalid)" % mykey)
+
+		#restrict checks
+		myrestrict = None
+		try:
+			myrestrict = portage.dep.use_reduce(myaux["RESTRICT"], matchall=1, flat=True)
+		except portage.exception.InvalidDependString as e:
+			stats["RESTRICT.syntax"] = stats["RESTRICT.syntax"] + 1
+			fails["RESTRICT.syntax"].append(
+				"%s: RESTRICT: %s" % (relative_path, e))
+			del e
+		if myrestrict:
+			myrestrict = set(myrestrict)
+			mybadrestrict = myrestrict.difference(valid_restrict)
+			if mybadrestrict:
+				stats["RESTRICT.invalid"] += len(mybadrestrict)
+				for mybad in mybadrestrict:
+					fails["RESTRICT.invalid"].append(x+"/"+y+".ebuild: %s" % mybad)
+		#REQUIRED_USE check
+		required_use = myaux["REQUIRED_USE"]
+		if required_use:
+			if not eapi_has_required_use(eapi):
+				stats['EAPI.incompatible'] += 1
+				fails['EAPI.incompatible'].append(
+					relative_path + ": REQUIRED_USE" + \
+					" not supported with EAPI='%s'" % (eapi,))
+			try:
+				portage.dep.check_required_use(required_use, (),
+					pkg.iuse.is_valid_flag)
+			except portage.exception.InvalidDependString as e:
+				stats["REQUIRED_USE.syntax"] = stats["REQUIRED_USE.syntax"] + 1
+				fails["REQUIRED_USE.syntax"].append(
+					"%s: REQUIRED_USE: %s" % (relative_path, e))
+				del e
+
+		# Syntax Checks
+		relative_path = os.path.join(x, y + ".ebuild")
+		full_path = os.path.join(repodir, relative_path)
+		if not vcs_preserves_mtime:
+			if ebuild_path not in new_ebuilds and \
+				ebuild_path not in modified_ebuilds:
+				pkg.mtime = None
+		try:
+			# All ebuilds should have utf_8 encoding.
+			f = io.open(_unicode_encode(full_path,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['repo.content'])
+			try:
+				for check_name, e in run_checks(f, pkg):
+					stats[check_name] += 1
+					fails[check_name].append(relative_path + ': %s' % e)
+			finally:
+				f.close()
+		except UnicodeDecodeError:
+			# A file.UTF8 failure will have already been recorded above.
+			pass
+
+		if options.force:
+			# The dep_check() calls are the most expensive QA test. If --force
+			# is enabled, there's no point in wasting time on these since the
+			# user is intent on forcing the commit anyway.
+			continue
+
+		for keyword,arch,groups in arches:
+
+			if arch not in profiles:
+				# A missing profile will create an error further down
+				# during the KEYWORDS verification.
+				continue
+				
+			for prof in profiles[arch]:
+
+				if prof.status not in ("stable", "dev") or \
+					prof.status == "dev" and not options.include_dev:
+					continue
+
+				dep_settings = arch_caches.get(prof.sub_path)
+				if dep_settings is None:
+					dep_settings = portage.config(
+						config_profile_path=prof.abs_path,
+						config_incrementals=repoman_incrementals,
+						local_config=False,
+						_unmatched_removal=options.unmatched_removal,
+						env=env)
+					if options.without_mask:
+						dep_settings._mask_manager = \
+							copy.deepcopy(dep_settings._mask_manager)
+						dep_settings._mask_manager._pmaskdict.clear()
+					arch_caches[prof.sub_path] = dep_settings
+
+				xmatch_cache_key = (prof.sub_path, tuple(groups))
+				xcache = arch_xmatch_caches.get(xmatch_cache_key)
+				if xcache is None:
+					portdb.melt()
+					portdb.freeze()
+					xcache = portdb.xcache
+					xcache.update(shared_xmatch_caches)
+					arch_xmatch_caches[xmatch_cache_key] = xcache
+
+				trees["/"]["porttree"].settings = dep_settings
+				portdb.settings = dep_settings
+				portdb.xcache = xcache
+				# for package.use.mask support inside dep_check
+				dep_settings.setcpv(pkg)
+				dep_settings["ACCEPT_KEYWORDS"] = " ".join(groups)
+				# just in case, prevent config.reset() from nuking these.
+				dep_settings.backup_changes("ACCEPT_KEYWORDS")
+
+				if not baddepsyntax:
+					ismasked = not ebuild_archs or \
+						pkg.cpv not in portdb.xmatch("list-visible", pkg.cp)
+					if ismasked:
+						if not have_pmasked:
+							have_pmasked = bool(dep_settings._getMaskAtom(
+								pkg.cpv, pkg.metadata))
+						if options.ignore_masked:
+							continue
+						#we are testing deps for a masked package; give it some leeway
+						suffix="masked"
+						matchmode = "minimum-all"
+					else:
+						suffix=""
+						matchmode = "minimum-visible"
+
+					if not have_dev_keywords:
+						have_dev_keywords = \
+							bool(dev_keywords.intersection(keywords))
+
+					if prof.status == "dev":
+						suffix=suffix+"indev"
+
+					for mytype,mypos in [["DEPEND",len(missingvars)],["RDEPEND",len(missingvars)+1],["PDEPEND",len(missingvars)+2]]:
+						
+						mykey=mytype+".bad"+suffix
+						myvalue = myaux[mytype]
+						if not myvalue:
+							continue
+
+						success, atoms = portage.dep_check(myvalue, portdb,
+							dep_settings, use="all", mode=matchmode,
+							trees=trees)
+
+						if success:
+							if atoms:
+								for atom in atoms:
+									if not atom.blocker:
+										# Don't bother with dependency.unknown
+										# for cases in which *DEPEND.bad is
+										# triggered.
+										unknown_pkgs.pop(atom.cp, None)
+
+								if not prof.sub_path:
+									# old-style virtuals currently aren't
+									# resolvable with empty profile, since
+									# 'virtuals' mappings are unavailable
+									# (it would be expensive to search
+									# for PROVIDE in all ebuilds)
+									atoms = [atom for atom in atoms if not \
+										(atom.cp.startswith('virtual/') and \
+										not portdb.cp_list(atom.cp))]
+
+								#we have some unsolvable deps
+								#remove ! deps, which always show up as unsatisfiable
+								atoms = [str(atom.unevaluated_atom) \
+									for atom in atoms if not atom.blocker]
+
+								#if we emptied out our list, continue:
+								if not atoms:
+									continue
+								stats[mykey]=stats[mykey]+1
+								fails[mykey].append("%s: %s(%s) %s" % \
+									(relative_path, keyword,
+									prof, repr(atoms)))
+						else:
+							stats[mykey]=stats[mykey]+1
+							fails[mykey].append("%s: %s(%s) %s" % \
+								(relative_path, keyword,
+								prof, repr(atoms)))
+
+		if not baddepsyntax and unknown_pkgs:
+			all_unknown = set()
+			all_unknown.update(*unknown_pkgs.values())
+			type_map = {}
+			for mytype, atom in all_unknown:
+				type_map.setdefault(mytype, set()).add(atom)
+			for mytype, atoms in type_map.items():
+				stats["dependency.unknown"] += 1
+				fails["dependency.unknown"].append("%s: %s: %s" %
+					(relative_path, mytype, ", ".join(sorted(atoms))))
+
+	# Check for 'all unstable' or 'all masked' -- ACCEPT_KEYWORDS is stripped
+	# XXX -- Needs to be implemented in dep code. Can't determine ~arch nicely.
+	#if not portage.portdb.xmatch("bestmatch-visible",x):
+	#    stats["ebuild.nostable"]+=1
+	#    fails["ebuild.nostable"].append(x)
+	if ebuildlist and allmasked and repolevel == 3:
+		stats["ebuild.allmasked"]+=1
+		fails["ebuild.allmasked"].append(x)
+
+	# check if there are unused local USE-descriptions in metadata.xml
+	# (unless there are any invalids, to avoid noise)
+	if allvalid:
+		for myflag in muselist.difference(used_useflags):
+			stats["metadata.warning"] += 1
+			fails["metadata.warning"].append(
+				"%s/metadata.xml: unused local USE-description: '%s'" % \
+				(x, myflag))
+
+if options.mode == "manifest":
+	sys.exit(dofail)
+
+#dofail will be set to 1 if we have failed in at least one non-warning category
+dofail=0
+#dowarn will be set to 1 if we tripped any warnings
+dowarn=0
+#dofull will be set if we should print a "repoman full" informational message
+dofull = options.mode != 'full'
+
+for x in qacats:
+	if not stats[x]:
+		continue
+	dowarn = 1
+	if x not in qawarnings:
+		dofail = 1
+
+if dofail or \
+	(dowarn and not (options.quiet or options.mode == "scan")):
+	dofull = 0
+
+# Save QA output so that it can be conveniently displayed
+# in $EDITOR while the user creates a commit message.
+# Otherwise, the user would not be able to see this output
+# once the editor has taken over the screen.
+qa_output = io.StringIO()
+style_file = ConsoleStyleFile(sys.stdout)
+if options.mode == 'commit' and \
+	(not commitmessage or not commitmessage.strip()):
+	style_file.write_listener = qa_output
+console_writer = StyleWriter(file=style_file, maxcol=9999)
+console_writer.style_listener = style_file.new_styles
+
+f = formatter.AbstractFormatter(console_writer)
+
+utilities.format_qa_output(f, stats, fails, dofull, dofail, options, qawarnings)
+
+style_file.flush()
+del console_writer, f, style_file
+qa_output = qa_output.getvalue()
+qa_output = qa_output.splitlines(True)
+
+def grouplist(mylist,seperator="/"):
+	"""(list,seperator="/") -- Takes a list of elements; groups them into
+	same initial element categories. Returns a dict of {base:[sublist]}
+	From: ["blah/foo","spork/spatula","blah/weee/splat"]
+	To:   {"blah":["foo","weee/splat"], "spork":["spatula"]}"""
+	mygroups={}
+	for x in mylist:
+		xs=x.split(seperator)
+		if xs[0]==".":
+			xs=xs[1:]
+		if xs[0] not in mygroups:
+			mygroups[xs[0]]=[seperator.join(xs[1:])]
+		else:
+			mygroups[xs[0]]+=[seperator.join(xs[1:])]
+	return mygroups
+
+suggest_ignore_masked = False
+suggest_include_dev = False
+
+if have_pmasked and not (options.without_mask or options.ignore_masked):
+	suggest_ignore_masked = True
+if have_dev_keywords and not options.include_dev:
+	suggest_include_dev = True
+
+if suggest_ignore_masked or suggest_include_dev:
+	print()
+	if suggest_ignore_masked:
+		print(bold("Note: use --without-mask to check " + \
+			"KEYWORDS on dependencies of masked packages"))
+
+	if suggest_include_dev:
+		print(bold("Note: use --include-dev (-d) to check " + \
+			"dependencies for 'dev' profiles"))
+	print()
+
+if options.mode != 'commit':
+	if dofull:
+		print(bold("Note: type \"repoman full\" for a complete listing."))
+	if dowarn and not dofail:
+		print(green("RepoMan sez:"),"\"You're only giving me a partial QA payment?\n              I'll take it this time, but I'm not happy.\"")
+	elif not dofail:
+		print(green("RepoMan sez:"),"\"If everyone were like you, I'd be out of business!\"")
+	elif dofail:
+		print(bad("Please fix these important QA issues first."))
+		print(green("RepoMan sez:"),"\"Make your QA payment on time and you'll never see the likes of me.\"\n")
+		sys.exit(1)
+else:
+	if dofail and can_force and options.force and not options.pretend:
+		print(green("RepoMan sez:") + \
+			" \"You want to commit even with these QA issues?\n" + \
+			"              I'll take it this time, but I'm not happy.\"\n")
+	elif dofail:
+		if options.force and not can_force:
+			print(bad("The --force option has been disabled due to extraordinary issues."))
+		print(bad("Please fix these important QA issues first."))
+		print(green("RepoMan sez:"),"\"Make your QA payment on time and you'll never see the likes of me.\"\n")
+		sys.exit(1)
+
+	if options.pretend:
+		print(green("RepoMan sez:"), "\"So, you want to play it safe. Good call.\"\n")
+
+	myunadded = []
+	if vcs == "cvs":
+		try:
+			myvcstree=portage.cvstree.getentries("./",recursive=1)
+			myunadded=portage.cvstree.findunadded(myvcstree,recursive=1,basedir="./")
+		except SystemExit as e:
+			raise  # TODO propagate this
+		except:
+			err("Error retrieving CVS tree; exiting.")
+	if vcs == "svn":
+		try:
+			svnstatus=os.popen("svn status --no-ignore").readlines()
+			myunadded = [ "./"+elem.rstrip().split()[1] for elem in svnstatus if elem.startswith("?") or elem.startswith("I") ]
+		except SystemExit as e:
+			raise  # TODO propagate this
+		except:
+			err("Error retrieving SVN info; exiting.")
+	if vcs == "git":
+		# get list of files not under version control or missing
+		myf = os.popen("git ls-files --others")
+		myunadded = [ "./" + elem[:-1] for elem in myf ]
+		myf.close()
+	if vcs == "bzr":
+		try:
+			bzrstatus=os.popen("bzr status -S .").readlines()
+			myunadded = [ "./"+elem.rstrip().split()[1].split('/')[-1:][0] for elem in bzrstatus if elem.startswith("?") or elem[0:2] == " D" ]
+		except SystemExit as e:
+			raise  # TODO propagate this
+		except:
+			err("Error retrieving bzr info; exiting.")
+	if vcs == "hg":
+		myunadded = os.popen("hg status --no-status --unknown .").readlines()
+		myunadded = ["./" + elem.rstrip() for elem in myunadded]
+		
+		# Mercurial doesn't treat manually deleted files as removed from
+		# the repository, so the user needs to remove them before committing,
+		# using "hg remove [FILES]"
+		mydeleted = os.popen("hg status --no-status --deleted .").readlines()
+		mydeleted = ["./" + elem.rstrip() for elem in mydeleted]
+
+
+	myautoadd=[]
+	if myunadded:
+		for x in range(len(myunadded)-1,-1,-1):
+			xs=myunadded[x].split("/")
+			if xs[-1]=="files":
+				print("!!! files dir is not added! Please correct this.")
+				sys.exit(-1)
+			elif xs[-1]=="Manifest":
+				# It's a manifest... auto add
+				myautoadd+=[myunadded[x]]
+				del myunadded[x]
+
+	if myautoadd:
+		print(">>> Auto-Adding missing Manifest(s)...")
+		if options.pretend:
+			if vcs == "cvs":
+				print("(cvs add "+" ".join(myautoadd)+")")
+			elif vcs == "svn":
+				print("(svn add "+" ".join(myautoadd)+")")
+			elif vcs == "git":
+				print("(git add "+" ".join(myautoadd)+")")
+			elif vcs == "bzr":
+				print("(bzr add "+" ".join(myautoadd)+")")
+			elif vcs == "hg":
+				print("(hg add "+" ".join(myautoadd)+")")
+			retval=0
+		else:
+			if vcs == "cvs":
+				retval=os.system("cvs add "+" ".join(myautoadd))
+			elif vcs == "svn":
+				retval=os.system("svn add "+" ".join(myautoadd))
+			elif vcs == "git":
+				retval=os.system("git add "+" ".join(myautoadd))
+			elif vcs == "bzr":
+				retval=os.system("bzr add "+" ".join(myautoadd))
+			elif vcs == "hg":
+				retval=os.system("hg add "+" ".join(myautoadd))
+		if retval:
+			writemsg_level("!!! Exiting on %s (shell) error code: %s\n" % \
+				(vcs, retval), level=logging.ERROR, noiselevel=-1)
+			sys.exit(retval)
+
+	if myunadded:
+		print(red("!!! The following files are in your local tree but are not added to the master"))
+		print(red("!!! tree. Please remove them from the local tree or add them to the master tree."))
+		for x in myunadded:
+			print("   ",x)
+		print()
+		print()
+		sys.exit(1)
+
+	if vcs == "hg" and mydeleted:
+		print(red("!!! The following files are removed manually from your local tree but are not"))
+		print(red("!!! removed from the repository. Please remove them, using \"hg remove [FILES]\"."))
+		for x in mydeleted:
+			print("   ",x)
+		print()
+		print()
+		sys.exit(1)
+
+	if vcs == "cvs":
+		mycvstree = cvstree.getentries("./", recursive=1)
+		mychanged = cvstree.findchanged(mycvstree, recursive=1, basedir="./")
+		mynew = cvstree.findnew(mycvstree, recursive=1, basedir="./")
+		myremoved=portage.cvstree.findremoved(mycvstree,recursive=1,basedir="./")
+		bin_blob_pattern = re.compile("^-kb$")
+		no_expansion = set(portage.cvstree.findoption(mycvstree, bin_blob_pattern,
+			recursive=1, basedir="./"))
+
+
+	if vcs == "svn":
+		svnstatus = os.popen("svn status").readlines()
+		mychanged = [ "./" + elem.split()[-1:][0] for elem in svnstatus if (elem[:1] in "MR" or elem[1:2] in "M")]
+		mynew     = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("A")]
+		myremoved = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("D")]
+
+		# Subversion expands keywords specified in svn:keywords properties.
+		props = os.popen("svn propget -R svn:keywords").readlines()
+		expansion = dict(("./" + prop.split(" - ")[0], prop.split(" - ")[1].split()) \
+			for prop in props if " - " in prop)
+
+	elif vcs == "git":
+		mychanged = os.popen("git diff-index --name-only --relative --diff-filter=M HEAD").readlines()
+		mychanged = ["./" + elem[:-1] for elem in mychanged]
+
+		mynew = os.popen("git diff-index --name-only --relative --diff-filter=A HEAD").readlines()
+		mynew = ["./" + elem[:-1] for elem in mynew]
+
+		myremoved = os.popen("git diff-index --name-only --relative --diff-filter=D HEAD").readlines()
+		myremoved = ["./" + elem[:-1] for elem in myremoved]
+
+	if vcs == "bzr":
+		bzrstatus = os.popen("bzr status -S .").readlines()
+		mychanged = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and elem[1:2] == "M" ]
+		mynew     = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and ( elem[1:2] in "NK" or elem[0:1] == "R" ) ]
+		myremoved = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem.startswith("-") ]
+		myremoved += [ "./" + elem.split()[-3:-2][0].split('/')[-1:][0] for elem in bzrstatus if elem and ( elem[1:2] == "K" or elem[0:1] == "R" ) ]
+		# Bazaar expands nothing.
+
+	if vcs == "hg":
+		mychanged = os.popen("hg status --no-status --modified .").readlines()
+		mychanged = ["./" + elem.rstrip() for elem in mychanged]
+		mynew = os.popen("hg status --no-status --added .").readlines()
+		mynew = ["./" + elem.rstrip() for elem in mynew]
+		myremoved = os.popen("hg status --no-status --removed .").readlines()
+		myremoved = ["./" + elem.rstrip() for elem in myremoved]
+
+	if vcs:
+		if not (mychanged or mynew or myremoved or (vcs == "hg" and mydeleted)):
+			print(green("RepoMan sez:"), "\"Doing nothing is not always good for QA.\"")
+			print()
+			print("(Didn't find any changed files...)")
+			print()
+			sys.exit(1)
+
+	# Manifests need to be regenerated after all other commits, so don't commit
+	# them now even if they have changed.
+	mymanifests = set()
+	myupdates = set()
+	for f in mychanged + mynew:
+		if "Manifest" == os.path.basename(f):
+			mymanifests.add(f)
+		else:
+			myupdates.add(f)
+	if vcs in ('git', 'hg'):
+		myupdates.difference_update(myremoved)
+	myupdates = list(myupdates)
+	mymanifests = list(mymanifests)
+	myheaders = []
+	mydirty = []
+
+	print("* %s files being committed..." % green(str(len(myupdates))), end=' ')
+	if vcs not in ('cvs', 'svn'):
+		# With git, bzr and hg, there's never any keyword expansion, so
+		# there's no need to regenerate manifests and all files will be
+		# committed in one big commit at the end.
+		print()
+	else:
+		if vcs == 'cvs':
+			headerstring = "'\$(Header|Id).*\$'"
+		elif vcs == "svn":
+			svn_keywords = dict((k.lower(), k) for k in [
+					"Rev",
+					"Revision",
+					"LastChangedRevision",
+					"Date",
+					"LastChangedDate",
+					"Author",
+					"LastChangedBy",
+					"URL",
+					"HeadURL",
+					"Id",
+					"Header",
+			])
+
+		for myfile in myupdates:
+
+			# for CVS, no_expansion contains files that are excluded from expansion
+			if vcs == "cvs":
+				if myfile in no_expansion:
+					continue
+
+			# for SVN, expansion contains files that are included in expansion
+			elif vcs == "svn":
+				if myfile not in expansion:
+					continue
+				
+				# Subversion keywords are case-insensitive in svn:keywords properties, but case-sensitive in contents of files.
+				enabled_keywords = []
+				for k in expansion[myfile]:
+					keyword = svn_keywords.get(k.lower())
+					if keyword is not None:
+						enabled_keywords.append(keyword)
+
+				headerstring = "'\$(%s).*\$'" % "|".join(enabled_keywords)
+
+			myout = subprocess_getstatusoutput("egrep -q "+headerstring+" "+myfile)
+			if myout[0] == 0:
+				myheaders.append(myfile)
+
+		print("%s have headers that will change." % green(str(len(myheaders))))
+		print("* Files with headers will cause the manifests to be changed and committed separately.")
+
+	logging.info("myupdates: %s", myupdates)
+	logging.info("myheaders: %s", myheaders)
+
+	commitmessage = options.commitmsg
+	if options.commitmsgfile:
+		try:
+			f = io.open(_unicode_encode(options.commitmsgfile,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['content'], errors='replace')
+			commitmessage = f.read()
+			f.close()
+			del f
+		except (IOError, OSError) as e:
+			if e.errno == errno.ENOENT:
+				portage.writemsg("!!! File Not Found: --commitmsgfile='%s'\n" % options.commitmsgfile)
+			else:
+				raise
+		# We've read the content so the file is no longer needed.
+		commitmessagefile = None
+	if not commitmessage or not commitmessage.strip():
+		try:
+			editor = os.environ.get("EDITOR")
+			if editor and utilities.editor_is_executable(editor):
+				commitmessage = utilities.get_commit_message_with_editor(
+					editor, message=qa_output)
+			else:
+				commitmessage = utilities.get_commit_message_with_stdin()
+		except KeyboardInterrupt:
+			exithandler()
+		if not commitmessage or not commitmessage.strip():
+			print("* no commit message?  aborting commit.")
+			sys.exit(1)
+	commitmessage = commitmessage.rstrip()
+	portage_version = getattr(portage, "VERSION", None)
+	if portage_version is None:
+		sys.stderr.write("Failed to insert portage version in message!\n")
+		sys.stderr.flush()
+		portage_version = "Unknown"
+	unameout = platform.system() + " "
+	if platform.system() in ["Darwin", "SunOS"]:
+		unameout += platform.processor()
+	else:
+		unameout += platform.machine()
+	commitmessage += "\n\n(Portage version: %s/%s/%s" % \
+		(portage_version, vcs, unameout)
+	if options.force:
+		commitmessage += ", RepoMan options: --force"
+	commitmessage += ")"
+
+	if options.ask and userquery('Commit changes?', True) != 'Yes':
+		print("* aborting commit.")
+		sys.exit(1)
+
+	if vcs in ('cvs', 'svn') and (myupdates or myremoved):
+		myfiles = myupdates + myremoved
+		if not myheaders and "sign" not in repoman_settings.features:
+			myfiles += mymanifests
+		fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
+		mymsg = os.fdopen(fd, "wb")
+		mymsg.write(_unicode_encode(commitmessage))
+		mymsg.close()
+
+		print()
+		print(green("Using commit message:"))
+		print(green("------------------------------------------------------------------------------"))
+		print(commitmessage)
+		print(green("------------------------------------------------------------------------------"))
+		print()
+
+		# Having a leading ./ prefix on file paths can trigger a bug in
+		# the cvs server when committing files to multiple directories,
+		# so strip the prefix.
+		myfiles = [f.lstrip("./") for f in myfiles]
+
+		commit_cmd = [vcs]
+		commit_cmd.extend(vcs_global_opts)
+		commit_cmd.append("commit")
+		commit_cmd.extend(vcs_local_opts)
+		commit_cmd.extend(["-F", commitmessagefile])
+		commit_cmd.extend(myfiles)
+
+		try:
+			if options.pretend:
+				print("(%s)" % (" ".join(commit_cmd),))
+			else:
+				retval = spawn(commit_cmd, env=os.environ)
+				if retval != os.EX_OK:
+					writemsg_level(("!!! Exiting on %s (shell) " + \
+						"error code: %s\n") % (vcs, retval),
+						level=logging.ERROR, noiselevel=-1)
+					sys.exit(retval)
+		finally:
+			try:
+				os.unlink(commitmessagefile)
+			except OSError:
+				pass
+
+	# Setup the GPG commands
+	def gpgsign(filename):
+		gpgcmd = repoman_settings.get("PORTAGE_GPG_SIGNING_COMMAND")
+		if gpgcmd is None:
+			raise MissingParameter("PORTAGE_GPG_SIGNING_COMMAND is unset!" + \
+				" Is make.globals missing?")
+		if "${PORTAGE_GPG_KEY}" in gpgcmd and \
+			"PORTAGE_GPG_KEY" not in repoman_settings:
+			raise MissingParameter("PORTAGE_GPG_KEY is unset!")
+		if "${PORTAGE_GPG_DIR}" in gpgcmd:
+			if "PORTAGE_GPG_DIR" not in repoman_settings:
+				repoman_settings["PORTAGE_GPG_DIR"] = \
+					os.path.expanduser("~/.gnupg")
+				logging.info("Automatically setting PORTAGE_GPG_DIR to '%s'" \
+					% repoman_settings["PORTAGE_GPG_DIR"])
+			else:
+				repoman_settings["PORTAGE_GPG_DIR"] = \
+					os.path.expanduser(repoman_settings["PORTAGE_GPG_DIR"])
+			if not os.access(repoman_settings["PORTAGE_GPG_DIR"], os.X_OK):
+				raise portage.exception.InvalidLocation(
+					"Unable to access directory: PORTAGE_GPG_DIR='%s'" % \
+					repoman_settings["PORTAGE_GPG_DIR"])
+		gpgvars = {"FILE": filename}
+		for k in ("PORTAGE_GPG_DIR", "PORTAGE_GPG_KEY"):
+			v = repoman_settings.get(k)
+			if v is not None:
+				gpgvars[k] = v
+		gpgcmd = portage.util.varexpand(gpgcmd, mydict=gpgvars)
+		if options.pretend:
+			print("("+gpgcmd+")")
+		else:
+			rValue = os.system(gpgcmd)
+			if rValue == os.EX_OK:
+				os.rename(filename+".asc", filename)
+			else:
+				raise portage.exception.PortageException("!!! gpg exited with '" + str(rValue) + "' status")
+
+	# When files are removed and re-added, the cvs server will put /Attic/
+	# inside the $Header path. This code detects the problem and corrects it
+	# so that the Manifest will generate correctly. See bug #169500.
+	# Use binary mode in order to avoid potential character encoding issues.
+	cvs_header_re = re.compile(br'^#\s*\$Header.*\$$')
+	attic_str = b'/Attic/'
+	attic_replace = b'/'
+	for x in myheaders:
+		f = open(_unicode_encode(x,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='rb')
+		mylines = f.readlines()
+		f.close()
+		modified = False
+		for i, line in enumerate(mylines):
+			if cvs_header_re.match(line) is not None and \
+				attic_str in line:
+				mylines[i] = line.replace(attic_str, attic_replace)
+				modified = True
+		if modified:
+			portage.util.write_atomic(x, b''.join(mylines),
+				mode='wb')
+
+	manifest_commit_required = True
+	if vcs in ('cvs', 'svn') and (myupdates or myremoved):
+		myfiles = myupdates + myremoved
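+		# Drop entries that are too shallow to belong to a package
+		# directory; the required depth depends on repolevel.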
+		for x in range(len(myfiles)-1, -1, -1):
+			if myfiles[x].count("/") < 4-repolevel:
+				del myfiles[x]
+		mydone=[]
+		if repolevel==3:   # In a package dir
+			repoman_settings["O"] = startdir
+			digestgen(mysettings=repoman_settings, myportdb=portdb)
+		elif repolevel==2: # In a category dir
+			for x in myfiles:
+				xs=x.split("/")
+				if len(xs) < 4-repolevel:
+					continue
+				if xs[0]==".":
+					xs=xs[1:]
+				if xs[0] in mydone:
+					continue
+				mydone.append(xs[0])
+				repoman_settings["O"] = os.path.join(startdir, xs[0])
+				if not os.path.isdir(repoman_settings["O"]):
+					continue
+				digestgen(mysettings=repoman_settings, myportdb=portdb)
+		elif repolevel==1: # repo-cvsroot
+			print(green("RepoMan sez:"), "\"You're rather crazy... doing the entire repository.\"\n")
+			for x in myfiles:
+				xs=x.split("/")
+				if len(xs) < 4-repolevel:
+					continue
+				if xs[0]==".":
+					xs=xs[1:]
+				if "/".join(xs[:2]) in mydone:
+					continue
+				mydone.append("/".join(xs[:2]))
+				repoman_settings["O"] = os.path.join(startdir, xs[0], xs[1])
+				if not os.path.isdir(repoman_settings["O"]):
+					continue
+				digestgen(mysettings=repoman_settings, myportdb=portdb)
+		else:
+			print(red("I'm confused... I don't know where I am!"))
+			sys.exit(1)
+
+		# Force an unsigned commit when more than one Manifest needs to be signed.
+		if repolevel < 3 and "sign" in repoman_settings.features:
+
+			fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
+			mymsg = os.fdopen(fd, "wb")
+			mymsg.write(_unicode_encode(commitmessage))
+			mymsg.write(b"\n (Unsigned Manifest commit)")
+			mymsg.close()
+
+			commit_cmd = [vcs]
+			commit_cmd.extend(vcs_global_opts)
+			commit_cmd.append("commit")
+			commit_cmd.extend(vcs_local_opts)
+			commit_cmd.extend(["-F", commitmessagefile])
+			commit_cmd.extend(f.lstrip("./") for f in mymanifests)
+
+			try:
+				if options.pretend:
+					print("(%s)" % (" ".join(commit_cmd),))
+				else:
+					retval = spawn(commit_cmd, env=os.environ)
+					if retval:
+						writemsg_level(("!!! Exiting on %s (shell) " + \
+							"error code: %s\n") % (vcs, retval),
+							level=logging.ERROR, noiselevel=-1)
+						sys.exit(retval)
+			finally:
+				try:
+					os.unlink(commitmessagefile)
+				except OSError:
+					pass
+			manifest_commit_required = False
+
+	signed = False
+	if "sign" in repoman_settings.features:
+		signed = True
+		myfiles = myupdates + myremoved + mymanifests
+		try:
+			if repolevel==3:   # In a package dir
+				repoman_settings["O"] = "."
+				gpgsign(os.path.join(repoman_settings["O"], "Manifest"))
+			elif repolevel==2: # In a category dir
+				mydone=[]
+				for x in myfiles:
+					xs=x.split("/")
+					if len(xs) < 4-repolevel:
+						continue
+					if xs[0]==".":
+						xs=xs[1:]
+					if xs[0] in mydone:
+						continue
+					mydone.append(xs[0])
+					repoman_settings["O"] = os.path.join(".", xs[0])
+					if not os.path.isdir(repoman_settings["O"]):
+						continue
+					gpgsign(os.path.join(repoman_settings["O"], "Manifest"))
+			elif repolevel==1: # repo-cvsroot
+				print(green("RepoMan sez:"), "\"You're rather crazy... doing the entire repository.\"\n")
+				mydone=[]
+				for x in myfiles:
+					xs=x.split("/")
+					if len(xs) < 4-repolevel:
+						continue
+					if xs[0]==".":
+						xs=xs[1:]
+					if "/".join(xs[:2]) in mydone:
+						continue
+					mydone.append("/".join(xs[:2]))
+					repoman_settings["O"] = os.path.join(".", xs[0], xs[1])
+					if not os.path.isdir(repoman_settings["O"]):
+						continue
+					gpgsign(os.path.join(repoman_settings["O"], "Manifest"))
+		except portage.exception.PortageException as e:
+			portage.writemsg("!!! %s\n" % str(e))
+			portage.writemsg("!!! Disabled FEATURES='sign'\n")
+			signed = False
+
+	if vcs == 'git':
+		# It's not safe to use the git commit -a option since there might
+		# be some modified files elsewhere in the working tree that the
+		# user doesn't want to commit. Therefore, call git update-index
+		# in order to ensure that the index is updated with the latest
+		# versions of all new and modified files in the relevant portion
+		# of the working tree.
+		myfiles = mymanifests + myupdates
+		myfiles.sort()
+		update_index_cmd = ["git", "update-index"]
+		update_index_cmd.extend(f.lstrip("./") for f in myfiles)
+		if options.pretend:
+			print("(%s)" % (" ".join(update_index_cmd),))
+		else:
+			retval = spawn(update_index_cmd, env=os.environ)
+			if retval != os.EX_OK:
+				writemsg_level(("!!! Exiting on %s (shell) " + \
+					"error code: %s\n") % (vcs, retval),
+					level=logging.ERROR, noiselevel=-1)
+				sys.exit(retval)
+
+	if vcs in ['git', 'bzr', 'hg'] or manifest_commit_required or signed:
+
+		myfiles = mymanifests[:]
+		if vcs in ['git', 'bzr', 'hg']:
+			myfiles += myupdates
+			myfiles += myremoved
+		myfiles.sort()
+
+		fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
+		mymsg = os.fdopen(fd, "wb")
+		# strip the closing parenthesis
+		mymsg.write(_unicode_encode(commitmessage[:-1]))
+		if signed:
+			mymsg.write(_unicode_encode(
+				", signed Manifest commit with key %s)" % \
+				repoman_settings["PORTAGE_GPG_KEY"]))
+		else:
+			mymsg.write(b", unsigned Manifest commit)")
+		mymsg.close()
+
+		commit_cmd = []
+		if options.pretend and vcs is None:
+			# substitute a bogus value for pretend output
+			commit_cmd.append("cvs")
+		else:
+			commit_cmd.append(vcs)
+		commit_cmd.extend(vcs_global_opts)
+		commit_cmd.append("commit")
+		commit_cmd.extend(vcs_local_opts)
+		if vcs == "hg":
+			commit_cmd.extend(["--logfile", commitmessagefile])
+			commit_cmd.extend(myfiles)
+		else:
+			commit_cmd.extend(["-F", commitmessagefile])
+			commit_cmd.extend(f.lstrip("./") for f in myfiles)
+
+		try:
+			if options.pretend:
+				print("(%s)" % (" ".join(commit_cmd),))
+			else:
+				retval = spawn(commit_cmd, env=os.environ)
+				if retval != os.EX_OK:
+					writemsg_level(("!!! Exiting on %s (shell) " + \
+						"error code: %s\n") % (vcs, retval),
+						level=logging.ERROR, noiselevel=-1)
+					sys.exit(retval)
+		finally:
+			try:
+				os.unlink(commitmessagefile)
+			except OSError:
+				pass
+
+	print()
+	if vcs:
+		print("Commit complete.")
+	else:
+		print("repoman was too scared by not seeing any familiar version control file that he forgot to commit anything")
+	print(green("RepoMan sez:"), "\"If everyone were like you, I'd be out of business!\"\n")
+sys.exit(0)
+
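For reference, the KEYWORDS handling in the repoman code above treats plain
entries as stable, "~"-prefixed entries as testing, and "-"-prefixed entries
as disabled. A minimal standalone sketch of that classification; the names
here are illustrative and not part of repoman itself:

    def classify_keywords(keywords_str):
        # Split a KEYWORDS value into stable, testing and disabled arches.
        stable, testing, disabled = set(), set(), set()
        for kw in keywords_str.split():
            if kw.startswith("~"):
                testing.add(kw[1:])
            elif kw.startswith("-"):
                disabled.add(kw[1:])  # "-*" shows up here as "*"
            else:
                stable.add(kw)
        return stable, testing, disabled

    # Mirrors the stable_keywords / ebuild_archs logic above.
    stable, testing, disabled = classify_keywords("amd64 ~x86 -mips")
    assert stable == set(["amd64"])
    assert testing == set(["x86"])
    assert disabled == set(["mips"])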

diff --git a/portage_with_autodep/bin/xpak-helper.py b/portage_with_autodep/bin/xpak-helper.py
new file mode 100755
index 0000000..4766d99
--- /dev/null
+++ b/portage_with_autodep/bin/xpak-helper.py
@@ -0,0 +1,68 @@
+#!/usr/bin/python
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import optparse
+import sys
+import portage
+from portage import os
+
+def command_recompose(args):
+
+	usage = "usage: recompose <binpkg_path> <metadata_dir>\n"
+
+	if len(args) != 2:
+		sys.stderr.write(usage)
+		sys.stderr.write("2 arguments are required, got %s\n" % len(args))
+		return 1
+
+	binpkg_path, metadata_dir = args
+
+	if not os.path.isfile(binpkg_path):
+		sys.stderr.write(usage)
+		sys.stderr.write("Argument 1 is not a regular file: '%s'\n" % \
+			binpkg_path)
+		return 1
+
+	if not os.path.isdir(metadata_dir):
+		sys.stderr.write(usage)
+		sys.stderr.write("Argument 2 is not a directory: '%s'\n" % \
+			metadata_dir)
+		return 1
+
+	t = portage.xpak.tbz2(binpkg_path)
+	t.recompose(metadata_dir)
+	return os.EX_OK
+
+def main(argv):
+
+	if argv and sys.hexversion < 0x3000000 and not isinstance(argv[0], unicode):
+		for i, x in enumerate(argv):
+			argv[i] = portage._unicode_decode(x, errors='strict')
+
+	valid_commands = ('recompose',)
+	description = "Perform metadata operations on a binary package."
+	usage = "usage: %s COMMAND [args]" % \
+		os.path.basename(argv[0])
+
+	parser = optparse.OptionParser(description=description, usage=usage)
+	options, args = parser.parse_args(argv[1:])
+
+	if not args:
+		parser.error("missing command argument")
+
+	command = args[0]
+
+	if command not in valid_commands:
+		parser.error("invalid command: '%s'" % command)
+
+	if command == 'recompose':
+		rval = command_recompose(args[1:])
+	else:
+		raise AssertionError("invalid command: '%s'" % command)
+
+	return rval
+
+if __name__ == "__main__":
+	rval = main(sys.argv[:])
+	sys.exit(rval)
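For illustration, the recompose command implemented above can also be driven
directly from Python through portage.xpak; the paths in this sketch are
hypothetical:

    import portage.xpak

    # Equivalent to: xpak-helper.py recompose <binpkg_path> <metadata_dir>
    t = portage.xpak.tbz2("/var/cache/binpkgs/app-misc/foo-1.0.tbz2")
    t.recompose("/tmp/foo-metadata")  # replaces the xpak metadata segment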

diff --git a/portage_with_autodep/pym/_emerge/AbstractDepPriority.py b/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
new file mode 100644
index 0000000..94a9379
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
@@ -0,0 +1,29 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+from _emerge.SlotObject import SlotObject
+
+class AbstractDepPriority(SlotObject):
+	__slots__ = ("buildtime", "runtime", "runtime_post")
+
+	def __lt__(self, other):
+		return self.__int__() < other
+
+	def __le__(self, other):
+		return self.__int__() <= other
+
+	def __eq__(self, other):
+		return self.__int__() == other
+
+	def __ne__(self, other):
+		return self.__int__() != other
+
+	def __gt__(self, other):
+		return self.__int__() > other
+
+	def __ge__(self, other):
+		return self.__int__() >= other
+
+	def copy(self):
+		return copy.copy(self)
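Since every rich comparison above funnels through __int__(), a subclass only
has to supply an integer mapping. A toy sketch; this subclass is illustrative
and not one of the real _emerge priority classes:

    class DemoPriority(AbstractDepPriority):
        def __int__(self):
            # buildtime relations outrank runtime ones in this toy mapping
            if self.buildtime:
                return 1
            if self.runtime:
                return 0
            return -1

    assert DemoPriority(buildtime=True) > DemoPriority(runtime=True)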

diff --git a/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
new file mode 100644
index 0000000..4147ecb
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
@@ -0,0 +1,266 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import stat
+import textwrap
+from _emerge.SpawnProcess import SpawnProcess
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
+import portage
+from portage.elog import messages as elog_messages
+from portage.localization import _
+from portage.package.ebuild._ipc.ExitCommand import ExitCommand
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage import os
+from portage.util._pty import _create_pty_or_pipe
+from portage.util import apply_secpass_permissions
+
+class AbstractEbuildProcess(SpawnProcess):
+
+	__slots__ = ('phase', 'settings',) + \
+		('_build_dir', '_ipc_daemon', '_exit_command',)
+	_phases_without_builddir = ('clean', 'cleanrm', 'depend', 'help',)
+
+	# Number of milliseconds to allow natural exit of the ebuild
+	# process after it has called the exit command via IPC. It
+	# doesn't hurt to be generous here since the scheduler
+	# continues to process events during this period, and it can
+	# return long before the timeout expires.
+	_exit_timeout = 10000 # 10 seconds
+
+	# The EbuildIpcDaemon support is well tested, but this variable
+	# is left so we can temporarily disable it if any issues arise.
+	_enable_ipc_daemon = True
+
+	def __init__(self, **kwargs):
+		SpawnProcess.__init__(self, **kwargs)
+		if self.phase is None:
+			phase = self.settings.get("EBUILD_PHASE")
+			if not phase:
+				phase = 'other'
+			self.phase = phase
+
+	def _start(self):
+
+		need_builddir = self.phase not in self._phases_without_builddir
+
+		# This can happen if the pre-clean phase triggers
+		# die_hooks for some reason, and PORTAGE_BUILDDIR
+		# doesn't exist yet.
+		if need_builddir and \
+			not os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
+			msg = _("The ebuild phase '%s' has been aborted "
+			"since PORTAGE_BUILDDIR does not exist: '%s'") % \
+			(self.phase, self.settings['PORTAGE_BUILDDIR'])
+			self._eerror(textwrap.wrap(msg, 72))
+			self._set_returncode((self.pid, 1 << 8))
+			self.wait()
+			return
+
+		if self.background:
+			# Automatically prevent color codes from showing up in logs,
+			# since we're not displaying to a terminal anyway.
+			self.settings['NOCOLOR'] = 'true'
+
+		if self._enable_ipc_daemon:
+			self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
+			if self.phase not in self._phases_without_builddir:
+				if 'PORTAGE_BUILDIR_LOCKED' not in self.settings:
+					self._build_dir = EbuildBuildDir(
+						scheduler=self.scheduler, settings=self.settings)
+					self._build_dir.lock()
+				self.settings['PORTAGE_IPC_DAEMON'] = "1"
+				self._start_ipc_daemon()
+			else:
+				self.settings.pop('PORTAGE_IPC_DAEMON', None)
+		else:
+			# Since the IPC daemon is disabled, use a simple tempfile based
+			# approach to detect unexpected exit like in bug #190128.
+			self.settings.pop('PORTAGE_IPC_DAEMON', None)
+			if self.phase not in self._phases_without_builddir:
+				exit_file = os.path.join(
+					self.settings['PORTAGE_BUILDDIR'],
+					'.exit_status')
+				self.settings['PORTAGE_EBUILD_EXIT_FILE'] = exit_file
+				try:
+					os.unlink(exit_file)
+				except OSError:
+					if os.path.exists(exit_file):
+						# make sure it doesn't exist
+						raise
+			else:
+				self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
+
+		SpawnProcess._start(self)
+
+	def _init_ipc_fifos(self):
+
+		input_fifo = os.path.join(
+			self.settings['PORTAGE_BUILDDIR'], '.ipc_in')
+		output_fifo = os.path.join(
+			self.settings['PORTAGE_BUILDDIR'], '.ipc_out')
+
+		for p in (input_fifo, output_fifo):
+
+			st = None
+			try:
+				st = os.lstat(p)
+			except OSError:
+				os.mkfifo(p)
+			else:
+				if not stat.S_ISFIFO(st.st_mode):
+					st = None
+					try:
+						os.unlink(p)
+					except OSError:
+						pass
+					os.mkfifo(p)
+
+			apply_secpass_permissions(p,
+				uid=os.getuid(),
+				gid=portage.data.portage_gid,
+				mode=0o770, stat_cached=st)
+
+		return (input_fifo, output_fifo)
+
+	def _start_ipc_daemon(self):
+		self._exit_command = ExitCommand()
+		self._exit_command.reply_hook = self._exit_command_callback
+		query_command = QueryCommand(self.settings, self.phase)
+		commands = {
+			'best_version' : query_command,
+			'exit'         : self._exit_command,
+			'has_version'  : query_command,
+		}
+		input_fifo, output_fifo = self._init_ipc_fifos()
+		self._ipc_daemon = EbuildIpcDaemon(commands=commands,
+			input_fifo=input_fifo,
+			output_fifo=output_fifo,
+			scheduler=self.scheduler)
+		self._ipc_daemon.start()
+
+	def _exit_command_callback(self):
+		if self._registered:
+			# Let the process exit naturally, if possible.
+			self.scheduler.schedule(self._reg_id, timeout=self._exit_timeout)
+			if self._registered:
+				# If it doesn't exit naturally in a reasonable amount
+				# of time, kill it (solves bug #278895). We try to avoid
+				# this when possible since it makes sandbox complain about
+				# being killed by a signal.
+				self.cancel()
+
+	def _orphan_process_warn(self):
+		phase = self.phase
+
+		msg = _("The ebuild phase '%s' with pid %s appears "
+		"to have left an orphan process running in the "
+		"background.") % (phase, self.pid)
+
+		self._eerror(textwrap.wrap(msg, 72))
+
+	def _pipe(self, fd_pipes):
+		stdout_pipe = None
+		if not self.background:
+			stdout_pipe = fd_pipes.get(1)
+		got_pty, master_fd, slave_fd = \
+			_create_pty_or_pipe(copy_term_size=stdout_pipe)
+		return (master_fd, slave_fd)
+
+	def _can_log(self, slave_fd):
+		# With sesandbox, logging works through a pty but not through a
+		# normal pipe. So, disable logging if ptys are broken.
+		# See Bug #162404.
+		# TODO: Add support for logging via named pipe (fifo) with
+		# sesandbox, since EbuildIpcDaemon uses a fifo and it's known
+		# to be compatible with sesandbox.
+		return not ('sesandbox' in self.settings.features \
+			and self.settings.selinux_enabled()) or os.isatty(slave_fd)
+
+	def _killed_by_signal(self, signum):
+		msg = _("The ebuild phase '%s' has been "
+		"killed by signal %s.") % (self.phase, signum)
+		self._eerror(textwrap.wrap(msg, 72))
+
+	def _unexpected_exit(self):
+
+		phase = self.phase
+
+		msg = _("The ebuild phase '%s' has exited "
+		"unexpectedly. This type of behavior "
+		"is known to be triggered "
+		"by things such as failed variable "
+		"assignments (bug #190128) or bad substitution "
+		"errors (bug #200313). Normally, before exiting, bash should "
+		"have displayed an error message above. If bash did not "
+		"produce an error message above, it's possible "
+		"that the ebuild has called `exit` when it "
+		"should have called `die` instead. This behavior may also "
+		"be triggered by a corrupt bash binary or a hardware "
+		"problem such as memory or cpu malfunction. If the problem is not "
+		"reproducible or it appears to occur randomly, then it is likely "
+		"to be triggered by a hardware problem. "
+		"If you suspect a hardware problem then you should "
+		"try some basic hardware diagnostics such as memtest. "
+		"Please do not report this as a bug unless it is consistently "
+		"reproducible and you are sure that your bash binary and hardware "
+		"are functioning properly.") % phase
+
+		self._eerror(textwrap.wrap(msg, 72))
+
+	def _eerror(self, lines):
+		self._elog('eerror', lines)
+
+	def _elog(self, elog_funcname, lines):
+		out = io.StringIO()
+		phase = self.phase
+		elog_func = getattr(elog_messages, elog_funcname)
+		global_havecolor = portage.output.havecolor
+		try:
+			portage.output.havecolor = \
+				self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+			for line in lines:
+				elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+		finally:
+			portage.output.havecolor = global_havecolor
+		msg = out.getvalue()
+		if msg:
+			log_path = None
+			if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+				log_path = self.settings.get("PORTAGE_LOG_FILE")
+			self.scheduler.output(msg, log_path=log_path)
+
+	def _log_poll_exception(self, event):
+		self._elog("eerror",
+			["%s received strange poll event: %s\n" % \
+			(self.__class__.__name__, event,)])
+
+	def _set_returncode(self, wait_retval):
+		SpawnProcess._set_returncode(self, wait_retval)
+
+		if self._ipc_daemon is not None:
+			self._ipc_daemon.cancel()
+			if self._exit_command.exitcode is not None:
+				self.returncode = self._exit_command.exitcode
+			else:
+				if self.returncode < 0:
+					if not self.cancelled:
+						self._killed_by_signal(-self.returncode)
+				else:
+					self.returncode = 1
+					if not self.cancelled:
+						self._unexpected_exit()
+			if self._build_dir is not None:
+				self._build_dir.unlock()
+				self._build_dir = None
+		elif not self.cancelled:
+			exit_file = self.settings.get('PORTAGE_EBUILD_EXIT_FILE')
+			if exit_file and not os.path.exists(exit_file):
+				if self.returncode < 0:
+					if not self.cancelled:
+						self._killed_by_signal(-self.returncode)
+				else:
+					self.returncode = 1
+					if not self.cancelled:
+						self._unexpected_exit()
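When _enable_ipc_daemon is False, the code above falls back to an exit-status
file to catch abnormal termination (bug #190128): the file is unlinked before
the phase starts, the spawned side recreates it on a clean exit, and
_set_returncode() treats its absence as an unexpected exit. A rough standalone
sketch of that handshake, with illustrative names:

    import os, tempfile

    builddir = tempfile.mkdtemp()
    exit_file = os.path.join(builddir, ".exit_status")

    # Before spawning: make sure no stale status file survives.
    try:
        os.unlink(exit_file)
    except OSError:
        pass

    def on_clean_exit():
        # The spawned side writes the file just before a normal exit.
        open(exit_file, "w").close()

    on_clean_exit()
    # After the child terminates: a missing file means it died without
    # reaching its normal exit path.
    assert os.path.exists(exit_file)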

diff --git a/portage_with_autodep/pym/_emerge/AbstractPollTask.py b/portage_with_autodep/pym/_emerge/AbstractPollTask.py
new file mode 100644
index 0000000..f7f3a95
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractPollTask.py
@@ -0,0 +1,62 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import array
+import logging
+
+from portage.util import writemsg_level
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.PollConstants import PollConstants
+class AbstractPollTask(AsynchronousTask):
+
+	__slots__ = ("scheduler",) + \
+		("_registered",)
+
+	_bufsize = 4096
+	_exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
+	_registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
+		_exceptional_events
+
+	def isAlive(self):
+		return bool(self._registered)
+
+	def _read_buf(self, f, event):
+		"""
+		| POLLIN | RETURN
+		| BIT    | VALUE
+		| ---------------------------------------------------
+		| 1      | Read self._bufsize into an instance of
+		|        | array.array('B') and return it, ignoring
+		|        | EOFError and IOError. An empty array
+		|        | indicates EOF.
+		| ---------------------------------------------------
+		| 0      | None
+		"""
+		buf = None
+		if event & PollConstants.POLLIN:
+			buf = array.array('B')
+			try:
+				buf.fromfile(f, self._bufsize)
+			except (EOFError, IOError):
+				pass
+		return buf
+
+	def _unregister(self):
+		raise NotImplementedError(self)
+
+	def _log_poll_exception(self, event):
+		writemsg_level(
+			"!!! %s received strange poll event: %s\n" % \
+			(self.__class__.__name__, event,),
+			level=logging.ERROR, noiselevel=-1)
+
+	def _unregister_if_appropriate(self, event):
+		if self._registered:
+			if event & self._exceptional_events:
+				self._log_poll_exception(event)
+				self._unregister()
+				self.cancel()
+			elif event & PollConstants.POLLHUP:
+				self._unregister()
+				self.wait()
+
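The _read_buf() contract documented above (a filled array on POLLIN, an empty
array at EOF, None otherwise) can be exercised against an ordinary pipe. A
rough sketch, assuming a POSIX platform:

    import array, os

    r, w = os.pipe()
    os.write(w, b"hello")
    os.close(w)

    f = os.fdopen(r, "rb", 0)
    buf = array.array("B")
    try:
        buf.fromfile(f, 4096)  # mirrors the _bufsize read above
    except EOFError:
        pass  # short read: the available bytes are already in buf
    f.close()

    assert buf.tobytes() == b"hello"  # use tostring() on Python 2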

diff --git a/portage_with_autodep/pym/_emerge/AsynchronousLock.py b/portage_with_autodep/pym/_emerge/AsynchronousLock.py
new file mode 100644
index 0000000..637ba73
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AsynchronousLock.py
@@ -0,0 +1,288 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import dummy_threading
+import fcntl
+import logging
+import sys
+
+try:
+	import threading
+except ImportError:
+	import dummy_threading as threading
+
+import portage
+from portage import os
+from portage.exception import TryAgain
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.util import writemsg_level
+from _emerge.AbstractPollTask import AbstractPollTask
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.PollConstants import PollConstants
+from _emerge.SpawnProcess import SpawnProcess
+
+class AsynchronousLock(AsynchronousTask):
+	"""
+	This uses the portage.locks module to acquire a lock asynchronously,
+	using either a thread (if available) or a subprocess.
+
+	The default behavior is to use a process instead of a thread, since
+	there is currently no way to interrupt a thread that is waiting for
+	a lock (notably, SIGINT doesn't work because python delivers all
+	signals to the main thread).
+	"""
+
+	__slots__ = ('path', 'scheduler',) + \
+		('_imp', '_force_async', '_force_dummy', '_force_process', \
+		'_force_thread', '_waiting')
+
+	_use_process_by_default = True
+
+	def _start(self):
+
+		if not self._force_async:
+			try:
+				self._imp = lockfile(self.path,
+					wantnewlockfile=True, flags=os.O_NONBLOCK)
+			except TryAgain:
+				pass
+			else:
+				self.returncode = os.EX_OK
+				self.wait()
+				return
+
+		if self._force_process or \
+			(not self._force_thread and \
+			(self._use_process_by_default or threading is dummy_threading)):
+			self._imp = _LockProcess(path=self.path, scheduler=self.scheduler)
+		else:
+			self._imp = _LockThread(path=self.path,
+				scheduler=self.scheduler,
+				_force_dummy=self._force_dummy)
+
+		self._imp.addExitListener(self._imp_exit)
+		self._imp.start()
+
+	def _imp_exit(self, imp):
+		# call exit listeners
+		if not self._waiting:
+			self.wait()
+
+	def _cancel(self):
+		if isinstance(self._imp, AsynchronousTask):
+			self._imp.cancel()
+
+	def _poll(self):
+		if isinstance(self._imp, AsynchronousTask):
+			self._imp.poll()
+		return self.returncode
+
+	def _wait(self):
+		if self.returncode is not None:
+			return self.returncode
+		self._waiting = True
+		self.returncode = self._imp.wait()
+		self._waiting = False
+		return self.returncode
+
+	def unlock(self):
+		if self._imp is None:
+			raise AssertionError('not locked')
+		if isinstance(self._imp, (_LockProcess, _LockThread)):
+			self._imp.unlock()
+		else:
+			unlockfile(self._imp)
+		self._imp = None
+
+class _LockThread(AbstractPollTask):
+	"""
+	This uses the portage.locks module to acquire a lock asynchronously,
+	using a background thread. After the lock is acquired, the thread
+	writes to a pipe in order to notify a poll loop running in the main
+	thread.
+
+	If the threading module is unavailable then the dummy_threading
+	module will be used, and the lock will be acquired synchronously
+	(before the start() method returns).
+	"""
+
+	__slots__ = ('path',) + \
+		('_files', '_force_dummy', '_lock_obj',
+		'_thread', '_reg_id',)
+
+	def _start(self):
+		pr, pw = os.pipe()
+		self._files = {}
+		self._files['pipe_read'] = os.fdopen(pr, 'rb', 0)
+		self._files['pipe_write'] = os.fdopen(pw, 'wb', 0)
+		for k, f in self._files.items():
+			fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
+				fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+		self._reg_id = self.scheduler.register(self._files['pipe_read'].fileno(),
+			PollConstants.POLLIN, self._output_handler)
+		self._registered = True
+		threading_mod = threading
+		if self._force_dummy:
+			threading_mod = dummy_threading
+		self._thread = threading_mod.Thread(target=self._run_lock)
+		self._thread.start()
+
+	def _run_lock(self):
+		self._lock_obj = lockfile(self.path, wantnewlockfile=True)
+		self._files['pipe_write'].write(b'\0')
+
+	def _output_handler(self, f, event):
+		buf = self._read_buf(self._files['pipe_read'], event)
+		if buf:
+			self._unregister()
+			self.returncode = os.EX_OK
+			self.wait()
+
+	def _cancel(self):
+		# There's currently no way to force thread termination.
+		pass
+
+	def _wait(self):
+		if self.returncode is not None:
+			return self.returncode
+		if self._registered:
+			self.scheduler.schedule(self._reg_id)
+		return self.returncode
+
+	def unlock(self):
+		if self._lock_obj is None:
+			raise AssertionError('not locked')
+		if self.returncode is None:
+			raise AssertionError('lock not acquired yet')
+		unlockfile(self._lock_obj)
+		self._lock_obj = None
+
+	def _unregister(self):
+		self._registered = False
+
+		if self._thread is not None:
+			self._thread.join()
+			self._thread = None
+
+		if self._reg_id is not None:
+			self.scheduler.unregister(self._reg_id)
+			self._reg_id = None
+
+		if self._files is not None:
+			for f in self._files.values():
+				f.close()
+			self._files = None
+
+class _LockProcess(AbstractPollTask):
+	"""
+	This uses the portage.locks module to acquire a lock asynchronously,
+	using a subprocess. After the lock is acquired, the process
+	writes to a pipe in order to notify a poll loop running in the main
+	process. The unlock() method notifies the subprocess to release the
+	lock and exit.
+	"""
+
+	__slots__ = ('path',) + \
+		('_acquired', '_kill_test', '_proc', '_files', '_reg_id', '_unlocked')
+
+	def _start(self):
+		in_pr, in_pw = os.pipe()
+		out_pr, out_pw = os.pipe()
+		self._files = {}
+		self._files['pipe_in'] = os.fdopen(in_pr, 'rb', 0)
+		self._files['pipe_out'] = os.fdopen(out_pw, 'wb', 0)
+		fcntl.fcntl(in_pr, fcntl.F_SETFL,
+			fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)
+		self._reg_id = self.scheduler.register(in_pr,
+			PollConstants.POLLIN, self._output_handler)
+		self._registered = True
+		self._proc = SpawnProcess(
+			args=[portage._python_interpreter,
+				os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
+				env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
+				fd_pipes={0:out_pr, 1:in_pw, 2:sys.stderr.fileno()},
+				scheduler=self.scheduler)
+		self._proc.addExitListener(self._proc_exit)
+		self._proc.start()
+		os.close(out_pr)
+		os.close(in_pw)
+
+	def _proc_exit(self, proc):
+		if proc.returncode != os.EX_OK:
+			# Typically, this will happen due to the
+			# process being killed by a signal.
+			if not self._acquired:
+				# If the lock hasn't been acquired yet, the
+				# caller can check the returncode and handle
+				# this failure appropriately.
+				if not (self.cancelled or self._kill_test):
+					writemsg_level("_LockProcess: %s\n" % \
+						_("failed to acquire lock on '%s'") % (self.path,),
+						level=logging.ERROR, noiselevel=-1)
+				self._unregister()
+				self.returncode = proc.returncode
+				self.wait()
+				return
+
+			if not self.cancelled and \
+				not self._unlocked:
+				# We don't want lost locks going unnoticed, so it's
+				# only safe to ignore if either the cancel() or
+				# unlock() methods have been previously called.
+				raise AssertionError("lock process failed with returncode %s" \
+					% (proc.returncode,))
+
+	def _cancel(self):
+		if self._proc is not None:
+			self._proc.cancel()
+
+	def _poll(self):
+		if self._proc is not None:
+			self._proc.poll()
+		return self.returncode
+
+	def _wait(self):
+		if self.returncode is not None:
+			return self.returncode
+		if self._registered:
+			self.scheduler.schedule(self._reg_id)
+		return self.returncode
+
+	def _output_handler(self, f, event):
+		buf = self._read_buf(self._files['pipe_in'], event)
+		if buf:
+			self._acquired = True
+			self._unregister()
+			self.returncode = os.EX_OK
+			self.wait()
+
+	def _unregister(self):
+		self._registered = False
+
+		if self._reg_id is not None:
+			self.scheduler.unregister(self._reg_id)
+			self._reg_id = None
+
+		if self._files is not None:
+			try:
+				pipe_in = self._files.pop('pipe_in')
+			except KeyError:
+				pass
+			else:
+				pipe_in.close()
+
+	def unlock(self):
+		if self._proc is None:
+			raise AssertionError('not locked')
+		if self.returncode is None:
+			raise AssertionError('lock not acquired yet')
+		if self.returncode != os.EX_OK:
+			raise AssertionError("lock process failed with returncode %s" \
+				% (self.returncode,))
+		self._unlocked = True
+		self._files['pipe_out'].write(b'\0')
+		self._files['pipe_out'].close()
+		self._files = None
+		self._proc.wait()
+		self._proc = None

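Taken together, AsynchronousLock delegates to _LockThread or _LockProcess, and both rely on the same wakeup mechanism: acquire the lock off the main thread, then write a single byte to a pipe so the poll loop in the main thread can resume. Below is a minimal standalone sketch of that pattern (illustrative only; select() stands in for the portage scheduler, and all names are hypothetical):

import os
import select
import threading

def acquire_lock_async(lock):
	pr, pw = os.pipe()

	def run():
		lock.acquire()       # blocking acquisition, off the main thread
		os.write(pw, b'\0')  # wake the poll loop in the main thread

	threading.Thread(target=run).start()
	return pr

lock = threading.Lock()
pr = acquire_lock_async(lock)
select.select([pr], [], [])  # a scheduler would register pr instead
os.read(pr, 1)
print("lock acquired")
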
diff --git a/portage_with_autodep/pym/_emerge/AsynchronousTask.py b/portage_with_autodep/pym/_emerge/AsynchronousTask.py
new file mode 100644
index 0000000..36522ca
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AsynchronousTask.py
@@ -0,0 +1,129 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.SlotObject import SlotObject
+class AsynchronousTask(SlotObject):
+	"""
+	Subclasses override _wait() and _poll() so that calls
+	to public methods can be wrapped for implementing
+	hooks such as exit listener notification.
+
+	Subclasses should call self.wait() to notify exit listeners after
+	the task is complete and self.returncode has been set.
+	"""
+
+	__slots__ = ("background", "cancelled", "returncode") + \
+		("_exit_listeners", "_exit_listener_stack", "_start_listeners")
+
+	def start(self):
+		"""
+		Start an asynchronous task and then return as soon as possible.
+		"""
+		self._start_hook()
+		self._start()
+
+	def _start(self):
+		self.returncode = os.EX_OK
+		self.wait()
+
+	def isAlive(self):
+		return self.returncode is None
+
+	def poll(self):
+		if self.returncode is not None:
+			return self.returncode
+		self._poll()
+		self._wait_hook()
+		return self.returncode
+
+	def _poll(self):
+		return self.returncode
+
+	def wait(self):
+		if self.returncode is None:
+			self._wait()
+		self._wait_hook()
+		return self.returncode
+
+	def _wait(self):
+		return self.returncode
+
+	def cancel(self):
+		if not self.cancelled:
+			self.cancelled = True
+			self._cancel()
+			self.wait()
+
+	def _cancel(self):
+		"""
+		Subclasses should implement this, as a template method
+		to be called by AsynchronousTask.cancel().
+		"""
+		pass
+
+	def addStartListener(self, f):
+		"""
+		The function will be called with one argument, a reference to self.
+		"""
+		if self._start_listeners is None:
+			self._start_listeners = []
+		self._start_listeners.append(f)
+
+	def removeStartListener(self, f):
+		if self._start_listeners is None:
+			return
+		self._start_listeners.remove(f)
+
+	def _start_hook(self):
+		if self._start_listeners is not None:
+			start_listeners = self._start_listeners
+			self._start_listeners = None
+
+			for f in start_listeners:
+				f(self)
+
+	def addExitListener(self, f):
+		"""
+		The function will be called with one argument, a reference to self.
+		"""
+		if self._exit_listeners is None:
+			self._exit_listeners = []
+		self._exit_listeners.append(f)
+
+	def removeExitListener(self, f):
+		if self._exit_listeners is None:
+			if self._exit_listener_stack is not None:
+				self._exit_listener_stack.remove(f)
+			return
+		self._exit_listeners.remove(f)
+
+	def _wait_hook(self):
+		"""
+		Call this method after the task completes, just before returning
+		the returncode from wait() or poll(). This hook is
+		used to trigger exit listeners when the returncode first
+		becomes available.
+		"""
+		if self.returncode is not None and \
+			self._exit_listeners is not None:
+
+			# This prevents recursion, in case one of the
+			# exit handlers triggers this method again by
+			# calling wait(). Use a stack that gives
+			# removeExitListener() an opportunity to consume
+			# listeners from the stack, before they can get
+			# called below. This is necessary because a call
+			# to one exit listener may result in a call to
+			# removeExitListener() for another listener on
+			# the stack. That listener needs to be removed
+			# from the stack since it would be inconsistent
+			# to call it after it has been passed into
+			# removeExitListener().
+			self._exit_listener_stack = self._exit_listeners
+			self._exit_listeners = None
+
+			self._exit_listener_stack.reverse()
+			while self._exit_listener_stack:
+				self._exit_listener_stack.pop()(self)
+

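A minimal usage sketch for AsynchronousTask, assuming portage_with_autodep/pym is on PYTHONPATH; the default _start() completes immediately, so the exit listener fires during start():

from _emerge.AsynchronousTask import AsynchronousTask

task = AsynchronousTask()

def on_exit(t):
	# fired by _wait_hook() once t.returncode is available
	print("task finished, returncode=%s" % t.returncode)

task.addExitListener(on_exit)
task.start()
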
diff --git a/portage_with_autodep/pym/_emerge/AtomArg.py b/portage_with_autodep/pym/_emerge/AtomArg.py
new file mode 100644
index 0000000..a929b43
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AtomArg.py
@@ -0,0 +1,11 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage._sets.base import InternalPackageSet
+from _emerge.DependencyArg import DependencyArg
+
+class AtomArg(DependencyArg):
+	def __init__(self, atom=None, **kwargs):
+		DependencyArg.__init__(self, **kwargs)
+		self.atom = atom
+		self.pset = InternalPackageSet(initial_atoms=(self.atom,), allow_repo=True)

diff --git a/portage_with_autodep/pym/_emerge/Binpkg.py b/portage_with_autodep/pym/_emerge/Binpkg.py
new file mode 100644
index 0000000..bc6511e
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Binpkg.py
@@ -0,0 +1,333 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.BinpkgExtractorAsync import BinpkgExtractorAsync
+from _emerge.CompositeTask import CompositeTask
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from _emerge.EbuildMerge import EbuildMerge
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from portage.eapi import eapi_exports_replace_vars
+from portage.util import writemsg
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import io
+import logging
+from portage.output import colorize
+
+class Binpkg(CompositeTask):
+
+	__slots__ = ("find_blockers",
+		"ldpath_mtimes", "logger", "opts",
+		"pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
+		("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
+		"_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
+
+	def _writemsg_level(self, msg, level=0, noiselevel=0):
+		self.scheduler.output(msg, level=level, noiselevel=noiselevel,
+			log_path=self.settings.get("PORTAGE_LOG_FILE"))
+
+	def _start(self):
+
+		pkg = self.pkg
+		settings = self.settings
+		settings.setcpv(pkg)
+		self._tree = "bintree"
+		self._bintree = self.pkg.root_config.trees[self._tree]
+		self._verify = not self.opts.pretend
+
+		# Use realpath like doebuild_environment() does, since we assert
+		# that this path is literally identical to PORTAGE_BUILDDIR.
+		dir_path = os.path.join(os.path.realpath(settings["PORTAGE_TMPDIR"]),
+			"portage", pkg.category, pkg.pf)
+		self._image_dir = os.path.join(dir_path, "image")
+		self._infloc = os.path.join(dir_path, "build-info")
+		self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
+		settings["EBUILD"] = self._ebuild_path
+		portage.doebuild_environment(self._ebuild_path, 'setup',
+			settings=self.settings, db=self._bintree.dbapi)
+		if dir_path != self.settings['PORTAGE_BUILDDIR']:
+			raise AssertionError("'%s' != '%s'" % \
+				(dir_path, self.settings['PORTAGE_BUILDDIR']))
+		self._build_dir = EbuildBuildDir(
+			scheduler=self.scheduler, settings=settings)
+		settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+		settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+		if eapi_exports_replace_vars(settings["EAPI"]):
+			vardb = self.pkg.root_config.trees["vartree"].dbapi
+			settings["REPLACING_VERSIONS"] = " ".join(
+				set(portage.versions.cpv_getversion(x) \
+					for x in vardb.match(self.pkg.slot_atom) + \
+					vardb.match('='+self.pkg.cpv)))
+
+		# The prefetcher has already completed or it
+		# could be running now. If it's running now,
+		# wait for it to complete since it holds
+		# a lock on the file being fetched. The
+		# portage.locks functions are only designed
+		# to work between separate processes. Since
+		# the lock is held by the current process,
+		# use the scheduler and fetcher methods to
+		# synchronize with the fetcher.
+		prefetcher = self.prefetcher
+		if prefetcher is None:
+			pass
+		elif prefetcher.isAlive() and \
+			prefetcher.poll() is None:
+
+			waiting_msg = ("Fetching '%s' " + \
+				"in the background. " + \
+				"To view fetch progress, run `tail -f " + \
+				"/var/log/emerge-fetch.log` in another " + \
+				"terminal.") % prefetcher.pkg_path
+			msg_prefix = colorize("GOOD", " * ")
+			from textwrap import wrap
+			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
+				for line in wrap(waiting_msg, 65))
+			if not self.background:
+				writemsg(waiting_msg, noiselevel=-1)
+
+			self._current_task = prefetcher
+			prefetcher.addExitListener(self._prefetch_exit)
+			return
+
+		self._prefetch_exit(prefetcher)
+
+	def _prefetch_exit(self, prefetcher):
+
+		pkg = self.pkg
+		pkg_count = self.pkg_count
+		if not (self.opts.pretend or self.opts.fetchonly):
+			self._build_dir.lock()
+			# Initialize PORTAGE_LOG_FILE (clean_log won't work without it).
+			portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
+			# If necessary, discard old log so that we don't
+			# append to it.
+			self._build_dir.clean_log()
+		fetcher = BinpkgFetcher(background=self.background,
+			logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
+			pretend=self.opts.pretend, scheduler=self.scheduler)
+		pkg_path = fetcher.pkg_path
+		self._pkg_path = pkg_path
+		# This gives bashrc users an opportunity to do various things
+		# such as removing binary packages after they're installed.
+		self.settings["PORTAGE_BINPKG_FILE"] = pkg_path
+
+		if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
+
+			msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
+				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
+			short_msg = "emerge: (%s of %s) %s Fetch" % \
+				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
+			self.logger.log(msg, short_msg=short_msg)
+
+			# Allow the Scheduler's fetch queue to control the
+			# number of concurrent fetchers.
+			fetcher.addExitListener(self._fetcher_exit)
+			self._task_queued(fetcher)
+			self.scheduler.fetch.schedule(fetcher)
+			return
+
+		self._fetcher_exit(fetcher)
+
+	def _fetcher_exit(self, fetcher):
+
+		# The fetcher only has a returncode when
+		# --getbinpkg is enabled.
+		if fetcher.returncode is not None:
+			self._fetched_pkg = True
+			if self._default_exit(fetcher) != os.EX_OK:
+				self._unlock_builddir()
+				self.wait()
+				return
+
+		if self.opts.pretend:
+			self._current_task = None
+			self.returncode = os.EX_OK
+			self.wait()
+			return
+
+		verifier = None
+		if self._verify:
+			logfile = self.settings.get("PORTAGE_LOG_FILE")
+			verifier = BinpkgVerifier(background=self.background,
+				logfile=logfile, pkg=self.pkg, scheduler=self.scheduler)
+			self._start_task(verifier, self._verifier_exit)
+			return
+
+		self._verifier_exit(verifier)
+
+	def _verifier_exit(self, verifier):
+		if verifier is not None and \
+			self._default_exit(verifier) != os.EX_OK:
+			self._unlock_builddir()
+			self.wait()
+			return
+
+		logger = self.logger
+		pkg = self.pkg
+		pkg_count = self.pkg_count
+		pkg_path = self._pkg_path
+
+		if self._fetched_pkg:
+			self._bintree.inject(pkg.cpv, filename=pkg_path)
+
+		logfile = self.settings.get("PORTAGE_LOG_FILE")
+		if logfile is not None and os.path.isfile(logfile):
+			# Remove fetch log after successful fetch.
+			try:
+				os.unlink(logfile)
+			except OSError:
+				pass
+
+		if self.opts.fetchonly:
+			self._current_task = None
+			self.returncode = os.EX_OK
+			self.wait()
+			return
+
+		msg = " === (%s of %s) Merging Binary (%s::%s)" % \
+			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
+		short_msg = "emerge: (%s of %s) %s Merge Binary" % \
+			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
+		logger.log(msg, short_msg=short_msg)
+
+		phase = "clean"
+		settings = self.settings
+		ebuild_phase = EbuildPhase(background=self.background,
+			phase=phase, scheduler=self.scheduler,
+			settings=settings)
+
+		self._start_task(ebuild_phase, self._clean_exit)
+
+	def _clean_exit(self, clean_phase):
+		if self._default_exit(clean_phase) != os.EX_OK:
+			self._unlock_builddir()
+			self.wait()
+			return
+
+		dir_path = self.settings['PORTAGE_BUILDDIR']
+
+		infloc = self._infloc
+		pkg = self.pkg
+		pkg_path = self._pkg_path
+
+		dir_mode = 0o755
+		for mydir in (dir_path, self._image_dir, infloc):
+			portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
+				gid=portage.data.portage_gid, mode=dir_mode)
+
+		# This initializes PORTAGE_LOG_FILE.
+		portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
+		self._writemsg_level(">>> Extracting info\n")
+
+		pkg_xpak = portage.xpak.tbz2(self._pkg_path)
+		check_missing_metadata = ("CATEGORY", "PF")
+		missing_metadata = set()
+		for k in check_missing_metadata:
+			v = pkg_xpak.getfile(_unicode_encode(k,
+				encoding=_encodings['repo.content']))
+			if not v:
+				missing_metadata.add(k)
+
+		pkg_xpak.unpackinfo(infloc)
+		for k in missing_metadata:
+			if k == "CATEGORY":
+				v = pkg.category
+			elif k == "PF":
+				v = pkg.pf
+			else:
+				continue
+
+			f = io.open(_unicode_encode(os.path.join(infloc, k),
+				encoding=_encodings['fs'], errors='strict'),
+				mode='w', encoding=_encodings['content'],
+				errors='backslashreplace')
+			try:
+				f.write(_unicode_decode(v + "\n"))
+			finally:
+				f.close()
+
+		# Store the md5sum in the vdb.
+		f = io.open(_unicode_encode(os.path.join(infloc, 'BINPKGMD5'),
+			encoding=_encodings['fs'], errors='strict'),
+			mode='w', encoding=_encodings['content'], errors='strict')
+		try:
+			f.write(_unicode_decode(
+				str(portage.checksum.perform_md5(pkg_path)) + "\n"))
+		finally:
+			f.close()
+
+		env_extractor = BinpkgEnvExtractor(background=self.background,
+			scheduler=self.scheduler, settings=self.settings)
+
+		self._start_task(env_extractor, self._env_extractor_exit)
+
+	def _env_extractor_exit(self, env_extractor):
+		if self._default_exit(env_extractor) != os.EX_OK:
+			self._unlock_builddir()
+			self.wait()
+			return
+
+		setup_phase = EbuildPhase(background=self.background,
+			phase="setup", scheduler=self.scheduler,
+			settings=self.settings)
+
+		setup_phase.addExitListener(self._setup_exit)
+		self._task_queued(setup_phase)
+		self.scheduler.scheduleSetup(setup_phase)
+
+	def _setup_exit(self, setup_phase):
+		if self._default_exit(setup_phase) != os.EX_OK:
+			self._unlock_builddir()
+			self.wait()
+			return
+
+		extractor = BinpkgExtractorAsync(background=self.background,
+			env=self.settings.environ(),
+			image_dir=self._image_dir,
+			pkg=self.pkg, pkg_path=self._pkg_path,
+			logfile=self.settings.get("PORTAGE_LOG_FILE"),
+			scheduler=self.scheduler)
+		self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
+		self._start_task(extractor, self._extractor_exit)
+
+	def _extractor_exit(self, extractor):
+		if self._final_exit(extractor) != os.EX_OK:
+			self._unlock_builddir()
+			self._writemsg_level("!!! Error Extracting '%s'\n" % \
+				self._pkg_path, noiselevel=-1, level=logging.ERROR)
+		self.wait()
+
+	def _unlock_builddir(self):
+		if self.opts.pretend or self.opts.fetchonly:
+			return
+		portage.elog.elog_process(self.pkg.cpv, self.settings)
+		self._build_dir.unlock()
+
+	def create_install_task(self):
+		task = EbuildMerge(find_blockers=self.find_blockers,
+			ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
+			pkg=self.pkg, pkg_count=self.pkg_count,
+			pkg_path=self._pkg_path, scheduler=self.scheduler,
+			settings=self.settings, tree=self._tree,
+			world_atom=self.world_atom)
+		task.addExitListener(self._install_exit)
+		return task
+
+	def _install_exit(self, task):
+		self.settings.pop("PORTAGE_BINPKG_FILE", None)
+		self._unlock_builddir()
+		if task.returncode == os.EX_OK and \
+			'binpkg-logs' not in self.settings.features and \
+			self.settings.get("PORTAGE_LOG_FILE"):
+			try:
+				os.unlink(self.settings["PORTAGE_LOG_FILE"])
+			except OSError:
+				pass

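One detail of Binpkg._start() that is easy to miss: REPLACING_VERSIONS is just the de-duplicated set of version strings extracted from the vardb matches. A sketch with hardcoded stand-ins for the vardb.match() results (the cpvs below are hypothetical):

from portage.versions import cpv_getversion

matches = ["dev-libs/foo-1.0", "dev-libs/foo-1.2-r1"]
print(" ".join(set(cpv_getversion(x) for x in matches)))
# prints the bare versions, e.g. "1.0 1.2-r1" (set order may vary)
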
diff --git a/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py
new file mode 100644
index 0000000..f68971b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py
@@ -0,0 +1,66 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+
+from _emerge.CompositeTask import CompositeTask
+from _emerge.SpawnProcess import SpawnProcess
+from portage import os, _shell_quote, _unicode_encode
+from portage.const import BASH_BINARY
+
+class BinpkgEnvExtractor(CompositeTask):
+	"""
+	Extract environment.bz2 for a binary or installed package.
+	"""
+	__slots__ = ('settings',)
+
+	def saved_env_exists(self):
+		return os.path.exists(self._get_saved_env_path())
+
+	def dest_env_exists(self):
+		return os.path.exists(self._get_dest_env_path())
+
+	def _get_saved_env_path(self):
+		return os.path.join(os.path.dirname(self.settings['EBUILD']),
+			"environment.bz2")
+
+	def _get_dest_env_path(self):
+		return os.path.join(self.settings["T"], "environment")
+
+	def _start(self):
+		saved_env_path = self._get_saved_env_path()
+		dest_env_path = self._get_dest_env_path()
+		shell_cmd = "${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -c -- %s > %s" % \
+			(_shell_quote(saved_env_path),
+			_shell_quote(dest_env_path))
+		extractor_proc = SpawnProcess(
+			args=[BASH_BINARY, "-c", shell_cmd],
+			background=self.background,
+			env=self.settings.environ(), 
+			scheduler=self.scheduler,
+			logfile=self.settings.get('PORTAGE_LOGFILE'))
+
+		self._start_task(extractor_proc, self._extractor_exit)
+
+	def _remove_dest_env(self):
+		try:
+			os.unlink(self._get_dest_env_path())
+		except OSError as e:
+			if e.errno != errno.ENOENT:
+				raise
+
+	def _extractor_exit(self, extractor_proc):
+
+		if self._default_exit(extractor_proc) != os.EX_OK:
+			self._remove_dest_env()
+			self.wait()
+			return
+
+		# This is a signal to ebuild.sh, so that it knows to filter
+		# out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
+		# would be preserved between normal phases.
+		open(_unicode_encode(self._get_dest_env_path() + '.raw'), 'w')
+
+		self._current_task = None
+		self.returncode = os.EX_OK
+		self.wait()

diff --git a/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py
new file mode 100644
index 0000000..d1630f2
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py
@@ -0,0 +1,31 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SpawnProcess import SpawnProcess
+import portage
+import os
+import signal
+
+class BinpkgExtractorAsync(SpawnProcess):
+
+	__slots__ = ("image_dir", "pkg", "pkg_path")
+
+	_shell_binary = portage.const.BASH_BINARY
+
+	def _start(self):
+		# Add -q to bzip2 opts, in order to avoid "trailing garbage after
+		# EOF ignored" warning messages due to xpak trailer.
+		# SIGPIPE handling (128 + SIGPIPE) should be compatible with
+		# assert_sigpipe_ok() that's used by the ebuild unpack() helper.
+		self.args = [self._shell_binary, "-c",
+			("${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -cq -- %s | tar -xp -C %s -f - ; " + \
+			"p=(${PIPESTATUS[@]}) ; " + \
+			"if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \
+			"echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \
+			"if [ ${p[1]} != 0 ] ; then " + \
+			"echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \
+			"exit 0 ;") % \
+			(portage._shell_quote(self.pkg_path),
+			portage._shell_quote(self.image_dir))]
+
+		SpawnProcess._start(self)

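The PIPESTATUS handling in BinpkgExtractorAsync deserves a closer look: bash records the exit status of every pipeline stage, and the generated script tolerates 128+SIGPIPE from the decompressor, since tar may close the pipe early. A safe-to-run sketch of the same technique, assuming a POSIX system with bash available (echo/tr stand in for bzip2/tar):

import signal
import subprocess

script = (
	"echo hello | tr a-z A-Z ; "
	"p=(${PIPESTATUS[@]}) ; "
	"if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then exit ${p[0]} ; fi ; "
	"if [ ${p[1]} != 0 ] ; then exit ${p[1]} ; fi ; "
	"exit 0") % (128 + signal.SIGPIPE)

rc = subprocess.call(["bash", "-c", script])
print("pipeline returncode: %s" % rc)
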
diff --git a/portage_with_autodep/pym/_emerge/BinpkgFetcher.py b/portage_with_autodep/pym/_emerge/BinpkgFetcher.py
new file mode 100644
index 0000000..baea4d6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgFetcher.py
@@ -0,0 +1,181 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.SpawnProcess import SpawnProcess
+try:
+	from urllib.parse import urlparse as urllib_parse_urlparse
+except ImportError:
+	from urlparse import urlparse as urllib_parse_urlparse
+import stat
+import sys
+import portage
+from portage import os
+from portage.util._pty import _create_pty_or_pipe
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+class BinpkgFetcher(SpawnProcess):
+
+	__slots__ = ("pkg", "pretend",
+		"locked", "pkg_path", "_lock_obj")
+
+	def __init__(self, **kwargs):
+		SpawnProcess.__init__(self, **kwargs)
+		pkg = self.pkg
+		self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
+
+	def _start(self):
+
+		if self.cancelled:
+			return
+
+		pkg = self.pkg
+		pretend = self.pretend
+		bintree = pkg.root_config.trees["bintree"]
+		settings = bintree.settings
+		use_locks = "distlocks" in settings.features
+		pkg_path = self.pkg_path
+
+		if not pretend:
+			portage.util.ensure_dirs(os.path.dirname(pkg_path))
+			if use_locks:
+				self.lock()
+		exists = os.path.exists(pkg_path)
+		resume = exists and os.path.basename(pkg_path) in bintree.invalids
+		if not (pretend or resume):
+			# Remove existing file or broken symlink.
+			try:
+				os.unlink(pkg_path)
+			except OSError:
+				pass
+
+		# urljoin doesn't work correctly with
+		# unrecognized protocols like sftp
+		if bintree._remote_has_index:
+			rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
+			if not rel_uri:
+				rel_uri = pkg.cpv + ".tbz2"
+			remote_base_uri = bintree._remotepkgs[pkg.cpv]["BASE_URI"]
+			uri = remote_base_uri.rstrip("/") + "/" + rel_uri.lstrip("/")
+		else:
+			uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
+				"/" + pkg.pf + ".tbz2"
+
+		if pretend:
+			portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
+			self._set_returncode((self.pid, os.EX_OK << 8))
+			self.wait()
+			return
+
+		protocol = urllib_parse_urlparse(uri)[0]
+		fcmd_prefix = "FETCHCOMMAND"
+		if resume:
+			fcmd_prefix = "RESUMECOMMAND"
+		fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
+		if not fcmd:
+			fcmd = settings.get(fcmd_prefix)
+
+		fcmd_vars = {
+			"DISTDIR" : os.path.dirname(pkg_path),
+			"URI"     : uri,
+			"FILE"    : os.path.basename(pkg_path)
+		}
+
+		fetch_env = dict(settings.items())
+		fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
+			for x in portage.util.shlex_split(fcmd)]
+
+		if self.fd_pipes is None:
+			self.fd_pipes = {}
+		fd_pipes = self.fd_pipes
+
+		# Redirect all output to stdout since some fetchers like
+		# wget pollute stderr (if portage detects a problem then it
+		# can send its own message to stderr).
+		fd_pipes.setdefault(0, sys.stdin.fileno())
+		fd_pipes.setdefault(1, sys.stdout.fileno())
+		fd_pipes.setdefault(2, sys.stdout.fileno())
+
+		self.args = fetch_args
+		self.env = fetch_env
+		if settings.selinux_enabled():
+			self._selinux_type = settings["PORTAGE_FETCH_T"]
+		SpawnProcess._start(self)
+
+	def _pipe(self, fd_pipes):
+		"""When appropriate, use a pty so that fetcher progress bars,
+		like wget has, will work properly."""
+		if self.background or not sys.stdout.isatty():
+			# When the output only goes to a log file,
+			# there's no point in creating a pty.
+			return os.pipe()
+		stdout_pipe = None
+		if not self.background:
+			stdout_pipe = fd_pipes.get(1)
+		got_pty, master_fd, slave_fd = \
+			_create_pty_or_pipe(copy_term_size=stdout_pipe)
+		return (master_fd, slave_fd)
+
+	def _set_returncode(self, wait_retval):
+		SpawnProcess._set_returncode(self, wait_retval)
+		if not self.pretend and self.returncode == os.EX_OK:
+			# If possible, update the mtime to match the remote package if
+			# the fetcher didn't already do it automatically.
+			bintree = self.pkg.root_config.trees["bintree"]
+			if bintree._remote_has_index:
+				remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
+				if remote_mtime is not None:
+					try:
+						remote_mtime = long(remote_mtime)
+					except ValueError:
+						pass
+					else:
+						try:
+							local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
+						except OSError:
+							pass
+						else:
+							if remote_mtime != local_mtime:
+								try:
+									os.utime(self.pkg_path,
+										(remote_mtime, remote_mtime))
+								except OSError:
+									pass
+
+		if self.locked:
+			self.unlock()
+
+	def lock(self):
+		"""
+		This raises an AlreadyLocked exception if lock() is called
+		while a lock is already held. In order to avoid this, call
+		unlock() or check whether the "locked" attribute is True
+		or False before calling lock().
+		"""
+		if self._lock_obj is not None:
+			raise self.AlreadyLocked((self._lock_obj,))
+
+		async_lock = AsynchronousLock(path=self.pkg_path,
+			scheduler=self.scheduler)
+		async_lock.start()
+
+		if async_lock.wait() != os.EX_OK:
+			# TODO: Use CompositeTask for better handling, like in EbuildPhase.
+			raise AssertionError("AsynchronousLock failed with returncode %s" \
+				% (async_lock.returncode,))
+
+		self._lock_obj = async_lock
+		self.locked = True
+
+	class AlreadyLocked(portage.exception.PortageException):
+		pass
+
+	def unlock(self):
+		if self._lock_obj is None:
+			return
+		self._lock_obj.unlock()
+		self._lock_obj = None
+		self.locked = False
+

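For reference, the FETCHCOMMAND handling in BinpkgFetcher._start() splits the configured command with portage.util.shlex_split() and expands ${DISTDIR}, ${URI} and ${FILE} into each argument with portage.util.varexpand(). A rough standalone equivalent using only the stdlib (the wget command and URI below are hypothetical):

import shlex
from string import Template

fcmd = 'wget -c -O "${DISTDIR}/${FILE}" "${URI}"'
fcmd_vars = {
	"DISTDIR": "/usr/portage/packages/All",
	"URI": "http://binhost.example.org/foo-1.0.tbz2",
	"FILE": "foo-1.0.tbz2",
}
args = [Template(x).substitute(fcmd_vars) for x in shlex.split(fcmd)]
print(args)  # argv ready to hand to a SpawnProcess-like runner
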
diff --git a/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.py b/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.py
new file mode 100644
index 0000000..ffa4900
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.py
@@ -0,0 +1,43 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.CompositeTask import CompositeTask
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from portage import os
+
+class BinpkgPrefetcher(CompositeTask):
+
+	__slots__ = ("pkg",) + \
+		("pkg_path", "_bintree",)
+
+	def _start(self):
+		self._bintree = self.pkg.root_config.trees["bintree"]
+		fetcher = BinpkgFetcher(background=self.background,
+			logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
+			scheduler=self.scheduler)
+		self.pkg_path = fetcher.pkg_path
+		self._start_task(fetcher, self._fetcher_exit)
+
+	def _fetcher_exit(self, fetcher):
+
+		if self._default_exit(fetcher) != os.EX_OK:
+			self.wait()
+			return
+
+		verifier = BinpkgVerifier(background=self.background,
+			logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
+			scheduler=self.scheduler)
+		self._start_task(verifier, self._verifier_exit)
+
+	def _verifier_exit(self, verifier):
+		if self._default_exit(verifier) != os.EX_OK:
+			self.wait()
+			return
+
+		self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
+
+		self._current_task = None
+		self.returncode = os.EX_OK
+		self.wait()
+

diff --git a/portage_with_autodep/pym/_emerge/BinpkgVerifier.py b/portage_with_autodep/pym/_emerge/BinpkgVerifier.py
new file mode 100644
index 0000000..0052967
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgVerifier.py
@@ -0,0 +1,75 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from portage.util import writemsg
+import io
+import sys
+import portage
+from portage import os
+from portage.package.ebuild.fetch import _checksum_failure_temp_file
+
+class BinpkgVerifier(AsynchronousTask):
+	__slots__ = ("logfile", "pkg", "scheduler")
+
+	def _start(self):
+		"""
+		Note: Unlike a normal AsynchronousTask.start() method,
+		this one does all of its work synchronously. The returncode
+		attribute will be set before it returns.
+		"""
+
+		pkg = self.pkg
+		root_config = pkg.root_config
+		bintree = root_config.trees["bintree"]
+		rval = os.EX_OK
+		stdout_orig = sys.stdout
+		stderr_orig = sys.stderr
+		global_havecolor = portage.output.havecolor
+		out = io.StringIO()
+		file_exists = True
+		try:
+			sys.stdout = out
+			sys.stderr = out
+			if portage.output.havecolor:
+				portage.output.havecolor = not self.background
+			try:
+				bintree.digestCheck(pkg)
+			except portage.exception.FileNotFound:
+				writemsg("!!! Fetching Binary failed " + \
+					"for '%s'\n" % pkg.cpv, noiselevel=-1)
+				rval = 1
+				file_exists = False
+			except portage.exception.DigestException as e:
+				writemsg("\n!!! Digest verification failed:\n",
+					noiselevel=-1)
+				writemsg("!!! %s\n" % e.value[0],
+					noiselevel=-1)
+				writemsg("!!! Reason: %s\n" % e.value[1],
+					noiselevel=-1)
+				writemsg("!!! Got: %s\n" % e.value[2],
+					noiselevel=-1)
+				writemsg("!!! Expected: %s\n" % e.value[3],
+					noiselevel=-1)
+				rval = 1
+			if rval == os.EX_OK:
+				pass
+			elif file_exists:
+				pkg_path = bintree.getname(pkg.cpv)
+				head, tail = os.path.split(pkg_path)
+				temp_filename = _checksum_failure_temp_file(head, tail)
+				writemsg("File renamed to '%s'\n" % (temp_filename,),
+					noiselevel=-1)
+		finally:
+			sys.stdout = stdout_orig
+			sys.stderr = stderr_orig
+			portage.output.havecolor = global_havecolor
+
+		msg = out.getvalue()
+		if msg:
+			self.scheduler.output(msg, log_path=self.logfile,
+				background=self.background)
+
+		self.returncode = rval
+		self.wait()
+

diff --git a/portage_with_autodep/pym/_emerge/Blocker.py b/portage_with_autodep/pym/_emerge/Blocker.py
new file mode 100644
index 0000000..9304606
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Blocker.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.Task import Task
+
+class Blocker(Task):
+
+	__hash__ = Task.__hash__
+	__slots__ = ("root", "atom", "cp", "eapi", "priority", "satisfied")
+
+	def __init__(self, **kwargs):
+		Task.__init__(self, **kwargs)
+		self.cp = self.atom.cp
+		self._hash_key = ("blocks", self.root, self.atom, self.eapi)
+		self._hash_value = hash(self._hash_key)

diff --git a/portage_with_autodep/pym/_emerge/BlockerCache.py b/portage_with_autodep/pym/_emerge/BlockerCache.py
new file mode 100644
index 0000000..5c4f43e
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BlockerCache.py
@@ -0,0 +1,182 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.util import writemsg
+from portage.data import secpass
+import portage
+from portage import os
+
+try:
+	import cPickle as pickle
+except ImportError:
+	import pickle
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+	long = int
+
+class BlockerCache(portage.cache.mappings.MutableMapping):
+	"""This caches blockers of installed packages so that dep_check does not
+	have to be done for every single installed package on every invocation of
+	emerge.  The cache is invalidated whenever it is detected that something
+	has changed that might alter the results of dep_check() calls:
+		1) the set of installed packages (including COUNTER) has changed
+	"""
+
+	# Number of uncached packages to trigger cache update, since
+	# it's wasteful to update it for every vdb change.
+	_cache_threshold = 5
+
+	class BlockerData(object):
+
+		__slots__ = ("__weakref__", "atoms", "counter")
+
+		def __init__(self, counter, atoms):
+			self.counter = counter
+			self.atoms = atoms
+
+	def __init__(self, myroot, vardb):
+		""" myroot is ignored in favour of EROOT """
+		self._vardb = vardb
+		self._cache_filename = os.path.join(vardb.settings['EROOT'],
+			portage.CACHE_PATH, "vdb_blockers.pickle")
+		self._cache_version = "1"
+		self._cache_data = None
+		self._modified = set()
+		self._load()
+
+	def _load(self):
+		try:
+			f = open(self._cache_filename, mode='rb')
+			mypickle = pickle.Unpickler(f)
+			try:
+				mypickle.find_global = None
+			except AttributeError:
+				# TODO: If py3k, override Unpickler.find_class().
+				pass
+			self._cache_data = mypickle.load()
+			f.close()
+			del f
+		except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
+			if isinstance(e, pickle.UnpicklingError):
+				writemsg("!!! Error loading '%s': %s\n" % \
+					(self._cache_filename, str(e)), noiselevel=-1)
+			del e
+
+		cache_valid = self._cache_data and \
+			isinstance(self._cache_data, dict) and \
+			self._cache_data.get("version") == self._cache_version and \
+			isinstance(self._cache_data.get("blockers"), dict)
+		if cache_valid:
+			# Validate all the atoms and counters so that
+			# corruption is detected as soon as possible.
+			invalid_items = set()
+			for k, v in self._cache_data["blockers"].items():
+				if not isinstance(k, basestring):
+					invalid_items.add(k)
+					continue
+				try:
+					if portage.catpkgsplit(k) is None:
+						invalid_items.add(k)
+						continue
+				except portage.exception.InvalidData:
+					invalid_items.add(k)
+					continue
+				if not isinstance(v, tuple) or \
+					len(v) != 2:
+					invalid_items.add(k)
+					continue
+				counter, atoms = v
+				if not isinstance(counter, (int, long)):
+					invalid_items.add(k)
+					continue
+				if not isinstance(atoms, (list, tuple)):
+					invalid_items.add(k)
+					continue
+				invalid_atom = False
+				for atom in atoms:
+					if not isinstance(atom, basestring):
+						invalid_atom = True
+						break
+					if atom[:1] != "!" or \
+						not portage.isvalidatom(
+						atom, allow_blockers=True):
+						invalid_atom = True
+						break
+				if invalid_atom:
+					invalid_items.add(k)
+					continue
+
+			for k in invalid_items:
+				del self._cache_data["blockers"][k]
+			if not self._cache_data["blockers"]:
+				cache_valid = False
+
+		if not cache_valid:
+			self._cache_data = {"version":self._cache_version}
+			self._cache_data["blockers"] = {}
+		self._modified.clear()
+
+	def flush(self):
+		"""If the current user has permission and the internal blocker cache
+		has been updated, save it to disk and mark it unmodified.  This is called
+		by emerge after it has processed blockers for all installed packages.
+		Currently, the cache is only written if the user has superuser
+		privileges (since that's required to obtain a lock), but all users
+		have read access and benefit from faster blocker lookups (as long as
+		the entire cache is still valid).  The cache is stored as a pickled
+		dict object with the following format:
+
+		{
+			version : "1",
+			"blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
+		}
+		"""
+		if len(self._modified) >= self._cache_threshold and \
+			secpass >= 2:
+			try:
+				f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
+				pickle.dump(self._cache_data, f, protocol=2)
+				f.close()
+				portage.util.apply_secpass_permissions(
+					self._cache_filename, gid=portage.portage_gid, mode=0o644)
+			except (IOError, OSError) as e:
+				pass
+			self._modified.clear()
+
+	def __setitem__(self, cpv, blocker_data):
+		"""
+		Update the cache and mark it as modified for a future call to
+		self.flush().
+
+		@param cpv: Package for which to cache blockers.
+		@type cpv: String
+		@param blocker_data: An object with counter and atoms attributes.
+		@type blocker_data: BlockerData
+		"""
+		self._cache_data["blockers"][cpv] = \
+			(blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
+		self._modified.add(cpv)
+
+	def __iter__(self):
+		if self._cache_data is None:
+			# triggered by python-trace
+			return iter([])
+		return iter(self._cache_data["blockers"])
+
+	def __len__(self):
+		"""This needs to be implemented in order to avoid
+		infinite recursion in some cases."""
+		return len(self._cache_data["blockers"])
+
+	def __delitem__(self, cpv):
+		del self._cache_data["blockers"][cpv]
+
+	def __getitem__(self, cpv):
+		"""
+		@rtype: BlockerData
+		@returns: An object with counter and atoms attributes.
+		"""
+		return self.BlockerData(*self._cache_data["blockers"][cpv])
+

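Stripped of the portage specifics, the persistence strategy in BlockerCache is a pickled dict plus a set of modified keys, flushed to disk only once _cache_threshold entries have changed. A minimal standalone sketch of that strategy:

import pickle

class ThresholdCache(object):
	_cache_threshold = 5

	def __init__(self, path):
		self._path = path
		self._modified = set()
		try:
			with open(path, "rb") as f:
				self._data = pickle.load(f)
		except (IOError, OSError, EOFError, ValueError,
			pickle.UnpicklingError):
			self._data = {}

	def __setitem__(self, key, value):
		self._data[key] = value
		self._modified.add(key)

	def flush(self):
		# skip the disk write until enough entries have changed
		if len(self._modified) >= self._cache_threshold:
			with open(self._path, "wb") as f:
				pickle.dump(self._data, f, protocol=2)
			self._modified.clear()
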
diff --git a/portage_with_autodep/pym/_emerge/BlockerDB.py b/portage_with_autodep/pym/_emerge/BlockerDB.py
new file mode 100644
index 0000000..4819749
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BlockerDB.py
@@ -0,0 +1,124 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+import portage
+from portage import os
+from portage import digraph
+from portage._sets.base import InternalPackageSet
+
+from _emerge.BlockerCache import BlockerCache
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+class BlockerDB(object):
+
+	def __init__(self, fake_vartree):
+		root_config = fake_vartree._root_config
+		self._root_config = root_config
+		self._vartree = root_config.trees["vartree"]
+		self._portdb = root_config.trees["porttree"].dbapi
+
+		self._dep_check_trees = None
+		self._fake_vartree = fake_vartree
+		self._dep_check_trees = {
+			self._vartree.root : {
+				"porttree"    :  fake_vartree,
+				"vartree"     :  fake_vartree,
+		}}
+
+	def findInstalledBlockers(self, new_pkg):
+		"""
+		Search for installed run-time blockers in the root where
+		new_pkg is planned to be installed. This ignores build-time
+		blockers, since new_pkg is assumed to be built already.
+		"""
+		blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
+		dep_keys = ["RDEPEND", "PDEPEND"]
+		settings = self._vartree.settings
+		stale_cache = set(blocker_cache)
+		fake_vartree = self._fake_vartree
+		dep_check_trees = self._dep_check_trees
+		vardb = fake_vartree.dbapi
+		installed_pkgs = list(vardb)
+
+		for inst_pkg in installed_pkgs:
+			stale_cache.discard(inst_pkg.cpv)
+			cached_blockers = blocker_cache.get(inst_pkg.cpv)
+			if cached_blockers is not None and \
+				cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
+				cached_blockers = None
+			if cached_blockers is not None:
+				blocker_atoms = cached_blockers.atoms
+			else:
+				# Use aux_get() to trigger FakeVartree global
+				# updates on *DEPEND when appropriate.
+				depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
+				success, atoms = portage.dep_check(depstr,
+					vardb, settings, myuse=inst_pkg.use.enabled,
+					trees=dep_check_trees, myroot=inst_pkg.root)
+				if not success:
+					pkg_location = os.path.join(inst_pkg.root,
+						portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
+					portage.writemsg("!!! %s/*DEPEND: %s\n" % \
+						(pkg_location, atoms), noiselevel=-1)
+					continue
+
+				blocker_atoms = [atom for atom in atoms \
+					if atom.startswith("!")]
+				blocker_atoms.sort()
+				counter = long(inst_pkg.metadata["COUNTER"])
+				blocker_cache[inst_pkg.cpv] = \
+					blocker_cache.BlockerData(counter, blocker_atoms)
+		for cpv in stale_cache:
+			del blocker_cache[cpv]
+		blocker_cache.flush()
+
+		blocker_parents = digraph()
+		blocker_atoms = []
+		for pkg in installed_pkgs:
+			for blocker_atom in blocker_cache[pkg.cpv].atoms:
+				blocker_atom = blocker_atom.lstrip("!")
+				blocker_atoms.append(blocker_atom)
+				blocker_parents.add(blocker_atom, pkg)
+
+		blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+		blocking_pkgs = set()
+		for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
+			blocking_pkgs.update(blocker_parents.parent_nodes(atom))
+
+		# Check for blockers in the other direction.
+		depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
+		success, atoms = portage.dep_check(depstr,
+			vardb, settings, myuse=new_pkg.use.enabled,
+			trees=dep_check_trees, myroot=new_pkg.root)
+		if not success:
+			# We should never get this far with invalid deps.
+			show_invalid_depstring_notice(new_pkg, depstr, atoms)
+			assert False
+
+		blocker_atoms = [atom.lstrip("!") for atom in atoms \
+			if atom[:1] == "!"]
+		if blocker_atoms:
+			blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+			for inst_pkg in installed_pkgs:
+				try:
+					next(blocker_atoms.iterAtomsForPackage(inst_pkg))
+				except (portage.exception.InvalidDependString, StopIteration):
+					continue
+				blocking_pkgs.add(inst_pkg)
+
+		return blocking_pkgs
+
+	def discardBlocker(self, pkg):
+		"""Discard a package from the list of potential blockers.
+		This will match any package(s) with identical cpv or cp:slot."""
+		for cpv_match in self._fake_vartree.dbapi.match_pkgs("=%s" % (pkg.cpv,)):
+			if cpv_match.cp == pkg.cp:
+				self._fake_vartree.cpv_discard(cpv_match)
+		for slot_match in self._fake_vartree.dbapi.match_pkgs(pkg.slot_atom):
+			if slot_match.cp == pkg.cp:
+				self._fake_vartree.cpv_discard(slot_match)

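A small illustration of the atom bookkeeping in findInstalledBlockers(): blocker atoms are the "!"-prefixed entries of the flattened dependency list, and lstrip("!") also strips the doubled "!" of strong blockers (the package names below are made up):

atoms = ["dev-libs/libfoo", "!dev-libs/libbar", "!!sys-apps/baz"]
blocker_atoms = sorted(a for a in atoms if a.startswith("!"))
print(blocker_atoms)  # ['!!sys-apps/baz', '!dev-libs/libbar']
print([a.lstrip("!") for a in blocker_atoms])
# ['sys-apps/baz', 'dev-libs/libbar']
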
diff --git a/portage_with_autodep/pym/_emerge/BlockerDepPriority.py b/portage_with_autodep/pym/_emerge/BlockerDepPriority.py
new file mode 100644
index 0000000..1004a37
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BlockerDepPriority.py
@@ -0,0 +1,13 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class BlockerDepPriority(DepPriority):
+	__slots__ = ()
+	def __int__(self):
+		return 0
+
+	def __str__(self):
+		return 'blocker'
+
+BlockerDepPriority.instance = BlockerDepPriority()

diff --git a/portage_with_autodep/pym/_emerge/CompositeTask.py b/portage_with_autodep/pym/_emerge/CompositeTask.py
new file mode 100644
index 0000000..644a69b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/CompositeTask.py
@@ -0,0 +1,157 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from portage import os
+
+class CompositeTask(AsynchronousTask):
+
+	__slots__ = ("scheduler",) + ("_current_task",)
+
+	_TASK_QUEUED = -1
+
+	def isAlive(self):
+		return self._current_task is not None
+
+	def _cancel(self):
+		if self._current_task is not None:
+			if self._current_task is self._TASK_QUEUED:
+				self.returncode = 1
+				self._current_task = None
+			else:
+				self._current_task.cancel()
+
+	def _poll(self):
+		"""
+		This does a loop calling self._current_task.poll()
+		repeatedly as long as the value of self._current_task
+		keeps changing. It calls poll() a maximum of one time
+		for a given self._current_task instance. This is useful
+		since calling poll() on a task can trigger an advance to
+		the next task, which could eventually lead to the returncode
+		being set in cases when polling only a single task would
+		not have the same effect.
+		"""
+
+		prev = None
+		while True:
+			task = self._current_task
+			if task is None or \
+				task is self._TASK_QUEUED or \
+				task is prev:
+				# don't poll the same task more than once
+				break
+			task.poll()
+			prev = task
+
+		return self.returncode
+
+	def _wait(self):
+
+		prev = None
+		while True:
+			task = self._current_task
+			if task is None:
+				# don't wait for the same task more than once
+				break
+			if task is self._TASK_QUEUED:
+				if self.cancelled:
+					self.returncode = 1
+					self._current_task = None
+					break
+				else:
+					self.scheduler.schedule(condition=self._task_queued_wait)
+					if self.returncode is not None:
+						break
+					elif self.cancelled:
+						self.returncode = 1
+						self._current_task = None
+						break
+					else:
+						# try this again with new _current_task value
+						continue
+			if task is prev:
+				if self.returncode is not None:
+					# This is expected if we're being
+					# called from the task's exit listener
+					# after it's been cancelled.
+					break
+				# Before the task.wait() method returned, an exit
+				# listener should have set self._current_task to either
+				# a different task or None. Something is wrong.
+				raise AssertionError("self._current_task has not " + \
+					"changed since calling wait", self, task)
+			task.wait()
+			prev = task
+
+		return self.returncode
+
+	def _assert_current(self, task):
+		"""
+		Raises an AssertionError if the given task is not the
+		same one as self._current_task. This can be useful
+		for detecting bugs.
+		"""
+		if task is not self._current_task:
+			raise AssertionError("Unrecognized task: %s" % (task,))
+
+	def _default_exit(self, task):
+		"""
+		Calls _assert_current() on the given task and then sets the
+		composite returncode attribute if task.returncode != os.EX_OK.
+		If the task failed then self._current_task will be set to None.
+		Subclasses can use this as a generic task exit callback.
+
+		@rtype: int
+		@returns: The task.returncode attribute.
+		"""
+		self._assert_current(task)
+		if task.returncode != os.EX_OK:
+			self.returncode = task.returncode
+			self._current_task = None
+		return task.returncode
+
+	def _final_exit(self, task):
+		"""
+		Assumes that task is the final task of this composite task.
+		Calls _default_exit() and sets self.returncode to the task's
+		returncode and sets self._current_task to None.
+		"""
+		self._default_exit(task)
+		self._current_task = None
+		self.returncode = task.returncode
+		return self.returncode
+
+	def _default_final_exit(self, task):
+		"""
+		This calls _final_exit() and then wait().
+
+		Subclasses can use this as a generic final task exit callback.
+
+		"""
+		self._final_exit(task)
+		return self.wait()
+
+	def _start_task(self, task, exit_handler):
+		"""
+		Register exit handler for the given task, set it
+		as self._current_task, and call task.start().
+
+		Subclasses can use this as a generic way to start
+		a task.
+
+		"""
+		task.addExitListener(exit_handler)
+		self._current_task = task
+		task.start()
+
+	def _task_queued(self, task):
+		task.addStartListener(self._task_queued_start_handler)
+		self._current_task = self._TASK_QUEUED
+
+	def _task_queued_start_handler(self, task):
+		self._current_task = task
+
+	def _task_queued_wait(self):
+		return self._current_task is not self._TASK_QUEUED or \
+			self.cancelled or self.returncode is not None

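A sketch of the intended CompositeTask subclassing pattern, assuming portage_with_autodep/pym is on PYTHONPATH: each exit handler either aborts via _default_exit() or starts the next task, and the last handler routes through _default_final_exit():

import os

from _emerge.AsynchronousTask import AsynchronousTask
from _emerge.CompositeTask import CompositeTask

class TwoStep(CompositeTask):
	__slots__ = ()

	def _start(self):
		# step 1: a default AsynchronousTask completes immediately
		self._start_task(AsynchronousTask(), self._step_one_exit)

	def _step_one_exit(self, task):
		if self._default_exit(task) != os.EX_OK:
			self.wait()
			return
		# step 2: the final task
		self._start_task(AsynchronousTask(), self._default_final_exit)

composite = TwoStep()
composite.start()
print(composite.returncode)  # 0, since both steps succeeded
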
diff --git a/portage_with_autodep/pym/_emerge/DepPriority.py b/portage_with_autodep/pym/_emerge/DepPriority.py
new file mode 100644
index 0000000..3c2256a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DepPriority.py
@@ -0,0 +1,49 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractDepPriority import AbstractDepPriority
+class DepPriority(AbstractDepPriority):
+
+	__slots__ = ("satisfied", "optional", "ignored")
+
+	def __int__(self):
+		"""
+		Note: These priorities are only used for measuring hardness
+		in the circular dependency display via digraph.debug_print(),
+		and nothing more. For actual merge order calculations, the
+		measures defined by the DepPriorityNormalRange and
+		DepPrioritySatisfiedRange classes are used.
+
+		Attributes                            Hardness
+
+		buildtime                               0
+		runtime                                -1
+		runtime_post                           -2
+		optional                               -3
+		(none of the above)                    -4
+
+		"""
+
+		if self.optional:
+			return -3
+		if self.buildtime:
+			return 0
+		if self.runtime:
+			return -1
+		if self.runtime_post:
+			return -2
+		return -4
+
+	def __str__(self):
+		if self.ignored:
+			return "ignored"
+		if self.optional:
+			return "optional"
+		if self.buildtime:
+			return "buildtime"
+		if self.runtime:
+			return "runtime"
+		if self.runtime_post:
+			return "runtime_post"
+		return "soft"
+

diff --git a/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.py b/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.py
new file mode 100644
index 0000000..8639554
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.py
@@ -0,0 +1,47 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class DepPriorityNormalRange(object):
+	"""
+	DepPriority properties              Index      Category
+
+	buildtime                                      HARD
+	runtime                                3       MEDIUM
+	runtime_post                           2       MEDIUM_SOFT
+	optional                               1       SOFT
+	(none of the above)                    0       NONE
+	"""
+	MEDIUM      = 3
+	MEDIUM_SOFT = 2
+	SOFT        = 1
+	NONE        = 0
+
+	@classmethod
+	def _ignore_optional(cls, priority):
+		if priority.__class__ is not DepPriority:
+			return False
+		return bool(priority.optional)
+
+	@classmethod
+	def _ignore_runtime_post(cls, priority):
+		if priority.__class__ is not DepPriority:
+			return False
+		return bool(priority.optional or priority.runtime_post)
+
+	@classmethod
+	def _ignore_runtime(cls, priority):
+		if priority.__class__ is not DepPriority:
+			return False
+		return bool(priority.optional or not priority.buildtime)
+
+	ignore_medium      = _ignore_runtime
+	ignore_medium_soft = _ignore_runtime_post
+	ignore_soft        = _ignore_optional
+
+DepPriorityNormalRange.ignore_priority = (
+	None,
+	DepPriorityNormalRange._ignore_optional,
+	DepPriorityNormalRange._ignore_runtime_post,
+	DepPriorityNormalRange._ignore_runtime
+)

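The ignore_priority tuple above forms a ladder of progressively harder filters; merge-order code walks it from the weakest rung until the dependency graph becomes workable. A sketch that calls the filters directly (assumes the pym tree is importable):

from _emerge.DepPriority import DepPriority
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange

optional_dep = DepPriority(optional=True)
runtime_dep = DepPriority(runtime=True)

for ignore in DepPriorityNormalRange.ignore_priority:
	if ignore is None:
		continue  # the first rung ignores nothing
	print("%s: optional=%s runtime=%s" % (ignore.__name__,
		ignore(optional_dep), ignore(runtime_dep)))
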
diff --git a/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.py b/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.py
new file mode 100644
index 0000000..edb29df
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.py
@@ -0,0 +1,85 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class DepPrioritySatisfiedRange(object):
+	"""
+	DepPriority                         Index      Category
+
+	not satisfied and buildtime                    HARD
+	not satisfied and runtime              6       MEDIUM
+	not satisfied and runtime_post         5       MEDIUM_SOFT
+	satisfied and buildtime                4       SOFT
+	satisfied and runtime                  3       SOFT
+	satisfied and runtime_post             2       SOFT
+	optional                               1       SOFT
+	(none of the above)                    0       NONE
+	"""
+	MEDIUM      = 6
+	MEDIUM_SOFT = 5
+	SOFT        = 4
+	NONE        = 0
+
+	@classmethod
+	def _ignore_optional(cls, priority):
+		if priority.__class__ is not DepPriority:
+			return False
+		return bool(priority.optional)
+
+	@classmethod
+	def _ignore_satisfied_runtime_post(cls, priority):
+		if priority.__class__ is not DepPriority:
+			return False
+		if priority.optional:
+			return True
+		if not priority.satisfied:
+			return False
+		return bool(priority.runtime_post)
+
+	@classmethod
+	def _ignore_satisfied_runtime(cls, priority):
+		if priority.__class__ is not DepPriority:
+			return False
+		if priority.optional:
+			return True
+		if not priority.satisfied:
+			return False
+		return not priority.buildtime
+
+	@classmethod
+	def _ignore_satisfied_buildtime(cls, priority):
+		if priority.__class__ is not DepPriority:
+			return False
+		return bool(priority.optional or \
+			priority.satisfied)
+
+	@classmethod
+	def _ignore_runtime_post(cls, priority):
+		if priority.__class__ is not DepPriority:
+			return False
+		return bool(priority.optional or \
+			priority.satisfied or \
+			priority.runtime_post)
+
+	@classmethod
+	def _ignore_runtime(cls, priority):
+		if priority.__class__ is not DepPriority:
+			return False
+		return bool(priority.satisfied or \
+			priority.optional or \
+			not priority.buildtime)
+
+	ignore_medium      = _ignore_runtime
+	ignore_medium_soft = _ignore_runtime_post
+	ignore_soft        = _ignore_satisfied_buildtime
+
+
+DepPrioritySatisfiedRange.ignore_priority = (
+	None,
+	DepPrioritySatisfiedRange._ignore_optional,
+	DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
+	DepPrioritySatisfiedRange._ignore_satisfied_runtime,
+	DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
+	DepPrioritySatisfiedRange._ignore_runtime_post,
+	DepPrioritySatisfiedRange._ignore_runtime
+)

diff --git a/portage_with_autodep/pym/_emerge/Dependency.py b/portage_with_autodep/pym/_emerge/Dependency.py
new file mode 100644
index 0000000..0f746b6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Dependency.py
@@ -0,0 +1,20 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+from _emerge.SlotObject import SlotObject
+class Dependency(SlotObject):
+	__slots__ = ("atom", "blocker", "child", "depth",
+		"parent", "onlydeps", "priority", "root",
+		"collapsed_parent", "collapsed_priority")
+	def __init__(self, **kwargs):
+		SlotObject.__init__(self, **kwargs)
+		if self.priority is None:
+			self.priority = DepPriority()
+		if self.depth is None:
+			self.depth = 0
+		if self.collapsed_parent is None:
+			self.collapsed_parent = self.parent
+		if self.collapsed_priority is None:
+			self.collapsed_priority = self.priority
+

diff --git a/portage_with_autodep/pym/_emerge/DependencyArg.py b/portage_with_autodep/pym/_emerge/DependencyArg.py
new file mode 100644
index 0000000..861d837
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DependencyArg.py
@@ -0,0 +1,33 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+from portage import _encodings, _unicode_encode, _unicode_decode
+
+class DependencyArg(object):
+	def __init__(self, arg=None, root_config=None):
+		self.arg = arg
+		self.root_config = root_config
+
+	def __eq__(self, other):
+		if self.__class__ is not other.__class__:
+			return False
+		return self.arg == other.arg and \
+			self.root_config.root == other.root_config.root
+
+	def __hash__(self):
+		return hash((self.arg, self.root_config.root))
+
+	def __str__(self):
+		# Force unicode format string for python-2.x safety,
+		# ensuring that self.arg.__unicode__() is used
+		# when necessary.
+		return _unicode_decode("%s") % (self.arg,)
+
+	if sys.hexversion < 0x3000000:
+
+		__unicode__ = __str__
+
+		def __str__(self):
+			return _unicode_encode(self.__unicode__(), encoding=_encodings['content'])

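The __str__/__unicode__ arrangement above exists because, under Python 2, a byte-string format such as "%s" % obj dispatches to obj.__str__() and can fail on non-ASCII data, while a unicode format string routes through __unicode__(). A Python 2 only illustration of the difference (the Arg class below is hypothetical):

    # -*- coding: utf-8 -*-
    # Python 2 semantics: u"%s" dispatches to __unicode__, "%s" to __str__.
    class Arg(object):
        def __unicode__(self):
            return u"app-misc/\u00fcmlaut"
        def __str__(self):
            return self.__unicode__().encode("utf_8")

    print(u"%s" % Arg())  # unicode path; safe to combine with unicode text
    print("%s" % Arg())   # bytes path; the encoded form of __unicode__()
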
diff --git a/portage_with_autodep/pym/_emerge/EbuildBinpkg.py b/portage_with_autodep/pym/_emerge/EbuildBinpkg.py
new file mode 100644
index 0000000..b7d43ba
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildBinpkg.py
@@ -0,0 +1,46 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildPhase import EbuildPhase
+from portage import os
+
+class EbuildBinpkg(CompositeTask):
+	"""
+	This assumes that src_install() has successfully completed.
+	"""
+	__slots__ = ('pkg', 'settings') + \
+		('_binpkg_tmpfile',)
+
+	def _start(self):
+		pkg = self.pkg
+		root_config = pkg.root_config
+		bintree = root_config.trees["bintree"]
+		bintree.prevent_collision(pkg.cpv)
+		binpkg_tmpfile = os.path.join(bintree.pkgdir,
+			pkg.cpv + ".tbz2." + str(os.getpid()))
+		bintree._ensure_dir(os.path.dirname(binpkg_tmpfile))
+
+		self._binpkg_tmpfile = binpkg_tmpfile
+		self.settings["PORTAGE_BINPKG_TMPFILE"] = self._binpkg_tmpfile
+
+		package_phase = EbuildPhase(background=self.background,
+			phase='package', scheduler=self.scheduler,
+			settings=self.settings)
+
+		self._start_task(package_phase, self._package_phase_exit)
+
+	def _package_phase_exit(self, package_phase):
+
+		self.settings.pop("PORTAGE_BINPKG_TMPFILE", None)
+		if self._default_exit(package_phase) != os.EX_OK:
+			self.wait()
+			return
+
+		pkg = self.pkg
+		bintree = pkg.root_config.trees["bintree"]
+		bintree.inject(pkg.cpv, filename=self._binpkg_tmpfile)
+
+		self._current_task = None
+		self.returncode = os.EX_OK
+		self.wait()

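EbuildBinpkg above points PORTAGE_BINPKG_TMPFILE at a pid-suffixed temp file for the package phase and only afterwards hands the finished archive to bintree.inject(). The underlying write-to-temp-then-publish idiom, reduced to plain Python — the names below are illustrative, and the real inject() additionally updates the binary tree's index:

    import os

    def publish_binpkg(pkgdir, cpv, build_archive):
        # pid-suffixed temp name: concurrent builds never clobber each other
        tmpfile = os.path.join(pkgdir, "%s.tbz2.%d" % (cpv, os.getpid()))
        build_archive(tmpfile)  # caller-supplied packaging step
        final = os.path.join(pkgdir, cpv + ".tbz2")
        os.rename(tmpfile, final)  # atomic within one filesystem
        return final
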
diff --git a/portage_with_autodep/pym/_emerge/EbuildBuild.py b/portage_with_autodep/pym/_emerge/EbuildBuild.py
new file mode 100644
index 0000000..1c423a3
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildBuild.py
@@ -0,0 +1,426 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.EbuildExecuter import EbuildExecuter
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.EbuildBinpkg import EbuildBinpkg
+from _emerge.EbuildFetcher import EbuildFetcher
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildMerge import EbuildMerge
+from _emerge.EbuildFetchonly import EbuildFetchonly
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EventsAnalyser import EventsAnalyser, FilterProcGenerator
+from _emerge.EventsLogger import EventsLogger
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from portage.util import writemsg
+import portage
+from portage import os
+from portage.output import colorize
+from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.doebuild import _check_temp_dir
+from portage.package.ebuild._spawn_nofetch import spawn_nofetch
+
+class EbuildBuild(CompositeTask):
+
+	__slots__ = ("args_set", "config_pool", "find_blockers",
+		"ldpath_mtimes", "logger", "logserver", "opts", "pkg", "pkg_count",
+		"prefetcher", "settings", "world_atom") + \
+		("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
+
+	def _start(self):
+
+		pkg = self.pkg
+		settings = self.settings
+
+		rval = _check_temp_dir(settings)
+		if rval != os.EX_OK:
+			self.returncode = rval
+			self._current_task = None
+			self.wait()
+			return
+
+		root_config = pkg.root_config
+		tree = "porttree"
+		self._tree = tree
+		portdb = root_config.trees[tree].dbapi
+		settings.setcpv(pkg)
+		settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+		if self.opts.buildpkgonly:
+			settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
+		else:
+			settings.configdict["pkg"]["MERGE_TYPE"] = "source"
+		ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+		if ebuild_path is None:
+			raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
+		self._ebuild_path = ebuild_path
+		portage.doebuild_environment(ebuild_path, 'setup',
+			settings=self.settings, db=portdb)
+
+		# Check the manifest here since with --keep-going mode it's
+		# currently possible to get this far with a broken manifest.
+		if not self._check_manifest():
+			self.returncode = 1
+			self._current_task = None
+			self.wait()
+			return
+
+		prefetcher = self.prefetcher
+		if prefetcher is None:
+			pass
+		elif prefetcher.isAlive() and \
+			prefetcher.poll() is None:
+
+			waiting_msg = "Fetching files " + \
+				"in the background. " + \
+				"To view fetch progress, run `tail -f " + \
+				"/var/log/emerge-fetch.log` in another " + \
+				"terminal."
+			msg_prefix = colorize("GOOD", " * ")
+			from textwrap import wrap
+			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
+				for line in wrap(waiting_msg, 65))
+			if not self.background:
+				writemsg(waiting_msg, noiselevel=-1)
+
+			self._current_task = prefetcher
+			prefetcher.addExitListener(self._prefetch_exit)
+			return
+
+		self._prefetch_exit(prefetcher)
+
+	def _check_manifest(self):
+		success = True
+
+		settings = self.settings
+		if 'strict' in settings.features:
+			settings['O'] = os.path.dirname(self._ebuild_path)
+			quiet_setting = settings.get('PORTAGE_QUIET')
+			settings['PORTAGE_QUIET'] = '1'
+			try:
+				success = digestcheck([], settings, strict=True)
+			finally:
+				if quiet_setting:
+					settings['PORTAGE_QUIET'] = quiet_setting
+				else:
+					del settings['PORTAGE_QUIET']
+
+		return success
+
+	def _prefetch_exit(self, prefetcher):
+
+		opts = self.opts
+		pkg = self.pkg
+		settings = self.settings
+
+		if opts.fetchonly:
+			if opts.pretend:
+				fetcher = EbuildFetchonly(
+					fetch_all=opts.fetch_all_uri,
+					pkg=pkg, pretend=opts.pretend,
+					settings=settings)
+				retval = fetcher.execute()
+				self.returncode = retval
+				self.wait()
+				return
+			else:
+				fetcher = EbuildFetcher(
+					config_pool=self.config_pool,
+					ebuild_path=self._ebuild_path,
+					fetchall=self.opts.fetch_all_uri,
+					fetchonly=self.opts.fetchonly,
+					background=False,
+					logfile=None,
+					pkg=self.pkg,
+					scheduler=self.scheduler)
+				self._start_task(fetcher, self._fetchonly_exit)
+				return
+
+		self._build_dir = EbuildBuildDir(
+			scheduler=self.scheduler, settings=settings)
+		self._build_dir.lock()
+
+		# Cleaning needs to happen before fetch, since the build dir
+		# is used for log handling.
+		msg = " === (%s of %s) Cleaning (%s::%s)" % \
+			(self.pkg_count.curval, self.pkg_count.maxval,
+			self.pkg.cpv, self._ebuild_path)
+		short_msg = "emerge: (%s of %s) %s Clean" % \
+			(self.pkg_count.curval, self.pkg_count.maxval, self.pkg.cpv)
+		self.logger.log(msg, short_msg=short_msg)
+
+		pre_clean_phase = EbuildPhase(background=self.background,
+			phase='clean', scheduler=self.scheduler, settings=self.settings)
+		self._start_task(pre_clean_phase, self._pre_clean_exit)
+
+	def _fetchonly_exit(self, fetcher):
+		self._final_exit(fetcher)
+		if self.returncode != os.EX_OK:
+			portdb = self.pkg.root_config.trees[self._tree].dbapi
+			spawn_nofetch(portdb, self._ebuild_path, settings=self.settings)
+		self.wait()
+
+	def _pre_clean_exit(self, pre_clean_phase):
+		if self._default_exit(pre_clean_phase) != os.EX_OK:
+			self._unlock_builddir()
+			self.wait()
+			return
+
+		# for log handling
+		portage.prepare_build_dirs(self.pkg.root, self.settings, 1)
+
+		fetcher = EbuildFetcher(config_pool=self.config_pool,
+			ebuild_path=self._ebuild_path,
+			fetchall=self.opts.fetch_all_uri,
+			fetchonly=self.opts.fetchonly,
+			background=self.background,
+			logfile=self.settings.get('PORTAGE_LOG_FILE'),
+			pkg=self.pkg, scheduler=self.scheduler)
+
+		try:
+			already_fetched = fetcher.already_fetched(self.settings)
+		except portage.exception.InvalidDependString as e:
+			msg_lines = []
+			msg = "Fetch failed for '%s' due to invalid SRC_URI: %s" % \
+				(self.pkg.cpv, e)
+			msg_lines.append(msg)
+			fetcher._eerror(msg_lines)
+			portage.elog.elog_process(self.pkg.cpv, self.settings)
+			self.returncode = 1
+			self._current_task = None
+			self._unlock_builddir()
+			self.wait()
+			return
+
+		if already_fetched:
+			# This case is optimized to skip the fetch queue.
+			fetcher = None
+			self._fetch_exit(fetcher)
+			return
+
+		# Allow the Scheduler's fetch queue to control the
+		# number of concurrent fetchers.
+		fetcher.addExitListener(self._fetch_exit)
+		self._task_queued(fetcher)
+		self.scheduler.fetch.schedule(fetcher)
+
+	def _fetch_exit(self, fetcher):
+
+		if fetcher is not None and \
+			self._default_exit(fetcher) != os.EX_OK:
+			self._fetch_failed()
+			return
+
+		# discard successful fetch log
+		self._build_dir.clean_log()
+		pkg = self.pkg
+		logger = self.logger
+		opts = self.opts
+		pkg_count = self.pkg_count
+		scheduler = self.scheduler
+		settings = self.settings
+		features = settings.features
+		ebuild_path = self._ebuild_path
+		system_set = pkg.root_config.sets["system"]
+
+		#buildsyspkg: Check if we need to _force_ binary package creation
+		self._issyspkg = "buildsyspkg" in features and \
+				system_set.findAtomForPackage(pkg) and \
+				not opts.buildpkg
+
+		if opts.buildpkg or self._issyspkg:
+
+			self._buildpkg = True
+
+			msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
+				(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
+			short_msg = "emerge: (%s of %s) %s Compile" % \
+				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
+			logger.log(msg, short_msg=short_msg)
+
+		else:
+			msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
+				(pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
+			short_msg = "emerge: (%s of %s) %s Compile" % \
+				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
+			logger.log(msg, short_msg=short_msg)
+
+		build = EbuildExecuter(background=self.background, pkg=pkg,
+			scheduler=scheduler, settings=settings)
+
+		build.addStartListener(self._build_start)
+		build.addExitListener(self._build_stop)
+
+		self._start_task(build, self._build_exit)
+
+	def _build_start(self, phase):
+		if "depcheck" in self.settings["FEATURES"] or \
+			"depcheckstrict" in self.settings["FEATURES"]:
+			# Let's start a log listening server
+			temp_path = self.settings.get("T", self.settings["PORTAGE_TMPDIR"])
+
+			if "depcheckstrict" not in self.settings["FEATURES"]:
+				# use the default filter_proc
+				self.logserver = EventsLogger(socket_dir=temp_path)
+			else:
+				portage.util.writemsg("Getting list of allowed files... " + \
+					"This may take some time\n")
+				filter_gen = FilterProcGenerator(self.pkg.cpv, self.settings)
+				filter_proc = filter_gen.get_filter_proc()
+				self.logserver = EventsLogger(socket_dir=temp_path,
+					filter_proc=filter_proc)
+
+			self.logserver.start()
+
+			# Copy the socket path to the LOG_SOCKET environment variable
+			env = self.settings.configdict["pkg"]
+			env['LOG_SOCKET'] = self.logserver.socket_name
+
+	def _build_stop(self, phase):
+		if "depcheck" in self.settings["FEATURES"] or \
+			"depcheckstrict" in self.settings["FEATURES"]:
+			# Delete LOG_SOCKET from the environment
+			env = self.settings.configdict["pkg"]
+			if 'LOG_SOCKET' in env:
+				del env['LOG_SOCKET']
+
+			events = self.logserver.stop()
+			self.logserver = None
+			analyser = EventsAnalyser(self.pkg.cpv, events, self.settings)
+			analyser.display() # show the analysis
+
+	def _fetch_failed(self):
+		# We only call the pkg_nofetch phase if either RESTRICT=fetch
+		# is set or the package has explicitly overridden the default
+		# pkg_nofetch implementation. This allows specialized messages
+		# to be displayed for problematic packages even though they do
+		# not set RESTRICT=fetch (bug #336499).
+
+		if 'fetch' not in self.pkg.metadata.restrict and \
+			'nofetch' not in self.pkg.metadata.defined_phases:
+			self._unlock_builddir()
+			self.wait()
+			return
+
+		self.returncode = None
+		nofetch_phase = EbuildPhase(background=self.background,
+			phase='nofetch', scheduler=self.scheduler, settings=self.settings)
+		self._start_task(nofetch_phase, self._nofetch_exit)
+
+	def _nofetch_exit(self, nofetch_phase):
+		self._final_exit(nofetch_phase)
+		self._unlock_builddir()
+		self.returncode = 1
+		self.wait()
+
+	def _unlock_builddir(self):
+		portage.elog.elog_process(self.pkg.cpv, self.settings)
+		self._build_dir.unlock()
+
+	def _build_exit(self, build):
+		if self._default_exit(build) != os.EX_OK:
+			self._unlock_builddir()
+			self.wait()
+			return
+
+		buildpkg = self._buildpkg
+
+		if not buildpkg:
+			self._final_exit(build)
+			self.wait()
+			return
+
+		if self._issyspkg:
+			msg = ">>> This is a system package, " + \
+				"let's pack a rescue tarball.\n"
+			self.scheduler.output(msg,
+				log_path=self.settings.get("PORTAGE_LOG_FILE"))
+
+		packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
+			scheduler=self.scheduler, settings=self.settings)
+
+		self._start_task(packager, self._buildpkg_exit)
+
+	def _buildpkg_exit(self, packager):
+		"""
+		Release the build dir lock when there is a failure or
+		when in buildpkgonly mode. Otherwise, the lock will
+		be released when merge() is called.
+		"""
+
+		if self._default_exit(packager) != os.EX_OK:
+			self._unlock_builddir()
+			self.wait()
+			return
+
+		if self.opts.buildpkgonly:
+			phase = 'success_hooks'
+			success_hooks = MiscFunctionsProcess(
+				background=self.background,
+				commands=[phase], phase=phase,
+				scheduler=self.scheduler, settings=self.settings)
+			self._start_task(success_hooks,
+				self._buildpkgonly_success_hook_exit)
+			return
+
+		# Continue holding the builddir lock until
+		# after the package has been installed.
+		self._current_task = None
+		self.returncode = packager.returncode
+		self.wait()
+
+	def _buildpkgonly_success_hook_exit(self, success_hooks):
+		self._default_exit(success_hooks)
+		self.returncode = None
+		# Need to call "clean" phase for buildpkgonly mode
+		portage.elog.elog_process(self.pkg.cpv, self.settings)
+		phase = 'clean'
+		clean_phase = EbuildPhase(background=self.background,
+			phase=phase, scheduler=self.scheduler, settings=self.settings)
+		self._start_task(clean_phase, self._clean_exit)
+
+	def _clean_exit(self, clean_phase):
+		if self._final_exit(clean_phase) != os.EX_OK or \
+			self.opts.buildpkgonly:
+			self._unlock_builddir()
+		self.wait()
+
+	def create_install_task(self):
+		"""
+		Install the package and then clean up and release locks.
+		Only call this after the build has completed successfully
+		and neither fetchonly nor buildpkgonly mode are enabled.
+		"""
+
+		ldpath_mtimes = self.ldpath_mtimes
+		logger = self.logger
+		pkg = self.pkg
+		pkg_count = self.pkg_count
+		settings = self.settings
+		world_atom = self.world_atom
+		ebuild_path = self._ebuild_path
+		tree = self._tree
+
+		task = EbuildMerge(find_blockers=self.find_blockers,
+			ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
+			pkg_count=pkg_count, pkg_path=ebuild_path,
+			scheduler=self.scheduler,
+			settings=settings, tree=tree, world_atom=world_atom)
+
+		msg = " === (%s of %s) Merging (%s::%s)" % \
+			(pkg_count.curval, pkg_count.maxval,
+			pkg.cpv, ebuild_path)
+		short_msg = "emerge: (%s of %s) %s Merge" % \
+			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
+		logger.log(msg, short_msg=short_msg)
+
+		task.addExitListener(self._install_exit)
+		return task
+
+	def _install_exit(self, task):
+		self._unlock_builddir()

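The autodep-specific hooks in _build_start/_build_stop above bracket the build: they start an EventsLogger before the first phase, export its socket path through LOG_SOCKET so the hooked build can report file accesses, then collect and analyse the recorded events once the build stops. A reduced sketch of that bracketing pattern — the factory callables below are stand-ins, not the real autodep API:

    class BuildEventBracket(object):
        """Start an event listener before a build, analyse after it."""
        def __init__(self, make_logger, make_analyser, env):
            self._make_logger = make_logger
            self._make_analyser = make_analyser
            self._env = env
            self._logger = None

        def start(self):
            self._logger = self._make_logger()
            self._logger.start()
            # Build processes locate the listener through this variable.
            self._env["LOG_SOCKET"] = self._logger.socket_name

        def stop(self):
            self._env.pop("LOG_SOCKET", None)
            events = self._logger.stop()
            self._logger = None
            self._make_analyser(events).display()
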
diff --git a/portage_with_autodep/pym/_emerge/EbuildBuildDir.py b/portage_with_autodep/pym/_emerge/EbuildBuildDir.py
new file mode 100644
index 0000000..ddc5fe0
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildBuildDir.py
@@ -0,0 +1,109 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.SlotObject import SlotObject
+import portage
+from portage import os
+from portage.exception import PortageException
+import errno
+
+class EbuildBuildDir(SlotObject):
+
+	__slots__ = ("scheduler", "settings",
+		"locked", "_catdir", "_lock_obj")
+
+	def __init__(self, **kwargs):
+		SlotObject.__init__(self, **kwargs)
+		self.locked = False
+
+	def lock(self):
+		"""
+		This raises an AlreadyLocked exception if lock() is called
+		while a lock is already held. In order to avoid this, call
+		unlock() or check whether the "locked" attribute is True
+		or False before calling lock().
+		"""
+		if self._lock_obj is not None:
+			raise self.AlreadyLocked((self._lock_obj,))
+
+		dir_path = self.settings.get('PORTAGE_BUILDDIR')
+		if not dir_path:
+			raise AssertionError('PORTAGE_BUILDDIR is unset')
+		catdir = os.path.dirname(dir_path)
+		self._catdir = catdir
+
+		try:
+			portage.util.ensure_dirs(os.path.dirname(catdir),
+				gid=portage.portage_gid,
+				mode=0o70, mask=0)
+		except PortageException:
+			if not os.path.isdir(os.path.dirname(catdir)):
+				raise
+		catdir_lock = AsynchronousLock(path=catdir, scheduler=self.scheduler)
+		catdir_lock.start()
+		catdir_lock.wait()
+		self._assert_lock(catdir_lock)
+
+		try:
+			try:
+				portage.util.ensure_dirs(catdir,
+					gid=portage.portage_gid,
+					mode=0o70, mask=0)
+			except PortageException:
+				if not os.path.isdir(catdir):
+					raise
+			builddir_lock = AsynchronousLock(path=dir_path,
+				scheduler=self.scheduler)
+			builddir_lock.start()
+			builddir_lock.wait()
+			self._assert_lock(builddir_lock)
+			self._lock_obj = builddir_lock
+			self.settings['PORTAGE_BUILDIR_LOCKED'] = '1'
+		finally:
+			self.locked = self._lock_obj is not None
+			catdir_lock.unlock()
+
+	def _assert_lock(self, async_lock):
+		if async_lock.returncode != os.EX_OK:
+			# TODO: create a better way to propagate this error to the caller
+			raise AssertionError("AsynchronousLock failed with returncode %s" \
+				% (async_lock.returncode,))
+
+	def clean_log(self):
+		"""Discard the existing log. The log will not be discarded
+		in cases when it would not make sense, like when FEATURES=keepwork
+		is enabled."""
+		settings = self.settings
+		if 'keepwork' in settings.features:
+			return
+		log_file = settings.get('PORTAGE_LOG_FILE')
+		if log_file is not None and os.path.isfile(log_file):
+			try:
+				os.unlink(log_file)
+			except OSError:
+				pass
+
+	def unlock(self):
+		if self._lock_obj is None:
+			return
+
+		self._lock_obj.unlock()
+		self._lock_obj = None
+		self.locked = False
+		self.settings.pop('PORTAGE_BUILDIR_LOCKED', None)
+		catdir_lock = AsynchronousLock(path=self._catdir, scheduler=self.scheduler)
+		catdir_lock.start()
+		if catdir_lock.wait() == os.EX_OK:
+			try:
+				os.rmdir(self._catdir)
+			except OSError as e:
+				if e.errno not in (errno.ENOENT,
+					errno.ENOTEMPTY, errno.EEXIST, errno.EPERM):
+					raise
+			finally:
+				catdir_lock.unlock()
+
+	class AlreadyLocked(portage.exception.PortageException):
+		pass
+

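Since lock() raises AlreadyLocked on a second call, callers either test the locked attribute first or pair every lock() with an unlock() in a finally block. A minimal usage sketch under that contract, assuming a build_dir constructed as in EbuildBuild above:

    def with_build_dir(build_dir, critical_section):
        # Mirror how EbuildBuild pairs lock() with _unlock_builddir():
        # the lock is always released, even if the work raises.
        build_dir.lock()
        try:
            return critical_section()
        finally:
            build_dir.unlock()  # also clears PORTAGE_BUILDIR_LOCKED
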
diff --git a/portage_with_autodep/pym/_emerge/EbuildExecuter.py b/portage_with_autodep/pym/_emerge/EbuildExecuter.py
new file mode 100644
index 0000000..f8febd4
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildExecuter.py
@@ -0,0 +1,99 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.TaskSequence import TaskSequence
+from _emerge.CompositeTask import CompositeTask
+import portage
+from portage import os
+from portage.eapi import eapi_has_src_prepare_and_src_configure, \
+	eapi_exports_replace_vars
+from portage.package.ebuild.doebuild import _prepare_fake_distdir
+
+class EbuildExecuter(CompositeTask):
+
+	__slots__ = ("pkg", "scheduler", "settings")
+
+	_phases = ("prepare", "configure", "compile", "test", "install")
+
+	_live_eclasses = frozenset([
+		"bzr",
+		"cvs",
+		"darcs",
+		"git",
+		"git-2",
+		"mercurial",
+		"subversion",
+		"tla",
+	])
+
+	def _start(self):
+		pkg = self.pkg
+		scheduler = self.scheduler
+		settings = self.settings
+		cleanup = 0
+		portage.prepare_build_dirs(pkg.root, settings, cleanup)
+
+		portdb = pkg.root_config.trees['porttree'].dbapi
+		ebuild_path = settings['EBUILD']
+		alist = settings.configdict["pkg"].get("A", "").split()
+		_prepare_fake_distdir(settings, alist)
+
+		if eapi_exports_replace_vars(settings['EAPI']):
+			vardb = pkg.root_config.trees['vartree'].dbapi
+			settings["REPLACING_VERSIONS"] = " ".join(
+				set(portage.versions.cpv_getversion(match) \
+					for match in vardb.match(pkg.slot_atom) + \
+					vardb.match('='+pkg.cpv)))
+
+		setup_phase = EbuildPhase(background=self.background,
+			phase="setup", scheduler=scheduler,
+			settings=settings)
+
+		setup_phase.addExitListener(self._setup_exit)
+		self._task_queued(setup_phase)
+		self.scheduler.scheduleSetup(setup_phase)
+
+	def _setup_exit(self, setup_phase):
+
+		if self._default_exit(setup_phase) != os.EX_OK:
+			self.wait()
+			return
+
+		unpack_phase = EbuildPhase(background=self.background,
+			phase="unpack", scheduler=self.scheduler,
+			settings=self.settings)
+
+		if self._live_eclasses.intersection(self.pkg.inherited):
+			# Serialize $DISTDIR access for live ebuilds since
+			# otherwise they can interfere with each other.
+
+			unpack_phase.addExitListener(self._unpack_exit)
+			self._task_queued(unpack_phase)
+			self.scheduler.scheduleUnpack(unpack_phase)
+
+		else:
+			self._start_task(unpack_phase, self._unpack_exit)
+
+	def _unpack_exit(self, unpack_phase):
+
+		if self._default_exit(unpack_phase) != os.EX_OK:
+			self.wait()
+			return
+
+		ebuild_phases = TaskSequence(scheduler=self.scheduler)
+
+		pkg = self.pkg
+		phases = self._phases
+		eapi = pkg.metadata["EAPI"]
+		if not eapi_has_src_prepare_and_src_configure(eapi):
+			# skip src_prepare and src_configure
+			phases = phases[2:]
+
+		for phase in phases:
+			ebuild_phases.add(EbuildPhase(background=self.background,
+				phase=phase, scheduler=self.scheduler,
+				settings=self.settings))
+
+		self._start_task(ebuild_phases, self._default_final_exit)
+

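_unpack_exit above trims src_prepare/src_configure from the phase sequence for EAPIs that predate them, then queues the remaining phases in order. The trimming logic in isolation — the EAPI predicate is simplified here; portage's real check lives in portage.eapi:

    _PHASES = ("prepare", "configure", "compile", "test", "install")

    def phases_for_eapi(eapi):
        # EAPIs 0 and 1 have no src_prepare/src_configure, so skip them;
        # this mirrors eapi_has_src_prepare_and_src_configure() in spirit.
        if eapi in ("0", "1"):
            return _PHASES[2:]
        return _PHASES

    assert phases_for_eapi("0") == ("compile", "test", "install")
    assert phases_for_eapi("4") == _PHASES
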
diff --git a/portage_with_autodep/pym/_emerge/EbuildFetcher.py b/portage_with_autodep/pym/_emerge/EbuildFetcher.py
new file mode 100644
index 0000000..feb68d0
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildFetcher.py
@@ -0,0 +1,302 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import traceback
+
+from _emerge.SpawnProcess import SpawnProcess
+import copy
+import io
+import signal
+import sys
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import _unicode_decode
+from portage.elog.messages import eerror
+from portage.package.ebuild.fetch import _check_distfile, fetch
+from portage.util._pty import _create_pty_or_pipe
+
+class EbuildFetcher(SpawnProcess):
+
+	__slots__ = ("config_pool", "ebuild_path", "fetchonly", "fetchall",
+		"pkg", "prefetch") + \
+		("_digests", "_settings", "_uri_map")
+
+	def already_fetched(self, settings):
+		"""
+		Returns True if all files already exist locally and have correct
+		digests, otherwise return False. When returning True, appropriate
+		digest checking messages are produced for display and/or logging.
+		When returning False, no messages are produced, since we assume
+		that a fetcher process will later be executed in order to produce
+		such messages. This will raise InvalidDependString if SRC_URI is
+		invalid.
+		"""
+
+		uri_map = self._get_uri_map()
+		if not uri_map:
+			return True
+
+		digests = self._get_digests()
+		distdir = settings["DISTDIR"]
+		allow_missing = "allow-missing-manifests" in settings.features
+
+		for filename in uri_map:
+			# Use stat rather than lstat since fetch() creates
+			# symlinks when PORTAGE_RO_DISTDIRS is used.
+			try:
+				st = os.stat(os.path.join(distdir, filename))
+			except OSError:
+				return False
+			if st.st_size == 0:
+				return False
+			expected_size = digests.get(filename, {}).get('size')
+			if expected_size is None:
+				continue
+			if st.st_size != expected_size:
+				return False
+
+		stdout_orig = sys.stdout
+		stderr_orig = sys.stderr
+		global_havecolor = portage.output.havecolor
+		out = io.StringIO()
+		eout = portage.output.EOutput()
+		eout.quiet = settings.get("PORTAGE_QUIET") == "1"
+		success = True
+		try:
+			sys.stdout = out
+			sys.stderr = out
+			if portage.output.havecolor:
+				portage.output.havecolor = not self.background
+
+			for filename in uri_map:
+				mydigests = digests.get(filename)
+				if mydigests is None:
+					if not allow_missing:
+						success = False
+						break
+					continue
+				ok, st = _check_distfile(os.path.join(distdir, filename),
+					mydigests, eout, show_errors=False)
+				if not ok:
+					success = False
+					break
+		except portage.exception.FileNotFound:
+			# A file disappeared unexpectedly.
+			return False
+		finally:
+			sys.stdout = stdout_orig
+			sys.stderr = stderr_orig
+			portage.output.havecolor = global_havecolor
+
+		if success:
+			# When returning unsuccessfully, no messages are produced, since
+			# we assume that a fetcher process will later be executed in order
+			# to produce such messages.
+			msg = out.getvalue()
+			if msg:
+				self.scheduler.output(msg, log_path=self.logfile)
+
+		return success
+
+	def _start(self):
+
+		root_config = self.pkg.root_config
+		portdb = root_config.trees["porttree"].dbapi
+		ebuild_path = self._get_ebuild_path()
+
+		try:
+			uri_map = self._get_uri_map()
+		except portage.exception.InvalidDependString as e:
+			msg_lines = []
+			msg = "Fetch failed for '%s' due to invalid SRC_URI: %s" % \
+				(self.pkg.cpv, e)
+			msg_lines.append(msg)
+			self._eerror(msg_lines)
+			self._set_returncode((self.pid, 1 << 8))
+			self.wait()
+			return
+
+		if not uri_map:
+			# Nothing to fetch.
+			self._set_returncode((self.pid, os.EX_OK << 8))
+			self.wait()
+			return
+
+		settings = self.config_pool.allocate()
+		settings.setcpv(self.pkg)
+		portage.doebuild_environment(ebuild_path, 'fetch',
+			settings=settings, db=portdb)
+
+		if self.prefetch and \
+			self._prefetch_size_ok(uri_map, settings, ebuild_path):
+			self.config_pool.deallocate(settings)
+			self._set_returncode((self.pid, os.EX_OK << 8))
+			self.wait()
+			return
+
+		nocolor = settings.get("NOCOLOR")
+
+		if self.prefetch:
+			settings["PORTAGE_PARALLEL_FETCHONLY"] = "1"
+
+		if self.background:
+			nocolor = "true"
+
+		if nocolor is not None:
+			settings["NOCOLOR"] = nocolor
+
+		self._settings = settings
+		SpawnProcess._start(self)
+
+		# Free settings now since it's no longer needed in
+		# this process (the subprocess has a private copy).
+		self.config_pool.deallocate(settings)
+		settings = None
+		self._settings = None
+
+	def _spawn(self, args, fd_pipes=None, **kwargs):
+		"""
+		Fork a subprocess, apply local settings, and call fetch().
+		"""
+
+		pid = os.fork()
+		if pid != 0:
+			portage.process.spawned_pids.append(pid)
+			return [pid]
+
+		portage.process._setup_pipes(fd_pipes)
+
+		# Use default signal handlers in order to avoid problems
+		# killing subprocesses as reported in bug #353239.
+		signal.signal(signal.SIGINT, signal.SIG_DFL)
+		signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+		# Force consistent color output, in case we are capturing fetch
+		# output through a normal pipe due to unavailability of ptys.
+		portage.output.havecolor = self._settings.get('NOCOLOR') \
+			not in ('yes', 'true')
+
+		rval = 1
+		allow_missing = 'allow-missing-manifests' in self._settings.features
+		try:
+			if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
+				digests=copy.deepcopy(self._get_digests()),
+				allow_missing_digests=allow_missing):
+				rval = os.EX_OK
+		except SystemExit:
+			raise
+		except:
+			traceback.print_exc()
+		finally:
+			# Call os._exit() from finally block, in order to suppress any
+			# finally blocks from earlier in the call stack. See bug #345289.
+			os._exit(rval)
+
+	def _get_ebuild_path(self):
+		if self.ebuild_path is not None:
+			return self.ebuild_path
+		portdb = self.pkg.root_config.trees["porttree"].dbapi
+		self.ebuild_path = portdb.findname(self.pkg.cpv, myrepo=self.pkg.repo)
+		if self.ebuild_path is None:
+			raise AssertionError("ebuild not found for '%s'" % self.pkg.cpv)
+		return self.ebuild_path
+
+	def _get_digests(self):
+		if self._digests is not None:
+			return self._digests
+		self._digests = portage.Manifest(os.path.dirname(
+			self._get_ebuild_path()), None).getTypeDigests("DIST")
+		return self._digests
+
+	def _get_uri_map(self):
+		"""
+		This can raise InvalidDependString from portdbapi.getFetchMap().
+		"""
+		if self._uri_map is not None:
+			return self._uri_map
+		pkgdir = os.path.dirname(self._get_ebuild_path())
+		mytree = os.path.dirname(os.path.dirname(pkgdir))
+		use = None
+		if not self.fetchall:
+			use = self.pkg.use.enabled
+		portdb = self.pkg.root_config.trees["porttree"].dbapi
+		self._uri_map = portdb.getFetchMap(self.pkg.cpv,
+			useflags=use, mytree=mytree)
+		return self._uri_map
+
+	def _prefetch_size_ok(self, uri_map, settings, ebuild_path):
+		distdir = settings["DISTDIR"]
+
+		sizes = {}
+		for filename in uri_map:
+			# Use stat rather than lstat since portage.fetch() creates
+			# symlinks when PORTAGE_RO_DISTDIRS is used.
+			try:
+				st = os.stat(os.path.join(distdir, filename))
+			except OSError:
+				return False
+			if st.st_size == 0:
+				return False
+			sizes[filename] = st.st_size
+
+		digests = self._get_digests()
+		for filename, actual_size in sizes.items():
+			size = digests.get(filename, {}).get('size')
+			if size is None:
+				continue
+			if size != actual_size:
+				return False
+
+		# All files are present and sizes are ok. In this case the normal
+		# fetch code will be skipped, so we need to generate equivalent
+		# output here.
+		if self.logfile is not None:
+			f = io.open(_unicode_encode(self.logfile,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='a', encoding=_encodings['content'],
+				errors='backslashreplace')
+			for filename in uri_map:
+				f.write(_unicode_decode((' * %s size ;-) ...' % \
+					filename).ljust(73) + '[ ok ]\n'))
+			f.close()
+
+		return True
+
+	def _pipe(self, fd_pipes):
+		"""When appropriate, use a pty so that fetcher progress bars,
+		like wget has, will work properly."""
+		if self.background or not sys.stdout.isatty():
+			# When the output only goes to a log file,
+			# there's no point in creating a pty.
+			return os.pipe()
+		stdout_pipe = None
+		if not self.background:
+			stdout_pipe = fd_pipes.get(1)
+		got_pty, master_fd, slave_fd = \
+			_create_pty_or_pipe(copy_term_size=stdout_pipe)
+		return (master_fd, slave_fd)
+
+	def _eerror(self, lines):
+		out = io.StringIO()
+		for line in lines:
+			eerror(line, phase="unpack", key=self.pkg.cpv, out=out)
+		msg = out.getvalue()
+		if msg:
+			self.scheduler.output(msg, log_path=self.logfile)
+
+	def _set_returncode(self, wait_retval):
+		SpawnProcess._set_returncode(self, wait_retval)
+		# Collect elog messages that might have been
+		# created by the pkg_nofetch phase.
+		# Skip elog messages for prefetch, in order to avoid duplicates.
+		if not self.prefetch and self.returncode != os.EX_OK:
+			msg_lines = []
+			msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
+			if self.logfile is not None:
+				msg += ", Log file:"
+			msg_lines.append(msg)
+			if self.logfile is not None:
+				msg_lines.append(" '%s'" % (self.logfile,))
+			self._eerror(msg_lines)

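Both already_fetched() and _prefetch_size_ok() above start with the same cheap screen: stat each distfile and compare sizes against the Manifest before doing any digest work. That screening step on its own, assuming the same distdir layout and digest dictionary shape as the code above:

    import os

    def sizes_match(distdir, uri_map, digests):
        """Return False fast if any distfile is missing, empty, or has
        a size disagreeing with its Manifest entry; True otherwise."""
        for filename in uri_map:
            try:
                st = os.stat(os.path.join(distdir, filename))
            except OSError:
                return False
            if st.st_size == 0:
                return False
            expected = digests.get(filename, {}).get("size")
            if expected is not None and st.st_size != expected:
                return False
        return True
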
diff --git a/portage_with_autodep/pym/_emerge/EbuildFetchonly.py b/portage_with_autodep/pym/_emerge/EbuildFetchonly.py
new file mode 100644
index 0000000..b898971
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildFetchonly.py
@@ -0,0 +1,32 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SlotObject import SlotObject
+import portage
+from portage import os
+from portage.elog.messages import eerror
+
+class EbuildFetchonly(SlotObject):
+
+	__slots__ = ("fetch_all", "pkg", "pretend", "settings")
+
+	def execute(self):
+		settings = self.settings
+		pkg = self.pkg
+		portdb = pkg.root_config.trees["porttree"].dbapi
+		ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+		if ebuild_path is None:
+			raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
+		settings.setcpv(pkg)
+		debug = settings.get("PORTAGE_DEBUG") == "1"
+
+		rval = portage.doebuild(ebuild_path, "fetch",
+			settings["ROOT"], settings, debug=debug,
+			listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
+			mydbapi=portdb, tree="porttree")
+
+		if rval != os.EX_OK:
+			msg = "Fetch failed for '%s'" % (pkg.cpv,)
+			eerror(msg, phase="unpack", key=pkg.cpv)
+
+		return rval

diff --git a/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py
new file mode 100644
index 0000000..5dabe34
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py
@@ -0,0 +1,108 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import pickle
+from portage import os
+from portage.localization import _
+from portage.util import writemsg_level
+from _emerge.FifoIpcDaemon import FifoIpcDaemon
+from _emerge.PollConstants import PollConstants
+
+class EbuildIpcDaemon(FifoIpcDaemon):
+	"""
+	This class serves as an IPC daemon, which ebuild processes can use
+	to communicate with portage's main python process.
+
+	Here are a few possible uses:
+
+	1) Robust subshell/subprocess die support. This allows the ebuild
+	   environment to reliably die without having to rely on signal IPC.
+
+	2) Delegation of portageq calls to the main python process, eliminating
+	   performance and userpriv permission issues.
+
+	3) Reliable ebuild termination in cases when the ebuild has accidentally
+	   left orphan processes running in the background (as in bug #278895).
+
+	4) Detection of cases in which bash has exited unexpectedly (as in bug #190128).
+	"""
+
+	__slots__ = ('commands',)
+
+	def _input_handler(self, fd, event):
+		# Read the whole pickle in a single atomic read() call.
+		data = None
+		if event & PollConstants.POLLIN:
+			# For maximum portability, use os.read() here since
+			# array.fromfile() and file.read() are both known to
+			# erroneously return an empty string from this
+			# non-blocking fifo stream on FreeBSD (bug #337465).
+			try:
+				data = os.read(fd, self._bufsize)
+			except OSError as e:
+				if e.errno != errno.EAGAIN:
+					raise
+				# Assume that another event will be generated
+				# if there's any relevant data.
+
+		if data:
+
+			try:
+				obj = pickle.loads(data)
+			except SystemExit:
+				raise
+			except Exception:
+				# The pickle module can raise practically
+				# any exception when given corrupt data.
+				pass
+			else:
+
+				self._reopen_input()
+
+				cmd_key = obj[0]
+				cmd_handler = self.commands[cmd_key]
+				reply = cmd_handler(obj)
+				try:
+					self._send_reply(reply)
+				except OSError as e:
+					if e.errno == errno.ENXIO:
+						# This happens if the client side has been killed.
+						pass
+					else:
+						raise
+
+				# Allow the command to execute hooks after its reply
+				# has been sent. This hook is used by the 'exit'
+				# command to kill the ebuild process. For some
+				# reason, the ebuild-ipc helper hangs up the
+				# ebuild process if it is waiting for a reply
+				# when we try to kill the ebuild process.
+				reply_hook = getattr(cmd_handler,
+					'reply_hook', None)
+				if reply_hook is not None:
+					reply_hook()
+
+	def _send_reply(self, reply):
+		# File streams are in unbuffered mode since we do atomic
+		# read and write of whole pickles. Use non-blocking mode so
+		# we don't hang if the client is killed before we can send
+		# the reply. We rely on the client opening the other side
+		# of this fifo before it sends its request, since otherwise
+		# we'd have a race condition with this open call raising
+		# ENXIO if the client hasn't opened the fifo yet.
+		try:
+			output_fd = os.open(self.output_fifo,
+				os.O_WRONLY | os.O_NONBLOCK)
+			try:
+				os.write(output_fd, pickle.dumps(reply))
+			finally:
+				os.close(output_fd)
+		except OSError as e:
+			# This probably means that the client has been killed,
+			# which causes open to fail with ENXIO.
+			writemsg_level(
+				"!!! EbuildIpcDaemon %s: %s\n" % \
+				(_('failed to send reply'), e),
+				level=logging.ERROR, noiselevel=-1)

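The daemon above assumes each request arrives as one whole pickle in a single read() and replies the same way over the opposite fifo, opened non-blocking so a dead client cannot hang it. A client-side sketch of the same framing — the fifo paths are placeholders and the real client is the ebuild-ipc helper; small replies are assumed so one read suffices:

    import os
    import pickle
    import select

    def send_command(input_fifo, output_fifo, obj, timeout=30):
        # Open our read end first (non-blocking) so the daemon's own
        # non-blocking open() of the reply fifo cannot fail with ENXIO.
        reply_fd = os.open(output_fifo, os.O_RDONLY | os.O_NONBLOCK)
        try:
            fd = os.open(input_fifo, os.O_WRONLY)
            try:
                # One write, one whole pickle: the daemon reads atomically.
                os.write(fd, pickle.dumps(obj))
            finally:
                os.close(fd)
            # Wait for the reply, then read it in a single call.
            if not select.select([reply_fd], [], [], timeout)[0]:
                raise RuntimeError("no reply from daemon")
            return pickle.loads(os.read(reply_fd, 4096))
        finally:
            os.close(reply_fd)
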
diff --git a/portage_with_autodep/pym/_emerge/EbuildMerge.py b/portage_with_autodep/pym/_emerge/EbuildMerge.py
new file mode 100644
index 0000000..9c35988
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildMerge.py
@@ -0,0 +1,56 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from portage import os
+from portage.dbapi._MergeProcess import MergeProcess
+
+class EbuildMerge(CompositeTask):
+
+	__slots__ = ("find_blockers", "logger", "ldpath_mtimes",
+		"pkg", "pkg_count", "pkg_path", "pretend",
+		"settings", "tree", "world_atom")
+
+	def _start(self):
+		root_config = self.pkg.root_config
+		settings = self.settings
+		mycat = settings["CATEGORY"]
+		mypkg = settings["PF"]
+		pkgloc = settings["D"]
+		infloc = os.path.join(settings["PORTAGE_BUILDDIR"], "build-info")
+		myebuild = settings["EBUILD"]
+		mydbapi = root_config.trees[self.tree].dbapi
+		vartree = root_config.trees["vartree"]
+		background = (settings.get('PORTAGE_BACKGROUND') == '1')
+		logfile = settings.get('PORTAGE_LOG_FILE')
+
+		merge_task = MergeProcess(
+			mycat=mycat, mypkg=mypkg, settings=settings,
+			treetype=self.tree, vartree=vartree, scheduler=self.scheduler,
+			background=background, blockers=self.find_blockers, pkgloc=pkgloc,
+			infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
+			prev_mtimes=self.ldpath_mtimes, logfile=logfile)
+
+		self._start_task(merge_task, self._merge_exit)
+
+	def _merge_exit(self, merge_task):
+		if self._final_exit(merge_task) != os.EX_OK:
+			self.wait()
+			return
+
+		pkg = self.pkg
+		self.world_atom(pkg)
+		pkg_count = self.pkg_count
+		pkg_path = self.pkg_path
+		logger = self.logger
+		if "noclean" not in self.settings.features:
+			short_msg = "emerge: (%s of %s) %s Clean Post" % \
+				(pkg_count.curval, pkg_count.maxval, pkg.cpv)
+			logger.log((" === (%s of %s) " + \
+				"Post-Build Cleaning (%s::%s)") % \
+				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
+				short_msg=short_msg)
+		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
+			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+		self.wait()

diff --git a/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py
new file mode 100644
index 0000000..e53298b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py
@@ -0,0 +1,133 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SubProcess import SubProcess
+from _emerge.PollConstants import PollConstants
+import sys
+from portage.cache.mappings import slot_dict_class
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import fcntl
+import io
+
+class EbuildMetadataPhase(SubProcess):
+
+	"""
+	Asynchronous interface for the ebuild "depend" phase which is
+	used to extract metadata from the ebuild.
+	"""
+
+	__slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
+		"ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
+		("_raw_metadata",)
+
+	_file_names = ("ebuild",)
+	_files_dict = slot_dict_class(_file_names, prefix="")
+	_metadata_fd = 9
+
+	def _start(self):
+		settings = self.settings
+		settings.setcpv(self.cpv)
+		ebuild_path = self.ebuild_path
+
+		eapi = None
+		if 'parse-eapi-ebuild-head' in settings.features:
+			eapi = portage._parse_eapi_ebuild_head(
+				io.open(_unicode_encode(ebuild_path,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['repo.content'],
+				errors='replace'))
+
+		if eapi is not None:
+			if not portage.eapi_is_supported(eapi):
+				self.metadata_callback(self.cpv, self.ebuild_path,
+					self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
+				self._set_returncode((self.pid, os.EX_OK << 8))
+				self.wait()
+				return
+
+			settings.configdict['pkg']['EAPI'] = eapi
+
+		debug = settings.get("PORTAGE_DEBUG") == "1"
+		master_fd = None
+		slave_fd = None
+		fd_pipes = None
+		if self.fd_pipes is not None:
+			fd_pipes = self.fd_pipes.copy()
+		else:
+			fd_pipes = {}
+
+		fd_pipes.setdefault(0, sys.stdin.fileno())
+		fd_pipes.setdefault(1, sys.stdout.fileno())
+		fd_pipes.setdefault(2, sys.stderr.fileno())
+
+		# flush any pending output
+		for fd in fd_pipes.values():
+			if fd == sys.stdout.fileno():
+				sys.stdout.flush()
+			if fd == sys.stderr.fileno():
+				sys.stderr.flush()
+
+		fd_pipes_orig = fd_pipes.copy()
+		self._files = self._files_dict()
+		files = self._files
+
+		master_fd, slave_fd = os.pipe()
+		fcntl.fcntl(master_fd, fcntl.F_SETFL,
+			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+		fd_pipes[self._metadata_fd] = slave_fd
+
+		self._raw_metadata = []
+		files.ebuild = os.fdopen(master_fd, 'rb', 0)
+		self._reg_id = self.scheduler.register(files.ebuild.fileno(),
+			self._registered_events, self._output_handler)
+		self._registered = True
+
+		retval = portage.doebuild(ebuild_path, "depend",
+			settings["ROOT"], settings, debug,
+			mydbapi=self.portdb, tree="porttree",
+			fd_pipes=fd_pipes, returnpid=True)
+
+		os.close(slave_fd)
+
+		if isinstance(retval, int):
+			# doebuild failed before spawning
+			self._unregister()
+			self._set_returncode((self.pid, retval << 8))
+			self.wait()
+			return
+
+		self.pid = retval[0]
+		portage.process.spawned_pids.remove(self.pid)
+
+	def _output_handler(self, fd, event):
+
+		if event & PollConstants.POLLIN:
+			self._raw_metadata.append(self._files.ebuild.read())
+			if not self._raw_metadata[-1]:
+				self._unregister()
+				self.wait()
+
+		self._unregister_if_appropriate(event)
+
+	def _set_returncode(self, wait_retval):
+		SubProcess._set_returncode(self, wait_retval)
+		if self.returncode == os.EX_OK:
+			metadata_lines = ''.join(_unicode_decode(chunk,
+				encoding=_encodings['repo.content'], errors='replace')
+				for chunk in self._raw_metadata).splitlines()
+			if len(portage.auxdbkeys) != len(metadata_lines):
+				# Don't trust bash's returncode if the
+				# number of lines is incorrect.
+				self.returncode = 1
+			else:
+				metadata = zip(portage.auxdbkeys, metadata_lines)
+				self.metadata = self.metadata_callback(self.cpv,
+					self.ebuild_path, self.repo_path, metadata,
+					self.ebuild_mtime)
+

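_set_returncode above trusts bash's exit status only when the captured output decodes into exactly one line per key in portage.auxdbkeys; a short or long read marks the phase failed. The validation in isolation, with auxdbkeys stubbed for the sake of a self-contained example:

    AUXDBKEYS = ("DEPEND", "RDEPEND", "SLOT", "SRC_URI", "EAPI")  # stub

    def parse_metadata(raw_chunks):
        """Pair metadata keys with output lines, or return None when
        the line count is wrong (a truncated or corrupt depend run)."""
        lines = "".join(raw_chunks).splitlines()
        if len(lines) != len(AUXDBKEYS):
            return None
        return dict(zip(AUXDBKEYS, lines))
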
diff --git a/portage_with_autodep/pym/_emerge/EbuildPhase.py b/portage_with_autodep/pym/_emerge/EbuildPhase.py
new file mode 100644
index 0000000..82c165d
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildPhase.py
@@ -0,0 +1,350 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gzip
+import io
+import sys
+import tempfile
+
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.EbuildProcess import EbuildProcess
+from _emerge.CompositeTask import CompositeTask
+from portage.util import writemsg
+from portage.xml.metadata import MetaDataXML
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.elog:messages@elog_messages',
+	'portage.package.ebuild.doebuild:_check_build_log,' + \
+		'_post_phase_cmds,_post_phase_userpriv_perms,' + \
+		'_post_src_install_chost_fix,' + \
+		'_post_src_install_soname_symlinks,' + \
+		'_post_src_install_uid_fix,_postinst_bsdflags,' + \
+		'_preinst_bsdflags'
+)
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+
+class EbuildPhase(CompositeTask):
+
+	__slots__ = ("actionmap", "phase", "settings") + \
+		("_ebuild_lock",)
+
+	# FEATURES displayed prior to setup phase
+	_features_display = ("ccache", "depcheck", "depcheckstrict", "distcc",
+		"distcc-pump", "fakeroot",
+		"installsources", "keeptemp", "keepwork", "nostrip",
+		"preserve-libs", "sandbox", "selinux", "sesandbox",
+		"splitdebug", "suidctl", "test", "userpriv",
+		"usersandbox")
+
+	# Locked phases
+	_locked_phases = ("setup", "preinst", "postinst", "prerm", "postrm")
+
+	def _start(self):
+
+		need_builddir = self.phase not in EbuildProcess._phases_without_builddir
+
+		if need_builddir:
+			phase_completed_file = os.path.join(
+				self.settings['PORTAGE_BUILDDIR'],
+				".%sed" % self.phase.rstrip('e'))
+			if not os.path.exists(phase_completed_file):
+				# If the phase is really going to run then we want
+				# to eliminate any stale elog messages that may
+				# exist from a previous run.
+				try:
+					os.unlink(os.path.join(self.settings['T'],
+						'logging', self.phase))
+				except OSError:
+					pass
+
+		if self.phase in ('nofetch', 'pretend', 'setup'):
+
+			use = self.settings.get('PORTAGE_BUILT_USE')
+			if use is None:
+				use = self.settings['PORTAGE_USE']
+
+			maint_str = ""
+			upstr_str = ""
+			metadata_xml_path = os.path.join(os.path.dirname(self.settings['EBUILD']), "metadata.xml")
+			if os.path.isfile(metadata_xml_path):
+				herds_path = os.path.join(self.settings['PORTDIR'],
+					'metadata/herds.xml')
+				try:
+					metadata_xml = MetaDataXML(metadata_xml_path, herds_path)
+					maint_str = metadata_xml.format_maintainer_string()
+					upstr_str = metadata_xml.format_upstream_string()
+				except SyntaxError:
+					maint_str = "<invalid metadata.xml>"
+
+			msg = []
+			msg.append("Package:    %s" % self.settings.mycpv)
+			if self.settings.get('PORTAGE_REPO_NAME'):
+				msg.append("Repository: %s" % self.settings['PORTAGE_REPO_NAME'])
+			if maint_str:
+				msg.append("Maintainer: %s" % maint_str)
+			if upstr_str:
+				msg.append("Upstream:   %s" % upstr_str)
+
+			msg.append("USE:        %s" % use)
+			relevant_features = []
+			enabled_features = self.settings.features
+			for x in self._features_display:
+				if x in enabled_features:
+					relevant_features.append(x)
+			if relevant_features:
+				msg.append("FEATURES:   %s" % " ".join(relevant_features))
+
+			# Force background=True for this header since it's intended
+			# for the log and it doesn't necessarily need to be visible
+			# elsewhere.
+			self._elog('einfo', msg, background=True)
+
+		if self.phase == 'package':
+			if 'PORTAGE_BINPKG_TMPFILE' not in self.settings:
+				self.settings['PORTAGE_BINPKG_TMPFILE'] = \
+					os.path.join(self.settings['PKGDIR'],
+					self.settings['CATEGORY'], self.settings['PF']) + '.tbz2'
+
+		if self.phase in ("pretend", "prerm"):
+			env_extractor = BinpkgEnvExtractor(background=self.background,
+				scheduler=self.scheduler, settings=self.settings)
+			if env_extractor.saved_env_exists():
+				self._start_task(env_extractor, self._env_extractor_exit)
+				return
+			# If the environment.bz2 doesn't exist, then ebuild.sh will
+			# source the ebuild as a fallback.
+
+		self._start_lock()
+
+	def _env_extractor_exit(self, env_extractor):
+		if self._default_exit(env_extractor) != os.EX_OK:
+			self.wait()
+			return
+
+		self._start_lock()
+
+	def _start_lock(self):
+		if (self.phase in self._locked_phases and
+			"ebuild-locks" in self.settings.features):
+			eroot = self.settings["EROOT"]
+			lock_path = os.path.join(eroot, portage.VDB_PATH + "-ebuild")
+			if os.access(os.path.dirname(lock_path), os.W_OK):
+				self._ebuild_lock = AsynchronousLock(path=lock_path,
+					scheduler=self.scheduler)
+				self._start_task(self._ebuild_lock, self._lock_exit)
+				return
+
+		self._start_ebuild()
+
+	def _lock_exit(self, ebuild_lock):
+		if self._default_exit(ebuild_lock) != os.EX_OK:
+			self.wait()
+			return
+		self._start_ebuild()
+
+	def _start_ebuild(self):
+
+		# Don't open the log file during the clean phase since the
+		# open file can result in an nfs lock on $T/build.log which
+		# prevents the clean phase from removing $T.
+		logfile = None
+		if self.phase not in ("clean", "cleanrm") and \
+			self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+			logfile = self.settings.get("PORTAGE_LOG_FILE")
+
+		fd_pipes = None
+		if not self.background and self.phase == 'nofetch':
+			# All the pkg_nofetch output goes to stderr since
+			# it's considered to be an error message.
+			fd_pipes = {1 : sys.stderr.fileno()}
+
+		ebuild_process = EbuildProcess(actionmap=self.actionmap,
+			background=self.background, fd_pipes=fd_pipes, logfile=logfile,
+			phase=self.phase, scheduler=self.scheduler,
+			settings=self.settings)
+
+		self._start_task(ebuild_process, self._ebuild_exit)
+
+	def _ebuild_exit(self, ebuild_process):
+
+		if self._ebuild_lock is not None:
+			self._ebuild_lock.unlock()
+			self._ebuild_lock = None
+
+		fail = False
+		if self._default_exit(ebuild_process) != os.EX_OK:
+			if self.phase == "test" and \
+				"test-fail-continue" in self.settings.features:
+				pass
+			else:
+				fail = True
+
+		if not fail:
+			self.returncode = None
+
+		logfile = None
+		if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+			logfile = self.settings.get("PORTAGE_LOG_FILE")
+
+		if self.phase == "install":
+			out = io.StringIO()
+			_check_build_log(self.settings, out=out)
+			msg = out.getvalue()
+			self.scheduler.output(msg, log_path=logfile)
+
+		if fail:
+			self._die_hooks()
+			return
+
+		settings = self.settings
+		_post_phase_userpriv_perms(settings)
+
+		if self.phase == "install":
+			out = io.StringIO()
+			_post_src_install_chost_fix(settings)
+			_post_src_install_uid_fix(settings, out)
+			msg = out.getvalue()
+			if msg:
+				self.scheduler.output(msg, log_path=logfile)
+		elif self.phase == "preinst":
+			_preinst_bsdflags(settings)
+		elif self.phase == "postinst":
+			_postinst_bsdflags(settings)
+
+		post_phase_cmds = _post_phase_cmds.get(self.phase)
+		if post_phase_cmds is not None:
+			if logfile is not None and self.phase in ("install",):
+				# Log to a temporary file, since the code we are running
+				# reads PORTAGE_LOG_FILE for QA checks, and we want to
+				# avoid annoying "gzip: unexpected end of file" messages
+				# when FEATURES=compress-build-logs is enabled.
+				fd, logfile = tempfile.mkstemp()
+				os.close(fd)
+			post_phase = MiscFunctionsProcess(background=self.background,
+				commands=post_phase_cmds, logfile=logfile, phase=self.phase,
+				scheduler=self.scheduler, settings=settings)
+			self._start_task(post_phase, self._post_phase_exit)
+			return
+
+		# this point is not reachable if there was a failure and
+		# we returned for die_hooks above, so returncode must
+		# indicate success (especially if ebuild_process.returncode
+		# is unsuccessful and test-fail-continue came into play)
+		self.returncode = os.EX_OK
+		self._current_task = None
+		self.wait()
+
+	def _post_phase_exit(self, post_phase):
+
+		self._assert_current(post_phase)
+
+		log_path = None
+		if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+			log_path = self.settings.get("PORTAGE_LOG_FILE")
+
+		if post_phase.logfile is not None and \
+			post_phase.logfile != log_path:
+			# We were logging to a temp file (see above), so append
+			# temp file to main log and remove temp file.
+			self._append_temp_log(post_phase.logfile, log_path)
+
+		if self._final_exit(post_phase) != os.EX_OK:
+			writemsg("!!! post %s failed; exiting.\n" % self.phase,
+				noiselevel=-1)
+			self._die_hooks()
+			return
+
+		if self.phase == "install":
+			out = io.StringIO()
+			_post_src_install_soname_symlinks(self.settings, out)
+			msg = out.getvalue()
+			if msg:
+				self.scheduler.output(msg, log_path=log_path)
+
+		self._current_task = None
+		self.wait()
+		return
+
+	def _append_temp_log(self, temp_log, log_path):
+
+		temp_file = open(_unicode_encode(temp_log,
+			encoding=_encodings['fs'], errors='strict'), 'rb')
+
+		log_file = self._open_log(log_path)
+
+		for line in temp_file:
+			log_file.write(line)
+
+		temp_file.close()
+		log_file.close()
+		os.unlink(temp_log)
+
+	def _open_log(self, log_path):
+
+		f = open(_unicode_encode(log_path,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='ab')
+
+		if log_path.endswith('.gz'):
+			f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
+
+		return f
+
+	def _die_hooks(self):
+		self.returncode = None
+		phase = 'die_hooks'
+		die_hooks = MiscFunctionsProcess(background=self.background,
+			commands=[phase], phase=phase,
+			scheduler=self.scheduler, settings=self.settings)
+		self._start_task(die_hooks, self._die_hooks_exit)
+
+	def _die_hooks_exit(self, die_hooks):
+		if self.phase != 'clean' and \
+			'noclean' not in self.settings.features and \
+			'fail-clean' in self.settings.features:
+			self._default_exit(die_hooks)
+			self._fail_clean()
+			return
+		self._final_exit(die_hooks)
+		self.returncode = 1
+		self.wait()
+
+	def _fail_clean(self):
+		self.returncode = None
+		portage.elog.elog_process(self.settings.mycpv, self.settings)
+		phase = "clean"
+		clean_phase = EbuildPhase(background=self.background,
+			phase=phase, scheduler=self.scheduler, settings=self.settings)
+		self._start_task(clean_phase, self._fail_clean_exit)
+		return
+
+	def _fail_clean_exit(self, clean_phase):
+		self._final_exit(clean_phase)
+		self.returncode = 1
+		self.wait()
+
+	def _elog(self, elog_funcname, lines, background=None):
+		if background is None:
+			background = self.background
+		out = io.StringIO()
+		phase = self.phase
+		elog_func = getattr(elog_messages, elog_funcname)
+		global_havecolor = portage.output.havecolor
+		try:
+			portage.output.havecolor = \
+				self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+			for line in lines:
+				elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+		finally:
+			portage.output.havecolor = global_havecolor
+		msg = out.getvalue()
+		if msg:
+			log_path = None
+			if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+				log_path = self.settings.get("PORTAGE_LOG_FILE")
+			self.scheduler.output(msg, log_path=log_path,
+				background=background)

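For the install phase, the post-phase QA commands write to a private temp log which _append_temp_log() then splices onto the main log, re-wrapping the destination in a GzipFile when FEATURES=compress-build-logs is active. The splice in plain form, following the gzip handling of _open_log() above:

    import gzip
    import os

    def append_log(temp_log, log_path):
        # Append the temp file to the main log, honouring .gz logs,
        # then drop the temp file (cf. EbuildPhase._append_temp_log).
        out = open(log_path, "ab")
        if log_path.endswith(".gz"):
            out = gzip.GzipFile(filename="", mode="ab", fileobj=out)
        with open(temp_log, "rb") as src:
            for line in src:
                out.write(line)
        out.close()
        os.unlink(temp_log)
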
diff --git a/portage_with_autodep/pym/_emerge/EbuildProcess.py b/portage_with_autodep/pym/_emerge/EbuildProcess.py
new file mode 100644
index 0000000..ce97aff
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildProcess.py
@@ -0,0 +1,21 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.package.ebuild.doebuild:_doebuild_spawn,_spawn_actionmap'
+)
+
+class EbuildProcess(AbstractEbuildProcess):
+
+	__slots__ = ('actionmap',)
+
+	def _spawn(self, args, **kwargs):
+
+		actionmap = self.actionmap
+		if actionmap is None:
+			actionmap = _spawn_actionmap(self.settings)
+
+		return _doebuild_spawn(self.phase, self.settings,
+				actionmap=actionmap, **kwargs)

diff --git a/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.py b/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.py
new file mode 100644
index 0000000..e1f682a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.py
@@ -0,0 +1,16 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+
+class EbuildSpawnProcess(AbstractEbuildProcess):
+	"""
+	Used by doebuild.spawn() to manage the spawned process.
+	"""
+	_spawn_kwarg_names = AbstractEbuildProcess._spawn_kwarg_names + \
+		('fakeroot_state',)
+
+	__slots__ = ('fakeroot_state', 'spawn_func')
+
+	def _spawn(self, args, **kwargs):
+		return self.spawn_func(args, env=self.settings.environ(), **kwargs)

diff --git a/portage_with_autodep/pym/_emerge/EventsAnalyser.py b/portage_with_autodep/pym/_emerge/EventsAnalyser.py
new file mode 100644
index 0000000..65ece7b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EventsAnalyser.py
@@ -0,0 +1,511 @@
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.dbapi._expand_new_virt import expand_new_virt
+from portage import os
+
+import subprocess
+import re
+
+class PortageUtils:
+	""" class for accessing the portage api """
+	def __init__(self, settings):
+		""" test """
+		self.settings=settings
+		self.vartree=portage.vartree(settings=settings)
+		self.vardbapi=portage.vardbapi(settings=settings, vartree=self.vartree)
+		self.portdbapi=portage.portdbapi(mysettings=settings)
+		self.metadata_keys = [k for k in portage.auxdbkeys if not k.startswith("UNUSED_")]
+		self.use=self.settings["USE"]
+
+	def get_best_visible_pkg(self,pkg):
+		"""
+		Gets the best candidate for installation. Returns an empty string if none is found.
+
+		:param pkg: package name
+
+		"""
+		try:
+			return self.portdbapi.xmatch("bestmatch-visible", pkg)
+		except Exception:
+			return ''
+
+	# non-recursive dependency getter
+	def get_dep(self,pkg,dep_type=["RDEPEND","DEPEND"]):
+	  """
+	  Gets the current dependencies of a package, looking them up in the portage db.
+	  
+	  :param pkg: name of package
+	  :param dep_type: types of dependencies to look up. Can be ["DEPEND"] or 
+		["RDEPEND", "DEPEND"]
+	  :returns: **set** of package names
+	  """
+	  ret=set()
+	  
+	  pkg = self.get_best_visible_pkg(pkg)	
+	  if not pkg:
+		  return ret
+	  
+	  # we found the best visible match in common tree
+
+
+	  metadata = dict(zip(self.metadata_keys, 
+		  self.portdbapi.aux_get(pkg, self.metadata_keys)))
+	  dep_str = " ".join(metadata[k] for k in dep_type)
+
+	  # the IUSE defaults are very important for us
+	  iuse_defaults=[
+		u[1:] for u in metadata.get("IUSE",'').split() if u.startswith("+")]
+	  
+	  use=self.use.split()
+	  
+	  for u in iuse_defaults:
+		  if u not in use:
+			  use.append(u)
+
+	  success, atoms = portage.dep_check(dep_str, None, self.settings, 
+		  myuse=use, myroot=self.settings["ROOT"],
+		  trees={self.settings["ROOT"]:{"vartree":self.vartree, "porttree": self.vartree}})
+	  if not success:
+		  return ret
+
+	  for atom in atoms:
+		  atomname = self.vartree.dep_bestmatch(atom)
+
+		  if not atomname:
+			  continue
+		
+		  for unvirt_pkg in expand_new_virt(self.vardbapi,'='+atomname):
+			  for pkg in self.vartree.dep_match(unvirt_pkg):
+				  ret.add(pkg)
+
+	  return ret
+
+	# recursive dependency getter
+	def get_deps(self,pkg,dep_type=["RDEPEND","DEPEND"]):
+		""" 
+		Gets the current dependencies of a package at any depth.
+		All dependencies **must** be installed.
+		
+		:param pkg: name of package
+		:param dep_type: types of dependencies to recurse. Can be ["DEPEND"] or 
+		  ["RDEPEND", "DEPEND"]
+		:returns: **set** of package names
+		"""
+		ret=set()
+
+
+		# get porttree dependencies on the first package
+
+		pkg = self.portdbapi.xmatch("bestmatch-visible", pkg)	
+		if not pkg:
+			return ret
+
+		known_packages=set()
+		unknown_packages=self.get_dep(pkg,dep_type)
+		ret=ret.union(unknown_packages)
+		
+		while unknown_packages:
+			p=unknown_packages.pop()
+			if p in known_packages:
+				continue
+			known_packages.add(p)
+
+			metadata = dict(zip(self.metadata_keys, self.vardbapi.aux_get(p, self.metadata_keys)))
+
+			dep_str = " ".join(metadata[k] for k in dep_type)
+			
+			# the IUSE defaults are very important for us
+			iuse_defaults=[
+			  u[1:] for u in metadata.get("IUSE",'').split() if u.startswith("+")]
+	  
+			use=self.use.split()
+	  
+			for u in iuse_defaults:
+				if u not in use:
+					use.append(u)
+			
+			success, atoms = portage.dep_check(dep_str, None, self.settings, 
+				myuse=use,	myroot=self.settings["ROOT"],
+				trees={self.settings["ROOT"]:{"vartree":self.vartree,"porttree": self.vartree}})
+
+			if not success:
+			  continue
+
+			for atom in atoms:
+				atomname = self.vartree.dep_bestmatch(atom)
+				if not atomname:
+					continue
+								
+				for unvirt_pkg in expand_new_virt(self.vardbapi,'='+atomname):
+					for pkg in self.vartree.dep_match(unvirt_pkg):
+						ret.add(pkg)
+						unknown_packages.add(pkg)
+		return ret
+
+	def get_deps_for_package_building(self, pkg):
+		""" 
+		  Returns the buildtime dependencies of a package together with 
+		  all runtime dependencies of those buildtime dependencies.
+		"""
+		buildtime_deps=self.get_dep(pkg, ["DEPEND"])
+		runtime_deps=set()
+		for dep in buildtime_deps:
+			runtime_deps=runtime_deps.union(self.get_deps(dep,["RDEPEND"]))
+
+		ret=buildtime_deps.union(runtime_deps)
+		return ret
+
+	def get_system_packages_list(self):
+		""" 
+		Returns all packages from the system set; they are always implicit dependencies.
+		
+		:returns: **list** of package names
+		"""
+		ret=[]
+		for atom in self.settings.packages:
+			for pre_pkg in self.vartree.dep_match(atom):
+				for unvirt_pkg in expand_new_virt(self.vardbapi,'='+pre_pkg):
+					for pkg in self.vartree.dep_match(unvirt_pkg):
+						ret.append(pkg)
+		return ret
+
+
+class GentoolkitUtils:
+	""" 
+	Interface to the qfile and qlist utilities. They are much faster than 
+	the portage internals.
+	"""
+
+	@staticmethod
+	def getpackagesbyfiles(files):
+		"""
+		:param files: list of filenames
+		:returns: **dictionary** file->package, if file doesn't belong to any
+		  package it not returned as key of this dictionary
+		"""
+		ret={}
+		listtocheck=[]
+		for f in files:
+			if os.path.isdir(f):
+				ret[f]="directory"
+			else:
+				listtocheck.append(f)
+			
+		try:
+			proc=subprocess.Popen(['qfile','--nocolor','--exact','--from','-'],
+				stdin=subprocess.PIPE, stdout=subprocess.PIPE,stderr=subprocess.PIPE, 
+				bufsize=4096)
+						
+			out,err=proc.communicate("\n".join(listtocheck).encode("utf8"))
+			
+			lines=out.decode("utf8").split("\n")
+			line_re=re.compile(r"^([^ ]+)\s+\(([^)]+)\)$")
+			for line in lines:
+				if len(line)==0:
+					continue
+				match=line_re.match(line)
+				if match:
+					ret[match.group(2)]=match.group(1)
+				else:
+					portage.util.writemsg("Util qfile returned unparsable string: %s\n" % line)
+
+		except OSError as e:
+			portage.util.writemsg("Error while launching qfile: %s\n" % e)
+		  
+		  
+		return ret
+	  
+	@staticmethod
+	def getfilesbypackages(packagenames):
+		"""
+		:param packagenames: list of package names
+		:returns: **list** of files belonging to the given packages
+		"""
+		ret=[]
+		try:
+			proc=subprocess.Popen(['qlist']+['--nocolor',"--obj"]+packagenames,
+				stdout=subprocess.PIPE,stderr=subprocess.PIPE, 
+				bufsize=4096)
+			
+			out,err=proc.communicate()
+			
+			ret=out.decode("utf8").split("\n")
+			if ret==['']:
+				ret=[]
+		except OSError as e:
+			portage.util.writemsg("Error while launching qfile: %s\n" % e)
+
+		return ret
+	  
+	@staticmethod
+	def get_all_packages_files():
+		"""
+		Memory-hungry operation
+		
+		:returns: **set** of all files that belong to any installed package
+		"""
+		ret=[]
+		try:
+			proc=subprocess.Popen(['qlist']+['--all',"--obj"],
+				stdout=subprocess.PIPE,stderr=subprocess.PIPE, 
+				bufsize=4096)
+			  
+			out,err=proc.communicate()
+			
+			ret=out.decode("utf8").split("\n")
+		except OSError as e:
+			portage.util.writemsg("Error while launching qfile: %s\n" % e)
+
+		return set(ret)
+   
+class FilterProcGenerator:
+	def __init__(self, pkgname, settings):
+		portageutils=PortageUtils(settings=settings)
+
+		deps_all=portageutils.get_deps_for_package_building(pkgname)
+		deps_portage=portageutils.get_dep('portage',["RDEPEND"])
+		
+		system_packages=portageutils.get_system_packages_list()
+
+		allfiles=GentoolkitUtils.get_all_packages_files()
+		portage.util.writemsg("All files list recieved, waiting for " \
+			"a list of allowed files\n")
+
+
+		allowedpkgs=system_packages+list(deps_portage)+list(deps_all)	
+		
+		allowedfiles=GentoolkitUtils.getfilesbypackages(allowedpkgs)
+
+		# manually add all python interpreters to this list
+		allowedfiles+=GentoolkitUtils.getfilesbypackages(['python'])
+		allowedfiles=set(allowedfiles)
+		
+		deniedfiles=allfiles-allowedfiles
+
+		def filter_proc(eventname,filename,stage):
+			if filename in deniedfiles:
+				return False
+			return True
+			
+		self.filter_proc=filter_proc
+	def get_filter_proc(self):
+		return self.filter_proc
+
+class EventsAnalyser:
+	def __init__(self, pkgname, events, settings):
+		self.pkgname=pkgname
+		self.events=events
+		self.settings=settings
+		self.portageutils=PortageUtils(settings=settings)
+
+		self.deps_all=self.portageutils.get_deps_for_package_building(pkgname)
+		self.deps_direct=self.portageutils.get_dep(pkgname,["DEPEND"])
+		self.deps_portage=self.portageutils.get_dep('portage',["RDEPEND"])
+		
+		self.system_packages=self.portageutils.get_system_packages_list()
+		# All the analysis work happens here
+		
+		# get unique filenames
+		filenames=set()
+		for stage in events:
+			succ_events=set(events[stage][0])
+			fail_events=set(events[stage][1])
+			filenames=filenames.union(succ_events)
+			filenames=filenames.union(fail_events)
+		filenames=list(filenames)
+
+		file_to_package=GentoolkitUtils.getpackagesbyfiles(filenames)
+		# This part is hard to read: it converts one complex structure
+		# (returned by getfsevents) into another complex structure that
+		# is better suited for generating the output.
+		#
+		# The old structure is also used during output
+
+		packagesinfo={}
+
+		for stage in sorted(events):
+			  succ_events=events[stage][0]
+			  fail_events=events[stage][1]
+			  
+			  for filename in succ_events:
+				  if filename in file_to_package:
+					  package=file_to_package[filename]
+				  else:
+					  package="unknown"
+					
+				  if not package in packagesinfo:
+					  packagesinfo[package]={}
+				  stageinfo=packagesinfo[package]
+				  if not stage in stageinfo:
+					  stageinfo[stage]={}
+					
+				  filesinfo=stageinfo[stage]
+				  if not filename in filesinfo:
+					  filesinfo[filename]={"found":[],"notfound":[]}
+				  filesinfo[filename]["found"]=succ_events[filename]
+				
+			  for filename in fail_events:
+				  if filename in file_to_package:
+					  package=file_to_package[filename]
+				  else:
+					  package="unknown"
+				  if not package in packagesinfo:
+					  packagesinfo[package]={}
+				  stageinfo=packagesinfo[package]
+				  if not stage in stageinfo:
+					  stageinfo[stage]={}
+					
+				  filesinfo=stageinfo[stage]
+				  if not filename in filesinfo:
+					  filesinfo[filename]={"found":[],"notfound":[]}
+				  filesinfo[filename]["notfound"]=fail_events[filename]
+		self.packagesinfo=packagesinfo
+		  
+	def display(self):
+		portage.util.writemsg(
+			portage.output.colorize(
+				"WARN", "\nFile access report for %s:\n" % self.pkgname))
+
+		stagesorder={"clean":1,"setup":2,"unpack":3,"prepare":4,"configure":5,"compile":6,"test":7,
+			 "install":8,"preinst":9,"postinst":10,"prerm":11,"postrm":12,"unknown":13}
+		packagesinfo=self.packagesinfo
+		# print information grouped by package	  
+		for package in sorted(packagesinfo):
+			# don't show the special "directory" pseudo-package
+			if package=="directory":
+				continue
+			
+			if package=="unknown":
+				continue
+
+
+			is_pkg_in_dep=package in self.deps_all
+			is_pkg_in_portage_dep=package in self.deps_portage
+			is_pkg_in_system=package in self.system_packages
+			is_pkg_python="dev-lang/python" in package
+
+			stages=[]
+			for stage in sorted(packagesinfo[package].keys(), key=stagesorder.get):
+				if stage!="unknown":
+					stages.append(stage)
+
+			if len(stages)==0:
+				continue
+	
+			filenames={}
+			for stage in stages:
+				for filename in packagesinfo[package][stage]:
+					if len(packagesinfo[package][stage][filename]["found"])!=0:
+						was_readed,was_writed=packagesinfo[package][stage][filename]["found"]
+						if not filename in filenames:
+							filenames[filename]=['ok',was_readed,was_writed]
+						else:
+							status, old_was_readed, old_was_writed=filenames[filename]
+							filenames[filename]=[
+							  'ok',old_was_readed | was_readed, old_was_writed | was_writed 
+							]
+					if len(packagesinfo[package][stage][filename]["notfound"])!=0:
+						was_notfound,was_blocked=packagesinfo[package][stage][filename]["notfound"]
+						if not filename in filenames:
+							filenames[filename]=['err',was_notfound,was_blocked]
+						else:
+							status, old_was_notfound, old_was_blocked=filenames[filename]
+							filenames[filename]=[
+							  'err',old_was_notfound | was_notfound, old_was_blocked | was_blocked 
+							]
+				  
+
+			if is_pkg_in_dep:
+				portage.util.writemsg("[OK]")
+			elif is_pkg_in_system:
+				portage.util.writemsg("[SYSTEM]")
+			elif is_pkg_in_portage_dep:
+				portage.util.writemsg("[PORTAGE DEP]")
+			elif is_pkg_python:
+				portage.util.writemsg("[INTERPRETER]")
+			elif not self.is_package_useful(package,stages,filenames.keys()):
+				portage.util.writemsg("[LIKELY OK]")
+			else:
+				portage.util.writemsg(portage.output.colorize("BAD", "[NOT IN DEPS]"))
+			# show information about accessed files
+
+			portage.util.writemsg(" %-40s: %s\n" % (package,stages))
+
+			# this is here for readability
+			action={
+				('ok',False,False):"accessed",
+				('ok',True,False):"readed",
+				('ok',False,True):"writed",
+				('ok',True,True):"readed and writed",
+				('err',False,False):"other error",
+				('err',True,False):"not found",
+				('err',False,True):"blocked",
+				('err',True,True):"not found and blocked"
+			}
+			
+			filescounter=0
+			
+			for filename in filenames:
+				event_info=tuple(filenames[filename])
+				portage.util.writemsg("  %-56s %-21s\n" % (filename,action[event_info]))
+				filescounter+=1
+				if filescounter>10:
+					portage.util.writemsg("  ... and %d more ...\n" % (len(filenames)-10))
+					break
+		# ... and one more check. Making sure that direct build time 
+		# dependencies were accessed
+		not_accessed_deps=set(self.deps_direct)-set(self.packagesinfo.keys())
+		if not_accessed_deps:
+			portage.util.writemsg(portage.output.colorize("WARN", "!!! "))
+			portage.util.writemsg("Warning! Some build time dependencies " + \
+				"of packages were not accessed: " + \
+				" ".join(not_accessed_deps) + "\n")
+					
+	def is_package_useful(self,pkg,stages,files):
+		""" some basic heuristics here to cut part of packages """
+
+		excluded_paths=set(
+			['/etc/sandbox.d/']
+		)
+
+		excluded_packages=set(
+			# autodep shows these two packages every time
+			['net-zope/zope-fixers', 'net-zope/zope-interface']
+		)
+
+
+		def is_pkg_excluded(p):
+			for pkg in excluded_packages:
+				if p.startswith(pkg): # if package is excluded
+					return True
+			return False
+
+
+		def is_file_excluded(f):
+			for path in excluded_paths:
+				if f.startswith(path): # if path is excluded
+					return True
+			return False
+
+
+		if is_pkg_excluded(pkg):
+			return False
+
+		for f in files:
+			if is_file_excluded(f):
+			  continue
+			
+			# test 1: a package is not useful if all of its accessed files are *.desktop, *.xml, *.m4 or *.pc
+			if not (f.endswith(".desktop") or f.endswith(".xml") or f.endswith(".m4") or f.endswith(".pc")):
+			  break
+		else:
+			return False # we only get here if the loop ended without break
+		  
+		return True
+		
+    
\ No newline at end of file

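Taken together, PortageUtils computes which packages a build may legitimately touch and FilterProcGenerator turns that into a per-file predicate for the logger. A hedged sketch of driving the helpers directly (the package atom is arbitrary, and the global portage.settings config is assumed):

	# Illustrative only.
	import portage
	from _emerge.EventsAnalyser import PortageUtils, FilterProcGenerator

	utils = PortageUtils(settings=portage.settings)
	deps = utils.get_deps_for_package_building("app-editors/vim")
	allowed = set(utils.get_system_packages_list()) | deps

	# Or build the file-access predicate used by the logger:
	filter_proc = FilterProcGenerator("app-editors/vim",
		settings=portage.settings).get_filter_proc()
	print(filter_proc("read", "/usr/bin/gcc", "compile"))  # True or False
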
diff --git a/portage_with_autodep/pym/_emerge/EventsLogger.py b/portage_with_autodep/pym/_emerge/EventsLogger.py
new file mode 100644
index 0000000..68b3c67
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EventsLogger.py
@@ -0,0 +1,180 @@
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import sys
+import stat
+import socket
+import select
+import tempfile
+
+import threading
+
+from portage import os
+
+class EventsLogger(threading.Thread):
+	def default_filter(eventname, filename, stage):
+		return True
+		
+	def __init__(self, socket_dir="/tmp/", filter_proc=default_filter):
+		threading.Thread.__init__(self) # init the Thread
+		
+		self.alive=False
+		
+		self.main_thread=threading.currentThread()
+		
+		self.socket_dir=socket_dir
+		self.filter_proc=filter_proc
+		
+		self.socket_name=None
+		self.socket_logger=None
+
+		self.events={}
+
+		try:
+			socket_dir_name = tempfile.mkdtemp(dir=self.socket_dir,
+				prefix="log_socket_")
+			
+			socket_name = os.path.join(socket_dir_name, 'socket')
+
+		except OSError as e:
+			return
+		
+		self.socket_name=socket_name
+		
+		
+		try:
+			socket_logger=socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
+			socket_logger.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+			socket_logger.bind(self.socket_name)
+			socket_logger.listen(64)
+
+		except socket.error as e:
+			return
+
+		self.socket_logger=socket_logger
+
+		try:
+			# Allow anyone to connect to the socket
+			os.chmod(socket_dir_name,
+				stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR|
+				stat.S_IROTH|stat.S_IWOTH|stat.S_IXOTH)
+			os.chmod(socket_name,
+				stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR|
+				stat.S_IROTH|stat.S_IWOTH|stat.S_IXOTH)
+		except OSError as e:
+			return
+		  
+	def run(self):
+		""" Starts the log server """
+
+		self.alive=True
+		self.listen_thread=threading.currentThread()
+		clients={}
+		
+		epoll=select.epoll()
+		epoll.register(self.socket_logger.fileno(), select.EPOLLIN)
+
+		while self.alive:
+			try:
+				sock_events = epoll.poll(3)
+				
+				for fileno, sock_event in sock_events:
+					if fileno == self.socket_logger.fileno():
+						ret = self.socket_logger.accept()
+						if ret is None:
+							pass
+						else:
+							(client,addr)=ret
+							epoll.register(client.fileno(), select.EPOLLIN)
+							clients[client.fileno()]=client
+					elif sock_event & select.EPOLLIN:
+						s=clients[fileno]
+						record=s.recv(8192)
+					
+						if not record: # if connection was closed
+							epoll.unregister(fileno)
+							clients[fileno].close()
+							del clients[fileno]
+							continue
+					
+						try:
+							message=record.decode("utf8").split("\0")
+						except UnicodeDecodeError:
+  							print("Bad message %s" % record)
+  							continue
+
+					
+						try:
+							if message[4]=="ASKING":
+								if self.filter_proc(message[1],message[2],message[3]):
+									s.sendall(b"ALLOW\0")
+								else:
+									# TODO: log through portage infrastructure
+									#print("Blocking an access to %s" % message[2])
+									s.sendall(b"DENY\0")
+							else:
+								eventname,filename,stage,result=message[1:5]
+
+								if not stage in self.events:
+									self.events[stage]=[{},{}]
+						
+								hashofsuccesses=self.events[stage][0]
+								hashoffailures=self.events[stage][1]
+
+								if result=="DENIED":
+									print("Blocking an access to %s" % filename)
+
+								if result=="OK":
+									if filename not in hashofsuccesses:
+										hashofsuccesses[filename]=[False,False]
+
+									read_or_written=hashofsuccesses[filename]
+
+									if eventname=="read":
+										read_or_written[0]=True
+									elif eventname=="write":
+										read_or_written[1]=True
+							
+								elif result[0:3]=="ERR" or result=="DENIED":
+									if not filename in hashoffailures:
+										hashoffailures[filename]=[False,False]
+									notfound_or_blocked=hashoffailures[filename]
+								  
+									if result=="ERR/2":
+										notfound_or_blocked[0]=True
+									elif result=="DENIED":
+										notfound_or_blocked[1]=True
+
+								else:
+									print("Error in logger module<->analyser protocol")
+						
+						except IndexError:
+								print("IndexError while parsing %s" % record)
+			except IOError as e:
+				if e.errno!=4: # handling "Interrupted system call" errors
+					raise
+				  
+			# if the main thread no longer exists, exit
+			if not self.main_thread.is_alive():
+				break
+		epoll.unregister(self.socket_logger.fileno())
+		epoll.close()
+		self.socket_logger.close()
+		
+	def stop(self):
+		""" Stops the log server. Returns all events """
+
+		self.alive=False
+		
+		# Block the main thread until the listener exits
+		self.listen_thread.join()
+		
+		# We assume portage clears the tmp folder, so we don't delete the socket file
+		# We assume that no new socket data will arrive after this moment
+		return self.events

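The loop above speaks a simple NUL-separated protocol over the SOCK_SEQPACKET socket: field 0 is ignored, fields 1-3 carry event name, file name and build stage, and field 4 is either "ASKING" (answered with ALLOW\0 or DENY\0) or a result code such as OK, ERR/2 or DENIED. A hedged client sketch (the socket path is whatever mkdtemp produced at startup):

	# Illustrative client, not part of the patch.
	import socket

	s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
	s.connect("/tmp/log_socket_XXXXXX/socket")  # path created by EventsLogger

	# Ask whether reading /etc/passwd during src_compile is allowed.
	s.sendall(b"\0".join([b"", b"read", b"/etc/passwd", b"compile", b"ASKING"]))
	print(s.recv(64))  # b"ALLOW\0" or b"DENY\0"

	# Report a successful read event for the same file.
	s.sendall(b"\0".join([b"", b"read", b"/etc/passwd", b"compile", b"OK"]))
	s.close()
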
diff --git a/portage_with_autodep/pym/_emerge/FakeVartree.py b/portage_with_autodep/pym/_emerge/FakeVartree.py
new file mode 100644
index 0000000..a11966f
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/FakeVartree.py
@@ -0,0 +1,265 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+import portage
+from portage import os
+from _emerge.Package import Package
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+from portage.const import VDB_PATH
+from portage.dbapi.vartree import vartree
+from portage.repository.config import _gen_valid_repo
+from portage.update import grab_updates, parse_updates, update_dbentries
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+class FakeVardbapi(PackageVirtualDbapi):
+	"""
+	Implements the vardbapi.getpath() method which is used in error handling
+	code for the Package class and vartree.get_provide().
+	"""
+	def getpath(self, cpv, filename=None):
+		path = os.path.join(self.settings['EROOT'], VDB_PATH, cpv)
+		if filename is not None:
+			path = os.path.join(path, filename)
+		return path
+
+class FakeVartree(vartree):
+	"""This is implements an in-memory copy of a vartree instance that provides
+	all the interfaces required for use by the depgraph.  The vardb is locked
+	during the constructor call just long enough to read a copy of the
+	installed package information.  This allows the depgraph to do its
+	dependency calculations without holding a lock on the vardb.  It also
+	allows things like vardb global updates to be done in memory so that the
+	user doesn't necessarily need write access to the vardb in cases where
+	global updates are necessary (updates are performed when necessary if there
+	is not a matching ebuild in the tree). Instances of this class are not
+	populated until the sync() method is called."""
+	def __init__(self, root_config, pkg_cache=None, pkg_root_config=None):
+		self._root_config = root_config
+		if pkg_root_config is None:
+			pkg_root_config = self._root_config
+		self._pkg_root_config = pkg_root_config
+		if pkg_cache is None:
+			pkg_cache = {}
+		real_vartree = root_config.trees["vartree"]
+		self._real_vardb = real_vartree.dbapi
+		portdb = root_config.trees["porttree"].dbapi
+		self.root = real_vartree.root
+		self.settings = real_vartree.settings
+		mykeys = list(real_vartree.dbapi._aux_cache_keys)
+		if "_mtime_" not in mykeys:
+			mykeys.append("_mtime_")
+		self._db_keys = mykeys
+		self._pkg_cache = pkg_cache
+		self.dbapi = FakeVardbapi(real_vartree.settings)
+
+		# Initialize variables needed for lazy cache pulls of the live ebuild
+		# metadata.  This ensures that the vardb lock is released ASAP, without
+		# being delayed in case cache generation is triggered.
+		self._aux_get = self.dbapi.aux_get
+		self.dbapi.aux_get = self._aux_get_wrapper
+		self._match = self.dbapi.match
+		self.dbapi.match = self._match_wrapper
+		self._aux_get_history = set()
+		self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
+		self._portdb = portdb
+		self._global_updates = None
+
+	def _match_wrapper(self, cpv, use_cache=1):
+		"""
+		Make sure the metadata in Package instances gets updated for any
+		cpv that is returned from a match() call, since the metadata can
+		be accessed directly from the Package instance instead of via
+		aux_get().
+		"""
+		matches = self._match(cpv, use_cache=use_cache)
+		for cpv in matches:
+			if cpv in self._aux_get_history:
+				continue
+			self._aux_get_wrapper(cpv, [])
+		return matches
+
+	def _aux_get_wrapper(self, pkg, wants, myrepo=None):
+		if pkg in self._aux_get_history:
+			return self._aux_get(pkg, wants)
+		self._aux_get_history.add(pkg)
+		# We need to check the EAPI, and this also raises
+		# a KeyError to the caller if appropriate.
+		installed_eapi, repo = self._aux_get(pkg, ["EAPI", "repository"])
+		try:
+			# Use the live ebuild metadata if possible.
+			repo = _gen_valid_repo(repo)
+			live_metadata = dict(zip(self._portdb_keys,
+				self._portdb.aux_get(pkg, self._portdb_keys, myrepo=repo)))
+			# Use the metadata from the installed instance if the EAPI
+			# of either instance is unsupported, since if the installed
+			# instance has an unsupported or corrupt EAPI then we don't
+			# want to attempt to do complex operations such as execute
+			# pkg_config, pkg_prerm or pkg_postrm phases. If both EAPIs
+			# are supported then go ahead and use the live_metadata, in
+			# order to respect dep updates without revision bump or EAPI
+			# bump, as in bug #368725.
+			if not (portage.eapi_is_supported(live_metadata["EAPI"]) and \
+				portage.eapi_is_supported(installed_eapi)):
+				raise KeyError(pkg)
+			self.dbapi.aux_update(pkg, live_metadata)
+		except (KeyError, portage.exception.PortageException):
+			if self._global_updates is None:
+				self._global_updates = \
+					grab_global_updates(self._portdb)
+			perform_global_updates(
+				pkg, self.dbapi, self._global_updates)
+		return self._aux_get(pkg, wants)
+
+	def cpv_discard(self, pkg):
+		"""
+		Discard a package from the fake vardb if it exists.
+		"""
+		old_pkg = self.dbapi.get(pkg)
+		if old_pkg is not None:
+			self.dbapi.cpv_remove(old_pkg)
+			self._pkg_cache.pop(old_pkg, None)
+			self._aux_get_history.discard(old_pkg.cpv)
+
+	def sync(self, acquire_lock=1):
+		"""
+		Call this method to synchronize state with the real vardb
+		after one or more packages may have been installed or
+		uninstalled.
+		"""
+		locked = False
+		try:
+			if acquire_lock and os.access(self._real_vardb._dbroot, os.W_OK):
+				self._real_vardb.lock()
+				locked = True
+			self._sync()
+		finally:
+			if locked:
+				self._real_vardb.unlock()
+
+		# Populate the old-style virtuals using the cached values.
+		# Skip the aux_get wrapper here, to avoid unwanted
+		# cache generation.
+		try:
+			self.dbapi.aux_get = self._aux_get
+			self.settings._populate_treeVirtuals_if_needed(self)
+		finally:
+			self.dbapi.aux_get = self._aux_get_wrapper
+
+	def _sync(self):
+
+		real_vardb = self._root_config.trees["vartree"].dbapi
+		current_cpv_set = frozenset(real_vardb.cpv_all())
+		pkg_vardb = self.dbapi
+		pkg_cache = self._pkg_cache
+		aux_get_history = self._aux_get_history
+
+		# Remove any packages that have been uninstalled.
+		for pkg in list(pkg_vardb):
+			if pkg.cpv not in current_cpv_set:
+				self.cpv_discard(pkg)
+
+		# Validate counters and timestamps.
+		slot_counters = {}
+		root_config = self._pkg_root_config
+		validation_keys = ["COUNTER", "_mtime_"]
+		for cpv in current_cpv_set:
+
+			pkg_hash_key = Package._gen_hash_key(cpv=cpv,
+				installed=True, root_config=root_config,
+				type_name="installed")
+			pkg = pkg_vardb.get(pkg_hash_key)
+			if pkg is not None:
+				counter, mtime = real_vardb.aux_get(cpv, validation_keys)
+				try:
+					counter = long(counter)
+				except ValueError:
+					counter = 0
+
+				if counter != pkg.counter or \
+					mtime != pkg.mtime:
+					self.cpv_discard(pkg)
+					pkg = None
+
+			if pkg is None:
+				pkg = self._pkg(cpv)
+
+			other_counter = slot_counters.get(pkg.slot_atom)
+			if other_counter is not None:
+				if other_counter > pkg.counter:
+					continue
+
+			slot_counters[pkg.slot_atom] = pkg.counter
+			pkg_vardb.cpv_inject(pkg)
+
+		real_vardb.flush_cache()
+
+	def _pkg(self, cpv):
+		"""
+		The RootConfig instance that will become the Package.root_config
+		attribute can be overridden by the FakeVartree pkg_root_config
+		constructor argument, since we want to be consistent with the
+		depgraph._pkg() method which uses a specially optimized
+		RootConfig that has a FakeVartree instead of a real vartree.
+		"""
+		pkg = Package(cpv=cpv, built=True, installed=True,
+			metadata=zip(self._db_keys,
+			self._real_vardb.aux_get(cpv, self._db_keys)),
+			root_config=self._pkg_root_config,
+			type_name="installed")
+
+		try:
+			mycounter = long(pkg.metadata["COUNTER"])
+		except ValueError:
+			mycounter = 0
+			pkg.metadata["COUNTER"] = str(mycounter)
+
+		self._pkg_cache[pkg] = pkg
+		return pkg
+
+def grab_global_updates(portdb):
+	retupdates = {}
+
+	for repo_name in portdb.getRepositories():
+		repo = portdb.getRepositoryPath(repo_name)
+		updpath = os.path.join(repo, "profiles", "updates")
+		if not os.path.isdir(updpath):
+			continue
+
+		try:
+			rawupdates = grab_updates(updpath)
+		except portage.exception.DirectoryNotFound:
+			rawupdates = []
+		upd_commands = []
+		for mykey, mystat, mycontent in rawupdates:
+			commands, errors = parse_updates(mycontent)
+			upd_commands.extend(commands)
+		retupdates[repo_name] = upd_commands
+
+	master_repo = portdb.getRepositoryName(portdb.porttree_root)
+	if master_repo in retupdates:
+		retupdates['DEFAULT'] = retupdates[master_repo]
+
+	return retupdates
+
+def perform_global_updates(mycpv, mydb, myupdates):
+	aux_keys = ["DEPEND", "RDEPEND", "PDEPEND", 'repository']
+	aux_dict = dict(zip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
+	repository = aux_dict.pop('repository')
+	try:
+		mycommands = myupdates[repository]
+	except KeyError:
+		try:
+			mycommands = myupdates['DEFAULT']
+		except KeyError:
+			return
+
+	if not mycommands:
+		return
+
+	updates = update_dbentries(mycommands, aux_dict)
+	if updates:
+		mydb.aux_update(mycpv, updates)

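Since instances are not populated until sync() is called, typical use is a construct-then-sync pair, after which the wrapped dbapi behaves like a read-only vardb. A minimal sketch, assuming root_config comes from an existing depgraph setup:

	# Illustrative only.
	from _emerge.FakeVartree import FakeVartree

	fake = FakeVartree(root_config)
	fake.sync()  # briefly locks the real vardb, then works from memory
	installed = fake.dbapi.match("sys-apps/portage")
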
diff --git a/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py
new file mode 100644
index 0000000..a716dac
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py
@@ -0,0 +1,81 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+from portage.cache.mappings import slot_dict_class
+
+class FifoIpcDaemon(AbstractPollTask):
+
+	__slots__ = ("input_fifo", "output_fifo",) + \
+		("_files", "_reg_id",)
+
+	_file_names = ("pipe_in",)
+	_files_dict = slot_dict_class(_file_names, prefix="")
+
+	def _start(self):
+		self._files = self._files_dict()
+		input_fd = os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
+
+		# File streams are in unbuffered mode since we do atomic
+		# read and write of whole pickles.
+		self._files.pipe_in = os.fdopen(input_fd, 'rb', 0)
+
+		self._reg_id = self.scheduler.register(
+			self._files.pipe_in.fileno(),
+			self._registered_events, self._input_handler)
+
+		self._registered = True
+
+	def _reopen_input(self):
+		"""
+		Re-open the input stream, in order to suppress
+		POLLHUP events (bug #339976).
+		"""
+		self._files.pipe_in.close()
+		input_fd = os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
+		self._files.pipe_in = os.fdopen(input_fd, 'rb', 0)
+		self.scheduler.unregister(self._reg_id)
+		self._reg_id = self.scheduler.register(
+			self._files.pipe_in.fileno(),
+			self._registered_events, self._input_handler)
+
+	def isAlive(self):
+		return self._registered
+
+	def _cancel(self):
+		if self.returncode is None:
+			self.returncode = 1
+		self._unregister()
+
+	def _wait(self):
+		if self.returncode is not None:
+			return self.returncode
+
+		if self._registered:
+			self.scheduler.schedule(self._reg_id)
+			self._unregister()
+
+		if self.returncode is None:
+			self.returncode = os.EX_OK
+
+		return self.returncode
+
+	def _input_handler(self, fd, event):
+		raise NotImplementedError(self)
+
+	def _unregister(self):
+		"""
+		Unregister from the scheduler and close open files.
+		"""
+
+		self._registered = False
+
+		if self._reg_id is not None:
+			self.scheduler.unregister(self._reg_id)
+			self._reg_id = None
+
+		if self._files is not None:
+			for f in self._files.values():
+				f.close()
+			self._files = None

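The _reopen_input() dance above exists because once the last writer closes a FIFO, poll() reports POLLHUP on the read end indefinitely; closing and re-opening the read end resets the stream. A standalone illustration, independent of portage:

	# Demonstrates the POLLHUP behaviour that _reopen_input() works around.
	import os, select, tempfile

	path = os.path.join(tempfile.mkdtemp(), "fifo")
	os.mkfifo(path)
	fd = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
	poller = select.poll()
	poller.register(fd, select.POLLIN)
	# ... after a writer opens and closes the FIFO, poll() keeps
	# returning POLLHUP for fd; the remedy is to re-open the read end:
	os.close(fd)
	fd = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
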
diff --git a/portage_with_autodep/pym/_emerge/JobStatusDisplay.py b/portage_with_autodep/pym/_emerge/JobStatusDisplay.py
new file mode 100644
index 0000000..1949232
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/JobStatusDisplay.py
@@ -0,0 +1,292 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import formatter
+import io
+import sys
+import time
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.output import xtermTitle
+
+from _emerge.getloadavg import getloadavg
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+class JobStatusDisplay(object):
+
+	_bound_properties = ("curval", "failed", "running")
+
+	# Don't update the display unless at least this much
+	# time has passed, in units of seconds.
+	_min_display_latency = 2
+
+	_default_term_codes = {
+		'cr'  : '\r',
+		'el'  : '\x1b[K',
+		'nel' : '\n',
+	}
+
+	_termcap_name_map = {
+		'carriage_return' : 'cr',
+		'clr_eol'         : 'el',
+		'newline'         : 'nel',
+	}
+
+	def __init__(self, quiet=False, xterm_titles=True):
+		object.__setattr__(self, "quiet", quiet)
+		object.__setattr__(self, "xterm_titles", xterm_titles)
+		object.__setattr__(self, "maxval", 0)
+		object.__setattr__(self, "merges", 0)
+		object.__setattr__(self, "_changed", False)
+		object.__setattr__(self, "_displayed", False)
+		object.__setattr__(self, "_last_display_time", 0)
+
+		self.reset()
+
+		isatty = os.environ.get('TERM') != 'dumb' and \
+			hasattr(self.out, 'isatty') and \
+			self.out.isatty()
+		object.__setattr__(self, "_isatty", isatty)
+		if not isatty or not self._init_term():
+			term_codes = {}
+			for k, capname in self._termcap_name_map.items():
+				term_codes[k] = self._default_term_codes[capname]
+			object.__setattr__(self, "_term_codes", term_codes)
+		encoding = sys.getdefaultencoding()
+		for k, v in self._term_codes.items():
+			if not isinstance(v, basestring):
+				self._term_codes[k] = v.decode(encoding, 'replace')
+
+		if self._isatty:
+			width = portage.output.get_term_size()[1]
+		else:
+			width = 80
+		self._set_width(width)
+
+	def _set_width(self, width):
+		if width == getattr(self, 'width', None):
+			return
+		if width <= 0 or width > 80:
+			width = 80
+		object.__setattr__(self, "width", width)
+		object.__setattr__(self, "_jobs_column_width", width - 32)
+
+	@property
+	def out(self):
+		"""Use a lazy reference to sys.stdout, in case the API consumer has
+		temporarily overridden stdout."""
+		return sys.stdout
+
+	def _write(self, s):
+		# avoid potential UnicodeEncodeError
+		s = _unicode_encode(s,
+			encoding=_encodings['stdio'], errors='backslashreplace')
+		out = self.out
+		if sys.hexversion >= 0x3000000:
+			out = out.buffer
+		out.write(s)
+		out.flush()
+
+	def _init_term(self):
+		"""
+		Initialize term control codes.
+		@rtype: bool
+		@returns: True if term codes were successfully initialized,
+			False otherwise.
+		"""
+
+		term_type = os.environ.get("TERM", "").strip()
+		if not term_type:
+			return False
+		tigetstr = None
+
+		try:
+			import curses
+			try:
+				curses.setupterm(term_type, self.out.fileno())
+				tigetstr = curses.tigetstr
+			except curses.error:
+				pass
+		except ImportError:
+			pass
+
+		if tigetstr is None:
+			return False
+
+		term_codes = {}
+		for k, capname in self._termcap_name_map.items():
+			code = tigetstr(capname)
+			if code is None:
+				code = self._default_term_codes[capname]
+			term_codes[k] = code
+		object.__setattr__(self, "_term_codes", term_codes)
+		return True
+
+	def _format_msg(self, msg):
+		return ">>> %s" % msg
+
+	def _erase(self):
+		self._write(
+			self._term_codes['carriage_return'] + \
+			self._term_codes['clr_eol'])
+		self._displayed = False
+
+	def _display(self, line):
+		self._write(line)
+		self._displayed = True
+
+	def _update(self, msg):
+
+		if not self._isatty:
+			self._write(self._format_msg(msg) + self._term_codes['newline'])
+			self._displayed = True
+			return
+
+		if self._displayed:
+			self._erase()
+
+		self._display(self._format_msg(msg))
+
+	def displayMessage(self, msg):
+
+		was_displayed = self._displayed
+
+		if self._isatty and self._displayed:
+			self._erase()
+
+		self._write(self._format_msg(msg) + self._term_codes['newline'])
+		self._displayed = False
+
+		if was_displayed:
+			self._changed = True
+			self.display()
+
+	def reset(self):
+		self.maxval = 0
+		self.merges = 0
+		for name in self._bound_properties:
+			object.__setattr__(self, name, 0)
+
+		if self._displayed:
+			self._write(self._term_codes['newline'])
+			self._displayed = False
+
+	def __setattr__(self, name, value):
+		old_value = getattr(self, name)
+		if value == old_value:
+			return
+		object.__setattr__(self, name, value)
+		if name in self._bound_properties:
+			self._property_change(name, old_value, value)
+
+	def _property_change(self, name, old_value, new_value):
+		self._changed = True
+		self.display()
+
+	def _load_avg_str(self):
+		try:
+			avg = getloadavg()
+		except OSError:
+			return 'unknown'
+
+		max_avg = max(avg)
+
+		if max_avg < 10:
+			digits = 2
+		elif max_avg < 100:
+			digits = 1
+		else:
+			digits = 0
+
+		return ", ".join(("%%.%df" % digits ) % x for x in avg)
+
+	def display(self):
+		"""
+		Display status on stdout, but only if something has
+		changed since the last call.
+		"""
+
+		if self.quiet:
+			return
+
+		current_time = time.time()
+		time_delta = current_time - self._last_display_time
+		if self._displayed and \
+			not self._changed:
+			if not self._isatty:
+				return
+			if time_delta < self._min_display_latency:
+				return
+
+		self._last_display_time = current_time
+		self._changed = False
+		self._display_status()
+
+	def _display_status(self):
+		# Don't use len(self._completed_tasks) here since that also
+		# can include uninstall tasks.
+		curval_str = str(self.curval)
+		maxval_str = str(self.maxval)
+		running_str = str(self.running)
+		failed_str = str(self.failed)
+		load_avg_str = self._load_avg_str()
+
+		color_output = io.StringIO()
+		plain_output = io.StringIO()
+		style_file = portage.output.ConsoleStyleFile(color_output)
+		style_file.write_listener = plain_output
+		style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
+		style_writer.style_listener = style_file.new_styles
+		f = formatter.AbstractFormatter(style_writer)
+
+		number_style = "INFORM"
+		f.add_literal_data(_unicode_decode("Jobs: "))
+		f.push_style(number_style)
+		f.add_literal_data(_unicode_decode(curval_str))
+		f.pop_style()
+		f.add_literal_data(_unicode_decode(" of "))
+		f.push_style(number_style)
+		f.add_literal_data(_unicode_decode(maxval_str))
+		f.pop_style()
+		f.add_literal_data(_unicode_decode(" complete"))
+
+		if self.running:
+			f.add_literal_data(_unicode_decode(", "))
+			f.push_style(number_style)
+			f.add_literal_data(_unicode_decode(running_str))
+			f.pop_style()
+			f.add_literal_data(_unicode_decode(" running"))
+
+		if self.failed:
+			f.add_literal_data(_unicode_decode(", "))
+			f.push_style(number_style)
+			f.add_literal_data(_unicode_decode(failed_str))
+			f.pop_style()
+			f.add_literal_data(_unicode_decode(" failed"))
+
+		padding = self._jobs_column_width - len(plain_output.getvalue())
+		if padding > 0:
+			f.add_literal_data(padding * _unicode_decode(" "))
+
+		f.add_literal_data(_unicode_decode("Load avg: "))
+		f.add_literal_data(_unicode_decode(load_avg_str))
+
+		# Truncate to fit width, to avoid making the terminal scroll if the
+		# line overflows (happens when the load average is large).
+		plain_output = plain_output.getvalue()
+		if self._isatty and len(plain_output) > self.width:
+			# Use plain_output here since it's easier to truncate
+			# properly than the color output which contains console
+			# color codes.
+			self._update(plain_output[:self.width])
+		else:
+			self._update(color_output.getvalue())
+
+		if self.xterm_titles:
+			xtermTitle(" ".join(plain_output.split()))

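Because curval, failed and running are listed in _bound_properties, a plain attribute assignment is enough to refresh the status line. A short hedged sketch:

	# Illustrative only.
	from _emerge.JobStatusDisplay import JobStatusDisplay

	status = JobStatusDisplay(quiet=False, xterm_titles=False)
	status.maxval = 10    # ordinary attribute, no automatic redraw
	status.running = 3    # bound property: triggers display()
	status.curval = 1     # likewise
	status.displayMessage("Compiling foo")  # prints a one-off ">>> ..." line
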
diff --git a/portage_with_autodep/pym/_emerge/MergeListItem.py b/portage_with_autodep/pym/_emerge/MergeListItem.py
new file mode 100644
index 0000000..2176bf6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/MergeListItem.py
@@ -0,0 +1,135 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.output import colorize
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.Binpkg import Binpkg
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildBuild import EbuildBuild
+from _emerge.PackageUninstall import PackageUninstall
+
+class MergeListItem(CompositeTask):
+
+	"""
+	TODO: For parallel scheduling, everything here needs asynchronous
+	execution support (start, poll, and wait methods).
+	"""
+
+	__slots__ = ("args_set",
+		"binpkg_opts", "build_opts", "config_pool", "emerge_opts",
+		"find_blockers", "logger", "mtimedb", "pkg",
+		"pkg_count", "pkg_to_replace", "prefetcher",
+		"settings", "statusMessage", "world_atom") + \
+		("_install_task",)
+
+	def _start(self):
+
+		pkg = self.pkg
+		build_opts = self.build_opts
+
+		if pkg.installed:
+			# uninstall,  executed by self.merge()
+			self.returncode = os.EX_OK
+			self.wait()
+			return
+
+		args_set = self.args_set
+		find_blockers = self.find_blockers
+		logger = self.logger
+		mtimedb = self.mtimedb
+		pkg_count = self.pkg_count
+		scheduler = self.scheduler
+		settings = self.settings
+		world_atom = self.world_atom
+		ldpath_mtimes = mtimedb["ldpath"]
+
+		action_desc = "Emerging"
+		preposition = "for"
+		if pkg.type_name == "binary":
+			action_desc += " binary"
+
+		if build_opts.fetchonly:
+			action_desc = "Fetching"
+
+		msg = "%s (%s of %s) %s" % \
+			(action_desc,
+			colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
+			colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
+			colorize("GOOD", pkg.cpv))
+
+		portdb = pkg.root_config.trees["porttree"].dbapi
+		portdir_repo_name = portdb.getRepositoryName(portdb.porttree_root)
+		if portdir_repo_name:
+			pkg_repo_name = pkg.repo
+			if pkg_repo_name != portdir_repo_name:
+				if pkg_repo_name == pkg.UNKNOWN_REPO:
+					pkg_repo_name = "unknown repo"
+				msg += " from %s" % pkg_repo_name
+
+		if pkg.root != "/":
+			msg += " %s %s" % (preposition, pkg.root)
+
+		if not build_opts.pretend:
+			self.statusMessage(msg)
+			logger.log(" >>> emerge (%s of %s) %s to %s" % \
+				(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+		if pkg.type_name == "ebuild":
+
+			build = EbuildBuild(args_set=args_set,
+				background=self.background,
+				config_pool=self.config_pool,
+				find_blockers=find_blockers,
+				ldpath_mtimes=ldpath_mtimes, logger=logger,
+				opts=build_opts, pkg=pkg, pkg_count=pkg_count,
+				prefetcher=self.prefetcher, scheduler=scheduler,
+				settings=settings, world_atom=world_atom)
+
+			self._install_task = build
+			self._start_task(build, self._default_final_exit)
+			return
+
+		elif pkg.type_name == "binary":
+
+			binpkg = Binpkg(background=self.background,
+				find_blockers=find_blockers,
+				ldpath_mtimes=ldpath_mtimes, logger=logger,
+				opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
+				prefetcher=self.prefetcher, settings=settings,
+				scheduler=scheduler, world_atom=world_atom)
+
+			self._install_task = binpkg
+			self._start_task(binpkg, self._default_final_exit)
+			return
+
+	def create_install_task(self):
+
+		pkg = self.pkg
+		build_opts = self.build_opts
+		mtimedb = self.mtimedb
+		scheduler = self.scheduler
+		settings = self.settings
+		world_atom = self.world_atom
+		ldpath_mtimes = mtimedb["ldpath"]
+
+		if pkg.installed:
+			if not (build_opts.buildpkgonly or \
+				build_opts.fetchonly or build_opts.pretend):
+
+				task = PackageUninstall(background=self.background,
+					ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
+					pkg=pkg, scheduler=scheduler, settings=settings,
+					world_atom=world_atom)
+
+			else:
+				task = AsynchronousTask()
+
+		elif build_opts.fetchonly or \
+			build_opts.buildpkgonly:
+			task = AsynchronousTask()
+		else:
+			task = self._install_task.create_install_task()
+
+		return task

diff --git a/portage_with_autodep/pym/_emerge/MetadataRegen.py b/portage_with_autodep/pym/_emerge/MetadataRegen.py
new file mode 100644
index 0000000..8103175
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/MetadataRegen.py
@@ -0,0 +1,184 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+from _emerge.PollScheduler import PollScheduler
+
+class MetadataRegen(PollScheduler):
+
+	def __init__(self, portdb, cp_iter=None, consumer=None,
+		max_jobs=None, max_load=None):
+		PollScheduler.__init__(self)
+		self._portdb = portdb
+		self._global_cleanse = False
+		if cp_iter is None:
+			cp_iter = self._iter_every_cp()
+			# We can globally cleanse stale cache only if we
+			# iterate over every single cp.
+			self._global_cleanse = True
+		self._cp_iter = cp_iter
+		self._consumer = consumer
+
+		if max_jobs is None:
+			max_jobs = 1
+
+		self._max_jobs = max_jobs
+		self._max_load = max_load
+
+		self._valid_pkgs = set()
+		self._cp_set = set()
+		self._process_iter = self._iter_metadata_processes()
+		self.returncode = os.EX_OK
+		self._error_count = 0
+		self._running_tasks = set()
+
+	def _terminate_tasks(self):
+		while self._running_tasks:
+			self._running_tasks.pop().cancel()
+
+	def _iter_every_cp(self):
+		portage.writemsg_stdout("Listing available packages...\n")
+		every_cp = self._portdb.cp_all()
+		portage.writemsg_stdout("Regenerating cache entries...\n")
+		every_cp.sort(reverse=True)
+		try:
+			while not self._terminated_tasks:
+				yield every_cp.pop()
+		except IndexError:
+			pass
+
+	def _iter_metadata_processes(self):
+		portdb = self._portdb
+		valid_pkgs = self._valid_pkgs
+		cp_set = self._cp_set
+		consumer = self._consumer
+
+		for cp in self._cp_iter:
+			if self._terminated_tasks:
+				break
+			cp_set.add(cp)
+			portage.writemsg_stdout("Processing %s\n" % cp)
+			cpv_list = portdb.cp_list(cp)
+			for cpv in cpv_list:
+				if self._terminated_tasks:
+					break
+				valid_pkgs.add(cpv)
+				ebuild_path, repo_path = portdb.findname2(cpv)
+				if ebuild_path is None:
+					raise AssertionError("ebuild not found for '%s'" % cpv)
+				metadata, st, emtime = portdb._pull_valid_cache(
+					cpv, ebuild_path, repo_path)
+				if metadata is not None:
+					if consumer is not None:
+						consumer(cpv, ebuild_path,
+							repo_path, metadata)
+					continue
+
+				yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
+					ebuild_mtime=emtime,
+					metadata_callback=portdb._metadata_callback,
+					portdb=portdb, repo_path=repo_path,
+					settings=portdb.doebuild_settings)
+
+	def run(self):
+
+		portdb = self._portdb
+		from portage.cache.cache_errors import CacheError
+		dead_nodes = {}
+
+		while self._schedule():
+			self._poll_loop()
+
+		while self._jobs:
+			self._poll_loop()
+
+		if self._terminated_tasks:
+			self.returncode = 1
+			return
+
+		if self._global_cleanse:
+			for mytree in portdb.porttrees:
+				try:
+					dead_nodes[mytree] = set(portdb.auxdb[mytree])
+				except CacheError as e:
+					portage.writemsg("Error listing cache entries for " + \
+						"'%s': %s, continuing...\n" % (mytree, e),
+						noiselevel=-1)
+					del e
+					dead_nodes = None
+					break
+		else:
+			cp_set = self._cp_set
+			cpv_getkey = portage.cpv_getkey
+			for mytree in portdb.porttrees:
+				try:
+					dead_nodes[mytree] = set(cpv for cpv in \
+						portdb.auxdb[mytree] \
+						if cpv_getkey(cpv) in cp_set)
+				except CacheError as e:
+					portage.writemsg("Error listing cache entries for " + \
+						"'%s': %s, continuing...\n" % (mytree, e),
+						noiselevel=-1)
+					del e
+					dead_nodes = None
+					break
+
+		if dead_nodes:
+			for y in self._valid_pkgs:
+				for mytree in portdb.porttrees:
+					if portdb.findname2(y, mytree=mytree)[0]:
+						dead_nodes[mytree].discard(y)
+
+			for mytree, nodes in dead_nodes.items():
+				auxdb = portdb.auxdb[mytree]
+				for y in nodes:
+					try:
+						del auxdb[y]
+					except (KeyError, CacheError):
+						pass
+
+	def _schedule_tasks(self):
+		"""
+		@rtype: bool
+		@returns: True if there may be remaining tasks to schedule,
+			False otherwise.
+		"""
+		if self._terminated_tasks:
+			return False
+
+		while self._can_add_job():
+			try:
+				metadata_process = next(self._process_iter)
+			except StopIteration:
+				return False
+
+			self._jobs += 1
+			self._running_tasks.add(metadata_process)
+			metadata_process.scheduler = self.sched_iface
+			metadata_process.addExitListener(self._metadata_exit)
+			metadata_process.start()
+		return True
+
+	def _metadata_exit(self, metadata_process):
+		self._jobs -= 1
+		self._running_tasks.discard(metadata_process)
+		if metadata_process.returncode != os.EX_OK:
+			self.returncode = 1
+			self._error_count += 1
+			self._valid_pkgs.discard(metadata_process.cpv)
+			if not self._terminated_tasks:
+				portage.writemsg("Error processing %s, continuing...\n" % \
+					(metadata_process.cpv,), noiselevel=-1)
+
+		if self._consumer is not None:
+			# On failure, still notify the consumer (in this case the metadata
+			# argument is None).
+			self._consumer(metadata_process.cpv,
+				metadata_process.ebuild_path,
+				metadata_process.repo_path,
+				metadata_process.metadata)
+
+		self._schedule()
+

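In use, the scheduler takes a portdbapi plus an optional consumer callback that receives (cpv, ebuild_path, repo_path, metadata) for every package, with metadata=None on failure. A hedged sketch:

	# Illustrative only.
	import portage
	from _emerge.MetadataRegen import MetadataRegen

	def consumer(cpv, ebuild_path, repo_path, metadata):
		if metadata is None:
			portage.writemsg("failed: %s\n" % cpv)

	portdb = portage.portdbapi(mysettings=portage.settings)
	regen = MetadataRegen(portdb, consumer=consumer, max_jobs=2)
	regen.run()
	print("returncode: %s" % regen.returncode)
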
diff --git a/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py
new file mode 100644
index 0000000..ce0ab14
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py
@@ -0,0 +1,33 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.package.ebuild.doebuild:spawn'
+)
+from portage import os
+
+class MiscFunctionsProcess(AbstractEbuildProcess):
+	"""
+	Spawns misc-functions.sh with an existing ebuild environment.
+	"""
+
+	__slots__ = ('commands',)
+
+	def _start(self):
+		settings = self.settings
+		portage_bin_path = settings["PORTAGE_BIN_PATH"]
+		misc_sh_binary = os.path.join(portage_bin_path,
+			os.path.basename(portage.const.MISC_SH_BINARY))
+
+		self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
+		if self.logfile is None and \
+			self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+			self.logfile = settings.get("PORTAGE_LOG_FILE")
+
+		AbstractEbuildProcess._start(self)
+
+	def _spawn(self, args, **kwargs):
+		self.settings.pop("EBUILD_PHASE", None)
+		return spawn(" ".join(args), self.settings, **kwargs)

diff --git a/portage_with_autodep/pym/_emerge/Package.py b/portage_with_autodep/pym/_emerge/Package.py
new file mode 100644
index 0000000..20c72b4
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Package.py
@@ -0,0 +1,700 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from itertools import chain
+import portage
+from portage import _encodings, _unicode_decode, _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.const import EBUILD_PHASES
+from portage.dep import Atom, check_required_use, use_reduce, \
+	paren_enclose, _slot_re, _slot_separator, _repo_separator
+from portage.eapi import eapi_has_iuse_defaults, eapi_has_required_use
+from portage.exception import InvalidDependString
+from portage.repository.config import _gen_valid_repo
+from _emerge.Task import Task
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+	long = int
+
+class Package(Task):
+
+	__hash__ = Task.__hash__
+	__slots__ = ("built", "cpv", "depth",
+		"installed", "metadata", "onlydeps", "operation",
+		"root_config", "type_name",
+		"category", "counter", "cp", "cpv_split",
+		"inherited", "invalid", "iuse", "masks", "mtime",
+		"pf", "pv_split", "root", "slot", "slot_atom", "visible",) + \
+	("_raw_metadata", "_use",)
+
+	metadata_keys = [
+		"BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "EAPI",
+		"INHERITED", "IUSE", "KEYWORDS",
+		"LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
+		"repository", "PROPERTIES", "RESTRICT", "SLOT", "USE",
+		"_mtime_", "DEFINED_PHASES", "REQUIRED_USE"]
+
+	_dep_keys = ('DEPEND', 'PDEPEND', 'RDEPEND',)
+	_use_conditional_misc_keys = ('LICENSE', 'PROPERTIES', 'RESTRICT')
+	UNKNOWN_REPO = "__unknown__"
+
+	def __init__(self, **kwargs):
+		Task.__init__(self, **kwargs)
+		# the SlotObject constructor assigns self.root_config from keyword args
+		# and it is an instance of the _emerge.RootConfig.RootConfig class
+		self.root = self.root_config.root
+		self._raw_metadata = _PackageMetadataWrapperBase(self.metadata)
+		self.metadata = _PackageMetadataWrapper(self, self._raw_metadata)
+		if not self.built:
+			self.metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
+		self.cp = portage.cpv_getkey(self.cpv)
+		slot = self.slot
+		if _slot_re.match(slot) is None:
+			self._invalid_metadata('SLOT.invalid',
+				"SLOT: invalid value: '%s'" % slot)
+			# Avoid an InvalidAtom exception when creating slot_atom.
+			# This package instance will be masked due to empty SLOT.
+			slot = '0'
+		if (self.iuse.enabled or self.iuse.disabled) and \
+			not eapi_has_iuse_defaults(self.metadata["EAPI"]):
+			if not self.installed:
+				self._invalid_metadata('EAPI.incompatible',
+					"IUSE contains defaults, but EAPI doesn't allow them")
+		self.slot_atom = portage.dep.Atom("%s%s%s" % (self.cp, _slot_separator, slot))
+		self.category, self.pf = portage.catsplit(self.cpv)
+		self.cpv_split = portage.catpkgsplit(self.cpv)
+		self.pv_split = self.cpv_split[1:]
+		if self.inherited is None:
+			self.inherited = frozenset()
+		repo = _gen_valid_repo(self.metadata.get('repository', ''))
+		if not repo:
+			repo = self.UNKNOWN_REPO
+		self.metadata['repository'] = repo
+
+		self._validate_deps()
+		self.masks = self._masks()
+		self.visible = self._visible(self.masks)
+		if self.operation is None:
+			if self.onlydeps or self.installed:
+				self.operation = "nomerge"
+			else:
+				self.operation = "merge"
+
+		self._hash_key = Package._gen_hash_key(cpv=self.cpv,
+			installed=self.installed, onlydeps=self.onlydeps,
+			operation=self.operation, repo_name=repo,
+			root_config=self.root_config,
+			type_name=self.type_name)
+		self._hash_value = hash(self._hash_key)
+
+	@classmethod
+	def _gen_hash_key(cls, cpv=None, installed=None, onlydeps=None,
+		operation=None, repo_name=None, root_config=None,
+		type_name=None, **kwargs):
+
+		if operation is None:
+			if installed or onlydeps:
+				operation = "nomerge"
+			else:
+				operation = "merge"
+
+		root = None
+		if root_config is not None:
+			root = root_config.root
+		else:
+			raise TypeError("root_config argument is required")
+
+		if type_name is None:
+			raise TypeError("type_name argument is required")
+		elif type_name == "ebuild":
+			if repo_name is None:
+				raise AssertionError(
+					"Package._gen_hash_key() " + \
+					"called without 'repo_name' argument")
+			repo_key = repo_name
+		else:
+			# For installed (and binary) packages we don't care for the repo
+			# when it comes to hashing, because there can only be one cpv.
+			# So overwrite the repo_key with type_name.
+			repo_key = type_name
+
+		return (type_name, root, cpv, operation, repo_key)
+
+	def _validate_deps(self):
+		"""
+		Validate deps. This does not trigger USE calculation since that
+		is expensive for ebuilds and therefore we want to avoid doing
+		it unnecessarily (like for masked packages).
+		"""
+		eapi = self.metadata['EAPI']
+		dep_eapi = eapi
+		dep_valid_flag = self.iuse.is_valid_flag
+		if self.installed:
+			# Ignore EAPI.incompatible and conditionals missing
+			# from IUSE for installed packages since these issues
+			# aren't relevant now (re-evaluate when new EAPIs are
+			# deployed).
+			dep_eapi = None
+			dep_valid_flag = None
+
+		for k in self._dep_keys:
+			v = self.metadata.get(k)
+			if not v:
+				continue
+			try:
+				use_reduce(v, eapi=dep_eapi, matchall=True,
+					is_valid_flag=dep_valid_flag, token_class=Atom)
+			except InvalidDependString as e:
+				self._metadata_exception(k, e)
+
+		k = 'PROVIDE'
+		v = self.metadata.get(k)
+		if v:
+			try:
+				use_reduce(v, eapi=dep_eapi, matchall=True,
+					is_valid_flag=dep_valid_flag, token_class=Atom)
+			except InvalidDependString as e:
+				self._invalid_metadata("PROVIDE.syntax",
+					_unicode_decode("%s: %s") % (k, e))
+
+		for k in self._use_conditional_misc_keys:
+			v = self.metadata.get(k)
+			if not v:
+				continue
+			try:
+				use_reduce(v, eapi=dep_eapi, matchall=True,
+					is_valid_flag=dep_valid_flag)
+			except InvalidDependString as e:
+				self._metadata_exception(k, e)
+
+		k = 'REQUIRED_USE'
+		v = self.metadata.get(k)
+		if v:
+			if not eapi_has_required_use(eapi):
+				self._invalid_metadata('EAPI.incompatible',
+					"REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi)
+			else:
+				try:
+					check_required_use(v, (),
+						self.iuse.is_valid_flag)
+				except InvalidDependString as e:
+					# Force unicode format string for python-2.x safety,
+					# ensuring that PortageException.__unicode__() is used
+					# when necessary.
+					self._invalid_metadata(k + ".syntax",
+						_unicode_decode("%s: %s") % (k, e))
+
+		k = 'SRC_URI'
+		v = self.metadata.get(k)
+		if v:
+			try:
+				use_reduce(v, is_src_uri=True, eapi=eapi, matchall=True,
+					is_valid_flag=self.iuse.is_valid_flag)
+			except InvalidDependString as e:
+				if not self.installed:
+					self._metadata_exception(k, e)
+
+	def copy(self):
+		return Package(built=self.built, cpv=self.cpv, depth=self.depth,
+			installed=self.installed, metadata=self._raw_metadata,
+			onlydeps=self.onlydeps, operation=self.operation,
+			root_config=self.root_config, type_name=self.type_name)
+
+	def _masks(self):
+		masks = {}
+		settings = self.root_config.settings
+
+		if self.invalid is not None:
+			masks['invalid'] = self.invalid
+
+		if not settings._accept_chost(self.cpv, self.metadata):
+			masks['CHOST'] = self.metadata['CHOST']
+
+		eapi = self.metadata["EAPI"]
+		if not portage.eapi_is_supported(eapi):
+			masks['EAPI.unsupported'] = eapi
+		if portage._eapi_is_deprecated(eapi):
+			masks['EAPI.deprecated'] = eapi
+
+		missing_keywords = settings._getMissingKeywords(
+			self.cpv, self.metadata)
+		if missing_keywords:
+			masks['KEYWORDS'] = missing_keywords
+
+		try:
+			missing_properties = settings._getMissingProperties(
+				self.cpv, self.metadata)
+			if missing_properties:
+				masks['PROPERTIES'] = missing_properties
+		except InvalidDependString:
+			# already recorded as 'invalid'
+			pass
+
+		mask_atom = settings._getMaskAtom(self.cpv, self.metadata)
+		if mask_atom is not None:
+			masks['package.mask'] = mask_atom
+
+		system_mask = settings._getProfileMaskAtom(
+			self.cpv, self.metadata)
+		if system_mask is not None:
+			masks['profile.system'] = system_mask
+
+		try:
+			missing_licenses = settings._getMissingLicenses(
+				self.cpv, self.metadata)
+			if missing_licenses:
+				masks['LICENSE'] = missing_licenses
+		except InvalidDependString:
+			# already recorded as 'invalid'
+			pass
+
+		if not masks:
+			masks = None
+
+		return masks
+
+	def _visible(self, masks):
+
+		if masks is not None:
+
+			if 'EAPI.unsupported' in masks:
+				return False
+
+			if 'invalid' in masks:
+				return False
+
+			if not self.installed and ( \
+				'CHOST' in masks or \
+				'EAPI.deprecated' in masks or \
+				'KEYWORDS' in masks or \
+				'PROPERTIES' in masks):
+				return False
+
+			if 'package.mask' in masks or \
+				'profile.system' in masks or \
+				'LICENSE' in masks:
+				return False
+
+		return True
+
+	def get_keyword_mask(self):
+		"""returns None, 'missing', or 'unstable'."""
+
+		missing = self.root_config.settings._getRawMissingKeywords(
+				self.cpv, self.metadata)
+
+		if not missing:
+			return None
+
+		if '**' in missing:
+			return 'missing'
+
+		global_accept_keywords = frozenset(
+			self.root_config.settings.get("ACCEPT_KEYWORDS", "").split())
+
+		for keyword in missing:
+			if keyword.lstrip("~") in global_accept_keywords:
+				return 'unstable'
+
+		return 'missing'
+
+	def isHardMasked(self):
+		"""Returns True if the cpv matches an atom in the
+		expanded pmaskdict[cp] list of masked ebuilds."""
+		pmask = self.root_config.settings._getRawMaskAtom(
+			self.cpv, self.metadata)
+		return pmask is not None
+
+	def _metadata_exception(self, k, e):
+
+		# For unicode safety with python-2.x we need to avoid
+		# using the string format operator with a non-unicode
+		# format string, since that will result in the
+		# PortageException.__str__() method being invoked,
+		# followed by unsafe decoding that may result in a
+		# UnicodeDecodeError. Therefore, use _unicode_decode()
+		# to ensure that format strings are unicode, so that
+		# PortageException.__unicode__() is used when necessary
+		# in python-2.x.
+		if not self.installed:
+			categorized_error = False
+			if e.errors:
+				for error in e.errors:
+					if getattr(error, 'category', None) is None:
+						continue
+					categorized_error = True
+					self._invalid_metadata(error.category,
+						_unicode_decode("%s: %s") % (k, error))
+
+			if not categorized_error:
+				self._invalid_metadata(k + ".syntax",
+					_unicode_decode("%s: %s") % (k, e))
+		else:
+			# For installed packages, show the path of the file
+			# containing the invalid metadata, since the user may
+			# want to fix the deps by hand.
+			vardb = self.root_config.trees['vartree'].dbapi
+			path = vardb.getpath(self.cpv, filename=k)
+			self._invalid_metadata(k + ".syntax",
+				_unicode_decode("%s: %s in '%s'") % (k, e, path))
+
+	def _invalid_metadata(self, msg_type, msg):
+		if self.invalid is None:
+			self.invalid = {}
+		msgs = self.invalid.get(msg_type)
+		if msgs is None:
+			msgs = []
+			self.invalid[msg_type] = msgs
+		msgs.append(msg)
+
+	def __str__(self):
+		if self.operation == "merge":
+			if self.type_name == "binary":
+				cpv_color = "PKG_BINARY_MERGE"
+			else:
+				cpv_color = "PKG_MERGE"
+		elif self.operation == "uninstall":
+			cpv_color = "PKG_UNINSTALL"
+		else:
+			cpv_color = "PKG_NOMERGE"
+
+		s = "(%s, %s" \
+			% (portage.output.colorize(cpv_color, self.cpv + _repo_separator + self.repo), self.type_name)
+
+		if self.type_name == "installed":
+			if self.root != "/":
+				s += " in '%s'" % self.root
+			if self.operation == "uninstall":
+				s += " scheduled for uninstall"
+		else:
+			if self.operation == "merge":
+				s += " scheduled for merge"
+				if self.root != "/":
+					s += " to '%s'" % self.root
+		s += ")"
+		return s
+
+	if sys.hexversion < 0x3000000:
+
+		__unicode__ = __str__
+
+		def __str__(self):
+			return _unicode_encode(self.__unicode__(),
+				encoding=_encodings['content'])
+
+	class _use_class(object):
+
+		__slots__ = ("enabled", "_expand", "_expand_hidden",
+			"_force", "_pkg", "_mask")
+
+		# Share identical frozenset instances when available.
+		_frozensets = {}
+
+		def __init__(self, pkg, use_str):
+			self._pkg = pkg
+			self._expand = None
+			self._expand_hidden = None
+			self._force = None
+			self._mask = None
+			self.enabled = frozenset(use_str.split())
+			if pkg.built:
+				# Use IUSE to validate USE settings for built packages,
+				# in case the package manager that built this package
+				# failed to do that for some reason (or in case of
+				# data corruption).
+				missing_iuse = pkg.iuse.get_missing_iuse(self.enabled)
+				if missing_iuse:
+					self.enabled = self.enabled.difference(missing_iuse)
+
+		def _init_force_mask(self):
+			pkgsettings = self._pkg._get_pkgsettings()
+			frozensets = self._frozensets
+			s = frozenset(
+				pkgsettings.get("USE_EXPAND", "").lower().split())
+			self._expand = frozensets.setdefault(s, s)
+			s = frozenset(
+				pkgsettings.get("USE_EXPAND_HIDDEN", "").lower().split())
+			self._expand_hidden = frozensets.setdefault(s, s)
+			s = pkgsettings.useforce
+			self._force = frozensets.setdefault(s, s)
+			s = pkgsettings.usemask
+			self._mask = frozensets.setdefault(s, s)
+
+		@property
+		def expand(self):
+			if self._expand is None:
+				self._init_force_mask()
+			return self._expand
+
+		@property
+		def expand_hidden(self):
+			if self._expand_hidden is None:
+				self._init_force_mask()
+			return self._expand_hidden
+
+		@property
+		def force(self):
+			if self._force is None:
+				self._init_force_mask()
+			return self._force
+
+		@property
+		def mask(self):
+			if self._mask is None:
+				self._init_force_mask()
+			return self._mask
+
+	@property
+	def repo(self):
+		return self.metadata['repository']
+
+	@property
+	def repo_priority(self):
+		repo_info = self.root_config.settings.repositories.prepos.get(self.repo)
+		if repo_info is None:
+			return None
+		return repo_info.priority
+
+	@property
+	def use(self):
+		if self._use is None:
+			self.metadata._init_use()
+		return self._use
+
+	def _get_pkgsettings(self):
+		pkgsettings = self.root_config.trees[
+			'porttree'].dbapi.doebuild_settings
+		pkgsettings.setcpv(self)
+		return pkgsettings
+
+	class _iuse(object):
+
+		__slots__ = ("__weakref__", "all", "enabled", "disabled",
+			"tokens") + ("_iuse_implicit_match",)
+
+		def __init__(self, tokens, iuse_implicit_match):
+			self.tokens = tuple(tokens)
+			self._iuse_implicit_match = iuse_implicit_match
+			enabled = []
+			disabled = []
+			other = []
+			for x in tokens:
+				prefix = x[:1]
+				if prefix == "+":
+					enabled.append(x[1:])
+				elif prefix == "-":
+					disabled.append(x[1:])
+				else:
+					other.append(x)
+			self.enabled = frozenset(enabled)
+			self.disabled = frozenset(disabled)
+			self.all = frozenset(chain(enabled, disabled, other))
+
+		def is_valid_flag(self, flags):
+			"""
+			@returns: True if all flags are valid USE values which may
+				be specified in USE dependencies, False otherwise.
+			"""
+			if isinstance(flags, basestring):
+				flags = [flags]
+
+			for flag in flags:
+				if not flag in self.all and \
+					not self._iuse_implicit_match(flag):
+					return False
+			return True
+
+		def get_missing_iuse(self, flags):
+			"""
+			@returns: A list of flags missing from IUSE.
+			"""
+			if isinstance(flags, basestring):
+				flags = [flags]
+			missing_iuse = []
+			for flag in flags:
+				if not flag in self.all and \
+					not self._iuse_implicit_match(flag):
+					missing_iuse.append(flag)
+			return missing_iuse
+
+	def __len__(self):
+		return 4
+
+	def __iter__(self):
+		"""
+		This is used to generate mtimedb resume mergelist entries, so we
+		limit it to 4 items for backward compatibility.
+		"""
+		return iter(self._hash_key[:4])
+
+	def __lt__(self, other):
+		if other.cp != self.cp:
+			return False
+		if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
+			return True
+		return False
+
+	def __le__(self, other):
+		if other.cp != self.cp:
+			return False
+		if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
+			return True
+		return False
+
+	def __gt__(self, other):
+		if other.cp != self.cp:
+			return False
+		if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
+			return True
+		return False
+
+	def __ge__(self, other):
+		if other.cp != self.cp:
+			return False
+		if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
+			return True
+		return False
+
+_all_metadata_keys = set(x for x in portage.auxdbkeys \
+	if not x.startswith("UNUSED_"))
+_all_metadata_keys.update(Package.metadata_keys)
+_all_metadata_keys = frozenset(_all_metadata_keys)
+
+_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
+
+class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
+	"""
+	Detect metadata updates and synchronize Package attributes.
+	"""
+
+	__slots__ = ("_pkg",)
+	_wrapped_keys = frozenset(
+		["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
+	_use_conditional_keys = frozenset(
+		['LICENSE', 'PROPERTIES', 'PROVIDE', 'RESTRICT',])
+
+	def __init__(self, pkg, metadata):
+		_PackageMetadataWrapperBase.__init__(self)
+		self._pkg = pkg
+		if not pkg.built:
+			# USE is lazy, but we want it to show up in self.keys().
+			_PackageMetadataWrapperBase.__setitem__(self, 'USE', '')
+
+		self.update(metadata)
+
+	def _init_use(self):
+		if self._pkg.built:
+			use_str = self['USE']
+			self._pkg._use = self._pkg._use_class(
+				self._pkg, use_str)
+		else:
+			try:
+				use_str = _PackageMetadataWrapperBase.__getitem__(self, 'USE')
+			except KeyError:
+				use_str = None
+			calculated_use = False
+			if not use_str:
+				use_str = self._pkg._get_pkgsettings()["PORTAGE_USE"]
+				calculated_use = True
+			_PackageMetadataWrapperBase.__setitem__(self, 'USE', use_str)
+			self._pkg._use = self._pkg._use_class(
+				self._pkg, use_str)
+			# Initialize these now, since USE access has just triggered
+			# setcpv, and we want to cache the result of the force/mask
+			# calculations that were done.
+			if calculated_use:
+				self._pkg._use._init_force_mask()
+
+		return use_str
+
+	def __getitem__(self, k):
+		v = _PackageMetadataWrapperBase.__getitem__(self, k)
+		if k in self._use_conditional_keys:
+			if self._pkg.root_config.settings.local_config and '?' in v:
+				try:
+					v = paren_enclose(use_reduce(v, uselist=self._pkg.use.enabled, \
+						is_valid_flag=self._pkg.iuse.is_valid_flag))
+				except InvalidDependString:
+					# This error should already have been registered via
+					# self._pkg._invalid_metadata().
+					pass
+				else:
+					self[k] = v
+
+		elif k == 'USE' and not self._pkg.built:
+			if not v:
+				# This is lazy because it's expensive.
+				v = self._init_use()
+
+		return v
+
+	def __setitem__(self, k, v):
+		_PackageMetadataWrapperBase.__setitem__(self, k, v)
+		if k in self._wrapped_keys:
+			getattr(self, "_set_" + k.lower())(k, v)
+
+	def _set_inherited(self, k, v):
+		if isinstance(v, basestring):
+			v = frozenset(v.split())
+		self._pkg.inherited = v
+
+	def _set_iuse(self, k, v):
+		self._pkg.iuse = self._pkg._iuse(
+			v.split(), self._pkg.root_config.settings._iuse_implicit_match)
+
+	def _set_slot(self, k, v):
+		self._pkg.slot = v
+
+	def _set_counter(self, k, v):
+		if isinstance(v, basestring):
+			try:
+				v = long(v.strip())
+			except ValueError:
+				v = 0
+		self._pkg.counter = v
+
+	def _set_use(self, k, v):
+		# Force regeneration of _use attribute
+		self._pkg._use = None
+		# Use raw metadata to restore USE conditional values
+		# to unevaluated state
+		raw_metadata = self._pkg._raw_metadata
+		for x in self._use_conditional_keys:
+			try:
+				self[x] = raw_metadata[x]
+			except KeyError:
+				pass
+
+	def _set__mtime_(self, k, v):
+		if isinstance(v, basestring):
+			try:
+				v = long(v.strip())
+			except ValueError:
+				v = 0
+		self._pkg.mtime = v
+
+	@property
+	def properties(self):
+		return self['PROPERTIES'].split()
+
+	@property
+	def restrict(self):
+		return self['RESTRICT'].split()
+
+	@property
+	def defined_phases(self):
+		"""
+		Returns tokens from DEFINED_PHASES metadata if it is defined,
+		otherwise returns a tuple containing all possible phases. This
+		makes it easy to do containment checks to see if it's safe to
+		skip execution of a given phase.
+		"""
+		s = self['DEFINED_PHASES']
+		if s:
+			return s.split()
+		return EBUILD_PHASES

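The _use_class above deduplicates the USE_EXPAND/USE_EXPAND_HIDDEN/useforce/usemask sets by interning them in the shared _frozensets dict (see _init_force_mask()). A minimal standalone sketch of that idiom, with illustrative names that are not part of portage:

    # dict.setdefault(s, s) returns the previously stored (equal) frozenset
    # when one exists, so equal sets collapse to a single shared instance
    # instead of every Package carrying its own copy.
    _interned = {}

    def intern_frozenset(items):
        s = frozenset(items)
        return _interned.setdefault(s, s)

    a = intern_frozenset("alpha amd64 x86".split())
    b = intern_frozenset("x86 amd64 alpha".split())
    assert a == b      # equal contents
    assert a is b      # and the very same object
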
diff --git a/portage_with_autodep/pym/_emerge/PackageArg.py b/portage_with_autodep/pym/_emerge/PackageArg.py
new file mode 100644
index 0000000..ebfe4b2
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageArg.py
@@ -0,0 +1,19 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DependencyArg import DependencyArg
+from _emerge.Package import Package
+import portage
+from portage._sets.base import InternalPackageSet
+from portage.dep import _repo_separator
+
+class PackageArg(DependencyArg):
+	def __init__(self, package=None, **kwargs):
+		DependencyArg.__init__(self, **kwargs)
+		self.package = package
+		atom = "=" + package.cpv
+		if package.repo != Package.UNKNOWN_REPO:
+			atom += _repo_separator + package.repo
+		self.atom = portage.dep.Atom(atom, allow_repo=True)
+		self.pset = InternalPackageSet(initial_atoms=(self.atom,),
+			allow_repo=True)

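For illustration, the atom string that PackageArg builds above looks like this, assuming _repo_separator is "::" as defined in portage.dep (the package and repository values here are hypothetical):

    cpv = "dev-lang/python-2.7.2"   # hypothetical package
    repo = "gentoo"                 # hypothetical repository name
    atom = "=" + cpv + "::" + repo
    assert atom == "=dev-lang/python-2.7.2::gentoo"
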
diff --git a/portage_with_autodep/pym/_emerge/PackageMerge.py b/portage_with_autodep/pym/_emerge/PackageMerge.py
new file mode 100644
index 0000000..f8fa04a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageMerge.py
@@ -0,0 +1,40 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from portage.output import colorize
+class PackageMerge(CompositeTask):
+	__slots__ = ("merge",)
+
+	def _start(self):
+
+		self.scheduler = self.merge.scheduler
+		pkg = self.merge.pkg
+		pkg_count = self.merge.pkg_count
+
+		if pkg.installed:
+			action_desc = "Uninstalling"
+			preposition = "from"
+			counter_str = ""
+		else:
+			action_desc = "Installing"
+			preposition = "to"
+			counter_str = "(%s of %s) " % \
+				(colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
+				colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
+
+		msg = "%s %s%s" % \
+			(action_desc,
+			counter_str,
+			colorize("GOOD", pkg.cpv))
+
+		if pkg.root != "/":
+			msg += " %s %s" % (preposition, pkg.root)
+
+		if not self.merge.build_opts.fetchonly and \
+			not self.merge.build_opts.pretend and \
+			not self.merge.build_opts.buildpkgonly:
+			self.merge.statusMessage(msg)
+
+		task = self.merge.create_install_task()
+		self._start_task(task, self._default_final_exit)

diff --git a/portage_with_autodep/pym/_emerge/PackageUninstall.py b/portage_with_autodep/pym/_emerge/PackageUninstall.py
new file mode 100644
index 0000000..eb6a947
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageUninstall.py
@@ -0,0 +1,110 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import portage
+from portage import os
+from portage.dbapi._MergeProcess import MergeProcess
+from portage.exception import UnsupportedAPIException
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.emergelog import emergelog
+from _emerge.CompositeTask import CompositeTask
+from _emerge.unmerge import _unmerge_display
+
+class PackageUninstall(CompositeTask):
+	"""
+	Uninstall a package asynchronously in a subprocess. When
+	both parallel-install and ebuild-locks FEATURES are enabled,
+	it is essential for the ebuild-locks code to execute in a
+	subprocess, since the portage.locks module does not behave
+	as desired if we try to lock the same file multiple times
+	concurrently from the same process for ebuild-locks phases
+	such as pkg_setup, pkg_prerm, and pkg_postrm.
+	"""
+
+	__slots__ = ("world_atom", "ldpath_mtimes", "opts",
+			"pkg", "settings", "_builddir_lock")
+
+	def _start(self):
+
+		vardb = self.pkg.root_config.trees["vartree"].dbapi
+		dbdir = vardb.getpath(self.pkg.cpv)
+		if not os.path.exists(dbdir):
+			# Apparently the package got uninstalled
+			# already, so we can safely return early.
+			self.returncode = os.EX_OK
+			self.wait()
+			return
+
+		self.settings.setcpv(self.pkg)
+		cat, pf = portage.catsplit(self.pkg.cpv)
+		myebuildpath = os.path.join(dbdir, pf + ".ebuild")
+
+		try:
+			portage.doebuild_environment(myebuildpath, "prerm",
+				settings=self.settings, db=vardb)
+		except UnsupportedAPIException:
+			# This is safe to ignore since this function is
+			# guaranteed to set PORTAGE_BUILDDIR even though
+			# it raises UnsupportedAPIException. The error
+			# will be logged when it prevents the pkg_prerm
+			# and pkg_postrm phases from executing.
+			pass
+
+		self._builddir_lock = EbuildBuildDir(
+			scheduler=self.scheduler, settings=self.settings)
+		self._builddir_lock.lock()
+
+		portage.prepare_build_dirs(
+			settings=self.settings, cleanup=True)
+
+		# Output only gets logged if it comes after prepare_build_dirs()
+		# which initializes PORTAGE_LOG_FILE.
+		retval, pkgmap = _unmerge_display(self.pkg.root_config,
+			self.opts, "unmerge", [self.pkg.cpv], clean_delay=0,
+			writemsg_level=self._writemsg_level)
+
+		if retval != os.EX_OK:
+			self._builddir_lock.unlock()
+			self.returncode = retval
+			self.wait()
+			return
+
+		self._writemsg_level(">>> Unmerging %s...\n" % (self.pkg.cpv,),
+			noiselevel=-1)
+		self._emergelog("=== Unmerging... (%s)" % (self.pkg.cpv,))
+
+		unmerge_task = MergeProcess(
+			mycat=cat, mypkg=pf, settings=self.settings,
+			treetype="vartree", vartree=self.pkg.root_config.trees["vartree"],
+			scheduler=self.scheduler, background=self.background,
+			mydbapi=self.pkg.root_config.trees["vartree"].dbapi,
+			prev_mtimes=self.ldpath_mtimes,
+			logfile=self.settings.get("PORTAGE_LOG_FILE"), unmerge=True)
+
+		self._start_task(unmerge_task, self._unmerge_exit)
+
+	def _unmerge_exit(self, unmerge_task):
+		if self._final_exit(unmerge_task) != os.EX_OK:
+			self._emergelog(" !!! unmerge FAILURE: %s" % (self.pkg.cpv,))
+		else:
+			self._emergelog(" >>> unmerge success: %s" % (self.pkg.cpv,))
+			self.world_atom(self.pkg)
+		self._builddir_lock.unlock()
+		self.wait()
+
+	def _emergelog(self, msg):
+		emergelog("notitles" not in self.settings.features, msg)
+
+	def _writemsg_level(self, msg, level=0, noiselevel=0):
+
+		log_path = self.settings.get("PORTAGE_LOG_FILE")
+		background = self.background
+
+		if log_path is None:
+			if not (background and level < logging.WARNING):
+				portage.util.writemsg_level(msg,
+					level=level, noiselevel=noiselevel)
+		else:
+			self.scheduler.output(msg, log_path=log_path,
+				level=level, noiselevel=noiselevel)

diff --git a/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py
new file mode 100644
index 0000000..a692bb6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py
@@ -0,0 +1,145 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.dbapi import dbapi
+
+class PackageVirtualDbapi(dbapi):
+	"""
+	A dbapi-like interface class that represents the state of the installed
+	package database as new packages are installed, replacing any packages
+	that previously existed in the same slot. The main difference between
+	this class and fakedbapi is that this one uses Package instances
+	internally (passed in via cpv_inject() and cpv_remove() calls).
+	"""
+	def __init__(self, settings):
+		dbapi.__init__(self)
+		self.settings = settings
+		self._match_cache = {}
+		self._cp_map = {}
+		self._cpv_map = {}
+
+	def clear(self):
+		"""
+		Remove all packages.
+		"""
+		if self._cpv_map:
+			self._clear_cache()
+			self._cp_map.clear()
+			self._cpv_map.clear()
+
+	def copy(self):
+		obj = PackageVirtualDbapi(self.settings)
+		obj._match_cache = self._match_cache.copy()
+		obj._cp_map = self._cp_map.copy()
+		for k, v in obj._cp_map.items():
+			obj._cp_map[k] = v[:]
+		obj._cpv_map = self._cpv_map.copy()
+		return obj
+
+	def __bool__(self):
+		return bool(self._cpv_map)
+
+	if sys.hexversion < 0x3000000:
+		__nonzero__ = __bool__
+
+	def __iter__(self):
+		return iter(self._cpv_map.values())
+
+	def __contains__(self, item):
+		existing = self._cpv_map.get(item.cpv)
+		if existing is not None and \
+			existing == item:
+			return True
+		return False
+
+	def get(self, item, default=None):
+		cpv = getattr(item, "cpv", None)
+		if cpv is None:
+			if len(item) != 5:
+				return default
+			type_name, root, cpv, operation, repo_key = item
+
+		existing = self._cpv_map.get(cpv)
+		if existing is not None and \
+			existing == item:
+			return existing
+		return default
+
+	def match_pkgs(self, atom):
+		return [self._cpv_map[cpv] for cpv in self.match(atom)]
+
+	def _clear_cache(self):
+		if self._categories is not None:
+			self._categories = None
+		if self._match_cache:
+			self._match_cache = {}
+
+	def match(self, origdep, use_cache=1):
+		result = self._match_cache.get(origdep)
+		if result is not None:
+			return result[:]
+		result = dbapi.match(self, origdep, use_cache=use_cache)
+		self._match_cache[origdep] = result
+		return result[:]
+
+	def cpv_exists(self, cpv, myrepo=None):
+		return cpv in self._cpv_map
+
+	def cp_list(self, mycp, use_cache=1):
+		cachelist = self._match_cache.get(mycp)
+		# cp_list() doesn't expand old-style virtuals
+		if cachelist and cachelist[0].startswith(mycp):
+			return cachelist[:]
+		cpv_list = self._cp_map.get(mycp)
+		if cpv_list is None:
+			cpv_list = []
+		else:
+			cpv_list = [pkg.cpv for pkg in cpv_list]
+		self._cpv_sort_ascending(cpv_list)
+		if not (not cpv_list and mycp.startswith("virtual/")):
+			self._match_cache[mycp] = cpv_list
+		return cpv_list[:]
+
+	def cp_all(self):
+		return list(self._cp_map)
+
+	def cpv_all(self):
+		return list(self._cpv_map)
+
+	def cpv_inject(self, pkg):
+		cp_list = self._cp_map.get(pkg.cp)
+		if cp_list is None:
+			cp_list = []
+			self._cp_map[pkg.cp] = cp_list
+		e_pkg = self._cpv_map.get(pkg.cpv)
+		if e_pkg is not None:
+			if e_pkg == pkg:
+				return
+			self.cpv_remove(e_pkg)
+		for e_pkg in cp_list:
+			if e_pkg.slot_atom == pkg.slot_atom:
+				if e_pkg == pkg:
+					return
+				self.cpv_remove(e_pkg)
+				break
+		cp_list.append(pkg)
+		self._cpv_map[pkg.cpv] = pkg
+		self._clear_cache()
+
+	def cpv_remove(self, pkg):
+		old_pkg = self._cpv_map.get(pkg.cpv)
+		if old_pkg != pkg:
+			raise KeyError(pkg)
+		self._cp_map[pkg.cp].remove(pkg)
+		del self._cpv_map[pkg.cpv]
+		self._clear_cache()
+
+	def aux_get(self, cpv, wants, myrepo=None):
+		metadata = self._cpv_map[cpv].metadata
+		return [metadata.get(x, "") for x in wants]
+
+	def aux_update(self, cpv, values):
+		self._cpv_map[cpv].metadata.update(values)
+		self._clear_cache()
+

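A standalone sketch of the slot-replacement bookkeeping that cpv_inject() implements above: injecting a package evicts any existing package occupying the same slot atom. The FakePkg class and values are stand-ins for illustration, not portage API:

    class FakePkg(object):
        def __init__(self, cp, cpv, slot):
            self.cp = cp
            self.cpv = cpv
            self.slot_atom = "%s:%s" % (cp, slot)

    cp_map = {}   # cp -> list of packages, as in _cp_map above
    cpv_map = {}  # cpv -> package, as in _cpv_map above

    def inject(pkg):
        cp_list = cp_map.setdefault(pkg.cp, [])
        for existing in cp_list:
            if existing.slot_atom == pkg.slot_atom:
                # same slot: the newcomer replaces the old package
                cp_list.remove(existing)
                del cpv_map[existing.cpv]
                break
        cp_list.append(pkg)
        cpv_map[pkg.cpv] = pkg

    inject(FakePkg("dev-lang/python", "dev-lang/python-2.7.1", "2.7"))
    inject(FakePkg("dev-lang/python", "dev-lang/python-2.7.2", "2.7"))
    assert list(cpv_map) == ["dev-lang/python-2.7.2"]
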
diff --git a/portage_with_autodep/pym/_emerge/PipeReader.py b/portage_with_autodep/pym/_emerge/PipeReader.py
new file mode 100644
index 0000000..375c98f
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PipeReader.py
@@ -0,0 +1,96 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+from _emerge.PollConstants import PollConstants
+import fcntl
+import array
+
+class PipeReader(AbstractPollTask):
+
+	"""
+	Reads output from one or more files and saves it in memory,
+	for retrieval via the getvalue() method. This is driven by
+	the scheduler's poll() loop, so it runs entirely within the
+	current process.
+	"""
+
+	__slots__ = ("input_files",) + \
+		("_read_data", "_reg_ids")
+
+	def _start(self):
+		self._reg_ids = set()
+		self._read_data = []
+		for k, f in self.input_files.items():
+			fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
+				fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+			self._reg_ids.add(self.scheduler.register(f.fileno(),
+				self._registered_events, self._output_handler))
+		self._registered = True
+
+	def isAlive(self):
+		return self._registered
+
+	def _cancel(self):
+		if self.returncode is None:
+			self.returncode = 1
+
+	def _wait(self):
+		if self.returncode is not None:
+			return self.returncode
+
+		if self._registered:
+			self.scheduler.schedule(self._reg_ids)
+			self._unregister()
+
+		self.returncode = os.EX_OK
+		return self.returncode
+
+	def getvalue(self):
+		"""Retrieve the entire contents"""
+		return b''.join(self._read_data)
+
+	def close(self):
+		"""Free the memory buffer."""
+		self._read_data = None
+
+	def _output_handler(self, fd, event):
+
+		if event & PollConstants.POLLIN:
+
+			for f in self.input_files.values():
+				if fd == f.fileno():
+					break
+
+			buf = array.array('B')
+			try:
+				buf.fromfile(f, self._bufsize)
+			except (EOFError, IOError):
+				pass
+
+			if buf:
+				self._read_data.append(buf.tostring())
+			else:
+				self._unregister()
+				self.wait()
+
+		self._unregister_if_appropriate(event)
+
+	def _unregister(self):
+		"""
+		Unregister from the scheduler and close open files.
+		"""
+
+		self._registered = False
+
+		if self._reg_ids is not None:
+			for reg_id in self._reg_ids:
+				self.scheduler.unregister(reg_id)
+			self._reg_ids = None
+
+		if self.input_files is not None:
+			for f in self.input_files.values():
+				f.close()
+			self.input_files = None
+

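A standalone sketch of the non-blocking setup that PipeReader._start() performs above: O_NONBLOCK is OR'ed into the existing file status flags so that reads never stall the poll() loop (POSIX only):

    import fcntl
    import os

    def set_nonblocking(fd):
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

    r, w = os.pipe()
    set_nonblocking(r)
    try:
        os.read(r, 4096)  # nothing has been written yet
    except OSError:
        # Without O_NONBLOCK this read would block forever; with it,
        # an empty pipe raises EAGAIN immediately.
        pass
    os.close(r)
    os.close(w)
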
diff --git a/portage_with_autodep/pym/_emerge/PollConstants.py b/portage_with_autodep/pym/_emerge/PollConstants.py
new file mode 100644
index 0000000..d0270a9
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PollConstants.py
@@ -0,0 +1,18 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import select
+class PollConstants(object):
+
+	"""
+	Provides POLL* constants that are equivalent to those from the
+	select module, for use by PollSelectAdapter.
+	"""
+
+	names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
+	v = 1
+	for k in names:
+		locals()[k] = getattr(select, k, v)
+		v *= 2
+	del k, v
+

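A standalone sketch of the fallback scheme used by PollConstants above: each POLL* name is taken from the select module when the platform defines it, otherwise assigned the next unused power of two so bitmask tests keep working:

    import select

    names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
    constants = {}
    v = 1
    for k in names:
        # use select's value when available, else the power-of-two fallback
        constants[k] = getattr(select, k, v)
        v *= 2

    # Where select has a full poll() implementation these match its values
    # (POLLIN=1, POLLPRI=2, POLLOUT=4, ...), so "event & constants['POLLIN']"
    # style checks remain valid either way.
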
diff --git a/portage_with_autodep/pym/_emerge/PollScheduler.py b/portage_with_autodep/pym/_emerge/PollScheduler.py
new file mode 100644
index 0000000..a2b5c24
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PollScheduler.py
@@ -0,0 +1,398 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gzip
+import errno
+import logging
+import select
+import time
+
+try:
+	import threading
+except ImportError:
+	import dummy_threading as threading
+
+from portage import _encodings
+from portage import _unicode_encode
+from portage.util import writemsg_level
+
+from _emerge.SlotObject import SlotObject
+from _emerge.getloadavg import getloadavg
+from _emerge.PollConstants import PollConstants
+from _emerge.PollSelectAdapter import PollSelectAdapter
+
+class PollScheduler(object):
+
+	class _sched_iface_class(SlotObject):
+		__slots__ = ("output", "register", "schedule", "unregister")
+
+	def __init__(self):
+		self._terminated = threading.Event()
+		self._terminated_tasks = False
+		self._max_jobs = 1
+		self._max_load = None
+		self._jobs = 0
+		self._poll_event_queue = []
+		self._poll_event_handlers = {}
+		self._poll_event_handler_ids = {}
+		# Increment id for each new handler.
+		self._event_handler_id = 0
+		self._poll_obj = create_poll_instance()
+		self._scheduling = False
+		self._background = False
+		self.sched_iface = self._sched_iface_class(
+			output=self._task_output,
+			register=self._register,
+			schedule=self._schedule_wait,
+			unregister=self._unregister)
+
+	def terminate(self):
+		"""
+		Schedules asynchronous, graceful termination of the scheduler
+		at the earliest opportunity.
+
+		This method is thread-safe (and safe for signal handlers).
+		"""
+		self._terminated.set()
+
+	def _terminate_tasks(self):
+		"""
+		Send signals to terminate all tasks. This is called once
+		from self._schedule() in the event dispatching thread. This
+		prevents it from being called while the _schedule_tasks()
+		implementation is running, in order to avoid potential
+		interference. All tasks should be cleaned up at the earliest
+		opportunity, but not necessarily before this method returns.
+		"""
+		raise NotImplementedError()
+
+	def _schedule_tasks(self):
+		"""
+		This is called from inside the _schedule() method, which
+		guarantees the following:
+
+		1) It will not be called recursively.
+		2) _terminate_tasks() will not be called while it is running.
+		3) The state of the boolean _terminated_tasks variable will
+		   not change while it is running.
+
+		Unless this method is used to perform user interface updates,
+		or something like that, the first thing it should do is check
+		the state of _terminated_tasks and if that is True then it
+		should return False immediately (since there's no need to
+		schedule anything after _terminate_tasks() has been called).
+		"""
+		raise NotImplementedError()
+
+	def _schedule(self):
+		"""
+		Calls _schedule_tasks() and automatically returns early from
+		any recursive calls to this method that the _schedule_tasks()
+		call might trigger. This makes _schedule() safe to call from
+		inside exit listeners.
+		"""
+		if self._scheduling:
+			return False
+		self._scheduling = True
+		try:
+
+			if self._terminated.is_set() and \
+				not self._terminated_tasks:
+				self._terminated_tasks = True
+				self._terminate_tasks()
+
+			return self._schedule_tasks()
+		finally:
+			self._scheduling = False
+
+	def _running_job_count(self):
+		return self._jobs
+
+	def _can_add_job(self):
+		if self._terminated_tasks:
+			return False
+
+		max_jobs = self._max_jobs
+		max_load = self._max_load
+
+		if self._max_jobs is not True and \
+			self._running_job_count() >= self._max_jobs:
+			return False
+
+		if max_load is not None and \
+			(max_jobs is True or max_jobs > 1) and \
+			self._running_job_count() >= 1:
+			try:
+				avg1, avg5, avg15 = getloadavg()
+			except OSError:
+				return False
+
+			if avg1 >= max_load:
+				return False
+
+		return True
+
+	def _poll(self, timeout=None):
+		"""
+		All poll() calls pass through here. The poll events
+		are added directly to self._poll_event_queue.
+		In order to avoid endless blocking, this raises
+		StopIteration if timeout is None and there are
+		no file descriptors to poll.
+		"""
+		if not self._poll_event_handlers:
+			self._schedule()
+			if timeout is None and \
+				not self._poll_event_handlers:
+				raise StopIteration(
+					"timeout is None and there are no poll() event handlers")
+
+		# The following error is known to occur with Linux kernel versions
+		# less than 2.6.24:
+		#
+		#   select.error: (4, 'Interrupted system call')
+		#
+		# This error has been observed after a SIGSTOP, followed by SIGCONT.
+		# Treat it similar to EAGAIN if timeout is None, otherwise just return
+		# without any events.
+		while True:
+			try:
+				self._poll_event_queue.extend(self._poll_obj.poll(timeout))
+				break
+			except select.error as e:
+				writemsg_level("\n!!! select error: %s\n" % (e,),
+					level=logging.ERROR, noiselevel=-1)
+				del e
+				if timeout is not None:
+					break
+
+	def _next_poll_event(self, timeout=None):
+		"""
+		Since the _schedule_wait() loop is called by event
+		handlers from _poll_loop(), maintain a central event
+		queue for both of them to share events from a single
+		poll() call. In order to avoid endless blocking, this
+		raises StopIteration if timeout is None and there are
+		no file descriptors to poll.
+		"""
+		if not self._poll_event_queue:
+			self._poll(timeout)
+			if not self._poll_event_queue:
+				raise StopIteration()
+		return self._poll_event_queue.pop()
+
+	def _poll_loop(self):
+
+		event_handlers = self._poll_event_handlers
+		event_handled = False
+
+		try:
+			while event_handlers:
+				f, event = self._next_poll_event()
+				handler, reg_id = event_handlers[f]
+				handler(f, event)
+				event_handled = True
+		except StopIteration:
+			event_handled = True
+
+		if not event_handled:
+			raise AssertionError("tight loop")
+
+	def _schedule_yield(self):
+		"""
+		Schedule for a short period of time chosen by the scheduler based
+		on internal state. Synchronous tasks should call this periodically
+		in order to allow the scheduler to service pending poll events. The
+		scheduler will call poll() exactly once, without blocking, and any
+		resulting poll events will be serviced.
+		"""
+		event_handlers = self._poll_event_handlers
+		events_handled = 0
+
+		if not event_handlers:
+			return bool(events_handled)
+
+		if not self._poll_event_queue:
+			self._poll(0)
+
+		try:
+			while event_handlers and self._poll_event_queue:
+				f, event = self._next_poll_event()
+				handler, reg_id = event_handlers[f]
+				handler(f, event)
+				events_handled += 1
+		except StopIteration:
+			events_handled += 1
+
+		return bool(events_handled)
+
+	def _register(self, f, eventmask, handler):
+		"""
+		@rtype: Integer
+		@return: A unique registration id, for use in schedule() or
+			unregister() calls.
+		"""
+		if f in self._poll_event_handlers:
+			raise AssertionError("fd %d is already registered" % f)
+		self._event_handler_id += 1
+		reg_id = self._event_handler_id
+		self._poll_event_handler_ids[reg_id] = f
+		self._poll_event_handlers[f] = (handler, reg_id)
+		self._poll_obj.register(f, eventmask)
+		return reg_id
+
+	def _unregister(self, reg_id):
+		f = self._poll_event_handler_ids[reg_id]
+		self._poll_obj.unregister(f)
+		if self._poll_event_queue:
+			# Discard any unhandled events that belong to this file,
+			# in order to prevent these events from being erroneously
+			# delivered to a future handler that is using a reallocated
+			# file descriptor of the same numeric value (causing
+			# extremely confusing bugs).
+			remaining_events = []
+			discarded_events = False
+			for event in self._poll_event_queue:
+				if event[0] == f:
+					discarded_events = True
+				else:
+					remaining_events.append(event)
+
+			if discarded_events:
+				self._poll_event_queue[:] = remaining_events
+
+		del self._poll_event_handlers[f]
+		del self._poll_event_handler_ids[reg_id]
+
+	def _schedule_wait(self, wait_ids=None, timeout=None, condition=None):
+		"""
+		Schedule until wait_ids are no longer registered
+		for poll() events.
+		@type wait_ids: int or collection of ints
+		@param wait_ids: task id(s) to wait for
+		"""
+		event_handlers = self._poll_event_handlers
+		handler_ids = self._poll_event_handler_ids
+		event_handled = False
+
+		if isinstance(wait_ids, int):
+			wait_ids = frozenset([wait_ids])
+
+		start_time = None
+		remaining_timeout = timeout
+		timed_out = False
+		if timeout is not None:
+			start_time = time.time()
+		try:
+			while (wait_ids is None and event_handlers) or \
+				(wait_ids is not None and wait_ids.intersection(handler_ids)):
+				f, event = self._next_poll_event(timeout=remaining_timeout)
+				handler, reg_id = event_handlers[f]
+				handler(f, event)
+				event_handled = True
+				if condition is not None and condition():
+					break
+				if timeout is not None:
+					elapsed_time = time.time() - start_time
+					if elapsed_time < 0:
+						# The system clock has changed such that start_time
+						# is now in the future, so just assume that the
+						# timeout has already elapsed.
+						timed_out = True
+						break
+					remaining_timeout = timeout - 1000 * elapsed_time
+					if remaining_timeout <= 0:
+						timed_out = True
+						break
+		except StopIteration:
+			event_handled = True
+
+		return event_handled
+
+	def _task_output(self, msg, log_path=None, background=None,
+		level=0, noiselevel=-1):
+		"""
+		Output msg to stdout if not self._background. If log_path
+		is not None then append msg to the log (appends with
+		compression if the filename extension of log_path
+		corresponds to a supported compression type).
+		"""
+
+		if background is None:
+			# If the task does not have a local background value
+			# (like for parallel-fetch), then use the global value.
+			background = self._background
+
+		msg_shown = False
+		if not background:
+			writemsg_level(msg, level=level, noiselevel=noiselevel)
+			msg_shown = True
+
+		if log_path is not None:
+			try:
+				f = open(_unicode_encode(log_path,
+					encoding=_encodings['fs'], errors='strict'),
+					mode='ab')
+			except IOError as e:
+				if e.errno not in (errno.ENOENT, errno.ESTALE):
+					raise
+				if not msg_shown:
+					writemsg_level(msg, level=level, noiselevel=noiselevel)
+			else:
+
+				if log_path.endswith('.gz'):
+					# NOTE: The empty filename argument prevents us from
+					# triggering a bug in python3 which causes GzipFile
+					# to raise AttributeError if fileobj.name is bytes
+					# instead of unicode.
+					f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
+
+				f.write(_unicode_encode(msg))
+				f.close()
+
+_can_poll_device = None
+
+def can_poll_device():
+	"""
+	Test if it's possible to use poll() on a device such as a pty. This
+	is known to fail on Darwin.
+	@rtype: bool
+	@returns: True if poll() on a device succeeds, False otherwise.
+	"""
+
+	global _can_poll_device
+	if _can_poll_device is not None:
+		return _can_poll_device
+
+	if not hasattr(select, "poll"):
+		_can_poll_device = False
+		return _can_poll_device
+
+	try:
+		dev_null = open('/dev/null', 'rb')
+	except IOError:
+		_can_poll_device = False
+		return _can_poll_device
+
+	p = select.poll()
+	p.register(dev_null.fileno(), PollConstants.POLLIN)
+
+	invalid_request = False
+	for f, event in p.poll():
+		if event & PollConstants.POLLNVAL:
+			invalid_request = True
+			break
+	dev_null.close()
+
+	_can_poll_device = not invalid_request
+	return _can_poll_device
+
+def create_poll_instance():
+	"""
+	Create an instance of select.poll, or an instance of
+	PollSelectAdapter if there is no poll() implementation or
+	it is broken somehow.
+	"""
+	if can_poll_device():
+		return select.poll()
+	return PollSelectAdapter()

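A standalone sketch of the compressed-log append used by _task_output() above; the log path is hypothetical, and the empty filename argument is the same python3 GzipFile workaround noted in the code:

    import gzip

    log_path = "/tmp/example-emerge.log.gz"  # hypothetical path

    raw = open(log_path, "ab")
    f = gzip.GzipFile(filename="", mode="ab", fileobj=raw)
    f.write(b">>> example message\n")
    f.close()
    raw.close()

    # Each append adds another gzip member; decompressors read the members
    # back to back, so the result still reads like one continuous log.
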
diff --git a/portage_with_autodep/pym/_emerge/PollSelectAdapter.py b/portage_with_autodep/pym/_emerge/PollSelectAdapter.py
new file mode 100644
index 0000000..c11dab8
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PollSelectAdapter.py
@@ -0,0 +1,73 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.PollConstants import PollConstants
+import select
+class PollSelectAdapter(PollConstants):
+
+	"""
+	Use select to emulate a poll object, for
+	systems that don't support poll().
+	"""
+
+	def __init__(self):
+		self._registered = {}
+		self._select_args = [[], [], []]
+
+	def register(self, fd, *args):
+		"""
+		Only POLLIN is currently supported!
+		"""
+		if len(args) > 1:
+			raise TypeError(
+				"register expected at most 2 arguments, got " + \
+				repr(1 + len(args)))
+
+		eventmask = PollConstants.POLLIN | \
+			PollConstants.POLLPRI | PollConstants.POLLOUT
+		if args:
+			eventmask = args[0]
+
+		self._registered[fd] = eventmask
+		self._select_args = None
+
+	def unregister(self, fd):
+		self._select_args = None
+		del self._registered[fd]
+
+	def poll(self, *args):
+		if len(args) > 1:
+			raise TypeError(
+				"poll expected at most 2 arguments, got " + \
+				repr(1 + len(args)))
+
+		timeout = None
+		if args:
+			timeout = args[0]
+
+		select_args = self._select_args
+		if select_args is None:
+			select_args = [list(self._registered), [], []]
+
+		if timeout is not None:
+			select_args = select_args[:]
+			# Translate poll() timeout args to select() timeout args:
+			#
+			#          | units        | value(s) for indefinite block
+			# ---------|--------------|------------------------------
+			#   poll   | milliseconds | omitted, negative, or None
+			# ---------|--------------|------------------------------
+			#   select | seconds      | omitted
+			# ---------|--------------|------------------------------
+
+			if timeout is not None and timeout < 0:
+				timeout = None
+			if timeout is not None:
+				select_args.append(timeout / 1000.0)
+
+		select_events = select.select(*select_args)
+		poll_events = []
+		for fd in select_events[0]:
+			poll_events.append((fd, PollConstants.POLLIN))
+		return poll_events
+

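A standalone sketch of the timeout translation tabulated in the PollSelectAdapter.poll() comment above (the helper name is illustrative):

    def poll_timeout_to_select_args(registered_fds, timeout=None):
        # select watches (rlist, wlist, xlist); only POLLIN is emulated
        args = [list(registered_fds), [], []]
        if timeout is not None and timeout < 0:
            timeout = None  # negative poll() timeout means block indefinitely
        if timeout is not None:
            args.append(timeout / 1000.0)  # milliseconds -> seconds
        return args

    assert poll_timeout_to_select_args([3, 4]) == [[3, 4], [], []]
    assert poll_timeout_to_select_args([3], -1) == [[3], [], []]
    assert poll_timeout_to_select_args([3], 250) == [[3], [], [], 0.25]
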
diff --git a/portage_with_autodep/pym/_emerge/ProgressHandler.py b/portage_with_autodep/pym/_emerge/ProgressHandler.py
new file mode 100644
index 0000000..f5afe6d
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/ProgressHandler.py
@@ -0,0 +1,22 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+class ProgressHandler(object):
+	def __init__(self):
+		self.curval = 0
+		self.maxval = 0
+		self._last_update = 0
+		self.min_latency = 0.2
+
+	def onProgress(self, maxval, curval):
+		self.maxval = maxval
+		self.curval = curval
+		cur_time = time.time()
+		if cur_time - self._last_update >= self.min_latency:
+			self._last_update = cur_time
+			self.display()
+
+	def display(self):
+		raise NotImplementedError(self)
+

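A standalone sketch of how ProgressHandler above is meant to be used: a subclass supplies display(), and onProgress() throttles redraws to at most one per min_latency seconds. The class body is inlined here so the sketch runs on its own:

    import time

    class ProgressHandlerSketch(object):
        # mirror of the ProgressHandler class above, inlined for self-containment
        def __init__(self):
            self.curval = 0
            self.maxval = 0
            self._last_update = 0
            self.min_latency = 0.2

        def onProgress(self, maxval, curval):
            self.maxval = maxval
            self.curval = curval
            cur_time = time.time()
            if cur_time - self._last_update >= self.min_latency:
                self._last_update = cur_time
                self.display()

        def display(self):
            raise NotImplementedError(self)

    class PercentDisplay(ProgressHandlerSketch):
        def display(self):
            print("%d%%" % (100 * self.curval // self.maxval))

    handler = PercentDisplay()
    for i in range(1, 101):
        handler.onProgress(100, i)
        time.sleep(0.01)  # updates appear at most every min_latency seconds
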
diff --git a/portage_with_autodep/pym/_emerge/QueueScheduler.py b/portage_with_autodep/pym/_emerge/QueueScheduler.py
new file mode 100644
index 0000000..a4ab328
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/QueueScheduler.py
@@ -0,0 +1,116 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+
+from _emerge.PollScheduler import PollScheduler
+
+class QueueScheduler(PollScheduler):
+
+	"""
+	Add instances of SequentialTaskQueue and then call run(). The
+	run() method returns when no tasks remain.
+	"""
+
+	def __init__(self, max_jobs=None, max_load=None):
+		PollScheduler.__init__(self)
+
+		if max_jobs is None:
+			max_jobs = 1
+
+		self._max_jobs = max_jobs
+		self._max_load = max_load
+
+		self._queues = []
+		self._schedule_listeners = []
+
+	def add(self, q):
+		self._queues.append(q)
+
+	def remove(self, q):
+		self._queues.remove(q)
+
+	def clear(self):
+		for q in self._queues:
+			q.clear()
+
+	def run(self, timeout=None):
+
+		start_time = None
+		timed_out = False
+		remaining_timeout = timeout
+		if timeout is not None:
+			start_time = time.time()
+
+		while self._schedule():
+			self._schedule_wait(timeout=remaining_timeout)
+			if timeout is not None:
+				elapsed_time = time.time() - start_time
+				if elapsed_time < 0:
+					# The system clock has changed such that start_time
+					# is now in the future, so just assume that the
+					# timeout has already elapsed.
+					timed_out = True
+					break
+				remaining_timeout = timeout - 1000 * elapsed_time
+				if remaining_timeout <= 0:
+					timed_out = True
+					break
+
+		if timeout is None or not timed_out:
+			while self._running_job_count():
+				self._schedule_wait(timeout=remaining_timeout)
+				if timeout is not None:
+					elapsed_time = time.time() - start_time
+					if elapsed_time < 0:
+						# The system clock has changed such that start_time
+						# is now in the future, so just assume that the
+						# timeout has already elapsed.
+						timed_out = True
+						break
+					remaining_timeout = timeout - 1000 * elapsed_time
+					if remaining_timeout <= 0:
+						timed_out = True
+						break
+
+	def _schedule_tasks(self):
+		"""
+		@rtype: bool
+		@returns: True if there may be remaining tasks to schedule,
+			False otherwise.
+		"""
+		if self._terminated_tasks:
+			return False
+
+		while self._can_add_job():
+			n = self._max_jobs - self._running_job_count()
+			if n < 1:
+				break
+
+			if not self._start_next_job(n):
+				return False
+
+		for q in self._queues:
+			if q:
+				return True
+		return False
+
+	def _running_job_count(self):
+		job_count = 0
+		for q in self._queues:
+			job_count += len(q.running_tasks)
+		self._jobs = job_count
+		return job_count
+
+	def _start_next_job(self, n=1):
+		started_count = 0
+		for q in self._queues:
+			initial_job_count = len(q.running_tasks)
+			q.schedule()
+			final_job_count = len(q.running_tasks)
+			if final_job_count > initial_job_count:
+				started_count += (final_job_count - initial_job_count)
+			if started_count >= n:
+				break
+		return started_count
+

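A standalone sketch of the remaining-timeout bookkeeping repeated in QueueScheduler.run() above: the timeout is in milliseconds, time.time() returns seconds, and a negative elapsed time (the system clock stepped backwards past start_time) is treated as an expired timeout. The helper name is illustrative:

    import time

    def remaining_ms(start_time, timeout_ms):
        """Return remaining milliseconds, or 0 if the timeout has elapsed."""
        elapsed = time.time() - start_time
        if elapsed < 0:
            # clock moved backwards; assume the timeout already elapsed
            return 0
        remaining = timeout_ms - 1000 * elapsed
        return remaining if remaining > 0 else 0

    start = time.time()
    time.sleep(0.05)
    assert remaining_ms(start, 1000) <= 950
    assert remaining_ms(start + 3600, 1000) == 0  # start_time "in the future"
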
diff --git a/portage_with_autodep/pym/_emerge/RootConfig.py b/portage_with_autodep/pym/_emerge/RootConfig.py
new file mode 100644
index 0000000..d84f108
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/RootConfig.py
@@ -0,0 +1,34 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class RootConfig(object):
+	"""This is used internally by depgraph to track information about a
+	particular $ROOT."""
+	__slots__ = ("root", "setconfig", "sets", "settings", "trees")
+
+	pkg_tree_map = {
+		"ebuild"    : "porttree",
+		"binary"    : "bintree",
+		"installed" : "vartree"
+	}
+
+	tree_pkg_map = {}
+	for k, v in pkg_tree_map.items():
+		tree_pkg_map[v] = k
+
+	def __init__(self, settings, trees, setconfig):
+		self.trees = trees
+		self.settings = settings
+		self.root = self.settings["ROOT"]
+		self.setconfig = setconfig
+		if setconfig is None:
+			self.sets = {}
+		else:
+			self.sets = self.setconfig.getSets()
+
+	def update(self, other):
+		"""
+		Shallow copy all attributes from another instance.
+		"""
+		for k in self.__slots__:
+			setattr(self, k, getattr(other, k))

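The tree_pkg_map built above is simply pkg_tree_map inverted; an equivalent standalone snippet:

    pkg_tree_map = {"ebuild": "porttree", "binary": "bintree", "installed": "vartree"}
    tree_pkg_map = dict((v, k) for k, v in pkg_tree_map.items())
    assert tree_pkg_map == {"porttree": "ebuild", "bintree": "binary", "vartree": "installed"}
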
diff --git a/portage_with_autodep/pym/_emerge/Scheduler.py b/portage_with_autodep/pym/_emerge/Scheduler.py
new file mode 100644
index 0000000..6412d82
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Scheduler.py
@@ -0,0 +1,1975 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from collections import deque
+import gc
+import gzip
+import logging
+import shutil
+import signal
+import sys
+import tempfile
+import textwrap
+import time
+import warnings
+import weakref
+import zlib
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.elog.messages import eerror
+from portage.localization import _
+from portage.output import colorize, create_color_func, red
+bad = create_color_func("BAD")
+from portage._sets import SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import writemsg, writemsg_level
+from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.digestgen import digestgen
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+
+import _emerge
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from _emerge.Blocker import Blocker
+from _emerge.BlockerDB import BlockerDB
+from _emerge.clear_caches import clear_caches
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.create_world_atom import create_world_atom
+from _emerge.DepPriority import DepPriority
+from _emerge.depgraph import depgraph, resume_depgraph
+from _emerge.EbuildFetcher import EbuildFetcher
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.emergelog import emergelog
+from _emerge.FakeVartree import FakeVartree
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
+from _emerge.JobStatusDisplay import JobStatusDisplay
+from _emerge.MergeListItem import MergeListItem
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.Package import Package
+from _emerge.PackageMerge import PackageMerge
+from _emerge.PollScheduler import PollScheduler
+from _emerge.RootConfig import RootConfig
+from _emerge.SlotObject import SlotObject
+from _emerge.SequentialTaskQueue import SequentialTaskQueue
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+class Scheduler(PollScheduler):
+
+	# max time between display status updates (milliseconds)
+	_max_display_latency = 3000
+
+	_opts_ignore_blockers = \
+		frozenset(["--buildpkgonly",
+		"--fetchonly", "--fetch-all-uri",
+		"--nodeps", "--pretend"])
+
+	_opts_no_background = \
+		frozenset(["--pretend",
+		"--fetchonly", "--fetch-all-uri"])
+
+	_opts_no_restart = frozenset(["--buildpkgonly",
+		"--fetchonly", "--fetch-all-uri", "--pretend"])
+
+	_bad_resume_opts = set(["--ask", "--changelog",
+		"--resume", "--skipfirst"])
+
+	class _iface_class(SlotObject):
+		__slots__ = ("fetch",
+			"output", "register", "schedule",
+			"scheduleSetup", "scheduleUnpack", "scheduleYield",
+			"unregister")
+
+	class _fetch_iface_class(SlotObject):
+		__slots__ = ("log_file", "schedule")
+
+	_task_queues_class = slot_dict_class(
+		("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
+
+	class _build_opts_class(SlotObject):
+		__slots__ = ("buildpkg", "buildpkgonly",
+			"fetch_all_uri", "fetchonly", "pretend")
+
+	class _binpkg_opts_class(SlotObject):
+		__slots__ = ("fetchonly", "getbinpkg", "pretend")
+
+	class _pkg_count_class(SlotObject):
+		__slots__ = ("curval", "maxval")
+
+	class _emerge_log_class(SlotObject):
+		__slots__ = ("xterm_titles",)
+
+		def log(self, *pargs, **kwargs):
+			if not self.xterm_titles:
+				# Avoid interference with the scheduler's status display.
+				kwargs.pop("short_msg", None)
+			emergelog(self.xterm_titles, *pargs, **kwargs)
+
+	class _failed_pkg(SlotObject):
+		__slots__ = ("build_dir", "build_log", "pkg", "returncode")
+
+	class _ConfigPool(object):
+		"""Interface for a task to temporarily allocate a config
+		instance from a pool. This allows a task to be constructed
+		long before the config instance actually becomes needed, like
+		when prefetchers are constructed for the whole merge list."""
+		__slots__ = ("_root", "_allocate", "_deallocate")
+		def __init__(self, root, allocate, deallocate):
+			self._root = root
+			self._allocate = allocate
+			self._deallocate = deallocate
+		def allocate(self):
+			return self._allocate(self._root)
+		def deallocate(self, settings):
+			self._deallocate(settings)
+
+	class _unknown_internal_error(portage.exception.PortageException):
+		"""
+		Used internally to terminate scheduling. The specific reason for
+		the failure should have been dumped to stderr.
+		"""
+		def __init__(self, value=""):
+			portage.exception.PortageException.__init__(self, value)
+
+	def __init__(self, settings, trees, mtimedb, myopts,
+		spinner, mergelist=None, favorites=None, graph_config=None):
+		PollScheduler.__init__(self)
+
+		if mergelist is not None:
+			warnings.warn("The mergelist parameter of the " + \
+				"_emerge.Scheduler constructor is now unused. Use " + \
+				"the graph_config parameter instead.",
+				DeprecationWarning, stacklevel=2)
+
+		self.settings = settings
+		self.target_root = settings["ROOT"]
+		self.trees = trees
+		self.myopts = myopts
+		self._spinner = spinner
+		self._mtimedb = mtimedb
+		self._favorites = favorites
+		self._args_set = InternalPackageSet(favorites, allow_repo=True)
+		self._build_opts = self._build_opts_class()
+		for k in self._build_opts.__slots__:
+			setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
+		self._binpkg_opts = self._binpkg_opts_class()
+		for k in self._binpkg_opts.__slots__:
+			setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
+
+		self.curval = 0
+		self._logger = self._emerge_log_class()
+		self._task_queues = self._task_queues_class()
+		for k in self._task_queues.allowed_keys:
+			setattr(self._task_queues, k,
+				SequentialTaskQueue())
+
+		# Holds merges that will wait to be executed when no builds are
+		# executing. This is useful for system packages since dependencies
+		# on system packages are frequently unspecified. For example, see
+		# bug #256616.
+		self._merge_wait_queue = deque()
+		# Holds merges that have been transferred from the merge_wait_queue to
+		# the actual merge queue. They are removed from this list upon
+		# completion. Other packages can start building only when this list is
+		# empty.
+		self._merge_wait_scheduled = []
+
+		# Holds system packages and their deep runtime dependencies. Before
+		# being merged, these packages go to merge_wait_queue, to be merged
+		# when no other packages are building.
+		self._deep_system_deps = set()
+
+		# Holds packages to merge which will satisfy currently unsatisfied
+		# deep runtime dependencies of system packages. If this is not empty
+		# then no parallel builds will be spawned until it is empty. This
+		# minimizes the possibility that a build will fail due to the system
+		# being in a fragile state. For example, see bug #259954.
+		self._unsatisfied_system_deps = set()
+
+		self._status_display = JobStatusDisplay(
+			xterm_titles=('notitles' not in settings.features))
+		self._max_load = myopts.get("--load-average")
+		max_jobs = myopts.get("--jobs")
+		if max_jobs is None:
+			max_jobs = 1
+		self._set_max_jobs(max_jobs)
+
+		# The root where the currently running
+		# portage instance is installed.
+		self._running_root = trees["/"]["root_config"]
+		self.edebug = 0
+		if settings.get("PORTAGE_DEBUG", "") == "1":
+			self.edebug = 1
+		self.pkgsettings = {}
+		self._config_pool = {}
+		for root in self.trees:
+			self._config_pool[root] = []
+
+		self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
+			'emerge-fetch.log')
+		fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
+			schedule=self._schedule_fetch)
+		self._sched_iface = self._iface_class(
+			fetch=fetch_iface, output=self._task_output,
+			register=self._register,
+			schedule=self._schedule_wait,
+			scheduleSetup=self._schedule_setup,
+			scheduleUnpack=self._schedule_unpack,
+			scheduleYield=self._schedule_yield,
+			unregister=self._unregister)
+
+		self._prefetchers = weakref.WeakValueDictionary()
+		self._pkg_queue = []
+		self._running_tasks = {}
+		self._completed_tasks = set()
+
+		self._failed_pkgs = []
+		self._failed_pkgs_all = []
+		self._failed_pkgs_die_msgs = []
+		self._post_mod_echo_msgs = []
+		self._parallel_fetch = False
+		self._init_graph(graph_config)
+		merge_count = len([x for x in self._mergelist \
+			if isinstance(x, Package) and x.operation == "merge"])
+		self._pkg_count = self._pkg_count_class(
+			curval=0, maxval=merge_count)
+		self._status_display.maxval = self._pkg_count.maxval
+
+		# The load average takes some time to respond when new
+		# jobs are added, so we need to limit the rate of adding
+		# new jobs.
+		self._job_delay_max = 10
+		self._job_delay_factor = 1.0
+		self._job_delay_exp = 1.5
+		self._previous_job_start_time = None
+
+		# This is used to memoize the _choose_pkg() result when
+		# no packages can be chosen until one of the existing
+		# jobs completes.
+		self._choose_pkg_return_early = False
+
+		features = self.settings.features
+		if "parallel-fetch" in features and \
+			not ("--pretend" in self.myopts or \
+			"--fetch-all-uri" in self.myopts or \
+			"--fetchonly" in self.myopts):
+			if "distlocks" not in features:
+				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+				portage.writemsg(red("!!!")+" parallel-fetching " + \
+					"requires the distlocks feature enabled"+"\n",
+					noiselevel=-1)
+				portage.writemsg(red("!!!")+" you have it disabled, " + \
+					"thus parallel-fetching is being disabled"+"\n",
+					noiselevel=-1)
+				portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+			elif merge_count > 1:
+				self._parallel_fetch = True
+
+		if self._parallel_fetch:
+			# clear out the existing fetch log if it exists
+			try:
+				open(self._fetch_log, 'w').close()
+			except EnvironmentError:
+				pass
+
+		self._running_portage = None
+		portage_match = self._running_root.trees["vartree"].dbapi.match(
+			portage.const.PORTAGE_PACKAGE_ATOM)
+		if portage_match:
+			cpv = portage_match.pop()
+			self._running_portage = self._pkg(cpv, "installed",
+				self._running_root, installed=True)
+
+	def _terminate_tasks(self):
+		self._status_display.quiet = True
+		while self._running_tasks:
+			task_id, task = self._running_tasks.popitem()
+			task.cancel()
+		for q in self._task_queues.values():
+			q.clear()
+
+	def _init_graph(self, graph_config):
+		"""
+		Initialize structures used for dependency calculations
+		involving currently installed packages.
+		"""
+		self._set_graph_config(graph_config)
+		self._blocker_db = {}
+		for root in self.trees:
+			if graph_config is None:
+				fake_vartree = FakeVartree(self.trees[root]["root_config"],
+					pkg_cache=self._pkg_cache)
+				fake_vartree.sync()
+			else:
+				fake_vartree = graph_config.trees[root]['vartree']
+			self._blocker_db[root] = BlockerDB(fake_vartree)
+
+	def _destroy_graph(self):
+		"""
+		Use this to free memory at the beginning of _calc_resume_list().
+		After _calc_resume_list(), the _init_graph() method
+		must be called in order to re-generate the structures that
+		this method destroys.
+		"""
+		self._blocker_db = None
+		self._set_graph_config(None)
+		gc.collect()
+
+	def _poll(self, timeout=None):
+
+		self._schedule()
+
+		if timeout is None:
+			while True:
+				if not self._poll_event_handlers:
+					self._schedule()
+					if not self._poll_event_handlers:
+						raise StopIteration(
+							"timeout is None and there are no poll() event handlers")
+				previous_count = len(self._poll_event_queue)
+				PollScheduler._poll(self, timeout=self._max_display_latency)
+				self._status_display.display()
+				if previous_count != len(self._poll_event_queue):
+					break
+
+		elif timeout <= self._max_display_latency:
+			PollScheduler._poll(self, timeout=timeout)
+			if timeout == 0:
+				# The display is updated by _schedule() above, so it would be
+				# redundant to update it here when timeout is 0.
+				pass
+			else:
+				self._status_display.display()
+
+		else:
+			remaining_timeout = timeout
+			start_time = time.time()
+			while True:
+				previous_count = len(self._poll_event_queue)
+				PollScheduler._poll(self,
+					timeout=min(self._max_display_latency, remaining_timeout))
+				self._status_display.display()
+				if previous_count != len(self._poll_event_queue):
+					break
+				elapsed_time = time.time() - start_time
+				if elapsed_time < 0:
+					# The system clock has changed such that start_time
+					# is now in the future, so just assume that the
+					# timeout has already elapsed.
+					break
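+				# Note: poll timeouts are expressed in milliseconds (as
+				# with select.poll()), while time.time() returns seconds,
+				# hence the factor of 1000 below.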
+				remaining_timeout = timeout - 1000 * elapsed_time
+				if remaining_timeout <= 0:
+					break
+
+	def _set_max_jobs(self, max_jobs):
+		self._max_jobs = max_jobs
+		self._task_queues.jobs.max_jobs = max_jobs
+		if "parallel-install" in self.settings.features:
+			self._task_queues.merge.max_jobs = max_jobs
+
+	def _background_mode(self):
+		"""
+		Check if background mode is enabled and adjust states as necessary.
+
+		@rtype: bool
+		@returns: True if background mode is enabled, False otherwise.
+		"""
+		background = (self._max_jobs is True or \
+			self._max_jobs > 1 or "--quiet" in self.myopts \
+			or "--quiet-build" in self.myopts) and \
+			not bool(self._opts_no_background.intersection(self.myopts))
+
+		if background:
+			interactive_tasks = self._get_interactive_tasks()
+			if interactive_tasks:
+				background = False
+				writemsg_level(">>> Sending package output to stdio due " + \
+					"to interactive package(s):\n",
+					level=logging.INFO, noiselevel=-1)
+				msg = [""]
+				for pkg in interactive_tasks:
+					pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
+					if pkg.root != "/":
+						pkg_str += " for " + pkg.root
+					msg.append(pkg_str)
+				msg.append("")
+				writemsg_level("".join("%s\n" % (l,) for l in msg),
+					level=logging.INFO, noiselevel=-1)
+				if self._max_jobs is True or self._max_jobs > 1:
+					self._set_max_jobs(1)
+					writemsg_level(">>> Setting --jobs=1 due " + \
+						"to the above interactive package(s)\n",
+						level=logging.INFO, noiselevel=-1)
+					writemsg_level(">>> In order to temporarily mask " + \
+						"interactive updates, you may\n" + \
+						">>> specify --accept-properties=-interactive\n",
+						level=logging.INFO, noiselevel=-1)
+		self._status_display.quiet = \
+			not background or \
+			("--quiet" in self.myopts and \
+			"--verbose" not in self.myopts)
+
+		self._logger.xterm_titles = \
+			"notitles" not in self.settings.features and \
+			self._status_display.quiet
+
+		return background
+
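+	# For instance, _background_mode() normally returns True for a
+	# --jobs=2 run, but a single interactive package in the merge list
+	# switches output back to stdio and forces --jobs=1, as the messages
+	# above explain.
+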
+	def _get_interactive_tasks(self):
+		interactive_tasks = []
+		for task in self._mergelist:
+			if not (isinstance(task, Package) and \
+				task.operation == "merge"):
+				continue
+			if 'interactive' in task.metadata.properties:
+				interactive_tasks.append(task)
+		return interactive_tasks
+
+	def _set_graph_config(self, graph_config):
+
+		if graph_config is None:
+			self._graph_config = None
+			self._pkg_cache = {}
+			self._digraph = None
+			self._mergelist = []
+			self._deep_system_deps.clear()
+			return
+
+		self._graph_config = graph_config
+		self._pkg_cache = graph_config.pkg_cache
+		self._digraph = graph_config.graph
+		self._mergelist = graph_config.mergelist
+
+		if "--nodeps" in self.myopts or \
+			(self._max_jobs is not True and self._max_jobs < 2):
+			# save some memory
+			self._digraph = None
+			graph_config.graph = None
+			graph_config.pkg_cache.clear()
+			self._deep_system_deps.clear()
+			for pkg in self._mergelist:
+				self._pkg_cache[pkg] = pkg
+			return
+
+		self._find_system_deps()
+		self._prune_digraph()
+		self._prevent_builddir_collisions()
+		if '--debug' in self.myopts:
+			writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
+			self._digraph.debug_print()
+			writemsg("\n", noiselevel=-1)
+
+	def _find_system_deps(self):
+		"""
+		Find system packages and their deep runtime dependencies. Before being
+		merged, these packages go to merge_wait_queue, to be merged when no
+		other packages are building.
+		NOTE: This can only find deep system deps if the system set has been
+		added to the graph and traversed deeply (the depgraph "complete"
+		parameter will do this, triggered by the emerge --complete-graph option).
+		"""
+		deep_system_deps = self._deep_system_deps
+		deep_system_deps.clear()
+		deep_system_deps.update(
+			_find_deep_system_runtime_deps(self._digraph))
+		deep_system_deps.difference_update([pkg for pkg in \
+			deep_system_deps if pkg.operation != "merge"])
+
+	def _prune_digraph(self):
+		"""
+		Prune any root nodes that are irrelevant.
+		"""
+
+		graph = self._digraph
+		completed_tasks = self._completed_tasks
+		removed_nodes = set()
+		while True:
+			for node in graph.root_nodes():
+				if not isinstance(node, Package) or \
+					(node.installed and node.operation == "nomerge") or \
+					node.onlydeps or \
+					node in completed_tasks:
+					removed_nodes.add(node)
+			if removed_nodes:
+				graph.difference_update(removed_nodes)
+			if not removed_nodes:
+				break
+			removed_nodes.clear()
+
+	def _prevent_builddir_collisions(self):
+		"""
+		When building stages, sometimes the same exact cpv needs to be merged
+		to both $ROOTs. Add edges to the digraph in order to avoid collisions
+		in the builddir. Currently, normal file locks would be inappropriate
+		for this purpose since emerge holds all of its build dir locks from
+		the main process.
+		"""
+		cpv_map = {}
+		for pkg in self._mergelist:
+			if not isinstance(pkg, Package):
+				# a satisfied blocker
+				continue
+			if pkg.installed:
+				continue
+			if pkg.cpv not in cpv_map:
+				cpv_map[pkg.cpv] = [pkg]
+				continue
+			for earlier_pkg in cpv_map[pkg.cpv]:
+				self._digraph.add(earlier_pkg, pkg,
+					priority=DepPriority(buildtime=True))
+			cpv_map[pkg.cpv].append(pkg)
+
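+	# A sketch of the collision that _prevent_builddir_collisions() above
+	# avoids (hypothetical mergelist): merging sys-libs/zlib-1.2.5 to
+	# both / and /tmp/stage3root would reuse the same builddir, so a
+	# buildtime edge makes the second merge wait for the first.
+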
+	class _pkg_failure(portage.exception.PortageException):
+		"""
+		An instance of this class is raised by unmerge() when
+		an uninstallation fails.
+		"""
+		status = 1
+		def __init__(self, *pargs):
+			portage.exception.PortageException.__init__(self, pargs)
+			if pargs:
+				self.status = pargs[0]
+
+	def _schedule_fetch(self, fetcher):
+		"""
+		Schedule a fetcher, in order to control the number of concurrent
+		fetchers. If self._max_jobs is greater than 1 then the fetch
+		queue is bypassed and the fetcher is started immediately,
+		otherwise it is added to the front of the parallel-fetch queue.
+		NOTE: The parallel-fetch queue is currently used to serialize
+		access to the parallel-fetch log, so changes in the log handling
+		would be required before it would be possible to enable
+		concurrent fetching within the parallel-fetch queue.
+		"""
+		if self._max_jobs > 1:
+			fetcher.start()
+		else:
+			self._task_queues.fetch.addFront(fetcher)
+
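+	# In effect, _schedule_fetch() above means: with --jobs greater than
+	# 1, fetchers start immediately alongside other jobs, while the
+	# default of a single job funnels them through the fetch queue so
+	# that only one download at a time writes to the parallel-fetch log.
+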
+	def _schedule_setup(self, setup_phase):
+		"""
+		Schedule a setup phase on the merge queue, in order to
+		serialize unsandboxed access to the live filesystem.
+		"""
+		if self._task_queues.merge.max_jobs > 1 and \
+			"ebuild-locks" in self.settings.features:
+			# Use a separate queue for ebuild-locks when the merge
+			# queue allows more than 1 job (due to parallel-install),
+			# since the portage.locks module does not behave as desired
+			# if we try to lock the same file multiple times
+			# concurrently from the same process.
+			self._task_queues.ebuild_locks.add(setup_phase)
+		else:
+			self._task_queues.merge.add(setup_phase)
+		self._schedule()
+
+	def _schedule_unpack(self, unpack_phase):
+		"""
+		Schedule an unpack phase on the unpack queue, in order
+		to serialize $DISTDIR access for live ebuilds.
+		"""
+		self._task_queues.unpack.add(unpack_phase)
+
+	def _find_blockers(self, new_pkg):
+		"""
+		Returns a callable.
+		"""
+		def get_blockers():
+			return self._find_blockers_impl(new_pkg)
+		return get_blockers
+
+	def _find_blockers_impl(self, new_pkg):
+		if self._opts_ignore_blockers.intersection(self.myopts):
+			return None
+
+		blocker_db = self._blocker_db[new_pkg.root]
+
+		blocker_dblinks = []
+		for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
+			if new_pkg.slot_atom == blocking_pkg.slot_atom:
+				continue
+			if new_pkg.cpv == blocking_pkg.cpv:
+				continue
+			blocker_dblinks.append(portage.dblink(
+				blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
+				self.pkgsettings[blocking_pkg.root], treetype="vartree",
+				vartree=self.trees[blocking_pkg.root]["vartree"]))
+
+		return blocker_dblinks
+
+	def _generate_digests(self):
+		"""
+		Generate digests if necessary for --digest or FEATURES=digest.
+		In order to avoid interference, this must be done before parallel
+		tasks are started.
+		"""
+
+		if '--fetchonly' in self.myopts:
+			return os.EX_OK
+
+		digest = '--digest' in self.myopts
+		if not digest:
+			for pkgsettings in self.pkgsettings.values():
+				if pkgsettings.mycpv is not None:
+					# ensure that we are using global features
+					# settings rather than those from package.env
+					pkgsettings.reset()
+				if 'digest' in pkgsettings.features:
+					digest = True
+					break
+
+		if not digest:
+			return os.EX_OK
+
+		for x in self._mergelist:
+			if not isinstance(x, Package) or \
+				x.type_name != 'ebuild' or \
+				x.operation != 'merge':
+				continue
+			pkgsettings = self.pkgsettings[x.root]
+			if pkgsettings.mycpv is not None:
+				# ensure that we are using global features
+				# settings rather than those from package.env
+				pkgsettings.reset()
+			if '--digest' not in self.myopts and \
+				'digest' not in pkgsettings.features:
+				continue
+			portdb = x.root_config.trees['porttree'].dbapi
+			ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+			if ebuild_path is None:
+				raise AssertionError("ebuild not found for '%s'" % x.cpv)
+			pkgsettings['O'] = os.path.dirname(ebuild_path)
+			if not digestgen(mysettings=pkgsettings, myportdb=portdb):
+				writemsg_level(
+					"!!! Unable to generate manifest for '%s'.\n" \
+					% x.cpv, level=logging.ERROR, noiselevel=-1)
+				return 1
+
+		return os.EX_OK
+
+	def _env_sanity_check(self):
+		"""
+		Verify a sane environment before trying to build anything from source.
+		"""
+		have_src_pkg = False
+		for x in self._mergelist:
+			if isinstance(x, Package) and not x.built:
+				have_src_pkg = True
+				break
+
+		if not have_src_pkg:
+			return os.EX_OK
+
+		for settings in self.pkgsettings.values():
+			for var in ("ARCH", ):
+				value = settings.get(var)
+				if value and value.strip():
+					continue
+				msg = _("%(var)s is not set... "
+					"Are you missing the '%(configroot)setc/make.profile' symlink? "
+					"Is the symlink correct? "
+					"Is your portage tree complete?") % \
+					{"var": var, "configroot": settings["PORTAGE_CONFIGROOT"]}
+
+				out = portage.output.EOutput()
+				for line in textwrap.wrap(msg, 70):
+					out.eerror(line)
+				return 1
+
+		return os.EX_OK
+
+	def _check_manifests(self):
+		# Verify all the manifests now so that the user is notified of failure
+		# as soon as possible.
+		if "strict" not in self.settings.features or \
+			"--fetchonly" in self.myopts or \
+			"--fetch-all-uri" in self.myopts:
+			return os.EX_OK
+
+		shown_verifying_msg = False
+		quiet_settings = {}
+		for myroot, pkgsettings in self.pkgsettings.items():
+			quiet_config = portage.config(clone=pkgsettings)
+			quiet_config["PORTAGE_QUIET"] = "1"
+			quiet_config.backup_changes("PORTAGE_QUIET")
+			quiet_settings[myroot] = quiet_config
+			del quiet_config
+
+		failures = 0
+
+		for x in self._mergelist:
+			if not isinstance(x, Package) or \
+				x.type_name != "ebuild":
+				continue
+
+			if x.operation == "uninstall":
+				continue
+
+			if not shown_verifying_msg:
+				shown_verifying_msg = True
+				self._status_msg("Verifying ebuild manifests")
+
+			root_config = x.root_config
+			portdb = root_config.trees["porttree"].dbapi
+			quiet_config = quiet_settings[root_config.root]
+			ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+			if ebuild_path is None:
+				raise AssertionError("ebuild not found for '%s'" % x.cpv)
+			quiet_config["O"] = os.path.dirname(ebuild_path)
+			if not digestcheck([], quiet_config, strict=True):
+				failures |= 1
+
+		if failures:
+			return 1
+		return os.EX_OK
+
+	def _add_prefetchers(self):
+
+		if not self._parallel_fetch:
+			return
+
+		self._status_msg("Starting parallel fetch")
+
+		prefetchers = self._prefetchers
+
+		for pkg in self._mergelist:
+			# mergelist can contain solved Blocker instances
+			if not isinstance(pkg, Package) or pkg.operation == "uninstall":
+				continue
+			prefetcher = self._create_prefetcher(pkg)
+			if prefetcher is not None:
+				self._task_queues.fetch.add(prefetcher)
+				prefetchers[pkg] = prefetcher
+
+		# Start the first prefetcher immediately so that self._task()
+		# won't discard it. This avoids a case where the first
+		# prefetcher is discarded, causing the second prefetcher to
+		# occupy the fetch queue before the first prefetcher has an
+		# opportunity to execute.
+		self._task_queues.fetch.schedule()
+
+	def _create_prefetcher(self, pkg):
+		"""
+		@return: a prefetcher, or None if not applicable
+		"""
+		prefetcher = None
+
+		if not isinstance(pkg, Package):
+			pass
+
+		elif pkg.type_name == "ebuild":
+
+			prefetcher = EbuildFetcher(background=True,
+				config_pool=self._ConfigPool(pkg.root,
+				self._allocate_config, self._deallocate_config),
+				fetchonly=1, logfile=self._fetch_log,
+				pkg=pkg, prefetch=True, scheduler=self._sched_iface)
+
+		elif pkg.type_name == "binary" and \
+			"--getbinpkg" in self.myopts and \
+			pkg.root_config.trees["bintree"].isremote(pkg.cpv):
+
+			prefetcher = BinpkgPrefetcher(background=True,
+				pkg=pkg, scheduler=self._sched_iface)
+
+		return prefetcher
+
+	def _is_restart_scheduled(self):
+		"""
+		Check if the merge list contains a replacement
+		for the currently running instance that will
+		result in a restart after it is merged.
+		@rtype: bool
+		@returns: True if a restart is scheduled, False otherwise.
+		"""
+		if self._opts_no_restart.intersection(self.myopts):
+			return False
+
+		mergelist = self._mergelist
+
+		for i, pkg in enumerate(mergelist):
+			if self._is_restart_necessary(pkg) and \
+				i != len(mergelist) - 1:
+				return True
+
+		return False
+
+	def _is_restart_necessary(self, pkg):
+		"""
+		@return: True if merging the given package
+			requires restart, False otherwise.
+		"""
+
+		# Figure out if we need a restart.
+		if pkg.root == self._running_root.root and \
+			portage.match_from_list(
+			portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
+			if self._running_portage is None:
+				return True
+			elif pkg.cpv != self._running_portage.cpv or \
+				'9999' in pkg.cpv or \
+				'git' in pkg.inherited or \
+				'git-2' in pkg.inherited:
+				return True
+		return False
+
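+	# For example (hypothetical versions): if sys-apps/portage-2.1.10 is
+	# currently running and the merge list upgrades it to
+	# sys-apps/portage-2.2.0 with more packages still to merge, a restart
+	# is necessary so the remaining merges run under the new version.
+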
+	def _restart_if_necessary(self, pkg):
+		"""
+		Use execv() to restart emerge. This happens
+		if portage upgrades itself and there are
+		remaining packages in the list.
+		"""
+
+		if self._opts_no_restart.intersection(self.myopts):
+			return
+
+		if not self._is_restart_necessary(pkg):
+			return
+
+		if pkg == self._mergelist[-1]:
+			return
+
+		self._main_loop_cleanup()
+
+		logger = self._logger
+		pkg_count = self._pkg_count
+		mtimedb = self._mtimedb
+		bad_resume_opts = self._bad_resume_opts
+
+		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
+			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+		logger.log(" *** RESTARTING " + \
+			"emerge via exec() after change of " + \
+			"portage version.")
+
+		mtimedb["resume"]["mergelist"].remove(list(pkg))
+		mtimedb.commit()
+		portage.run_exitfuncs()
+		# Don't trust sys.argv[0] here because eselect-python may modify it.
+		emerge_binary = os.path.join(portage.const.PORTAGE_BIN_PATH, 'emerge')
+		mynewargv = [emerge_binary, "--resume"]
+		resume_opts = self.myopts.copy()
+		# For automatic resume, we need to prevent
+		# any of bad_resume_opts from leaking in
+		# via EMERGE_DEFAULT_OPTS.
+		resume_opts["--ignore-default-opts"] = True
+		for myopt, myarg in resume_opts.items():
+			if myopt not in bad_resume_opts:
+				if myarg is True:
+					mynewargv.append(myopt)
+				elif isinstance(myarg, list):
+					# arguments like --exclude that use 'append' action
+					for x in myarg:
+						mynewargv.append("%s=%s" % (myopt, x))
+				else:
+					mynewargv.append("%s=%s" % (myopt, myarg))
+		# priority only needs to be adjusted on the first run
+		os.environ["PORTAGE_NICENESS"] = "0"
+		os.execv(mynewargv[0], mynewargv)
+
+	def _run_pkg_pretend(self):
+		"""
+		Since pkg_pretend output may be important, this method sends all
+		output directly to stdout (regardless of options like --quiet or
+		--jobs).
+		"""
+
+		failures = 0
+
+		# Use a local PollScheduler instance here, since we don't
+		# want tasks here to trigger the usual Scheduler callbacks
+		# that handle job scheduling and status display.
+		sched_iface = PollScheduler().sched_iface
+
+		for x in self._mergelist:
+			if not isinstance(x, Package):
+				continue
+
+			if x.operation == "uninstall":
+				continue
+
+			if x.metadata["EAPI"] in ("0", "1", "2", "3"):
+				continue
+
+			if "pretend" not in x.metadata.defined_phases:
+				continue
+
+			out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
+			portage.util.writemsg_stdout(out_str, noiselevel=-1)
+
+			root_config = x.root_config
+			settings = self.pkgsettings[root_config.root]
+			settings.setcpv(x)
+			tmpdir = tempfile.mkdtemp()
+			tmpdir_orig = settings["PORTAGE_TMPDIR"]
+			settings["PORTAGE_TMPDIR"] = tmpdir
+
+			try:
+				if x.built:
+					tree = "bintree"
+					bintree = root_config.trees["bintree"].dbapi.bintree
+					fetched = False
+
+					# Display fetch on stdout, so that it's always clear what
+					# is consuming time here.
+					if bintree.isremote(x.cpv):
+						fetcher = BinpkgFetcher(pkg=x,
+							scheduler=sched_iface)
+						fetcher.start()
+						if fetcher.wait() != os.EX_OK:
+							failures += 1
+							continue
+						fetched = fetcher.pkg_path
+
+					verifier = BinpkgVerifier(pkg=x,
+						scheduler=sched_iface)
+					verifier.start()
+					if verifier.wait() != os.EX_OK:
+						failures += 1
+						continue
+
+					if fetched:
+						bintree.inject(x.cpv, filename=fetched)
+					tbz2_file = bintree.getname(x.cpv)
+					infloc = os.path.join(tmpdir, x.category, x.pf, "build-info")
+					os.makedirs(infloc)
+					portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
+					ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+					settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+					settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+				else:
+					tree = "porttree"
+					portdb = root_config.trees["porttree"].dbapi
+					ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+					if ebuild_path is None:
+						raise AssertionError("ebuild not found for '%s'" % x.cpv)
+					settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+					if self._build_opts.buildpkgonly:
+						settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
+					else:
+						settings.configdict["pkg"]["MERGE_TYPE"] = "source"
+
+				portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
+					"pretend", settings=settings,
+					db=self.trees[settings["ROOT"]][tree].dbapi)
+				prepare_build_dirs(root_config.root, settings, cleanup=0)
+
+				vardb = root_config.trees['vartree'].dbapi
+				settings["REPLACING_VERSIONS"] = " ".join(
+					set(portage.versions.cpv_getversion(match) \
+						for match in vardb.match(x.slot_atom) + \
+						vardb.match('='+x.cpv)))
+				pretend_phase = EbuildPhase(
+					phase="pretend", scheduler=sched_iface,
+					settings=settings)
+
+				pretend_phase.start()
+				ret = pretend_phase.wait()
+				if ret != os.EX_OK:
+					failures += 1
+				portage.elog.elog_process(x.cpv, settings)
+			finally:
+				shutil.rmtree(tmpdir)
+				settings["PORTAGE_TMPDIR"] = tmpdir_orig
+
+		if failures:
+			return 1
+		return os.EX_OK
+
+	def merge(self):
+		if "--resume" in self.myopts:
+			# We're resuming.
+			portage.writemsg_stdout(
+				colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
+			self._logger.log(" *** Resuming merge...")
+
+		self._save_resume_list()
+
+		try:
+			self._background = self._background_mode()
+		except self._unknown_internal_error:
+			return 1
+
+		for root in self.trees:
+			root_config = self.trees[root]["root_config"]
+
+			# Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
+			# since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
+			# for ensuring sane $PWD (bug #239560) and storing elog messages.
+			tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
+			if not tmpdir or not os.path.isdir(tmpdir):
+				msg = "The directory specified in your " + \
+					"PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
+				"does not exist. Please create this " + \
+				"directory or correct your PORTAGE_TMPDIR setting."
+				msg = textwrap.wrap(msg, 70)
+				out = portage.output.EOutput()
+				for l in msg:
+					out.eerror(l)
+				return 1
+
+			if self._background:
+				root_config.settings.unlock()
+				root_config.settings["PORTAGE_BACKGROUND"] = "1"
+				root_config.settings.backup_changes("PORTAGE_BACKGROUND")
+				root_config.settings.lock()
+
+			self.pkgsettings[root] = portage.config(
+				clone=root_config.settings)
+
+		keep_going = "--keep-going" in self.myopts
+		fetchonly = self._build_opts.fetchonly
+		mtimedb = self._mtimedb
+		failed_pkgs = self._failed_pkgs
+
+		rval = self._generate_digests()
+		if rval != os.EX_OK:
+			return rval
+
+		rval = self._env_sanity_check()
+		if rval != os.EX_OK:
+			return rval
+
+		# TODO: Immediately recalculate deps here if --keep-going
+		#       is enabled and corrupt manifests are detected.
+		rval = self._check_manifests()
+		if rval != os.EX_OK and not keep_going:
+			return rval
+
+		if not fetchonly:
+			rval = self._run_pkg_pretend()
+			if rval != os.EX_OK:
+				return rval
+
+		while True:
+
+			received_signal = []
+
+			def sighandler(signum, frame):
+				signal.signal(signal.SIGINT, signal.SIG_IGN)
+				signal.signal(signal.SIGTERM, signal.SIG_IGN)
+				portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
+					{"signal":signum})
+				self.terminate()
+				received_signal.append(128 + signum)
+
+			earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+			earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+
+			try:
+				rval = self._merge()
+			finally:
+				# Restore previous handlers
+				if earlier_sigint_handler is not None:
+					signal.signal(signal.SIGINT, earlier_sigint_handler)
+				else:
+					signal.signal(signal.SIGINT, signal.SIG_DFL)
+				if earlier_sigterm_handler is not None:
+					signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+				else:
+					signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+			if received_signal:
+				sys.exit(received_signal[0])
+
+			if rval == os.EX_OK or fetchonly or not keep_going:
+				break
+			if "resume" not in mtimedb:
+				break
+			mergelist = self._mtimedb["resume"].get("mergelist")
+			if not mergelist:
+				break
+
+			if not failed_pkgs:
+				break
+
+			for failed_pkg in failed_pkgs:
+				mergelist.remove(list(failed_pkg.pkg))
+
+			self._failed_pkgs_all.extend(failed_pkgs)
+			del failed_pkgs[:]
+
+			if not mergelist:
+				break
+
+			if not self._calc_resume_list():
+				break
+
+			clear_caches(self.trees)
+			if not self._mergelist:
+				break
+
+			self._save_resume_list()
+			self._pkg_count.curval = 0
+			self._pkg_count.maxval = len([x for x in self._mergelist \
+				if isinstance(x, Package) and x.operation == "merge"])
+			self._status_display.maxval = self._pkg_count.maxval
+
+		self._logger.log(" *** Finished. Cleaning up...")
+
+		if failed_pkgs:
+			self._failed_pkgs_all.extend(failed_pkgs)
+			del failed_pkgs[:]
+
+		printer = portage.output.EOutput()
+		background = self._background
+		failure_log_shown = False
+		if background and len(self._failed_pkgs_all) == 1:
+			# If only one package failed then just show its
+			# whole log for easy viewing.
+			failed_pkg = self._failed_pkgs_all[-1]
+			log_file = None
+
+			log_path = self._locate_failure_log(failed_pkg)
+			if log_path is not None:
+				try:
+					log_file = open(_unicode_encode(log_path,
+						encoding=_encodings['fs'], errors='strict'), mode='rb')
+				except IOError:
+					pass
+				else:
+					if log_path.endswith('.gz'):
+						log_file = gzip.GzipFile(filename='',
+							mode='rb', fileobj=log_file)
+
+			if log_file is not None:
+				try:
+					for line in log_file:
+						writemsg_level(line, noiselevel=-1)
+				except zlib.error as e:
+					writemsg_level("%s\n" % (e,), level=logging.ERROR,
+						noiselevel=-1)
+				finally:
+					log_file.close()
+				failure_log_shown = True
+
+		# Dump mod_echo output now since it tends to flood the terminal.
+		# This prevents more important output, generated later, from being
+		# swept away by the mod_echo output.
+		mod_echo_output = _flush_elog_mod_echo()
+
+		if background and not failure_log_shown and \
+			self._failed_pkgs_all and \
+			self._failed_pkgs_die_msgs and \
+			not mod_echo_output:
+
+			for mysettings, key, logentries in self._failed_pkgs_die_msgs:
+				root_msg = ""
+				if mysettings["ROOT"] != "/":
+					root_msg = " merged to %s" % mysettings["ROOT"]
+				print()
+				printer.einfo("Error messages for package %s%s:" % \
+					(colorize("INFORM", key), root_msg))
+				print()
+				for phase in portage.const.EBUILD_PHASES:
+					if phase not in logentries:
+						continue
+					for msgtype, msgcontent in logentries[phase]:
+						if isinstance(msgcontent, basestring):
+							msgcontent = [msgcontent]
+						for line in msgcontent:
+							printer.eerror(line.strip("\n"))
+
+		if self._post_mod_echo_msgs:
+			for msg in self._post_mod_echo_msgs:
+				msg()
+
+		if len(self._failed_pkgs_all) > 1 or \
+			(self._failed_pkgs_all and keep_going):
+			if len(self._failed_pkgs_all) > 1:
+				msg = "The following %d packages have " % \
+					len(self._failed_pkgs_all) + \
+					"failed to build or install:"
+			else:
+				msg = "The following package has " + \
+					"failed to build or install:"
+
+			printer.eerror("")
+			for line in textwrap.wrap(msg, 72):
+				printer.eerror(line)
+			printer.eerror("")
+			for failed_pkg in self._failed_pkgs_all:
+				# Use _unicode_decode() to force unicode format string so
+				# that Package.__unicode__() is called in python2.
+				msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
+				log_path = self._locate_failure_log(failed_pkg)
+				if log_path is not None:
+					msg += ", Log file:"
+				printer.eerror(msg)
+				if log_path is not None:
+					printer.eerror("  '%s'" % colorize('INFORM', log_path))
+			printer.eerror("")
+
+		if self._failed_pkgs_all:
+			return 1
+		return os.EX_OK
+
+	def _elog_listener(self, mysettings, key, logentries, fulltext):
+		errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
+		if errors:
+			self._failed_pkgs_die_msgs.append(
+				(mysettings, key, errors))
+
+	def _locate_failure_log(self, failed_pkg):
+
+		log_paths = [failed_pkg.build_log]
+
+		for log_path in log_paths:
+			if not log_path:
+				continue
+
+			try:
+				log_size = os.stat(log_path).st_size
+			except OSError:
+				continue
+
+			if log_size == 0:
+				continue
+
+			return log_path
+
+		return None
+
+	def _add_packages(self):
+		pkg_queue = self._pkg_queue
+		for pkg in self._mergelist:
+			if isinstance(pkg, Package):
+				pkg_queue.append(pkg)
+			elif isinstance(pkg, Blocker):
+				pass
+
+	def _system_merge_started(self, merge):
+		"""
+		Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
+		In general, this keeps track of installed system packages with
+		unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
+		a fragile situation, so we don't execute any unrelated builds until
+		the circular dependencies are built and installed.
+		"""
+		graph = self._digraph
+		if graph is None:
+			return
+		pkg = merge.merge.pkg
+
+		# Skip this if $ROOT != / since it shouldn't matter if there
+		# are unsatisfied system runtime deps in this case.
+		if pkg.root != '/':
+			return
+
+		completed_tasks = self._completed_tasks
+		unsatisfied = self._unsatisfied_system_deps
+
+		def ignore_non_runtime_or_satisfied(priority):
+			"""
+			Ignore non-runtime and satisfied runtime priorities.
+			"""
+			if isinstance(priority, DepPriority) and \
+				not priority.satisfied and \
+				(priority.runtime or priority.runtime_post):
+				return False
+			return True
+
+		# When checking for unsatisfied runtime deps, only check
+		# direct deps since indirect deps are checked when the
+		# corresponding parent is merged.
+		for child in graph.child_nodes(pkg,
+			ignore_priority=ignore_non_runtime_or_satisfied):
+			if not isinstance(child, Package) or \
+				child.operation == 'uninstall':
+				continue
+			if child is pkg:
+				continue
+			if child.operation == 'merge' and \
+				child not in completed_tasks:
+				unsatisfied.add(child)
+
+	def _merge_wait_exit_handler(self, task):
+		self._merge_wait_scheduled.remove(task)
+		self._merge_exit(task)
+
+	def _merge_exit(self, merge):
+		self._running_tasks.pop(id(merge), None)
+		self._do_merge_exit(merge)
+		self._deallocate_config(merge.merge.settings)
+		if merge.returncode == os.EX_OK and \
+			not merge.merge.pkg.installed:
+			self._status_display.curval += 1
+		self._status_display.merges = len(self._task_queues.merge)
+		self._schedule()
+
+	def _do_merge_exit(self, merge):
+		pkg = merge.merge.pkg
+		if merge.returncode != os.EX_OK:
+			settings = merge.merge.settings
+			build_dir = settings.get("PORTAGE_BUILDDIR")
+			build_log = settings.get("PORTAGE_LOG_FILE")
+
+			self._failed_pkgs.append(self._failed_pkg(
+				build_dir=build_dir, build_log=build_log,
+				pkg=pkg,
+				returncode=merge.returncode))
+			if not self._terminated_tasks:
+				self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
+				self._status_display.failed = len(self._failed_pkgs)
+			return
+
+		self._task_complete(pkg)
+		pkg_to_replace = merge.merge.pkg_to_replace
+		if pkg_to_replace is not None:
+			# When a package is replaced, mark its uninstall
+			# task complete (if any).
+			if self._digraph is not None and \
+				pkg_to_replace in self._digraph:
+				try:
+					self._pkg_queue.remove(pkg_to_replace)
+				except ValueError:
+					pass
+				self._task_complete(pkg_to_replace)
+			else:
+				self._pkg_cache.pop(pkg_to_replace, None)
+
+		if pkg.installed:
+			return
+
+		self._restart_if_necessary(pkg)
+
+		# Call mtimedb.commit() after each merge so that
+		# --resume still works after being interrupted
+		# by reboot, sigkill or similar.
+		mtimedb = self._mtimedb
+		mtimedb["resume"]["mergelist"].remove(list(pkg))
+		if not mtimedb["resume"]["mergelist"]:
+			del mtimedb["resume"]
+		mtimedb.commit()
+
+	def _build_exit(self, build):
+		self._running_tasks.pop(id(build), None)
+		if build.returncode == os.EX_OK and self._terminated_tasks:
+			# We've been interrupted, so we won't
+			# add this to the merge queue.
+			self.curval += 1
+			self._deallocate_config(build.settings)
+		elif build.returncode == os.EX_OK:
+			self.curval += 1
+			merge = PackageMerge(merge=build)
+			self._running_tasks[id(merge)] = merge
+			if not build.build_opts.buildpkgonly and \
+				build.pkg in self._deep_system_deps:
+				# Since dependencies on system packages are frequently
+				# unspecified, merge them only when no builds are executing.
+				self._merge_wait_queue.append(merge)
+				merge.addStartListener(self._system_merge_started)
+			else:
+				merge.addExitListener(self._merge_exit)
+				self._task_queues.merge.add(merge)
+				self._status_display.merges = len(self._task_queues.merge)
+		else:
+			settings = build.settings
+			build_dir = settings.get("PORTAGE_BUILDDIR")
+			build_log = settings.get("PORTAGE_LOG_FILE")
+
+			self._failed_pkgs.append(self._failed_pkg(
+				build_dir=build_dir, build_log=build_log,
+				pkg=build.pkg,
+				returncode=build.returncode))
+			if not self._terminated_tasks:
+				self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
+				self._status_display.failed = len(self._failed_pkgs)
+			self._deallocate_config(build.settings)
+		self._jobs -= 1
+		self._status_display.running = self._jobs
+		self._schedule()
+
+	def _extract_exit(self, build):
+		self._build_exit(build)
+
+	def _task_complete(self, pkg):
+		self._completed_tasks.add(pkg)
+		self._unsatisfied_system_deps.discard(pkg)
+		self._choose_pkg_return_early = False
+		blocker_db = self._blocker_db[pkg.root]
+		blocker_db.discardBlocker(pkg)
+
+	def _merge(self):
+
+		self._add_prefetchers()
+		self._add_packages()
+		pkg_queue = self._pkg_queue
+		failed_pkgs = self._failed_pkgs
+		portage.locks._quiet = self._background
+		portage.elog.add_listener(self._elog_listener)
+		rval = os.EX_OK
+
+		try:
+			self._main_loop()
+		finally:
+			self._main_loop_cleanup()
+			portage.locks._quiet = False
+			portage.elog.remove_listener(self._elog_listener)
+			if failed_pkgs:
+				rval = failed_pkgs[-1].returncode
+
+		return rval
+
+	def _main_loop_cleanup(self):
+		del self._pkg_queue[:]
+		self._completed_tasks.clear()
+		self._deep_system_deps.clear()
+		self._unsatisfied_system_deps.clear()
+		self._choose_pkg_return_early = False
+		self._status_display.reset()
+		self._digraph = None
+		self._task_queues.fetch.clear()
+		self._prefetchers.clear()
+
+	def _choose_pkg(self):
+		"""
+		Choose a task that has all its dependencies satisfied. This is used
+		for parallel build scheduling, and ensures that we don't build
+		anything with deep dependencies that have yet to be merged.
+		"""
+
+		if self._choose_pkg_return_early:
+			return None
+
+		if self._digraph is None:
+			if self._is_work_scheduled() and \
+				not ("--nodeps" in self.myopts and \
+				(self._max_jobs is True or self._max_jobs > 1)):
+				self._choose_pkg_return_early = True
+				return None
+			return self._pkg_queue.pop(0)
+
+		if not self._is_work_scheduled():
+			return self._pkg_queue.pop(0)
+
+		self._prune_digraph()
+
+		chosen_pkg = None
+
+		# Prefer uninstall operations when available.
+		graph = self._digraph
+		for pkg in self._pkg_queue:
+			if pkg.operation == 'uninstall' and \
+				not graph.child_nodes(pkg):
+				chosen_pkg = pkg
+				break
+
+		if chosen_pkg is None:
+			later = set(self._pkg_queue)
+			for pkg in self._pkg_queue:
+				later.remove(pkg)
+				if not self._dependent_on_scheduled_merges(pkg, later):
+					chosen_pkg = pkg
+					break
+
+		if chosen_pkg is not None:
+			self._pkg_queue.remove(chosen_pkg)
+
+		if chosen_pkg is None:
+			# There's no point in searching for a package to
+			# choose until at least one of the existing jobs
+			# completes.
+			self._choose_pkg_return_early = True
+
+		return chosen_pkg
+
+	def _dependent_on_scheduled_merges(self, pkg, later):
+		"""
+		Traverse the subgraph of the given package's deep dependencies
+		to see if it contains any scheduled merges.
+		@param pkg: a package to check dependencies for
+		@type pkg: Package
+		@param later: packages for which dependence should be ignored
+			since they will be merged later than pkg anyway and therefore
+			delaying the merge of pkg will not result in a more optimal
+			merge order
+		@type later: set
+		@rtype: bool
+		@returns: True if the package is dependent, False otherwise.
+		"""
+
+		graph = self._digraph
+		completed_tasks = self._completed_tasks
+
+		dependent = False
+		traversed_nodes = set([pkg])
+		direct_deps = graph.child_nodes(pkg)
+		node_stack = direct_deps
+		direct_deps = frozenset(direct_deps)
+		while node_stack:
+			node = node_stack.pop()
+			if node in traversed_nodes:
+				continue
+			traversed_nodes.add(node)
+			if not ((node.installed and node.operation == "nomerge") or \
+				(node.operation == "uninstall" and \
+				node not in direct_deps) or \
+				node in completed_tasks or \
+				node in later):
+				dependent = True
+				break
+
+			# Don't traverse children of uninstall nodes since
+			# those aren't dependencies in the usual sense.
+			if node.operation != "uninstall":
+				node_stack.extend(graph.child_nodes(node))
+
+		return dependent
+
+	def _allocate_config(self, root):
+		"""
+		Allocate a unique config instance for a task in order
+		to prevent interference between parallel tasks.
+		"""
+		if self._config_pool[root]:
+			temp_settings = self._config_pool[root].pop()
+		else:
+			temp_settings = portage.config(clone=self.pkgsettings[root])
+		# Since config.setcpv() isn't guaranteed to call config.reset()
+		# for performance reasons, call it here to make sure all settings
+		# from the previous package get flushed out (such as PORTAGE_LOG_FILE).
+		temp_settings.reload()
+		temp_settings.reset()
+		return temp_settings
+
+	def _deallocate_config(self, settings):
+		self._config_pool[settings["ROOT"]].append(settings)
+
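+	# Note: this pool trades memory for speed. Cloning a portage.config
+	# instance is relatively expensive, so settings objects returned by
+	# finished tasks are kept in _config_pool and handed out again by
+	# _allocate_config() instead of being cloned from scratch.
+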
+	def _main_loop(self):
+
+		# Only allow 1 job max if a restart is scheduled
+		# due to portage update.
+		if self._is_restart_scheduled() or \
+			self._opts_no_background.intersection(self.myopts):
+			self._set_max_jobs(1)
+
+		while self._schedule():
+			self._poll_loop()
+
+		while True:
+			self._schedule()
+			if not self._is_work_scheduled():
+				break
+			self._poll_loop()
+
+	def _keep_scheduling(self):
+		return bool(not self._terminated_tasks and self._pkg_queue and \
+			not (self._failed_pkgs and not self._build_opts.fetchonly))
+
+	def _is_work_scheduled(self):
+		return bool(self._running_tasks)
+
+	def _schedule_tasks(self):
+
+		while True:
+
+			# When the number of jobs and merges drops to zero,
+			# process a single merge from _merge_wait_queue if
+			# it's not empty. We only process one since these are
+			# special packages and we want to ensure that
+			# parallel-install does not cause more than one of
+			# them to install at the same time.
+			if (self._merge_wait_queue and not self._jobs and
+				not self._task_queues.merge):
+				task = self._merge_wait_queue.popleft()
+				task.addExitListener(self._merge_wait_exit_handler)
+				self._task_queues.merge.add(task)
+				self._status_display.merges = len(self._task_queues.merge)
+				self._merge_wait_scheduled.append(task)
+
+			self._schedule_tasks_imp()
+			self._status_display.display()
+
+			state_change = 0
+			for q in self._task_queues.values():
+				if q.schedule():
+					state_change += 1
+
+			# Cancel prefetchers if they're the only reason
+			# the main poll loop is still running.
+			if self._failed_pkgs and not self._build_opts.fetchonly and \
+				not self._is_work_scheduled() and \
+				self._task_queues.fetch:
+				self._task_queues.fetch.clear()
+				state_change += 1
+
+			if not (state_change or \
+				(self._merge_wait_queue and not self._jobs and
+				not self._task_queues.merge)):
+				break
+
+		return self._keep_scheduling()
+
+	def _job_delay(self):
+		"""
+		@rtype: bool
+		@returns: True if job scheduling should be delayed, False otherwise.
+		"""
+
+		if self._jobs and self._max_load is not None:
+
+			current_time = time.time()
+
+			delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
+			if delay > self._job_delay_max:
+				delay = self._job_delay_max
+			if (current_time - self._previous_job_start_time) < delay:
+				return True
+
+		return False
+
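+	# Worked example for the delay computed above: with the initializer
+	# defaults _job_delay_factor=1.0 and _job_delay_exp=1.5, 4 running
+	# jobs give a delay of 1.0 * 4 ** 1.5 = 8 seconds, while 5 or more
+	# jobs are capped at _job_delay_max = 10 seconds.
+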
+	def _schedule_tasks_imp(self):
+		"""
+		@rtype: bool
+		@returns: True if state changed, False otherwise.
+		"""
+
+		state_change = 0
+
+		while True:
+
+			if not self._keep_scheduling():
+				return bool(state_change)
+
+			if self._choose_pkg_return_early or \
+				self._merge_wait_scheduled or \
+				(self._jobs and self._unsatisfied_system_deps) or \
+				not self._can_add_job() or \
+				self._job_delay():
+				return bool(state_change)
+
+			pkg = self._choose_pkg()
+			if pkg is None:
+				return bool(state_change)
+
+			state_change += 1
+
+			if not pkg.installed:
+				self._pkg_count.curval += 1
+
+			task = self._task(pkg)
+
+			if pkg.installed:
+				merge = PackageMerge(merge=task)
+				self._running_tasks[id(merge)] = merge
+				merge.addExitListener(self._merge_exit)
+				self._task_queues.merge.addFront(merge)
+
+			elif pkg.built:
+				self._jobs += 1
+				self._previous_job_start_time = time.time()
+				self._status_display.running = self._jobs
+				self._running_tasks[id(task)] = task
+				task.addExitListener(self._extract_exit)
+				self._task_queues.jobs.add(task)
+
+			else:
+				self._jobs += 1
+				self._previous_job_start_time = time.time()
+				self._status_display.running = self._jobs
+				self._running_tasks[id(task)] = task
+				task.addExitListener(self._build_exit)
+				self._task_queues.jobs.add(task)
+
+		return bool(state_change)
+
+	def _task(self, pkg):
+
+		pkg_to_replace = None
+		if pkg.operation != "uninstall":
+			vardb = pkg.root_config.trees["vartree"].dbapi
+			previous_cpv = [x for x in vardb.match(pkg.slot_atom) \
+				if portage.cpv_getkey(x) == pkg.cp]
+			if not previous_cpv and vardb.cpv_exists(pkg.cpv):
+				# same cpv, different SLOT
+				previous_cpv = [pkg.cpv]
+			if previous_cpv:
+				previous_cpv = previous_cpv.pop()
+				pkg_to_replace = self._pkg(previous_cpv,
+					"installed", pkg.root_config, installed=True,
+					operation="uninstall")
+
+		prefetcher = self._prefetchers.pop(pkg, None)
+		if prefetcher is not None and not prefetcher.isAlive():
+			try:
+				self._task_queues.fetch._task_queue.remove(prefetcher)
+			except ValueError:
+				pass
+			prefetcher = None
+
+		task = MergeListItem(args_set=self._args_set,
+			background=self._background, binpkg_opts=self._binpkg_opts,
+			build_opts=self._build_opts,
+			config_pool=self._ConfigPool(pkg.root,
+			self._allocate_config, self._deallocate_config),
+			emerge_opts=self.myopts,
+			find_blockers=self._find_blockers(pkg), logger=self._logger,
+			mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
+			pkg_to_replace=pkg_to_replace,
+			prefetcher=prefetcher,
+			scheduler=self._sched_iface,
+			settings=self._allocate_config(pkg.root),
+			statusMessage=self._status_msg,
+			world_atom=self._world_atom)
+
+		return task
+
+	def _failed_pkg_msg(self, failed_pkg, action, preposition):
+		pkg = failed_pkg.pkg
+		msg = "%s to %s %s" % \
+			(bad("Failed"), action, colorize("INFORM", pkg.cpv))
+		if pkg.root != "/":
+			msg += " %s %s" % (preposition, pkg.root)
+
+		log_path = self._locate_failure_log(failed_pkg)
+		if log_path is not None:
+			msg += ", Log file:"
+		self._status_msg(msg)
+
+		if log_path is not None:
+			self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
+
+	def _status_msg(self, msg):
+		"""
+		Display a brief status message (no newlines) in the status display.
+		This is called by tasks to provide feedback to the user. It
+		delegates the responsibility of generating \r and \n control
+		characters to the status display, which guarantees that lines
+		are created or erased when necessary and appropriate.
+
+		@type msg: str
+		@param msg: a brief status message (no newlines allowed)
+		"""
+		if not self._background:
+			writemsg_level("\n")
+		self._status_display.displayMessage(msg)
+
+	def _save_resume_list(self):
+		"""
+		Do this before verifying the ebuild Manifests since it might
+		be possible for the user to use --resume --skipfirst to get past
+		a non-essential package with a broken digest.
+		"""
+		mtimedb = self._mtimedb
+
+		mtimedb["resume"] = {}
+		# Stored as a dict starting with portage-2.1.6_rc1, and supported
+		# by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
+		# a list type for options.
+		mtimedb["resume"]["myopts"] = self.myopts.copy()
+
+		# Convert Atom instances to plain str.
+		mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
+		mtimedb["resume"]["mergelist"] = [list(x) \
+			for x in self._mergelist \
+			if isinstance(x, Package) and x.operation == "merge"]
+
+		mtimedb.commit()
+
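+	# For illustration, the resume entry saved above has roughly this
+	# shape (a sketch with made-up values, not taken from a real
+	# mtimedb):
+	#
+	#   mtimedb["resume"] = {
+	#       "myopts": {"--jobs": 2, "--keep-going": True},
+	#       "favorites": ["app-editors/vim"],
+	#       "mergelist": [["ebuild", "/", "app-editors/vim-7.3", "merge"]],
+	#   }
+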
+	def _calc_resume_list(self):
+		"""
+		Use the current resume list to calculate a new one,
+		dropping any packages with unsatisfied deps.
+		@rtype: bool
+		@returns: True if successful, False otherwise.
+		"""
+		print(colorize("GOOD", "*** Resuming merge..."))
+
+		# free some memory before creating
+		# the resume depgraph
+		self._destroy_graph()
+
+		myparams = create_depgraph_params(self.myopts, None)
+		success = False
+		e = None
+		try:
+			success, mydepgraph, dropped_tasks = resume_depgraph(
+				self.settings, self.trees, self._mtimedb, self.myopts,
+				myparams, self._spinner)
+		except depgraph.UnsatisfiedResumeDep as exc:
+			# rename variable to avoid python-3.0 error:
+			# SyntaxError: can not delete variable 'e' referenced in nested
+			#              scope
+			e = exc
+			mydepgraph = e.depgraph
+			dropped_tasks = set()
+
+		if e is not None:
+			def unsatisfied_resume_dep_msg():
+				mydepgraph.display_problems()
+				out = portage.output.EOutput()
+				out.eerror("One or more packages are either masked or " + \
+					"have missing dependencies:")
+				out.eerror("")
+				indent = "  "
+				show_parents = set()
+				for dep in e.value:
+					if dep.parent in show_parents:
+						continue
+					show_parents.add(dep.parent)
+					if dep.atom is None:
+						out.eerror(indent + "Masked package:")
+						out.eerror(2 * indent + str(dep.parent))
+						out.eerror("")
+					else:
+						out.eerror(indent + str(dep.atom) + " pulled in by:")
+						out.eerror(2 * indent + str(dep.parent))
+						out.eerror("")
+				msg = "The resume list contains packages " + \
+					"that are either masked or have " + \
+					"unsatisfied dependencies. " + \
+					"Please restart/continue " + \
+					"the operation manually, or use --skipfirst " + \
+					"to skip the first package in the list and " + \
+					"any other packages that may be " + \
+					"masked or have missing dependencies."
+				for line in textwrap.wrap(msg, 72):
+					out.eerror(line)
+			self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
+			return False
+
+		if success and self._show_list():
+			mylist = mydepgraph.altlist()
+			if mylist:
+				if "--tree" in self.myopts:
+					mylist.reverse()
+				mydepgraph.display(mylist, favorites=self._favorites)
+
+		if not success:
+			self._post_mod_echo_msgs.append(mydepgraph.display_problems)
+			return False
+		mydepgraph.display_problems()
+		self._init_graph(mydepgraph.schedulerGraph())
+
+		msg_width = 75
+		for task in dropped_tasks:
+			if not (isinstance(task, Package) and task.operation == "merge"):
+				continue
+			pkg = task
+			msg = "emerge --keep-going:" + \
+				" %s" % (pkg.cpv,)
+			if pkg.root != "/":
+				msg += " for %s" % (pkg.root,)
+			msg += " dropped due to unsatisfied dependency."
+			for line in textwrap.wrap(msg, msg_width):
+				eerror(line, phase="other", key=pkg.cpv)
+			settings = self.pkgsettings[pkg.root]
+			# Ensure that log collection from $T is disabled inside
+			# elog_process(), since any logs that might exist are
+			# not valid here.
+			settings.pop("T", None)
+			portage.elog.elog_process(pkg.cpv, settings)
+			self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
+
+		return True
+
+	def _show_list(self):
+		myopts = self.myopts
+		if "--quiet" not in myopts and \
+			("--ask" in myopts or "--tree" in myopts or \
+			"--verbose" in myopts):
+			return True
+		return False
+
+	def _world_atom(self, pkg):
+		"""
+		Add or remove the package to the world file, but only if
+		it's supposed to be added or removed. Otherwise, do nothing.
+		"""
+
+		if set(("--buildpkgonly", "--fetchonly",
+			"--fetch-all-uri",
+			"--oneshot", "--onlydeps",
+			"--pretend")).intersection(self.myopts):
+			return
+
+		if pkg.root != self.target_root:
+			return
+
+		args_set = self._args_set
+		if not args_set.findAtomForPackage(pkg):
+			return
+
+		logger = self._logger
+		pkg_count = self._pkg_count
+		root_config = pkg.root_config
+		world_set = root_config.sets["selected"]
+		world_locked = False
+		if hasattr(world_set, "lock"):
+			world_set.lock()
+			world_locked = True
+
+		try:
+			if hasattr(world_set, "load"):
+				world_set.load() # maybe it's changed on disk
+
+			if pkg.operation == "uninstall":
+				if hasattr(world_set, "cleanPackage"):
+					world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
+							pkg.cpv)
+				if hasattr(world_set, "remove"):
+					for s in pkg.root_config.setconfig.active:
+						world_set.remove(SETPREFIX+s)
+			else:
+				atom = create_world_atom(pkg, args_set, root_config)
+				if atom:
+					if hasattr(world_set, "add"):
+						self._status_msg(('Recording %s in "world" ' + \
+							'favorites file...') % atom)
+						logger.log(" === (%s of %s) Updating world file (%s)" % \
+							(pkg_count.curval, pkg_count.maxval, pkg.cpv))
+						world_set.add(atom)
+					else:
+						writemsg_level('\n!!! Unable to record %s in "world"\n' % \
+							(atom,), level=logging.WARN, noiselevel=-1)
+		finally:
+			if world_locked:
+				world_set.unlock()
+
+	def _pkg(self, cpv, type_name, root_config, installed=False,
+		operation=None, myrepo=None):
+		"""
+		Get a package instance from the cache, or create a new
+		one if necessary. Raises KeyError from aux_get if it
+		fails for some reason (package does not exist or is
+		corrupt).
+		"""
+
+		# Reuse existing instance when available.
+		pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv,
+			type_name=type_name, repo_name=myrepo, root_config=root_config,
+			installed=installed, operation=operation))
+
+		if pkg is not None:
+			return pkg
+
+		tree_type = depgraph.pkg_tree_map[type_name]
+		db = root_config.trees[tree_type].dbapi
+		db_keys = list(self.trees[root_config.root][
+			tree_type].dbapi._aux_cache_keys)
+		metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
+		pkg = Package(built=(type_name != "ebuild"),
+			cpv=cpv, installed=installed, metadata=metadata,
+			root_config=root_config, type_name=type_name)
+		self._pkg_cache[pkg] = pkg
+		return pkg

diff --git a/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py
new file mode 100644
index 0000000..c1c98c4
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py
@@ -0,0 +1,89 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from _emerge.SlotObject import SlotObject
+from collections import deque
+class SequentialTaskQueue(SlotObject):
+
+	__slots__ = ("max_jobs", "running_tasks") + \
+		("_dirty", "_scheduling", "_task_queue")
+
+	def __init__(self, **kwargs):
+		SlotObject.__init__(self, **kwargs)
+		self._task_queue = deque()
+		self.running_tasks = set()
+		if self.max_jobs is None:
+			self.max_jobs = 1
+		self._dirty = True
+
+	def add(self, task):
+		self._task_queue.append(task)
+		self._dirty = True
+
+	def addFront(self, task):
+		self._task_queue.appendleft(task)
+		self._dirty = True
+
+	def schedule(self):
+
+		if not self._dirty:
+			return False
+
+		if not self:
+			return False
+
+		if self._scheduling:
+			# Ignore any recursive schedule() calls triggered via
+			# self._task_exit().
+			return False
+
+		self._scheduling = True
+
+		task_queue = self._task_queue
+		running_tasks = self.running_tasks
+		max_jobs = self.max_jobs
+		state_changed = False
+
+		while task_queue and \
+			(max_jobs is True or len(running_tasks) < max_jobs):
+			task = task_queue.popleft()
+			cancelled = getattr(task, "cancelled", None)
+			if not cancelled:
+				running_tasks.add(task)
+				task.addExitListener(self._task_exit)
+				task.start()
+			state_changed = True
+
+		self._dirty = False
+		self._scheduling = False
+
+		return state_changed
+
+	def _task_exit(self, task):
+		"""
+		Since we can always rely on exit listeners being called, the set of
+		running tasks is always pruned automatically and there is never any need
+		to actively prune it.
+		"""
+		self.running_tasks.remove(task)
+		if self._task_queue:
+			self._dirty = True
+
+	def clear(self):
+		self._task_queue.clear()
+		running_tasks = self.running_tasks
+		while running_tasks:
+			task = running_tasks.pop()
+			task.removeExitListener(self._task_exit)
+			task.cancel()
+		self._dirty = False
+
+	def __bool__(self):
+		return bool(self._task_queue or self.running_tasks)
+
+	if sys.hexversion < 0x3000000:
+		__nonzero__ = __bool__
+
+	def __len__(self):
+		return len(self._task_queue) + len(self.running_tasks)
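+
+# Illustrative usage (a sketch, not part of the original module; tasks
+# are assumed to provide start(), cancel(), addExitListener() and
+# removeExitListener(), as _emerge's asynchronous task classes do):
+#
+#   queue = SequentialTaskQueue(max_jobs=2)
+#   queue.add(task_a)
+#   queue.add(task_b)
+#   queue.schedule()  # starts up to max_jobs tasks from the queue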

diff --git a/portage_with_autodep/pym/_emerge/SetArg.py b/portage_with_autodep/pym/_emerge/SetArg.py
new file mode 100644
index 0000000..94cf0a6
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SetArg.py
@@ -0,0 +1,11 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DependencyArg import DependencyArg
+from portage._sets import SETPREFIX
+class SetArg(DependencyArg):
+	def __init__(self, pset=None, **kwargs):
+		DependencyArg.__init__(self, **kwargs)
+		self.pset = pset
+		self.name = self.arg[len(SETPREFIX):]
+

diff --git a/portage_with_autodep/pym/_emerge/SlotObject.py b/portage_with_autodep/pym/_emerge/SlotObject.py
new file mode 100644
index 0000000..fdc6f35
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SlotObject.py
@@ -0,0 +1,42 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class SlotObject(object):
+	__slots__ = ("__weakref__",)
+
+	def __init__(self, **kwargs):
+		classes = [self.__class__]
+		while classes:
+			c = classes.pop()
+			if c is SlotObject:
+				continue
+			classes.extend(c.__bases__)
+			slots = getattr(c, "__slots__", None)
+			if not slots:
+				continue
+			for myattr in slots:
+				myvalue = kwargs.get(myattr, None)
+				setattr(self, myattr, myvalue)
+
+	def copy(self):
+		"""
+		Create a new instance and copy all attributes
+		defined from __slots__ (including those from
+		inherited classes).
+		"""
+		obj = self.__class__()
+
+		classes = [self.__class__]
+		while classes:
+			c = classes.pop()
+			if c is SlotObject:
+				continue
+			classes.extend(c.__bases__)
+			slots = getattr(c, "__slots__", None)
+			if not slots:
+				continue
+			for myattr in slots:
+				setattr(obj, myattr, getattr(self, myattr))
+
+		return obj
+
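
A short sketch of the keyword-argument contract, using a hypothetical
trivial subclass:

from _emerge.SlotObject import SlotObject

class Point(SlotObject):
	__slots__ = ("x", "y")

p = Point(x=1, y=2)    # every name in __slots__ becomes a keyword argument
q = p.copy()           # new instance with all slot values copied
r = Point(x=5)         # unsupplied slots default to None
print("%s %s %s" % (p.x, q.y, r.y))    # 1 2 None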

diff --git a/portage_with_autodep/pym/_emerge/SpawnProcess.py b/portage_with_autodep/pym/_emerge/SpawnProcess.py
new file mode 100644
index 0000000..b72971c
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SpawnProcess.py
@@ -0,0 +1,235 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SubProcess import SubProcess
+import sys
+from portage.cache.mappings import slot_dict_class
+import portage
+from portage import _encodings
+from portage import _unicode_encode
+from portage import os
+from portage.const import BASH_BINARY
+import fcntl
+import errno
+import gzip
+
+class SpawnProcess(SubProcess):
+
+	"""
+	Constructor keyword args are passed into portage.process.spawn().
+	The required "args" keyword argument will be passed as the first
+	spawn() argument.
+	"""
+
+	_spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
+		"uid", "gid", "groups", "umask", "logfile",
+		"path_lookup", "pre_exec")
+
+	__slots__ = ("args",) + \
+		_spawn_kwarg_names + ("_selinux_type",)
+
+	_file_names = ("log", "process", "stdout")
+	_files_dict = slot_dict_class(_file_names, prefix="")
+
+	def _start(self):
+
+		if self.cancelled:
+			return
+
+		if self.fd_pipes is None:
+			self.fd_pipes = {}
+		fd_pipes = self.fd_pipes
+		fd_pipes.setdefault(0, sys.stdin.fileno())
+		fd_pipes.setdefault(1, sys.stdout.fileno())
+		fd_pipes.setdefault(2, sys.stderr.fileno())
+
+		# flush any pending output
+		for fd in fd_pipes.values():
+			if fd == sys.stdout.fileno():
+				sys.stdout.flush()
+			if fd == sys.stderr.fileno():
+				sys.stderr.flush()
+
+		self._files = self._files_dict()
+		files = self._files
+
+		master_fd, slave_fd = self._pipe(fd_pipes)
+		fcntl.fcntl(master_fd, fcntl.F_SETFL,
+			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+		logfile = None
+		if self._can_log(slave_fd):
+			logfile = self.logfile
+
+		null_input = None
+		fd_pipes_orig = fd_pipes.copy()
+		if self.background:
+			# TODO: Use job control functions like tcsetpgrp() to control
+			# access to stdin. Until then, use /dev/null so that any
+			# attempts to read from stdin will immediately return EOF
+			# instead of blocking indefinitely.
+			null_input = open('/dev/null', 'rb')
+			fd_pipes[0] = null_input.fileno()
+		else:
+			fd_pipes[0] = fd_pipes_orig[0]
+
+		# WARNING: It is very important to use unbuffered mode here,
+		# in order to avoid issue 5380 with python3.
+		files.process = os.fdopen(master_fd, 'rb', 0)
+		if logfile is not None:
+
+			fd_pipes[1] = slave_fd
+			fd_pipes[2] = slave_fd
+
+			files.log = open(_unicode_encode(logfile,
+				encoding=_encodings['fs'], errors='strict'), mode='ab')
+			if logfile.endswith('.gz'):
+				files.log = gzip.GzipFile(filename='', mode='ab',
+					fileobj=files.log)
+
+			portage.util.apply_secpass_permissions(logfile,
+				uid=portage.portage_uid, gid=portage.portage_gid,
+				mode=0o660)
+
+			if not self.background:
+				files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
+
+			output_handler = self._output_handler
+
+		else:
+
+			# Create a dummy pipe so the scheduler can monitor
+			# the process from inside a poll() loop.
+			fd_pipes[self._dummy_pipe_fd] = slave_fd
+			if self.background:
+				fd_pipes[1] = slave_fd
+				fd_pipes[2] = slave_fd
+			output_handler = self._dummy_handler
+
+		kwargs = {}
+		for k in self._spawn_kwarg_names:
+			v = getattr(self, k)
+			if v is not None:
+				kwargs[k] = v
+
+		kwargs["fd_pipes"] = fd_pipes
+		kwargs["returnpid"] = True
+		kwargs.pop("logfile", None)
+
+		self._reg_id = self.scheduler.register(files.process.fileno(),
+			self._registered_events, output_handler)
+		self._registered = True
+
+		retval = self._spawn(self.args, **kwargs)
+
+		os.close(slave_fd)
+		if null_input is not None:
+			null_input.close()
+
+		if isinstance(retval, int):
+			# spawn failed
+			self._unregister()
+			self._set_returncode((self.pid, retval))
+			self.wait()
+			return
+
+		self.pid = retval[0]
+		portage.process.spawned_pids.remove(self.pid)
+
+	def _can_log(self, slave_fd):
+		return True
+
+	def _pipe(self, fd_pipes):
+		"""
+		@type fd_pipes: dict
+		@param fd_pipes: pipes from which to copy terminal size if desired.
+		"""
+		return os.pipe()
+
+	def _spawn(self, args, **kwargs):
+		spawn_func = portage.process.spawn
+
+		if self._selinux_type is not None:
+			spawn_func = portage.selinux.spawn_wrapper(spawn_func,
+				self._selinux_type)
+			# bash is an allowed entrypoint, while most binaries are not
+			if args[0] != BASH_BINARY:
+				args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
+
+		return spawn_func(args, **kwargs)
+
+	def _output_handler(self, fd, event):
+
+		files = self._files
+		buf = self._read_buf(files.process, event)
+
+		if buf is not None:
+
+			if buf:
+				if not self.background:
+					write_successful = False
+					failures = 0
+					while True:
+						try:
+							if not write_successful:
+								buf.tofile(files.stdout)
+								write_successful = True
+							files.stdout.flush()
+							break
+						except IOError as e:
+							if e.errno != errno.EAGAIN:
+								raise
+							del e
+							failures += 1
+							if failures > 50:
+								# Avoid a potentially infinite loop. In
+								# most cases, the failure count is zero
+								# and it's unlikely to exceed 1.
+								raise
+
+							# This means that a subprocess has put an inherited
+							# stdio file descriptor (typically stdin) into
+							# O_NONBLOCK mode. This is not acceptable (see bug
+							# #264435), so revert it. We need to use a loop
+							# here since there's a race condition due to
+							# parallel processes being able to change the
+							# flags on the inherited file descriptor.
+							# TODO: When possible, avoid having child processes
+							# inherit stdio file descriptors from portage
+							# (maybe it can't be avoided with
+							# PROPERTIES=interactive).
+							fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
+								fcntl.fcntl(files.stdout.fileno(),
+								fcntl.F_GETFL) ^ os.O_NONBLOCK)
+
+				try:
+					buf.tofile(files.log)
+				except TypeError:
+					# array.tofile() doesn't work with GzipFile
+					files.log.write(buf.tostring())
+				files.log.flush()
+			else:
+				self._unregister()
+				self.wait()
+
+		self._unregister_if_appropriate(event)
+
+	def _dummy_handler(self, fd, event):
+		"""
+		This method is mainly interested in detecting EOF, since
+		the only purpose of the pipe is to allow the scheduler to
+		monitor the process from inside a poll() loop.
+		"""
+
+		buf = self._read_buf(self._files.process, event)
+
+		if buf is not None:
+
+			if buf:
+				pass
+			else:
+				self._unregister()
+				self.wait()
+
+		self._unregister_if_appropriate(event)
+
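
A usage sketch, assuming the TaskScheduler class added later in this patch
is used to drive the event loop:

from _emerge.SpawnProcess import SpawnProcess
from _emerge.TaskScheduler import TaskScheduler

task_scheduler = TaskScheduler()
proc = SpawnProcess(args=["/bin/echo", "hello"],
	scheduler=task_scheduler.sched_iface)
task_scheduler.add(proc)
task_scheduler.run()
print(proc.returncode)    # 0 if echo exited successfully

Passing logfile="foo.log.gz" would additionally tee the output to a
gzip-compressed log, per the .gz handling in _start() above.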

diff --git a/portage_with_autodep/pym/_emerge/SubProcess.py b/portage_with_autodep/pym/_emerge/SubProcess.py
new file mode 100644
index 0000000..b99cf0b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SubProcess.py
@@ -0,0 +1,141 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+import signal
+import errno
+
+class SubProcess(AbstractPollTask):
+
+	__slots__ = ("pid",) + \
+		("_files", "_reg_id")
+
+	# A file descriptor is required for the scheduler to monitor changes from
+	# inside a poll() loop. When logging is not enabled, create a pipe just to
+	# serve this purpose alone.
+	_dummy_pipe_fd = 9
+
+	def _poll(self):
+		if self.returncode is not None:
+			return self.returncode
+		if self.pid is None:
+			return self.returncode
+		if self._registered:
+			return self.returncode
+
+		try:
+			# With waitpid and WNOHANG, only check the
+			# first element of the tuple since the second
+			# element may vary (bug #337465).
+			retval = os.waitpid(self.pid, os.WNOHANG)
+		except OSError as e:
+			if e.errno != errno.ECHILD:
+				raise
+			del e
+			retval = (self.pid, 1)
+
+		if retval[0] == 0:
+			return None
+		self._set_returncode(retval)
+		self.wait()
+		return self.returncode
+
+	def _cancel(self):
+		if self.isAlive():
+			try:
+				os.kill(self.pid, signal.SIGTERM)
+			except OSError as e:
+				if e.errno != errno.ESRCH:
+					raise
+
+	def isAlive(self):
+		return self.pid is not None and \
+			self.returncode is None
+
+	def _wait(self):
+
+		if self.returncode is not None:
+			return self.returncode
+
+		if self._registered:
+			if self.cancelled:
+				timeout = 1000
+				self.scheduler.schedule(self._reg_id, timeout=timeout)
+				if self._registered:
+					try:
+						os.kill(self.pid, signal.SIGKILL)
+					except OSError as e:
+						if e.errno != errno.ESRCH:
+							raise
+						del e
+					self.scheduler.schedule(self._reg_id, timeout=timeout)
+					if self._registered:
+						self._orphan_process_warn()
+			else:
+				self.scheduler.schedule(self._reg_id)
+			self._unregister()
+			if self.returncode is not None:
+				return self.returncode
+
+		try:
+			# With waitpid and WNOHANG, only check the
+			# first element of the tuple since the second
+			# element may vary (bug #337465).
+			wait_retval = os.waitpid(self.pid, os.WNOHANG)
+		except OSError as e:
+			if e.errno != errno.ECHILD:
+				raise
+			del e
+			self._set_returncode((self.pid, 1 << 8))
+		else:
+			if wait_retval[0] != 0:
+				self._set_returncode(wait_retval)
+			else:
+				try:
+					wait_retval = os.waitpid(self.pid, 0)
+				except OSError as e:
+					if e.errno != errno.ECHILD:
+						raise
+					del e
+					self._set_returncode((self.pid, 1 << 8))
+				else:
+					self._set_returncode(wait_retval)
+
+		return self.returncode
+
+	def _orphan_process_warn(self):
+		pass
+
+	def _unregister(self):
+		"""
+		Unregister from the scheduler and close open files.
+		"""
+
+		self._registered = False
+
+		if self._reg_id is not None:
+			self.scheduler.unregister(self._reg_id)
+			self._reg_id = None
+
+		if self._files is not None:
+			for f in self._files.values():
+				f.close()
+			self._files = None
+
+	def _set_returncode(self, wait_retval):
+		"""
+		Set the returncode in a manner compatible with
+		subprocess.Popen.returncode: A negative value -N indicates
+		that the child was terminated by signal N (Unix only).
+		"""
+
+		pid, status = wait_retval
+
+		if os.WIFSIGNALED(status):
+			retval = - os.WTERMSIG(status)
+		else:
+			retval = os.WEXITSTATUS(status)
+
+		self.returncode = retval
+
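
The returncode convention is easy to demonstrate standalone; this helper
mirrors _set_returncode() above:

import os

def decode_status(status):
	# Negative -N: the child was terminated by signal N.
	if os.WIFSIGNALED(status):
		return -os.WTERMSIG(status)
	return os.WEXITSTATUS(status)

print(decode_status(15))        # -15: killed by SIGTERM
print(decode_status(1 << 8))    # 1: normal exit with status 1

Note that the fabricated (self.pid, 1 << 8) status used in the ECHILD
fallbacks above decodes to a plain exit status of 1.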

diff --git a/portage_with_autodep/pym/_emerge/Task.py b/portage_with_autodep/pym/_emerge/Task.py
new file mode 100644
index 0000000..efbe3a9
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Task.py
@@ -0,0 +1,42 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SlotObject import SlotObject
+class Task(SlotObject):
+	__slots__ = ("_hash_key", "_hash_value")
+
+	def __eq__(self, other):
+		try:
+			return self._hash_key == other._hash_key
+		except AttributeError:
+			# depgraph._pkg() generates _hash_key
+			# for lookups here, so handle that
+			return self._hash_key == other
+
+	def __ne__(self, other):
+		try:
+			return self._hash_key != other._hash_key
+		except AttributeError:
+			return True
+
+	def __hash__(self):
+		return self._hash_value
+
+	def __len__(self):
+		return len(self._hash_key)
+
+	def __getitem__(self, key):
+		return self._hash_key[key]
+
+	def __iter__(self):
+		return iter(self._hash_key)
+
+	def __contains__(self, key):
+		return key in self._hash_key
+
+	def __str__(self):
+		"""
+		Emulate tuple.__repr__, but don't show 'foo' as u'foo' for unicode
+		strings.
+		"""
+		return "(%s)" % ", ".join(("'%s'" % x for x in self._hash_key))

diff --git a/portage_with_autodep/pym/_emerge/TaskScheduler.py b/portage_with_autodep/pym/_emerge/TaskScheduler.py
new file mode 100644
index 0000000..83c0cbe
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/TaskScheduler.py
@@ -0,0 +1,25 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.QueueScheduler import QueueScheduler
+from _emerge.SequentialTaskQueue import SequentialTaskQueue
+
+class TaskScheduler(object):
+
+	"""
+	A simple way to handle scheduling of AsynchronousTask instances. Simply
+	add tasks and call run(). The run() method returns when no tasks remain.
+	"""
+
+	def __init__(self, max_jobs=None, max_load=None):
+		self._queue = SequentialTaskQueue(max_jobs=max_jobs)
+		self._scheduler = QueueScheduler(
+			max_jobs=max_jobs, max_load=max_load)
+		self.sched_iface = self._scheduler.sched_iface
+		self.run = self._scheduler.run
+		self.clear = self._scheduler.clear
+		self._scheduler.add(self._queue)
+
+	def add(self, task):
+		self._queue.add(task)
+
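
With max_jobs greater than 1, the underlying SequentialTaskQueue starts
tasks concurrently; a sketch:

from _emerge.SpawnProcess import SpawnProcess
from _emerge.TaskScheduler import TaskScheduler

task_scheduler = TaskScheduler(max_jobs=2)
for i in (1, 2):
	task_scheduler.add(SpawnProcess(args=["/bin/sleep", "1"],
		scheduler=task_scheduler.sched_iface))
task_scheduler.run()    # both sleeps run in parallel; returns after ~1 second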

diff --git a/portage_with_autodep/pym/_emerge/TaskSequence.py b/portage_with_autodep/pym/_emerge/TaskSequence.py
new file mode 100644
index 0000000..1fecf63
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/TaskSequence.py
@@ -0,0 +1,44 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.CompositeTask import CompositeTask
+from _emerge.AsynchronousTask import AsynchronousTask
+from collections import deque
+
+class TaskSequence(CompositeTask):
+	"""
+	A collection of tasks that executes sequentially. Each task
+	must have an addExitListener() method that can be used as
+	a means to trigger movement from one task to the next.
+	"""
+
+	__slots__ = ("_task_queue",)
+
+	def __init__(self, **kwargs):
+		AsynchronousTask.__init__(self, **kwargs)
+		self._task_queue = deque()
+
+	def add(self, task):
+		self._task_queue.append(task)
+
+	def _start(self):
+		self._start_next_task()
+
+	def _cancel(self):
+		self._task_queue.clear()
+		CompositeTask._cancel(self)
+
+	def _start_next_task(self):
+		self._start_task(self._task_queue.popleft(),
+			self._task_exit_handler)
+
+	def _task_exit_handler(self, task):
+		if self._default_exit(task) != os.EX_OK:
+			self.wait()
+		elif self._task_queue:
+			self._start_next_task()
+		else:
+			self._final_exit(task)
+			self.wait()
+
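
A sketch combining this with the scheduler classes above; the second echo
does not start until the first one exits:

from _emerge.SpawnProcess import SpawnProcess
from _emerge.TaskScheduler import TaskScheduler
from _emerge.TaskSequence import TaskSequence

task_scheduler = TaskScheduler()
seq = TaskSequence(scheduler=task_scheduler.sched_iface)
for msg in ("first", "second"):
	seq.add(SpawnProcess(args=["/bin/echo", msg],
		scheduler=task_scheduler.sched_iface))
task_scheduler.add(seq)
task_scheduler.run()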

diff --git a/portage_with_autodep/pym/_emerge/UninstallFailure.py b/portage_with_autodep/pym/_emerge/UninstallFailure.py
new file mode 100644
index 0000000..e4f2834
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/UninstallFailure.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+
+class UninstallFailure(portage.exception.PortageException):
+	"""
+	An instance of this class is raised by unmerge() when
+	an uninstallation fails.
+	"""
+	status = 1
+	def __init__(self, *pargs):
+		portage.exception.PortageException.__init__(self, pargs)
+		if pargs:
+			self.status = pargs[0]

diff --git a/portage_with_autodep/pym/_emerge/UnmergeDepPriority.py b/portage_with_autodep/pym/_emerge/UnmergeDepPriority.py
new file mode 100644
index 0000000..4316600
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/UnmergeDepPriority.py
@@ -0,0 +1,41 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractDepPriority import AbstractDepPriority
+class UnmergeDepPriority(AbstractDepPriority):
+	__slots__ = ("ignored", "optional", "satisfied",)
+	"""
+	Combination of properties           Priority  Category
+
+	runtime                                0       HARD
+	runtime_post                          -1       HARD
+	buildtime                             -2       SOFT
+	(none of the above)                   -2       SOFT
+	"""
+
+	MAX    =  0
+	SOFT   = -2
+	MIN    = -2
+
+	def __init__(self, **kwargs):
+		AbstractDepPriority.__init__(self, **kwargs)
+		if self.buildtime:
+			self.optional = True
+
+	def __int__(self):
+		if self.runtime:
+			return 0
+		if self.runtime_post:
+			return -1
+		if self.buildtime:
+			return -2
+		return -2
+
+	def __str__(self):
+		if self.ignored:
+			return "ignored"
+		myvalue = self.__int__()
+		if myvalue > self.SOFT:
+			return "hard"
+		return "soft"
+
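
The table above maps onto __int__() and __str__() like so:

from _emerge.UnmergeDepPriority import UnmergeDepPriority

for kwargs in ({"runtime": True}, {"runtime_post": True},
		{"buildtime": True}):
	p = UnmergeDepPriority(**kwargs)
	print("%s %s" % (int(p), str(p)))
# 0 hard
# -1 hard
# -2 soft    (buildtime also sets p.optional = True)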

diff --git a/portage_with_autodep/pym/_emerge/UseFlagDisplay.py b/portage_with_autodep/pym/_emerge/UseFlagDisplay.py
new file mode 100644
index 0000000..3daca19
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/UseFlagDisplay.py
@@ -0,0 +1,122 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from itertools import chain
+import sys
+
+from portage import _encodings, _unicode_decode, _unicode_encode
+from portage.output import red
+from portage.util import cmp_sort_key
+from portage.output import blue
+
+class UseFlagDisplay(object):
+
+	__slots__ = ('name', 'enabled', 'forced')
+
+	def __init__(self, name, enabled, forced):
+		self.name = name
+		self.enabled = enabled
+		self.forced = forced
+
+	def __str__(self):
+		s = self.name
+		if self.enabled:
+			s = red(s)
+		else:
+			s = '-' + s
+			s = blue(s)
+		if self.forced:
+			s = '(%s)' % s
+		return s
+
+	if sys.hexversion < 0x3000000:
+
+		__unicode__ = __str__
+
+		def __str__(self):
+			return _unicode_encode(self.__unicode__(),
+				encoding=_encodings['content'])
+
+	def _cmp_combined(a, b):
+		"""
+		Sort by name, combining enabled and disabled flags.
+		"""
+		return (a.name > b.name) - (a.name < b.name)
+
+	sort_combined = cmp_sort_key(_cmp_combined)
+	del _cmp_combined
+
+	def _cmp_separated(a, b):
+		"""
+		Sort by name, separating enabled flags from disabled flags.
+		"""
+		enabled_diff = b.enabled - a.enabled
+		if enabled_diff:
+			return enabled_diff
+		return (a.name > b.name) - (a.name < b.name)
+
+	sort_separated = cmp_sort_key(_cmp_separated)
+	del _cmp_separated
+
+def pkg_use_display(pkg, opts, modified_use=None):
+	settings = pkg.root_config.settings
+	use_expand = pkg.use.expand
+	use_expand_hidden = pkg.use.expand_hidden
+	alphabetical_use = '--alphabetical' in opts
+	forced_flags = set(chain(pkg.use.force,
+		pkg.use.mask))
+	if modified_use is None:
+		use = set(pkg.use.enabled)
+	else:
+		use = set(modified_use)
+	use.discard(settings.get('ARCH'))
+	use_expand_flags = set()
+	use_enabled = {}
+	use_disabled = {}
+	for varname in use_expand:
+		flag_prefix = varname.lower() + "_"
+		for f in use:
+			if f.startswith(flag_prefix):
+				use_expand_flags.add(f)
+				use_enabled.setdefault(
+					varname.upper(), []).append(f[len(flag_prefix):])
+
+		for f in pkg.iuse.all:
+			if f.startswith(flag_prefix):
+				use_expand_flags.add(f)
+				if f not in use:
+					use_disabled.setdefault(
+						varname.upper(), []).append(f[len(flag_prefix):])
+
+	var_order = set(use_enabled)
+	var_order.update(use_disabled)
+	var_order = sorted(var_order)
+	var_order.insert(0, 'USE')
+	use.difference_update(use_expand_flags)
+	use_enabled['USE'] = list(use)
+	use_disabled['USE'] = []
+
+	for f in pkg.iuse.all:
+		if f not in use and \
+			f not in use_expand_flags:
+			use_disabled['USE'].append(f)
+
+	flag_displays = []
+	for varname in var_order:
+		if varname.lower() in use_expand_hidden:
+			continue
+		flags = []
+		for f in use_enabled.get(varname, []):
+			flags.append(UseFlagDisplay(f, True, f in forced_flags))
+		for f in use_disabled.get(varname, []):
+			flags.append(UseFlagDisplay(f, False, f in forced_flags))
+		if alphabetical_use:
+			flags.sort(key=UseFlagDisplay.sort_combined)
+		else:
+			flags.sort(key=UseFlagDisplay.sort_separated)
+		# Use _unicode_decode() to force unicode format string so
+		# that UseFlagDisplay.__unicode__() is called in python2.
+		flag_displays.append('%s="%s"' % (varname,
+			' '.join(_unicode_decode("%s") % (f,) for f in flags)))
+
+	return ' '.join(flag_displays)
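
A formatting sketch (terminal color codes omitted from the expected output):

from _emerge.UseFlagDisplay import UseFlagDisplay

flags = [UseFlagDisplay("ssl", True, False),
	UseFlagDisplay("X", False, False),
	UseFlagDisplay("kernel_linux", True, True)]
flags.sort(key=UseFlagDisplay.sort_separated)
# Enabled flags sort first, disabled ones get a '-' prefix, and forced
# flags are parenthesized: (kernel_linux) ssl -X
print(" ".join(str(f) for f in flags))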

diff --git a/portage_with_autodep/pym/_emerge/__init__.py b/portage_with_autodep/pym/_emerge/__init__.py
new file mode 100644
index 0000000..f98c564
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.py b/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.py
new file mode 100644
index 0000000..ca09d83
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.py
@@ -0,0 +1,38 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+from _emerge.Package import Package
+
+def _find_deep_system_runtime_deps(graph):
+	deep_system_deps = set()
+	node_stack = []
+	for node in graph:
+		if not isinstance(node, Package) or \
+			node.operation == 'uninstall':
+			continue
+		if node.root_config.sets['system'].findAtomForPackage(node):
+			node_stack.append(node)
+
+	def ignore_priority(priority):
+		"""
+		Ignore non-runtime priorities.
+		"""
+		if isinstance(priority, DepPriority) and \
+			(priority.runtime or priority.runtime_post):
+			return False
+		return True
+
+	while node_stack:
+		node = node_stack.pop()
+		if node in deep_system_deps:
+			continue
+		deep_system_deps.add(node)
+		for child in graph.child_nodes(node, ignore_priority=ignore_priority):
+			if not isinstance(child, Package) or \
+				child.operation == 'uninstall':
+				continue
+			node_stack.append(child)
+
+	return deep_system_deps
+
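
The traversal above is a plain iterative depth-first search over a
portage.util.digraph; the same pattern in isolation, with graph assumed to
be any digraph instance:

def reachable(graph, roots, ignore_priority=None):
	# Iterative DFS, as in _find_deep_system_runtime_deps(): pop a
	# node, skip it if already seen, then push its children.
	seen = set()
	stack = list(roots)
	while stack:
		node = stack.pop()
		if node in seen:
			continue
		seen.add(node)
		stack.extend(graph.child_nodes(node,
			ignore_priority=ignore_priority))
	return seen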

diff --git a/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py
new file mode 100644
index 0000000..eab4168
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.elog import mod_echo
+
+def _flush_elog_mod_echo():
+	"""
+	Dump the mod_echo output now so that our other
+	notifications are shown last.
+	@rtype: bool
+	@returns: True if messages were shown, False otherwise.
+	"""
+	messages_shown = bool(mod_echo._items)
+	mod_echo.finalize()
+	return messages_shown

diff --git a/portage_with_autodep/pym/_emerge/actions.py b/portage_with_autodep/pym/_emerge/actions.py
new file mode 100644
index 0000000..2166963
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/actions.py
@@ -0,0 +1,3123 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import errno
+import logging
+import platform
+import pwd
+import random
+import re
+import shutil
+import signal
+import socket
+import stat
+import sys
+import tempfile
+import textwrap
+import time
+from itertools import chain
+
+import portage
+from portage import os
+from portage import subprocess_getstatusoutput
+from portage import _unicode_decode
+from portage.cache.cache_errors import CacheError
+from portage.const import GLOBAL_CONFIG_PATH, NEWS_LIB_PATH
+from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_SET_CONFIG
+from portage.dbapi.dep_expand import dep_expand
+from portage.dbapi._expand_new_virt import expand_new_virt
+from portage.dep import Atom, extended_cp_match
+from portage.exception import InvalidAtom
+from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
+	red, yellow
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage.package.ebuild.doebuild import _check_temp_dir
+from portage._sets import load_default_config, SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import cmp_sort_key, writemsg, \
+	writemsg_level, writemsg_stdout
+from portage.util.digraph import digraph
+from portage._global_updates import _global_updates
+
+from _emerge.clear_caches import clear_caches
+from _emerge.countdown import countdown
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.Dependency import Dependency
+from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.emergelog import emergelog
+from _emerge.is_valid_package_atom import is_valid_package_atom
+from _emerge.MetadataRegen import MetadataRegen
+from _emerge.Package import Package
+from _emerge.ProgressHandler import ProgressHandler
+from _emerge.RootConfig import RootConfig
+from _emerge.Scheduler import Scheduler
+from _emerge.search import search
+from _emerge.SetArg import SetArg
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.sync.getaddrinfo_validate import getaddrinfo_validate
+from _emerge.sync.old_tree_timestamp import old_tree_timestamp_warn
+from _emerge.unmerge import unmerge
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.UseFlagDisplay import pkg_use_display
+from _emerge.userquery import userquery
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+def action_build(settings, trees, mtimedb,
+	myopts, myaction, myfiles, spinner):
+
+	if '--usepkgonly' not in myopts:
+		old_tree_timestamp_warn(settings['PORTDIR'], settings)
+
+	# It's best for config updates in /etc/portage to be processed
+	# before we get here, so warn if they're not (bug #267103).
+	chk_updated_cfg_files(settings['EROOT'], ['/etc/portage'])
+
+	# validate the state of the resume data
+	# so that we can make assumptions later.
+	for k in ("resume", "resume_backup"):
+		if k not in mtimedb:
+			continue
+		resume_data = mtimedb[k]
+		if not isinstance(resume_data, dict):
+			del mtimedb[k]
+			continue
+		mergelist = resume_data.get("mergelist")
+		if not isinstance(mergelist, list):
+			del mtimedb[k]
+			continue
+		for x in mergelist:
+			if not (isinstance(x, list) and len(x) == 4):
+				continue
+			pkg_type, pkg_root, pkg_key, pkg_action = x
+			if pkg_root not in trees:
+				# Current $ROOT setting differs,
+				# so the list must be stale.
+				mergelist = None
+				break
+		if not mergelist:
+			del mtimedb[k]
+			continue
+		resume_opts = resume_data.get("myopts")
+		if not isinstance(resume_opts, (dict, list)):
+			del mtimedb[k]
+			continue
+		favorites = resume_data.get("favorites")
+		if not isinstance(favorites, list):
+			del mtimedb[k]
+			continue
+
+	resume = False
+	if "--resume" in myopts and \
+		("resume" in mtimedb or
+		"resume_backup" in mtimedb):
+		resume = True
+		if "resume" not in mtimedb:
+			mtimedb["resume"] = mtimedb["resume_backup"]
+			del mtimedb["resume_backup"]
+			mtimedb.commit()
+		# "myopts" is a list for backward compatibility.
+		resume_opts = mtimedb["resume"].get("myopts", [])
+		if isinstance(resume_opts, list):
+			resume_opts = dict((k,True) for k in resume_opts)
+		for opt in ("--ask", "--color", "--skipfirst", "--tree"):
+			resume_opts.pop(opt, None)
+
+		# Current options always override resume_opts.
+		resume_opts.update(myopts)
+		myopts.clear()
+		myopts.update(resume_opts)
+
+		if "--debug" in myopts:
+			writemsg_level("myopts %s\n" % (myopts,))
+
+		# Adjust config according to options of the command being resumed.
+		for myroot in trees:
+			mysettings =  trees[myroot]["vartree"].settings
+			mysettings.unlock()
+			adjust_config(myopts, mysettings)
+			mysettings.lock()
+			del myroot, mysettings
+
+	ldpath_mtimes = mtimedb["ldpath"]
+	favorites=[]
+	buildpkgonly = "--buildpkgonly" in myopts
+	pretend = "--pretend" in myopts
+	fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
+	ask = "--ask" in myopts
+	enter_invalid = '--ask-enter-invalid' in myopts
+	nodeps = "--nodeps" in myopts
+	oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
+	tree = "--tree" in myopts
+	if nodeps and tree:
+		tree = False
+		del myopts["--tree"]
+		portage.writemsg(colorize("WARN", " * ") + \
+			"--tree is broken with --nodeps. Disabling...\n")
+	debug = "--debug" in myopts
+	verbose = "--verbose" in myopts
+	quiet = "--quiet" in myopts
+	myparams = create_depgraph_params(myopts, myaction)
+
+	if pretend or fetchonly:
+		# make the mtimedb readonly
+		mtimedb.filename = None
+	if '--digest' in myopts or 'digest' in settings.features:
+		if '--digest' in myopts:
+			msg = "The --digest option"
+		else:
+			msg = "The FEATURES=digest setting"
+
+		msg += " can prevent corruption from being" + \
+			" noticed. The `repoman manifest` command is the preferred" + \
+			" way to generate manifests and it is capable of doing an" + \
+			" entire repository or category at once."
+		prefix = bad(" * ")
+		writemsg(prefix + "\n")
+		from textwrap import wrap
+		for line in wrap(msg, 72):
+			writemsg("%s%s\n" % (prefix, line))
+		writemsg(prefix + "\n")
+
+	if resume:
+		favorites = mtimedb["resume"].get("favorites")
+		if not isinstance(favorites, list):
+			favorites = []
+
+		resume_data = mtimedb["resume"]
+		mergelist = resume_data["mergelist"]
+		if mergelist and "--skipfirst" in myopts:
+			for i, task in enumerate(mergelist):
+				if isinstance(task, list) and \
+					task and task[-1] == "merge":
+					del mergelist[i]
+					break
+
+		success = False
+		mydepgraph = None
+		try:
+			success, mydepgraph, dropped_tasks = resume_depgraph(
+				settings, trees, mtimedb, myopts, myparams, spinner)
+		except (portage.exception.PackageNotFound,
+			depgraph.UnsatisfiedResumeDep) as e:
+			if isinstance(e, depgraph.UnsatisfiedResumeDep):
+				mydepgraph = e.depgraph
+
+			from textwrap import wrap
+			from portage.output import EOutput
+			out = EOutput()
+
+			resume_data = mtimedb["resume"]
+			mergelist = resume_data.get("mergelist")
+			if not isinstance(mergelist, list):
+				mergelist = []
+			if (mergelist and debug) or (verbose and not quiet):
+				out.eerror("Invalid resume list:")
+				out.eerror("")
+				indent = "  "
+				for task in mergelist:
+					if isinstance(task, list):
+						out.eerror(indent + str(tuple(task)))
+				out.eerror("")
+
+			if isinstance(e, depgraph.UnsatisfiedResumeDep):
+				out.eerror("One or more packages are either masked or " + \
+					"have missing dependencies:")
+				out.eerror("")
+				indent = "  "
+				for dep in e.value:
+					if dep.atom is None:
+						out.eerror(indent + "Masked package:")
+						out.eerror(2 * indent + str(dep.parent))
+						out.eerror("")
+					else:
+						out.eerror(indent + str(dep.atom) + " pulled in by:")
+						out.eerror(2 * indent + str(dep.parent))
+						out.eerror("")
+				msg = "The resume list contains packages " + \
+					"that are either masked or have " + \
+					"unsatisfied dependencies. " + \
+					"Please restart/continue " + \
+					"the operation manually, or use --skipfirst " + \
+					"to skip the first package in the list and " + \
+					"any other packages that may be " + \
+					"masked or have missing dependencies."
+				for line in wrap(msg, 72):
+					out.eerror(line)
+			elif isinstance(e, portage.exception.PackageNotFound):
+				out.eerror("An expected package is " + \
+					"not available: %s" % str(e))
+				out.eerror("")
+				msg = "The resume list contains one or more " + \
+					"packages that are no longer " + \
+					"available. Please restart/continue " + \
+					"the operation manually."
+				for line in wrap(msg, 72):
+					out.eerror(line)
+
+		if success:
+			if dropped_tasks:
+				portage.writemsg("!!! One or more packages have been " + \
+					"dropped due to\n" + \
+					"!!! masking or unsatisfied dependencies:\n\n",
+					noiselevel=-1)
+				for task in dropped_tasks:
+					portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
+				portage.writemsg("\n", noiselevel=-1)
+			del dropped_tasks
+		else:
+			if mydepgraph is not None:
+				mydepgraph.display_problems()
+			if not (ask or pretend):
+				# delete the current list and also the backup
+				# since it's probably stale too.
+				for k in ("resume", "resume_backup"):
+					mtimedb.pop(k, None)
+				mtimedb.commit()
+
+			return 1
+	else:
+		if ("--resume" in myopts):
+			print(darkgreen("emerge: It seems we have nothing to resume..."))
+			return os.EX_OK
+
+		try:
+			success, mydepgraph, favorites = backtrack_depgraph(
+				settings, trees, myopts, myparams, myaction, myfiles, spinner)
+		except portage.exception.PackageSetNotFound as e:
+			root_config = trees[settings["ROOT"]]["root_config"]
+			display_missing_pkg_set(root_config, e.value)
+			return 1
+
+		if not success:
+			mydepgraph.display_problems()
+			return 1
+
+	if "--pretend" not in myopts and \
+		("--ask" in myopts or "--tree" in myopts or \
+		"--verbose" in myopts) and \
+		not ("--quiet" in myopts and "--ask" not in myopts):
+		if "--resume" in myopts:
+			mymergelist = mydepgraph.altlist()
+			if len(mymergelist) == 0:
+				print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
+				return os.EX_OK
+			favorites = mtimedb["resume"]["favorites"]
+			retval = mydepgraph.display(
+				mydepgraph.altlist(reversed=tree),
+				favorites=favorites)
+			mydepgraph.display_problems()
+			if retval != os.EX_OK:
+				return retval
+			prompt="Would you like to resume merging these packages?"
+		else:
+			retval = mydepgraph.display(
+				mydepgraph.altlist(reversed=("--tree" in myopts)),
+				favorites=favorites)
+			mydepgraph.display_problems()
+			if retval != os.EX_OK:
+				return retval
+			mergecount=0
+			for x in mydepgraph.altlist():
+				if isinstance(x, Package) and x.operation == "merge":
+					mergecount += 1
+
+			if mergecount==0:
+				sets = trees[settings["ROOT"]]["root_config"].sets
+				world_candidates = None
+				if "selective" in myparams and \
+					not oneshot and favorites:
+					# Sets that are not world candidates are filtered
+					# out here since the favorites list needs to be
+					# complete for depgraph.loadResumeCommand() to
+					# operate correctly.
+					world_candidates = [x for x in favorites \
+						if not (x.startswith(SETPREFIX) and \
+						not sets[x[1:]].world_candidate)]
+				if "selective" in myparams and \
+					not oneshot and world_candidates:
+					print()
+					for x in world_candidates:
+						print(" %s %s" % (good("*"), x))
+					prompt="Would you like to add these packages to your world favorites?"
+				elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
+					prompt="Nothing to merge; would you like to auto-clean packages?"
+				else:
+					print()
+					print("Nothing to merge; quitting.")
+					print()
+					return os.EX_OK
+			elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
+				prompt="Would you like to fetch the source files for these packages?"
+			else:
+				prompt="Would you like to merge these packages?"
+		print()
+		if "--ask" in myopts and userquery(prompt, enter_invalid) == "No":
+			print()
+			print("Quitting.")
+			print()
+			return os.EX_OK
+		# Don't ask again (e.g. when auto-cleaning packages after merge)
+		myopts.pop("--ask", None)
+
+	if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
+		if ("--resume" in myopts):
+			mymergelist = mydepgraph.altlist()
+			if len(mymergelist) == 0:
+				print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
+				return os.EX_OK
+			favorites = mtimedb["resume"]["favorites"]
+			retval = mydepgraph.display(
+				mydepgraph.altlist(reversed=tree),
+				favorites=favorites)
+			mydepgraph.display_problems()
+			if retval != os.EX_OK:
+				return retval
+		else:
+			retval = mydepgraph.display(
+				mydepgraph.altlist(reversed=("--tree" in myopts)),
+				favorites=favorites)
+			mydepgraph.display_problems()
+			if retval != os.EX_OK:
+				return retval
+			if "--buildpkgonly" in myopts:
+				graph_copy = mydepgraph._dynamic_config.digraph.copy()
+				removed_nodes = set()
+				for node in graph_copy:
+					if not isinstance(node, Package) or \
+						node.operation == "nomerge":
+						removed_nodes.add(node)
+				graph_copy.difference_update(removed_nodes)
+				if not graph_copy.hasallzeros(ignore_priority = \
+					DepPrioritySatisfiedRange.ignore_medium):
+					print("\n!!! --buildpkgonly requires all dependencies to be merged.")
+					print("!!! You have to merge the dependencies before you can build this package.\n")
+					return 1
+	else:
+		if "--buildpkgonly" in myopts:
+			graph_copy = mydepgraph._dynamic_config.digraph.copy()
+			removed_nodes = set()
+			for node in graph_copy:
+				if not isinstance(node, Package) or \
+					node.operation == "nomerge":
+					removed_nodes.add(node)
+			graph_copy.difference_update(removed_nodes)
+			if not graph_copy.hasallzeros(ignore_priority = \
+				DepPrioritySatisfiedRange.ignore_medium):
+				print("\n!!! --buildpkgonly requires all dependencies to be merged.")
+				print("!!! Cannot merge requested packages. Merge deps and try again.\n")
+				return 1
+
+		if ("--resume" in myopts):
+			favorites=mtimedb["resume"]["favorites"]
+
+		else:
+			if "resume" in mtimedb and \
+			"mergelist" in mtimedb["resume"] and \
+			len(mtimedb["resume"]["mergelist"]) > 1:
+				mtimedb["resume_backup"] = mtimedb["resume"]
+				del mtimedb["resume"]
+				mtimedb.commit()
+
+			mydepgraph.saveNomergeFavorites()
+
+		mergetask = Scheduler(settings, trees, mtimedb, myopts,
+			spinner, favorites=favorites,
+			graph_config=mydepgraph.schedulerGraph())
+
+		del mydepgraph
+		clear_caches(trees)
+
+		retval = mergetask.merge()
+
+		if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
+			if "yes" == settings.get("AUTOCLEAN"):
+				portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
+				unmerge(trees[settings["ROOT"]]["root_config"],
+					myopts, "clean", [],
+					ldpath_mtimes, autoclean=1)
+			else:
+				portage.writemsg_stdout(colorize("WARN", "WARNING:")
+					+ " AUTOCLEAN is disabled.  This can cause serious"
+					+ " problems due to overlapping packages.\n")
+
+		return retval
+
+def action_config(settings, trees, myopts, myfiles):
+	enter_invalid = '--ask-enter-invalid' in myopts
+	if len(myfiles) != 1:
+		print(red("!!! config can only take a single package atom at this time\n"))
+		sys.exit(1)
+	if not is_valid_package_atom(myfiles[0]):
+		portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
+			noiselevel=-1)
+		portage.writemsg("!!! Please check ebuild(5) for full details.\n")
+		portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
+		sys.exit(1)
+	print()
+	try:
+		pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
+	except portage.exception.AmbiguousPackageName as e:
+		# Multiple matches thrown from cpv_expand
+		pkgs = e.args[0]
+	if len(pkgs) == 0:
+		print("No packages found.\n")
+		sys.exit(0)
+	elif len(pkgs) > 1:
+		if "--ask" in myopts:
+			options = []
+			print("Please select a package to configure:")
+			idx = 0
+			for pkg in pkgs:
+				idx += 1
+				options.append(str(idx))
+				print(options[-1]+") "+pkg)
+			print("X) Cancel")
+			options.append("X")
+			idx = userquery("Selection?", enter_invalid, responses=options)
+			if idx == "X":
+				sys.exit(0)
+			pkg = pkgs[int(idx)-1]
+		else:
+			print("The following packages available:")
+			for pkg in pkgs:
+				print("* "+pkg)
+			print("\nPlease use a specific atom or the --ask option.")
+			sys.exit(1)
+	else:
+		pkg = pkgs[0]
+
+	print()
+	if "--ask" in myopts:
+		if userquery("Ready to configure %s?" % pkg, enter_invalid) == "No":
+			sys.exit(0)
+	else:
+		print("Configuring pkg...")
+	print()
+	ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
+	mysettings = portage.config(clone=settings)
+	vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
+	debug = mysettings.get("PORTAGE_DEBUG") == "1"
+	retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
+		mysettings,
+		debug=debug, cleanup=True,
+		mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
+	if retval == os.EX_OK:
+		portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
+			mysettings, debug=debug, mydbapi=vardb, tree="vartree")
+	print()
+
+def action_depclean(settings, trees, ldpath_mtimes,
+	myopts, action, myfiles, spinner, scheduler=None):
+	# Kill packages that aren't explicitly merged or are required as a
+	# dependency of another package. World file is explicit.
+
+	# Global depclean or prune operations are not very safe when there are
+	# missing dependencies since it's unknown how badly incomplete
+	# the dependency graph is, and we might accidentally remove packages
+	# that should have been pulled into the graph. On the other hand, it's
+	# relatively safe to ignore missing deps when only asked to remove
+	# specific packages.
+
+	msg = []
+	if not _ENABLE_DYN_LINK_MAP:
+		msg.append("Depclean may break link level dependencies. Thus, it is\n")
+		msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
+		msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
+		msg.append("\n")
+	msg.append("Always study the list of packages to be cleaned for any obvious\n")
+	msg.append("mistakes. Packages that are part of the world set will always\n")
+	msg.append("be kept.  They can be manually added to this set with\n")
+	msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
+	msg.append("package.provided (see portage(5)) will be removed by\n")
+	msg.append("depclean, even if they are part of the world set.\n")
+	msg.append("\n")
+	msg.append("As a safety measure, depclean will not remove any packages\n")
+	msg.append("unless *all* required dependencies have been resolved.  As a\n")
+	msg.append("consequence, it is often necessary to run %s\n" % \
+		good("`emerge --update"))
+	msg.append(good("--newuse --deep @world`") + \
+		" prior to depclean.\n")
+
+	if action == "depclean" and "--quiet" not in myopts and not myfiles:
+		portage.writemsg_stdout("\n")
+		for x in msg:
+			portage.writemsg_stdout(colorize("WARN", " * ") + x)
+
+	root_config = trees[settings['ROOT']]['root_config']
+	vardb = root_config.trees['vartree'].dbapi
+
+	args_set = InternalPackageSet(allow_repo=True)
+	if myfiles:
+		args_set.update(myfiles)
+		matched_packages = False
+		for x in args_set:
+			if vardb.match(x):
+				matched_packages = True
+			else:
+				writemsg_level("--- Couldn't find '%s' to %s.\n" % \
+					(x.replace("null/", ""), action),
+					level=logging.WARN, noiselevel=-1)
+		if not matched_packages:
+			writemsg_level(">>> No packages selected for removal by %s\n" % \
+				action)
+			return 0
+
+	# The calculation is done in a separate function so that depgraph
+	# references go out of scope and the corresponding memory
+	# is freed before we call unmerge().
+	rval, cleanlist, ordered, req_pkg_count = \
+		calc_depclean(settings, trees, ldpath_mtimes,
+			myopts, action, args_set, spinner)
+
+	clear_caches(trees)
+
+	if rval != os.EX_OK:
+		return rval
+
+	if cleanlist:
+		unmerge(root_config, myopts, "unmerge",
+			cleanlist, ldpath_mtimes, ordered=ordered,
+			scheduler=scheduler)
+
+	if action == "prune":
+		return
+
+	if not cleanlist and "--quiet" in myopts:
+		return
+
+	print("Packages installed:   " + str(len(vardb.cpv_all())))
+	print("Packages in world:    " + \
+		str(len(root_config.sets["selected"].getAtoms())))
+	print("Packages in system:   " + \
+		str(len(root_config.sets["system"].getAtoms())))
+	print("Required packages:    "+str(req_pkg_count))
+	if "--pretend" in myopts:
+		print("Number to remove:     "+str(len(cleanlist)))
+	else:
+		print("Number removed:       "+str(len(cleanlist)))
+
+def calc_depclean(settings, trees, ldpath_mtimes,
+	myopts, action, args_set, spinner):
+	allow_missing_deps = bool(args_set)
+
+	debug = '--debug' in myopts
+	xterm_titles = "notitles" not in settings.features
+	myroot = settings["ROOT"]
+	root_config = trees[myroot]["root_config"]
+	psets = root_config.setconfig.psets
+	deselect = myopts.get('--deselect') != 'n'
+	required_sets = {}
+	required_sets['world'] = psets['world']
+
+	# When removing packages, a temporary version of the world 'selected'
+	# set may be used which excludes packages that are intended to be
+	# eligible for removal.
+	selected_set = psets['selected']
+	required_sets['selected'] = selected_set
+	protected_set = InternalPackageSet()
+	protected_set_name = '____depclean_protected_set____'
+	required_sets[protected_set_name] = protected_set
+	system_set = psets["system"]
+
+	if not system_set or not selected_set:
+
+		if not system_set:
+			writemsg_level("!!! You have no system list.\n",
+				level=logging.ERROR, noiselevel=-1)
+
+		if not selected_set:
+			writemsg_level("!!! You have no world file.\n",
+					level=logging.WARNING, noiselevel=-1)
+
+		writemsg_level("!!! Proceeding is likely to " + \
+			"break your installation.\n",
+			level=logging.WARNING, noiselevel=-1)
+		if "--pretend" not in myopts:
+			countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
+
+	if action == "depclean":
+		emergelog(xterm_titles, " >>> depclean")
+
+	writemsg_level("\nCalculating dependencies  ")
+	resolver_params = create_depgraph_params(myopts, "remove")
+	resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
+	resolver._load_vdb()
+	vardb = resolver._frozen_config.trees[myroot]["vartree"].dbapi
+	real_vardb = trees[myroot]["vartree"].dbapi
+
+	if action == "depclean":
+
+		if args_set:
+
+			if deselect:
+				# Start with an empty set.
+				selected_set = InternalPackageSet()
+				required_sets['selected'] = selected_set
+				# Pull in any sets nested within the selected set.
+				selected_set.update(psets['selected'].getNonAtoms())
+
+			# Pull in everything that's installed but not matched
+			# by an argument atom since we don't want to clean any
+			# package if something depends on it.
+			for pkg in vardb:
+				if spinner:
+					spinner.update()
+
+				try:
+					if args_set.findAtomForPackage(pkg) is None:
+						protected_set.add("=" + pkg.cpv)
+						continue
+				except portage.exception.InvalidDependString as e:
+					show_invalid_depstring_notice(pkg,
+						pkg.metadata["PROVIDE"], str(e))
+					del e
+					protected_set.add("=" + pkg.cpv)
+					continue
+
+	elif action == "prune":
+
+		if deselect:
+			# Start with an empty set.
+			selected_set = InternalPackageSet()
+			required_sets['selected'] = selected_set
+			# Pull in any sets nested within the selected set.
+			selected_set.update(psets['selected'].getNonAtoms())
+
+		# Pull in everything that's installed since we don't want
+		# to prune a package if something depends on it.
+		protected_set.update(vardb.cp_all())
+
+		if not args_set:
+
+			# Try to prune everything that's slotted.
+			for cp in vardb.cp_all():
+				if len(vardb.cp_list(cp)) > 1:
+					args_set.add(cp)
+
+		# Remove atoms from world that match installed packages
+		# that are also matched by argument atoms, but do not remove
+		# them if they match the highest installed version.
+		for pkg in vardb:
+			spinner.update()
+			pkgs_for_cp = vardb.match_pkgs(pkg.cp)
+			if not pkgs_for_cp or pkg not in pkgs_for_cp:
+				raise AssertionError("package expected in matches: " + \
+					"cp = %s, cpv = %s matches = %s" % \
+					(pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
+
+			highest_version = pkgs_for_cp[-1]
+			if pkg == highest_version:
+				# pkg is the highest version
+				protected_set.add("=" + pkg.cpv)
+				continue
+
+			if len(pkgs_for_cp) <= 1:
+				raise AssertionError("more packages expected: " + \
+					"cp = %s, cpv = %s matches = %s" % \
+					(pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
+
+			try:
+				if args_set.findAtomForPackage(pkg) is None:
+					protected_set.add("=" + pkg.cpv)
+					continue
+			except portage.exception.InvalidDependString as e:
+				show_invalid_depstring_notice(pkg,
+					pkg.metadata["PROVIDE"], str(e))
+				del e
+				protected_set.add("=" + pkg.cpv)
+				continue
+
+	if resolver._frozen_config.excluded_pkgs:
+		excluded_set = resolver._frozen_config.excluded_pkgs
+		required_sets['__excluded__'] = InternalPackageSet()
+
+		for pkg in vardb:
+			if spinner:
+				spinner.update()
+
+			try:
+				if excluded_set.findAtomForPackage(pkg):
+					required_sets['__excluded__'].add("=" + pkg.cpv)
+			except portage.exception.InvalidDependString as e:
+				show_invalid_depstring_notice(pkg,
+					pkg.metadata["PROVIDE"], str(e))
+				del e
+				required_sets['__excluded__'].add("=" + pkg.cpv)
+
+	success = resolver._complete_graph(required_sets={myroot:required_sets})
+	writemsg_level("\b\b... done!\n")
+
+	resolver.display_problems()
+
+	if not success:
+		return 1, [], False, 0
+
+	def unresolved_deps():
+
+		unresolvable = set()
+		for dep in resolver._dynamic_config._initially_unsatisfied_deps:
+			if isinstance(dep.parent, Package) and \
+				(dep.priority > UnmergeDepPriority.SOFT):
+				unresolvable.add((dep.atom, dep.parent.cpv))
+
+		if not unresolvable:
+			return False
+
+		if unresolvable and not allow_missing_deps:
+
+			if "--debug" in myopts:
+				writemsg("\ndigraph:\n\n", noiselevel=-1)
+				resolver._dynamic_config.digraph.debug_print()
+				writemsg("\n", noiselevel=-1)
+
+			prefix = bad(" * ")
+			msg = []
+			msg.append("Dependencies could not be completely resolved due to")
+			msg.append("the following required packages not being installed:")
+			msg.append("")
+			for atom, parent in unresolvable:
+				msg.append("  %s pulled in by:" % (atom,))
+				msg.append("    %s" % (parent,))
+				msg.append("")
+			msg.extend(textwrap.wrap(
+				"Have you forgotten to do a complete update prior " + \
+				"to depclean? The most comprehensive command for this " + \
+				"purpose is as follows:", 65
+			))
+			msg.append("")
+			msg.append("  " + \
+				good("emerge --update --newuse --deep --with-bdeps=y @world"))
+			msg.append("")
+			msg.extend(textwrap.wrap(
+				"Note that the --with-bdeps=y option is not required in " + \
+				"many situations. Refer to the emerge manual page " + \
+				"(run `man emerge`) for more information about " + \
+				"--with-bdeps.", 65
+			))
+			msg.append("")
+			msg.extend(textwrap.wrap(
+				"Also, note that it may be necessary to manually uninstall " + \
+				"packages that no longer exist in the portage tree, since " + \
+				"it may not be possible to satisfy their dependencies.", 65
+			))
+			if action == "prune":
+				msg.append("")
+				msg.append("If you would like to ignore " + \
+					"dependencies then use %s." % good("--nodeps"))
+			writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
+				level=logging.ERROR, noiselevel=-1)
+			return True
+		return False
+
+	if unresolved_deps():
+		return 1, [], False, 0
+
+	graph = resolver._dynamic_config.digraph.copy()
+	required_pkgs_total = 0
+	for node in graph:
+		if isinstance(node, Package):
+			required_pkgs_total += 1
+
+	def show_parents(child_node):
+		parent_nodes = graph.parent_nodes(child_node)
+		if not parent_nodes:
+			# With --prune, the highest version can be pulled in without any
+			# real parent since all installed packages are pulled in.  In that
+			# case there's nothing to show here.
+			return
+		parent_strs = []
+		for node in parent_nodes:
+			parent_strs.append(str(getattr(node, "cpv", node)))
+		parent_strs.sort()
+		msg = []
+		msg.append("  %s pulled in by:\n" % (child_node.cpv,))
+		for parent_str in parent_strs:
+			msg.append("    %s\n" % (parent_str,))
+		msg.append("\n")
+		portage.writemsg_stdout("".join(msg), noiselevel=-1)
+
+	def cmp_pkg_cpv(pkg1, pkg2):
+		"""Sort Package instances by cpv."""
+		if pkg1.cpv > pkg2.cpv:
+			return 1
+		elif pkg1.cpv == pkg2.cpv:
+			return 0
+		else:
+			return -1
+
+	def create_cleanlist():
+
+		if "--debug" in myopts:
+			writemsg("\ndigraph:\n\n", noiselevel=-1)
+			graph.debug_print()
+			writemsg("\n", noiselevel=-1)
+
+		# Never display the special internal protected_set.
+		for node in graph:
+			if isinstance(node, SetArg) and node.name == protected_set_name:
+				graph.remove(node)
+				break
+
+		pkgs_to_remove = []
+
+		if action == "depclean":
+			if args_set:
+
+				for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
+					arg_atom = None
+					try:
+						arg_atom = args_set.findAtomForPackage(pkg)
+					except portage.exception.InvalidDependString:
+						# this error has already been displayed by now
+						continue
+
+					if arg_atom:
+						if pkg not in graph:
+							pkgs_to_remove.append(pkg)
+						elif "--verbose" in myopts:
+							show_parents(pkg)
+
+			else:
+				for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
+					if pkg not in graph:
+						pkgs_to_remove.append(pkg)
+					elif "--verbose" in myopts:
+						show_parents(pkg)
+
+		elif action == "prune":
+
+			for atom in args_set:
+				for pkg in vardb.match_pkgs(atom):
+					if pkg not in graph:
+						pkgs_to_remove.append(pkg)
+					elif "--verbose" in myopts:
+						show_parents(pkg)
+
+		if not pkgs_to_remove:
+			writemsg_level(
+				">>> No packages selected for removal by %s\n" % action)
+			if "--verbose" not in myopts:
+				writemsg_level(
+					">>> To see reverse dependencies, use %s\n" % \
+						good("--verbose"))
+			if action == "prune":
+				writemsg_level(
+					">>> To ignore dependencies, use %s\n" % \
+						good("--nodeps"))
+
+		return pkgs_to_remove
+
+	cleanlist = create_cleanlist()
+	clean_set = set(cleanlist)
+
+	if cleanlist and \
+		real_vardb._linkmap is not None and \
+		myopts.get("--depclean-lib-check") != "n" and \
+		"preserve-libs" not in settings.features:
+
+		# Check if any of these packages are the sole providers of libraries
+		# with consumers that have not been selected for removal. If so, these
+		# packages and any dependencies need to be added to the graph.
+		linkmap = real_vardb._linkmap
+		consumer_cache = {}
+		provider_cache = {}
+		consumer_map = {}
+
+		writemsg_level(">>> Checking for lib consumers...\n")
+
+		for pkg in cleanlist:
+			pkg_dblink = real_vardb._dblink(pkg.cpv)
+			consumers = {}
+
+			for lib in pkg_dblink.getcontents():
+				lib = lib[len(myroot):]
+				lib_key = linkmap._obj_key(lib)
+				lib_consumers = consumer_cache.get(lib_key)
+				if lib_consumers is None:
+					try:
+						lib_consumers = linkmap.findConsumers(lib_key)
+					except KeyError:
+						continue
+					consumer_cache[lib_key] = lib_consumers
+				if lib_consumers:
+					consumers[lib_key] = lib_consumers
+
+			if not consumers:
+				continue
+
+			for lib, lib_consumers in list(consumers.items()):
+				for consumer_file in list(lib_consumers):
+					if pkg_dblink.isowner(consumer_file):
+						lib_consumers.remove(consumer_file)
+				if not lib_consumers:
+					del consumers[lib]
+
+			if not consumers:
+				continue
+
+			for lib, lib_consumers in consumers.items():
+
+				soname = linkmap.getSoname(lib)
+
+				consumer_providers = []
+				for lib_consumer in lib_consumers:
+					providers = provider_cache.get(lib_consumer)
+					if providers is None:
+						providers = linkmap.findProviders(lib_consumer)
+						provider_cache[lib_consumer] = providers
+					if soname not in providers:
+						# Why does this happen?
+						continue
+					consumer_providers.append(
+						(lib_consumer, providers[soname]))
+
+				consumers[lib] = consumer_providers
+
+			consumer_map[pkg] = consumers
+
+		if consumer_map:
+
+			search_files = set()
+			for consumers in consumer_map.values():
+				for lib, consumer_providers in consumers.items():
+					for lib_consumer, providers in consumer_providers:
+						search_files.add(lib_consumer)
+						search_files.update(providers)
+
+			writemsg_level(">>> Assigning files to packages...\n")
+			file_owners = {}
+			for f in search_files:
+				owner_set = set()
+				for owner in linkmap.getOwners(f):
+					owner_dblink = real_vardb._dblink(owner)
+					if owner_dblink.exists():
+						owner_set.add(owner_dblink)
+				if owner_set:
+					file_owners[f] = owner_set
+
+			for pkg, consumers in list(consumer_map.items()):
+				for lib, consumer_providers in list(consumers.items()):
+					lib_consumers = set()
+
+					for lib_consumer, providers in consumer_providers:
+						owner_set = file_owners.get(lib_consumer)
+						provider_dblinks = set()
+						provider_pkgs = set()
+
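+						# If an alternative provider of this soname will
+						# remain installed, the consumer will not break,
+						# so it can be skipped.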
+						if len(providers) > 1:
+							for provider in providers:
+								provider_set = file_owners.get(provider)
+								if provider_set is not None:
+									provider_dblinks.update(provider_set)
+
+						if len(provider_dblinks) > 1:
+							for provider_dblink in provider_dblinks:
+								provider_pkg = resolver._pkg(
+									provider_dblink.mycpv, "installed",
+									root_config, installed=True)
+								if provider_pkg not in clean_set:
+									provider_pkgs.add(provider_pkg)
+
+						if provider_pkgs:
+							continue
+
+						if owner_set is not None:
+							lib_consumers.update(owner_set)
+
+					for consumer_dblink in list(lib_consumers):
+						if resolver._pkg(consumer_dblink.mycpv, "installed",
+							root_config, installed=True) in clean_set:
+							lib_consumers.remove(consumer_dblink)
+							continue
+
+					if lib_consumers:
+						consumers[lib] = lib_consumers
+					else:
+						del consumers[lib]
+				if not consumers:
+					del consumer_map[pkg]
+
+		if consumer_map:
+			# TODO: Implement a package set for rebuilding consumer packages.
+
+			msg = "In order to avoid breakage of link level " + \
+				"dependencies, one or more packages will not be removed. " + \
+				"This can be solved by rebuilding " + \
+				"the packages that pulled them in."
+
+			prefix = bad(" * ")
+			from textwrap import wrap
+			writemsg_level("".join(prefix + "%s\n" % line for \
+				line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
+
+			msg = []
+			for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
+				consumers = consumer_map[pkg]
+				consumer_libs = {}
+				for lib, lib_consumers in consumers.items():
+					for consumer in lib_consumers:
+						consumer_libs.setdefault(
+							consumer.mycpv, set()).add(linkmap.getSoname(lib))
+				unique_consumers = set(chain(*consumers.values()))
+				unique_consumers = sorted(consumer.mycpv \
+					for consumer in unique_consumers)
+				msg.append("")
+				msg.append("  %s pulled in by:" % (pkg.cpv,))
+				for consumer in unique_consumers:
+					libs = consumer_libs[consumer]
+					msg.append("    %s needs %s" % \
+						(consumer, ', '.join(sorted(libs))))
+			msg.append("")
+			writemsg_level("".join(prefix + "%s\n" % line for line in msg),
+				level=logging.WARNING, noiselevel=-1)
+
+			# Add lib providers to the graph as children of lib consumers,
+			# and also add any dependencies pulled in by the provider.
+			writemsg_level(">>> Adding lib providers to graph...\n")
+
+			for pkg, consumers in consumer_map.items():
+				for consumer_dblink in set(chain(*consumers.values())):
+					consumer_pkg = resolver._pkg(consumer_dblink.mycpv,
+						"installed", root_config, installed=True)
+					if not resolver._add_pkg(pkg,
+						Dependency(parent=consumer_pkg,
+						priority=UnmergeDepPriority(runtime=True),
+						root=pkg.root)):
+						resolver.display_problems()
+						return 1, [], False, 0
+
+			writemsg_level("\nCalculating dependencies  ")
+			success = resolver._complete_graph(
+				required_sets={myroot:required_sets})
+			writemsg_level("\b\b... done!\n")
+			resolver.display_problems()
+			if not success:
+				return 1, [], False, 0
+			if unresolved_deps():
+				return 1, [], False, 0
+
+			graph = resolver._dynamic_config.digraph.copy()
+			required_pkgs_total = 0
+			for node in graph:
+				if isinstance(node, Package):
+					required_pkgs_total += 1
+			cleanlist = create_cleanlist()
+			if not cleanlist:
+				return 0, [], False, required_pkgs_total
+			clean_set = set(cleanlist)
+
+	if clean_set:
+		writemsg_level(">>> Calculating removal order...\n")
+		# Use a topological sort to create an unmerge order such that
+		# each package is unmerged before its dependencies. This is
+		# necessary to avoid breaking things that may need to run
+		# during pkg_prerm or pkg_postrm phases.
+
+		# Create a new graph to account for dependencies between the
+		# packages being unmerged.
+		graph = digraph()
+		del cleanlist[:]
+
+		dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
+		runtime = UnmergeDepPriority(runtime=True)
+		runtime_post = UnmergeDepPriority(runtime_post=True)
+		buildtime = UnmergeDepPriority(buildtime=True)
+		priority_map = {
+			"RDEPEND": runtime,
+			"PDEPEND": runtime_post,
+			"DEPEND": buildtime,
+		}
+
+		for node in clean_set:
+			graph.add(node, None)
+			mydeps = []
+			for dep_type in dep_keys:
+				depstr = node.metadata[dep_type]
+				if not depstr:
+					continue
+				priority = priority_map[dep_type]
+
+				if debug:
+					writemsg_level(_unicode_decode("\nParent:    %s\n") \
+						% (node,), noiselevel=-1, level=logging.DEBUG)
+					writemsg_level(_unicode_decode(  "Depstring: %s\n") \
+						% (depstr,), noiselevel=-1, level=logging.DEBUG)
+					writemsg_level(_unicode_decode(  "Priority:  %s\n") \
+						% (priority,), noiselevel=-1, level=logging.DEBUG)
+
+				try:
+					atoms = resolver._select_atoms(myroot, depstr,
+						myuse=node.use.enabled, parent=node,
+						priority=priority)[node]
+				except portage.exception.InvalidDependString:
+					# Ignore invalid deps of packages that will
+					# be uninstalled anyway.
+					continue
+
+				if debug:
+					writemsg_level("Candidates: [%s]\n" % \
+						', '.join(_unicode_decode("'%s'") % (x,) for x in atoms),
+						noiselevel=-1, level=logging.DEBUG)
+
+				for atom in atoms:
+					if not isinstance(atom, portage.dep.Atom):
+						# Ignore invalid atoms returned from dep_check().
+						continue
+					if atom.blocker:
+						continue
+					matches = vardb.match_pkgs(atom)
+					if not matches:
+						continue
+					for child_node in matches:
+						if child_node in clean_set:
+							graph.add(child_node, node, priority=priority)
+
+		if debug:
+			writemsg_level("\nunmerge digraph:\n\n",
+				noiselevel=-1, level=logging.DEBUG)
+			graph.debug_print()
+			writemsg_level("\n", noiselevel=-1, level=logging.DEBUG)
+
+		ordered = True
+		if len(graph.order) == len(graph.root_nodes()):
+			# If there are no dependencies between packages
+			# let unmerge() group them by cat/pn.
+			ordered = False
+			cleanlist = [pkg.cpv for pkg in graph.order]
+		else:
+			# Order nodes from lowest to highest overall reference count for
+			# optimal root node selection (this can help minimize issues
+			# with unaccounted implicit dependencies).
+			node_refcounts = {}
+			for node in graph.order:
+				node_refcounts[node] = len(graph.parent_nodes(node))
+			def cmp_reference_count(node1, node2):
+				return node_refcounts[node1] - node_refcounts[node2]
+			graph.order.sort(key=cmp_sort_key(cmp_reference_count))
+
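+			# Start with ignore_priority=None (honor every edge); if
+			# cycles leave no root nodes, progressively ignore edges
+			# of increasing priority until a removable node appears.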
+			ignore_priority_range = [None]
+			ignore_priority_range.extend(
+				range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
+			while graph:
+				for ignore_priority in ignore_priority_range:
+					nodes = graph.root_nodes(ignore_priority=ignore_priority)
+					if nodes:
+						break
+				if not nodes:
+					raise AssertionError("no root nodes")
+				if ignore_priority is not None:
+					# Some deps have been dropped due to circular dependencies,
+					# so only pop one node in order to minimize the number that
+					# are dropped.
+					del nodes[1:]
+				for node in nodes:
+					graph.remove(node)
+					cleanlist.append(node.cpv)
+
+		return 0, cleanlist, ordered, required_pkgs_total
+	return 0, [], False, required_pkgs_total
+
+def action_deselect(settings, trees, opts, atoms):
+	enter_invalid = '--ask-enter-invalid' in opts
+	root_config = trees[settings['ROOT']]['root_config']
+	world_set = root_config.sets['selected']
+	if not hasattr(world_set, 'update'):
+		writemsg_level("World @selected set does not appear to be mutable.\n",
+			level=logging.ERROR, noiselevel=-1)
+		return 1
+
+	pretend = '--pretend' in opts
+	locked = False
+	if not pretend and hasattr(world_set, 'lock'):
+		world_set.lock()
+		locked = True
+	try:
+		world_set.load()
+		world_atoms = world_set.getAtoms()
+		vardb = root_config.trees["vartree"].dbapi
+		expanded_atoms = set(atoms)
+
+		for atom in atoms:
+			if not atom.startswith(SETPREFIX):
+				if atom.cp.startswith("null/"):
+					# try to expand category from world set
+					null_cat, pn = portage.catsplit(atom.cp)
+					for world_atom in world_atoms:
+						cat, world_pn = portage.catsplit(world_atom.cp)
+						if pn == world_pn:
+							expanded_atoms.add(
+								Atom(atom.replace("null", cat, 1),
+								allow_repo=True, allow_wildcard=True))
+
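+				# Also expand to cp:SLOT atoms for installed matches,
+				# so that slot-specific world entries are matched too.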
+				for cpv in vardb.match(atom):
+					slot, = vardb.aux_get(cpv, ["SLOT"])
+					if not slot:
+						slot = "0"
+					expanded_atoms.add(Atom("%s:%s" % \
+						(portage.cpv_getkey(cpv), slot)))
+
+		discard_atoms = set()
+		for atom in world_set:
+			for arg_atom in expanded_atoms:
+				if arg_atom.startswith(SETPREFIX):
+					if atom.startswith(SETPREFIX) and \
+						arg_atom == atom:
+						discard_atoms.add(atom)
+						break
+				else:
+					if not atom.startswith(SETPREFIX) and \
+						arg_atom.intersects(atom) and \
+						not (arg_atom.slot and not atom.slot) and \
+						not (arg_atom.repo and not atom.repo):
+						discard_atoms.add(atom)
+						break
+		if discard_atoms:
+			for atom in sorted(discard_atoms):
+				if pretend:
+					print(">>> Would remove %s from \"world\" favorites file..." % \
+						colorize("INFORM", str(atom)))
+				else:
+					print(">>> Removing %s from \"world\" favorites file..." % \
+						colorize("INFORM", str(atom)))
+
+			if '--ask' in opts:
+				prompt = "Would you like to remove these " + \
+					"packages from your world favorites?"
+				if userquery(prompt, enter_invalid) == 'No':
+					return os.EX_OK
+
+			remaining = set(world_set)
+			remaining.difference_update(discard_atoms)
+			if not pretend:
+				world_set.replace(remaining)
+		else:
+			print(">>> No matching atoms found in \"world\" favorites file...")
+	finally:
+		if locked:
+			world_set.unlock()
+	return os.EX_OK
+
+class _info_pkgs_ver(object):
+	def __init__(self, ver, repo_suffix, provide_suffix):
+		self.ver = ver
+		self.repo_suffix = repo_suffix
+		self.provide_suffix = provide_suffix
+
+	def __lt__(self, other):
+		return portage.versions.vercmp(self.ver, other.ver) < 0
+
+	def toString(self):
+		"""
+		This may return unicode if repo_name contains unicode.
+		Don't use __str__ and str() since unicode triggers compatibility
+		issues between python 2.x and 3.x.
+		"""
+		return self.ver + self.repo_suffix + self.provide_suffix
+
+def action_info(settings, trees, myopts, myfiles):
+
+	output_buffer = []
+	append = output_buffer.append
+	root_config = trees[settings['ROOT']]['root_config']
+
+	append(getportageversion(settings["PORTDIR"], settings["ROOT"],
+		settings.profile_path, settings["CHOST"],
+		trees[settings["ROOT"]]["vartree"].dbapi))
+
+	header_width = 65
+	header_title = "System Settings"
+	if myfiles:
+		append(header_width * "=")
+		append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+	append(header_width * "=")
+	append("System uname: %s" % (platform.platform(aliased=1),))
+
+	lastSync = portage.grabfile(os.path.join(
+		settings["PORTDIR"], "metadata", "timestamp.chk"))
+	if lastSync:
+		lastSync = lastSync[0]
+	else:
+		lastSync = "Unknown"
+	append("Timestamp of tree: %s" % (lastSync,))
+
+	output=subprocess_getstatusoutput("distcc --version")
+	if output[0] == os.EX_OK:
+		distcc_str = output[1].split("\n", 1)[0]
+		if "distcc" in settings.features:
+			distcc_str += " [enabled]"
+		else:
+			distcc_str += " [disabled]"
+		append(distcc_str)
+
+	output=subprocess_getstatusoutput("ccache -V")
+	if output[0] == os.EX_OK:
+		ccache_str = output[1].split("\n", 1)[0]
+		if "ccache" in settings.features:
+			ccache_str += " [enabled]"
+		else:
+			ccache_str += " [disabled]"
+		append(ccache_str)
+
+	myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
+	           "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
+	myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
+	atoms = []
+	vardb = trees["/"]["vartree"].dbapi
+	for x in myvars:
+		try:
+			x = Atom(x)
+		except InvalidAtom:
+			append("%-20s %s" % (x+":", "[NOT VALID]"))
+		else:
+			for atom in expand_new_virt(vardb, x):
+				if not atom.blocker:
+					atoms.append((x, atom))
+
+	myvars = sorted(set(atoms))
+
+	portdb = trees["/"]["porttree"].dbapi
+	main_repo = portdb.getRepositoryName(portdb.porttree_root)
+	cp_map = {}
+	cp_max_len = 0
+
+	for orig_atom, x in myvars:
+		pkg_matches = vardb.match(x)
+
+		versions = []
+		for cpv in pkg_matches:
+			matched_cp = portage.versions.cpv_getkey(cpv)
+			ver = portage.versions.cpv_getversion(cpv)
+			ver_map = cp_map.setdefault(matched_cp, {})
+			prev_match = ver_map.get(ver)
+			if prev_match is not None:
+				if prev_match.provide_suffix:
+					# prefer duplicate matches that include
+					# additional virtual provider info
+					continue
+
+			if len(matched_cp) > cp_max_len:
+				cp_max_len = len(matched_cp)
+			repo = vardb.aux_get(cpv, ["repository"])[0]
+			if repo == main_repo:
+				repo_suffix = ""
+			elif not repo:
+				repo_suffix = "::<unknown repository>"
+			else:
+				repo_suffix = "::" + repo
+
+			if matched_cp == orig_atom.cp:
+				provide_suffix = ""
+			else:
+				provide_suffix = " (%s)" % (orig_atom,)
+
+			ver_map[ver] = _info_pkgs_ver(ver, repo_suffix, provide_suffix)
+
+	for cp in sorted(cp_map):
+		versions = sorted(cp_map[cp].values())
+		versions = ", ".join(ver.toString() for ver in versions)
+		append("%s %s" % \
+			((cp + ":").ljust(cp_max_len + 1), versions))
+
+	libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
+
+	repos = portdb.settings.repositories
+	if "--verbose" in myopts:
+		append("Repositories:\n")
+		for repo in repos:
+			append(repo.info_string())
+	else:
+		append("Repositories: %s" % \
+			" ".join(repo.name for repo in repos))
+
+	if _ENABLE_SET_CONFIG:
+		sets_line = "Installed sets: "
+		sets_line += ", ".join(s for s in \
+			sorted(root_config.sets['selected'].getNonAtoms()) \
+			if s.startswith(SETPREFIX))
+		append(sets_line)
+
+	if "--verbose" in myopts:
+		myvars = list(settings)
+	else:
+		myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
+		          'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
+		          'PORTDIR_OVERLAY', 'PORTAGE_BUNZIP2_COMMAND',
+		          'PORTAGE_BZIP2_COMMAND',
+		          'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
+		          'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'SYNC', 'FEATURES',
+		          'EMERGE_DEFAULT_OPTS']
+
+		myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
+
+	myvars_ignore_defaults = {
+		'PORTAGE_BZIP2_COMMAND' : 'bzip2',
+	}
+
+	myvars = portage.util.unique_array(myvars)
+	use_expand = settings.get('USE_EXPAND', '').split()
+	use_expand.sort()
+	use_expand_hidden = set(
+		settings.get('USE_EXPAND_HIDDEN', '').upper().split())
+	alphabetical_use = '--alphabetical' in myopts
+	unset_vars = []
+	myvars.sort()
+	for k in myvars:
+		v = settings.get(k)
+		if v is not None:
+			if k != "USE":
+				default = myvars_ignore_defaults.get(k)
+				if default is not None and \
+					default == v:
+					continue
+				append('%s="%s"' % (k, v))
+			else:
+				use = set(v.split())
+				for varname in use_expand:
+					flag_prefix = varname.lower() + "_"
+					for f in list(use):
+						if f.startswith(flag_prefix):
+							use.remove(f)
+				use = list(use)
+				use.sort()
+				use = ['USE="%s"' % " ".join(use)]
+				for varname in use_expand:
+					myval = settings.get(varname)
+					if myval:
+						use.append('%s="%s"' % (varname, myval))
+				append(" ".join(use))
+		else:
+			unset_vars.append(k)
+	if unset_vars:
+		append("Unset:  "+", ".join(unset_vars))
+	append("")
+	append("")
+	writemsg_stdout("\n".join(output_buffer),
+		noiselevel=-1)
+
+	# See if we can find any packages installed matching the strings
+	# passed on the command line
+	mypkgs = []
+	vardb = trees[settings["ROOT"]]["vartree"].dbapi
+	portdb = trees[settings["ROOT"]]["porttree"].dbapi
+	bindb = trees[settings["ROOT"]]["bintree"].dbapi
+	for x in myfiles:
+		match_found = False
+		installed_match = vardb.match(x)
+		for installed in installed_match:
+			mypkgs.append((installed, "installed"))
+			match_found = True
+
+		if match_found:
+			continue
+
+		for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
+			if pkg_type == "binary" and "--usepkg" not in myopts:
+				continue
+
+			matches = db.match(x)
+			matches.reverse()
+			for match in matches:
+				if pkg_type == "binary":
+					if db.bintree.isremote(match):
+						continue
+				auxkeys = ["EAPI", "DEFINED_PHASES"]
+				metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
+				if metadata["EAPI"] not in ("0", "1", "2", "3") and \
+					"info" in metadata["DEFINED_PHASES"].split():
+					mypkgs.append((match, pkg_type))
+					break
+
+	# If some packages were found...
+	if mypkgs:
+		# Get our global settings (we only print stuff if it varies from
+		# the current config)
+		mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
+		auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
+		auxkeys.append('DEFINED_PHASES')
+		global_vals = {}
+		pkgsettings = portage.config(clone=settings)
+
+		# Loop through each package
+		# Only print settings if they differ from global settings
+		header_title = "Package Settings"
+		print(header_width * "=")
+		print(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+		print(header_width * "=")
+		from portage.output import EOutput
+		out = EOutput()
+		for mypkg in mypkgs:
+			cpv = mypkg[0]
+			pkg_type = mypkg[1]
+			# Get all package specific variables
+			if pkg_type == "installed":
+				metadata = dict(zip(auxkeys, vardb.aux_get(cpv, auxkeys)))
+			elif pkg_type == "ebuild":
+				metadata = dict(zip(auxkeys, portdb.aux_get(cpv, auxkeys)))
+			elif pkg_type == "binary":
+				metadata = dict(zip(auxkeys, bindb.aux_get(cpv, auxkeys)))
+
+			pkg = Package(built=(pkg_type!="ebuild"), cpv=cpv,
+				installed=(pkg_type=="installed"), metadata=zip(Package.metadata_keys,
+				(metadata.get(x, '') for x in Package.metadata_keys)),
+				root_config=root_config, type_name=pkg_type)
+
+			if pkg_type == "installed":
+				print("\n%s was built with the following:" % \
+					colorize("INFORM", str(pkg.cpv)))
+			elif pkg_type == "ebuild":
+				print("\n%s would be build with the following:" % \
+					colorize("INFORM", str(pkg.cpv)))
+			elif pkg_type == "binary":
+				print("\n%s (non-installed binary) was built with the following:" % \
+					colorize("INFORM", str(pkg.cpv)))
+
+			writemsg_stdout('%s\n' % pkg_use_display(pkg, myopts),
+				noiselevel=-1)
+			if pkg_type == "installed":
+				for myvar in mydesiredvars:
+					if metadata[myvar].split() != settings.get(myvar, '').split():
+						print("%s=\"%s\"" % (myvar, metadata[myvar]))
+			print()
+
+			if metadata['DEFINED_PHASES']:
+				if 'info' not in metadata['DEFINED_PHASES'].split():
+					continue
+
+			print(">>> Attempting to run pkg_info() for '%s'" % pkg.cpv)
+
+			if pkg_type == "installed":
+				ebuildpath = vardb.findname(pkg.cpv)
+			elif pkg_type == "ebuild":
+				ebuildpath = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+			elif pkg_type == "binary":
+				tbz2_file = bindb.bintree.getname(pkg.cpv)
+				ebuild_file_name = pkg.cpv.split("/")[1] + ".ebuild"
+				ebuild_file_contents = portage.xpak.tbz2(tbz2_file).getfile(ebuild_file_name)
+				tmpdir = tempfile.mkdtemp()
+				ebuildpath = os.path.join(tmpdir, ebuild_file_name)
+				# getfile() returns raw bytes, so write in binary mode
+				# (and avoid shadowing the 'file' builtin).
+				with open(ebuildpath, 'wb') as ebuild_file:
+					ebuild_file.write(ebuild_file_contents)
+
+			if not ebuildpath or not os.path.exists(ebuildpath):
+				out.ewarn("No ebuild found for '%s'" % pkg.cpv)
+				continue
+
+			if pkg_type == "installed":
+				portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
+					pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
+					mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
+					tree="vartree")
+			elif pkg_type == "ebuild":
+				portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
+					pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
+					mydbapi=trees[settings["ROOT"]]["porttree"].dbapi,
+					tree="porttree")
+			elif pkg_type == "binary":
+				portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
+					pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
+					mydbapi=trees[settings["ROOT"]]["bintree"].dbapi,
+					tree="bintree")
+				shutil.rmtree(tmpdir)
+
+def action_metadata(settings, portdb, myopts, porttrees=None):
+	if porttrees is None:
+		porttrees = portdb.porttrees
+	portage.writemsg_stdout("\n>>> Updating Portage cache\n")
+	old_umask = os.umask(0o002)
+	cachedir = os.path.normpath(settings.depcachedir)
+	if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
+					"/lib", "/opt", "/proc", "/root", "/sbin",
+					"/sys", "/tmp", "/usr",  "/var"]:
+		print("!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
+			"ROOT DIRECTORY ON YOUR SYSTEM.", file=sys.stderr)
+		print("!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir, file=sys.stderr)
+		sys.exit(73)
+	if not os.path.exists(cachedir):
+		os.makedirs(cachedir)
+
+	auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
+	auxdbkeys = tuple(auxdbkeys)
+
+	class TreeData(object):
+		__slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
+		def __init__(self, dest_db, eclass_db, path, src_db):
+			self.dest_db = dest_db
+			self.eclass_db = eclass_db
+			self.path = path
+			self.src_db = src_db
+			self.valid_nodes = set()
+
+	porttrees_data = []
+	for path in porttrees:
+		src_db = portdb._pregen_auxdb.get(path)
+		if src_db is None and \
+			os.path.isdir(os.path.join(path, 'metadata', 'cache')):
+			src_db = portdb.metadbmodule(
+				path, 'metadata/cache', auxdbkeys, readonly=True)
+			try:
+				src_db.ec = portdb._repo_info[path].eclass_db
+			except AttributeError:
+				pass
+
+		if src_db is not None:
+			porttrees_data.append(TreeData(portdb.auxdb[path],
+				portdb._repo_info[path].eclass_db, path, src_db))
+
+	porttrees = [tree_data.path for tree_data in porttrees_data]
+
+	quiet = settings.get('TERM') == 'dumb' or \
+		'--quiet' in myopts or \
+		not sys.stdout.isatty()
+
+	onProgress = None
+	if not quiet:
+		progressBar = portage.output.TermProgressBar()
+		progressHandler = ProgressHandler()
+		onProgress = progressHandler.onProgress
+		def display():
+			progressBar.set(progressHandler.curval, progressHandler.maxval)
+		progressHandler.display = display
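+		# Keep the progress bar width in sync with the terminal size.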
+		def sigwinch_handler(signum, frame):
+			lines, progressBar.term_columns = \
+				portage.output.get_term_size()
+		signal.signal(signal.SIGWINCH, sigwinch_handler)
+
+	# Temporarily override portdb.porttrees so portdb.cp_all()
+	# will only return the relevant subset.
+	portdb_porttrees = portdb.porttrees
+	portdb.porttrees = porttrees
+	try:
+		cp_all = portdb.cp_all()
+	finally:
+		portdb.porttrees = portdb_porttrees
+
+	curval = 0
+	maxval = len(cp_all)
+	if onProgress is not None:
+		onProgress(maxval, curval)
+
+	from portage.cache.util import quiet_mirroring
+	from portage import eapi_is_supported, \
+		_validate_cache_for_unsupported_eapis
+
+	# TODO: Display error messages, but do not interfere with the progress bar.
+	# Here's how:
+	#  1) erase the progress bar
+	#  2) show the error message
+	#  3) redraw the progress bar on a new line
+	noise = quiet_mirroring()
+
+	for cp in cp_all:
+		for tree_data in porttrees_data:
+			for cpv in portdb.cp_list(cp, mytree=tree_data.path):
+				tree_data.valid_nodes.add(cpv)
+				try:
+					src = tree_data.src_db[cpv]
+				except KeyError as e:
+					noise.missing_entry(cpv)
+					del e
+					continue
+				except CacheError as ce:
+					noise.exception(cpv, ce)
+					del ce
+					continue
+
+				eapi = src.get('EAPI')
+				if not eapi:
+					eapi = '0'
+				eapi = eapi.lstrip('-')
+				eapi_supported = eapi_is_supported(eapi)
+				if not eapi_supported:
+					if not _validate_cache_for_unsupported_eapis:
+						noise.misc(cpv, "unable to validate " + \
+							"cache for EAPI='%s'" % eapi)
+						continue
+
+				dest = None
+				try:
+					dest = tree_data.dest_db[cpv]
+				except (KeyError, CacheError):
+					pass
+
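+				# Normalize both entries before comparison: an empty
+				# or '0' EAPI is equivalent to the field being absent.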
+				for d in (src, dest):
+					if d is not None and d.get('EAPI') in ('', '0'):
+						del d['EAPI']
+
+				if dest is not None:
+					if not (dest['_mtime_'] == src['_mtime_'] and \
+						tree_data.eclass_db.is_eclass_data_valid(
+							dest['_eclasses_']) and \
+						set(dest['_eclasses_']) == set(src['_eclasses_'])):
+						dest = None
+					else:
+						# We don't want to skip the write unless we're really
+						# sure that the existing cache is identical, so don't
+						# trust _mtime_ and _eclasses_ alone.
+						for k in set(chain(src, dest)).difference(
+							('_mtime_', '_eclasses_')):
+							if dest.get(k, '') != src.get(k, ''):
+								dest = None
+								break
+
+				if dest is not None:
+					# The existing data is valid and identical,
+					# so there's no need to overwrite it.
+					continue
+
+				try:
+					inherited = src.get('INHERITED', '')
+					eclasses = src.get('_eclasses_')
+				except CacheError as ce:
+					noise.exception(cpv, ce)
+					del ce
+					continue
+
+				if eclasses is not None:
+					if not tree_data.eclass_db.is_eclass_data_valid(
+						src['_eclasses_']):
+						noise.eclass_stale(cpv)
+						continue
+					inherited = eclasses
+				else:
+					inherited = inherited.split()
+
+				if tree_data.src_db.complete_eclass_entries and \
+					eclasses is None:
+					noise.corruption(cpv, "missing _eclasses_ field")
+					continue
+
+				if inherited:
+					# Even if _eclasses_ already exists, replace it with data from
+					# eclass_cache, in order to insert local eclass paths.
+					try:
+						eclasses = tree_data.eclass_db.get_eclass_data(inherited)
+					except KeyError:
+						# INHERITED contains a non-existent eclass.
+						noise.eclass_stale(cpv)
+						continue
+
+					if eclasses is None:
+						noise.eclass_stale(cpv)
+						continue
+					src['_eclasses_'] = eclasses
+				else:
+					src['_eclasses_'] = {}
+
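+				# For unsupported EAPIs, write only a minimal entry
+				# with a '-' prefixed EAPI marker so the cache entry
+				# can still be validated later.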
+				if not eapi_supported:
+					src = {
+						'EAPI'       : '-' + eapi,
+						'_mtime_'    : src['_mtime_'],
+						'_eclasses_' : src['_eclasses_'],
+					}
+
+				try:
+					tree_data.dest_db[cpv] = src
+				except CacheError as ce:
+					noise.exception(cpv, ce)
+					del ce
+
+		curval += 1
+		if onProgress is not None:
+			onProgress(maxval, curval)
+
+	if onProgress is not None:
+		onProgress(maxval, curval)
+
+	for tree_data in porttrees_data:
+		try:
+			dead_nodes = set(tree_data.dest_db)
+		except CacheError as e:
+			writemsg_level("Error listing cache entries for " + \
+				"'%s': %s, continuing...\n" % (tree_data.path, e),
+				level=logging.ERROR, noiselevel=-1)
+			del e
+		else:
+			dead_nodes.difference_update(tree_data.valid_nodes)
+			for cpv in dead_nodes:
+				try:
+					del tree_data.dest_db[cpv]
+				except (KeyError, CacheError):
+					pass
+
+	if not quiet:
+		# make sure the final progress is displayed
+		progressHandler.display()
+		print()
+		signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+
+	sys.stdout.flush()
+	os.umask(old_umask)
+
+def action_regen(settings, portdb, max_jobs, max_load):
+	xterm_titles = "notitles" not in settings.features
+	emergelog(xterm_titles, " === regen")
+	#regenerate cache entries
+	try:
+		os.close(sys.stdin.fileno())
+	except SystemExit as e:
+		raise # Needed else can't exit
+	except:
+		pass
+	sys.stdout.flush()
+
+	regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
+	received_signal = []
+
+	def emergeexitsig(signum, frame):
+		signal.signal(signal.SIGINT, signal.SIG_IGN)
+		signal.signal(signal.SIGTERM, signal.SIG_IGN)
+		portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
+			{"signal":signum})
+		regen.terminate()
+		received_signal.append(128 + signum)
+
+	earlier_sigint_handler = signal.signal(signal.SIGINT, emergeexitsig)
+	earlier_sigterm_handler = signal.signal(signal.SIGTERM, emergeexitsig)
+
+	try:
+		regen.run()
+	finally:
+		# Restore previous handlers
+		if earlier_sigint_handler is not None:
+			signal.signal(signal.SIGINT, earlier_sigint_handler)
+		else:
+			signal.signal(signal.SIGINT, signal.SIG_DFL)
+		if earlier_sigterm_handler is not None:
+			signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+		else:
+			signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+	if received_signal:
+		sys.exit(received_signal[0])
+
+	portage.writemsg_stdout("done!\n")
+	return regen.returncode
+
+def action_search(root_config, myopts, myfiles, spinner):
+	if not myfiles:
+		print("emerge: no search terms provided.")
+	else:
+		searchinstance = search(root_config,
+			spinner, "--searchdesc" in myopts,
+			"--quiet" not in myopts, "--usepkg" in myopts,
+			"--usepkgonly" in myopts)
+		for mysearch in myfiles:
+			try:
+				searchinstance.execute(mysearch)
+			except re.error as comment:
+				print("\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment ))
+				sys.exit(1)
+			searchinstance.output()
+
+def action_sync(settings, trees, mtimedb, myopts, myaction):
+	enter_invalid = '--ask-enter-invalid' in myopts
+	xterm_titles = "notitles" not in settings.features
+	emergelog(xterm_titles, " === sync")
+	portdb = trees[settings["ROOT"]]["porttree"].dbapi
+	myportdir = portdb.porttree_root
+	if not myportdir:
+		myportdir = settings.get('PORTDIR', '')
+		if myportdir and myportdir.strip():
+			myportdir = os.path.realpath(myportdir)
+		else:
+			myportdir = None
+	out = portage.output.EOutput()
+	global_config_path = GLOBAL_CONFIG_PATH
+	if settings['EPREFIX']:
+		global_config_path = os.path.join(settings['EPREFIX'],
+				GLOBAL_CONFIG_PATH.lstrip(os.sep))
+	if not myportdir:
+		sys.stderr.write("!!! PORTDIR is undefined.  " + \
+			"Is %s/make.globals missing?\n" % global_config_path)
+		sys.exit(1)
+	if myportdir[-1]=="/":
+		myportdir=myportdir[:-1]
+	try:
+		st = os.stat(myportdir)
+	except OSError:
+		st = None
+	if st is None:
+		print(">>>",myportdir,"not found, creating it.")
+		portage.util.ensure_dirs(myportdir, mode=0o755)
+		st = os.stat(myportdir)
+
+	usersync_uid = None
+	spawn_kwargs = {}
+	spawn_kwargs["env"] = settings.environ()
+	if 'usersync' in settings.features and \
+		portage.data.secpass >= 2 and \
+		((st.st_uid != os.getuid() and st.st_mode & 0o700) or \
+		(st.st_gid != os.getgid() and st.st_mode & 0o070)):
+		try:
+			homedir = pwd.getpwuid(st.st_uid).pw_dir
+		except KeyError:
+			pass
+		else:
+			# Drop privileges when syncing, in order to match
+			# existing uid/gid settings.
+			usersync_uid = st.st_uid
+			spawn_kwargs["uid"]    = st.st_uid
+			spawn_kwargs["gid"]    = st.st_gid
+			spawn_kwargs["groups"] = [st.st_gid]
+			spawn_kwargs["env"]["HOME"] = homedir
+			umask = 0o002
+			if not st.st_mode & 0o020:
+				umask = umask | 0o020
+			spawn_kwargs["umask"] = umask
+
+	if usersync_uid is not None:
+		# PORTAGE_TMPDIR is used below, so validate it and
+		# bail out if necessary.
+		rval = _check_temp_dir(settings)
+		if rval != os.EX_OK:
+			return rval
+
+	syncuri = settings.get("SYNC", "").strip()
+	if not syncuri:
+		writemsg_level("!!! SYNC is undefined. " + \
+			"Is %s/make.globals missing?\n" % global_config_path,
+			noiselevel=-1, level=logging.ERROR)
+		return 1
+
+	vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
+	vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
+
+	os.umask(0o022)
+	dosyncuri = syncuri
+	updatecache_flg = False
+	if myaction == "metadata":
+		print("skipping sync")
+		updatecache_flg = True
+	elif ".git" in vcs_dirs:
+		# Update existing git repository, and ignore the syncuri. We are
+		# going to trust the user and assume that the user is in the branch
+		# that he/she wants updated. We'll let the user manage branches with
+		# git directly.
+		if portage.process.find_binary("git") is None:
+			msg = ["Command not found: git",
+			"Type \"emerge dev-util/git\" to enable git support."]
+			for l in msg:
+				writemsg_level("!!! %s\n" % l,
+					level=logging.ERROR, noiselevel=-1)
+			return 1
+		msg = ">>> Starting git pull in %s..." % myportdir
+		emergelog(xterm_titles, msg )
+		writemsg_level(msg + "\n")
+		exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
+			(portage._shell_quote(myportdir),), **spawn_kwargs)
+		if exitcode != os.EX_OK:
+			msg = "!!! git pull error in %s." % myportdir
+			emergelog(xterm_titles, msg)
+			writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
+			return exitcode
+		msg = ">>> Git pull in %s successful" % myportdir
+		emergelog(xterm_titles, msg)
+		writemsg_level(msg + "\n")
+		exitcode = git_sync_timestamps(settings, myportdir)
+		if exitcode == os.EX_OK:
+			updatecache_flg = True
+	elif syncuri[:8]=="rsync://" or syncuri[:6]=="ssh://":
+		for vcs_dir in vcs_dirs:
+			writemsg_level(("!!! %s appears to be under revision " + \
+				"control (contains %s).\n!!! Aborting rsync sync.\n") % \
+				(myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
+			return 1
+		if not os.path.exists("/usr/bin/rsync"):
+			print("!!! /usr/bin/rsync does not exist, so rsync support is disabled.")
+			print("!!! Type \"emerge net-misc/rsync\" to enable rsync support.")
+			sys.exit(1)
+		mytimeout=180
+
+		rsync_opts = []
+		if settings["PORTAGE_RSYNC_OPTS"] == "":
+			portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
+			rsync_opts.extend([
+				"--recursive",    # Recurse directories
+				"--links",        # Consider symlinks
+				"--safe-links",   # Ignore links outside of tree
+				"--perms",        # Preserve permissions
+				"--times",        # Preserive mod times
+				"--compress",     # Compress the data transmitted
+				"--force",        # Force deletion on non-empty dirs
+				"--whole-file",   # Don't do block transfers, only entire files
+				"--delete",       # Delete files that aren't in the master tree
+				"--stats",        # Show final statistics about what was transfered
+				"--timeout="+str(mytimeout), # IO timeout if not done in X seconds
+				"--exclude=/distfiles",   # Exclude distfiles from consideration
+				"--exclude=/local",       # Exclude local     from consideration
+				"--exclude=/packages",    # Exclude packages  from consideration
+			])
+
+		else:
+			# The below validation is not needed when using the above hardcoded
+			# defaults.
+
+			portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
+			rsync_opts.extend(portage.util.shlex_split(
+				settings.get("PORTAGE_RSYNC_OPTS", "")))
+			for opt in ("--recursive", "--times"):
+				if opt not in rsync_opts:
+					portage.writemsg(yellow("WARNING:") + " adding required option " + \
+					"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
+					rsync_opts.append(opt)
+
+			for exclude in ("distfiles", "local", "packages"):
+				opt = "--exclude=/%s" % exclude
+				if opt not in rsync_opts:
+					portage.writemsg(yellow("WARNING:") + \
+					" adding required option %s not included in "  % opt + \
+					"PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
+					rsync_opts.append(opt)
+
+			if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
+				def rsync_opt_startswith(opt_prefix):
+					for x in rsync_opts:
+						if x.startswith(opt_prefix):
+							return True
+					return False
+
+				if not rsync_opt_startswith("--timeout="):
+					rsync_opts.append("--timeout=%d" % mytimeout)
+
+				for opt in ("--compress", "--whole-file"):
+					if opt not in rsync_opts:
+						portage.writemsg(yellow("WARNING:") + " adding required option " + \
+						"%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
+						rsync_opts.append(opt)
+
+		if "--quiet" in myopts:
+			rsync_opts.append("--quiet")    # Shut up a lot
+		else:
+			rsync_opts.append("--verbose")	# Print filelist
+
+		if "--verbose" in myopts:
+			rsync_opts.append("--progress")  # Progress meter for each file
+
+		if "--debug" in myopts:
+			rsync_opts.append("--checksum") # Force checksum on all files
+
+		# Real local timestamp file.
+		servertimestampfile = os.path.join(
+			myportdir, "metadata", "timestamp.chk")
+
+		content = portage.util.grabfile(servertimestampfile)
+		mytimestamp = 0
+		if content:
+			try:
+				mytimestamp = time.mktime(time.strptime(content[0],
+					"%a, %d %b %Y %H:%M:%S +0000"))
+			except (OverflowError, ValueError):
+				pass
+		del content
+
+		try:
+			rsync_initial_timeout = \
+				int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
+		except ValueError:
+			rsync_initial_timeout = 15
+
+		try:
+			maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
+		except SystemExit as e:
+			raise # Needed else can't exit
+		except:
+			maxretries = -1 #default number of retries
+
+		retries=0
+		try:
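+			# Split SYNC into protocol, optional user@, hostname
+			# (possibly a bracketed IPv6 literal) and optional :port.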
+			proto, user_name, hostname, port = re.split(
+				r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
+				syncuri, maxsplit=4)[1:5]
+		except ValueError:
+			writemsg_level("!!! SYNC is invalid: %s\n" % syncuri,
+				noiselevel=-1, level=logging.ERROR)
+			return 1
+		if port is None:
+			port=""
+		if user_name is None:
+			user_name=""
+		if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
+			getaddrinfo_host = hostname
+		else:
+			# getaddrinfo needs the brackets stripped
+			getaddrinfo_host = hostname[1:-1]
+		updatecache_flg=True
+		all_rsync_opts = set(rsync_opts)
+		extra_rsync_opts = portage.util.shlex_split(
+			settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
+		all_rsync_opts.update(extra_rsync_opts)
+
+		family = socket.AF_UNSPEC
+		if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
+			family = socket.AF_INET
+		elif socket.has_ipv6 and \
+			("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
+			family = socket.AF_INET6
+
+		addrinfos = None
+		uris = []
+
+		try:
+			addrinfos = getaddrinfo_validate(
+				socket.getaddrinfo(getaddrinfo_host, None,
+				family, socket.SOCK_STREAM))
+		except socket.error as e:
+			writemsg_level(
+				"!!! getaddrinfo failed for '%s': %s\n" % (hostname, e),
+				noiselevel=-1, level=logging.ERROR)
+
+		if addrinfos:
+
+			AF_INET = socket.AF_INET
+			AF_INET6 = None
+			if socket.has_ipv6:
+				AF_INET6 = socket.AF_INET6
+
+			ips_v4 = []
+			ips_v6 = []
+
+			for addrinfo in addrinfos:
+				if addrinfo[0] == AF_INET:
+					ips_v4.append("%s" % addrinfo[4][0])
+				elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
+					# IPv6 addresses need to be enclosed in square brackets
+					ips_v6.append("[%s]" % addrinfo[4][0])
+
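+			# Shuffle within each address family so that the load is
+			# spread across the addresses returned for the hostname.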
+			random.shuffle(ips_v4)
+			random.shuffle(ips_v6)
+
+			# Give priority to the address family that
+			# getaddrinfo() returned first.
+			if AF_INET6 is not None and addrinfos and \
+				addrinfos[0][0] == AF_INET6:
+				ips = ips_v6 + ips_v4
+			else:
+				ips = ips_v4 + ips_v6
+
+			for ip in ips:
+				uris.append(syncuri.replace(
+					"//" + user_name + hostname + port + "/",
+					"//" + user_name + ip + port + "/", 1))
+
+		if not uris:
+			# With some configurations we need to use the plain hostname
+			# rather than try to resolve the ip addresses (bug #340817).
+			uris.append(syncuri)
+
+		# reverse, for use with pop()
+		uris.reverse()
+
+		effective_maxretries = maxretries
+		if effective_maxretries < 0:
+			effective_maxretries = len(uris) - 1
+
+		SERVER_OUT_OF_DATE = -1
+		EXCEEDED_MAX_RETRIES = -2
+		while (1):
+			if uris:
+				dosyncuri = uris.pop()
+			else:
+				writemsg("!!! Exhausted addresses for %s\n" % \
+					hostname, noiselevel=-1)
+				return 1
+
+			if (retries==0):
+				if "--ask" in myopts:
+					if userquery("Do you want to sync your Portage tree " + \
+						"with the mirror at\n" + blue(dosyncuri) + bold("?"),
+						enter_invalid) == "No":
+						print()
+						print("Quitting.")
+						print()
+						sys.exit(0)
+				emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
+				if "--quiet" not in myopts:
+					print(">>> Starting rsync with "+dosyncuri+"...")
+			else:
+				emergelog(xterm_titles,
+					">>> Starting retry %d of %d with %s" % \
+						(retries, effective_maxretries, dosyncuri))
+				writemsg_stdout(
+					"\n\n>>> Starting retry %d of %d with %s\n" % \
+					(retries, effective_maxretries, dosyncuri), noiselevel=-1)
+
+			if dosyncuri.startswith('ssh://'):
+				dosyncuri = dosyncuri[6:].replace('/', ':/', 1)
+
+			if mytimestamp != 0 and "--quiet" not in myopts:
+				print(">>> Checking server timestamp ...")
+
+			rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
+
+			if "--debug" in myopts:
+				print(rsynccommand)
+
+			exitcode = os.EX_OK
+			servertimestamp = 0
+			# Even if there's no timestamp available locally, fetch the
+			# timestamp anyway as an initial probe to verify that the server is
+			# responsive.  This protects us from hanging indefinitely on a
+			# connection attempt to an unresponsive server which rsync's
+			# --timeout option does not prevent.
+			if True:
+				# Temporary file for remote server timestamp comparison.
+				# NOTE: If FEATURES=usersync is enabled then the tempfile
+				# needs to be in a directory that's readable by the usersync
+				# user. We assume that PORTAGE_TMPDIR will satisfy this
+				# requirement, since that's not necessarily true for the
+				# default directory used by the tempfile module.
+				if usersync_uid is not None:
+					tmpdir = settings['PORTAGE_TMPDIR']
+				else:
+					# use default dir from tempfile module
+					tmpdir = None
+				fd, tmpservertimestampfile = \
+					tempfile.mkstemp(dir=tmpdir)
+				os.close(fd)
+				if usersync_uid is not None:
+					portage.util.apply_permissions(tmpservertimestampfile,
+						uid=usersync_uid)
+				mycommand = rsynccommand[:]
+				mycommand.append(dosyncuri.rstrip("/") + \
+					"/metadata/timestamp.chk")
+				mycommand.append(tmpservertimestampfile)
+				content = None
+				mypids = []
+				try:
+					# Timeout here in case the server is unresponsive.  The
+					# --timeout rsync option doesn't apply to the initial
+					# connection attempt.
+					try:
+						if rsync_initial_timeout:
+							portage.exception.AlarmSignal.register(
+								rsync_initial_timeout)
+
+						mypids.extend(portage.process.spawn(
+							mycommand, returnpid=True, **spawn_kwargs))
+						exitcode = os.waitpid(mypids[0], 0)[1]
+						if usersync_uid is not None:
+							portage.util.apply_permissions(tmpservertimestampfile,
+								uid=os.getuid())
+						content = portage.grabfile(tmpservertimestampfile)
+					finally:
+						if rsync_initial_timeout:
+							portage.exception.AlarmSignal.unregister()
+						try:
+							os.unlink(tmpservertimestampfile)
+						except OSError:
+							pass
+				except portage.exception.AlarmSignal:
+					# timed out
+					print('timed out')
+					# With waitpid and WNOHANG, only check the
+					# first element of the tuple since the second
+					# element may vary (bug #337465).
+					if mypids and os.waitpid(mypids[0], os.WNOHANG)[0] == 0:
+						os.kill(mypids[0], signal.SIGTERM)
+						os.waitpid(mypids[0], 0)
+					# This is the same code rsync uses for timeout.
+					exitcode = 30
+				else:
+					if exitcode != os.EX_OK:
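+						# os.waitpid() returns a 16-bit status: the low
+						# byte holds the signal, the high byte the exit
+						# status; extract whichever is set.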
+						if exitcode & 0xff:
+							exitcode = (exitcode & 0xff) << 8
+						else:
+							exitcode = exitcode >> 8
+				if mypids:
+					portage.process.spawned_pids.remove(mypids[0])
+				if content:
+					try:
+						servertimestamp = time.mktime(time.strptime(
+							content[0], "%a, %d %b %Y %H:%M:%S +0000"))
+					except (OverflowError, ValueError):
+						pass
+				del mycommand, mypids, content
+			if exitcode == os.EX_OK:
+				if (servertimestamp != 0) and (servertimestamp == mytimestamp):
+					emergelog(xterm_titles,
+						">>> Cancelling sync -- Already current.")
+					print()
+					print(">>>")
+					print(">>> Timestamps on the server and in the local repository are the same.")
+					print(">>> Cancelling all further sync action. You are already up to date.")
+					print(">>>")
+					print(">>> In order to force sync, remove '%s'." % servertimestampfile)
+					print(">>>")
+					print()
+					sys.exit(0)
+				elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
+					emergelog(xterm_titles,
+						">>> Server out of date: %s" % dosyncuri)
+					print()
+					print(">>>")
+					print(">>> SERVER OUT OF DATE: %s" % dosyncuri)
+					print(">>>")
+					print(">>> In order to force sync, remove '%s'." % servertimestampfile)
+					print(">>>")
+					print()
+					exitcode = SERVER_OUT_OF_DATE
+				elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
+					# actual sync
+					mycommand = rsynccommand + [dosyncuri+"/", myportdir]
+					exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
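+					# These rsync exit codes are treated as final;
+					# any other code falls through to the retry logic.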
+					if exitcode in [0,1,3,4,11,14,20,21]:
+						break
+			elif exitcode in [1,3,4,11,14,20,21]:
+				break
+			else:
+				# Code 2 indicates protocol incompatibility, which is expected
+				# for servers with protocol < 29 that don't support
+				# --prune-empty-directories.  Retry for a server that supports
+				# at least rsync protocol version 29 (>=rsync-2.6.4).
+				pass
+
+			retries=retries+1
+
+			if maxretries < 0 or retries <= maxretries:
+				print(">>> Retrying...")
+			else:
+				# over retries
+				# exit loop
+				updatecache_flg=False
+				exitcode = EXCEEDED_MAX_RETRIES
+				break
+
+		if (exitcode==0):
+			emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
+		elif exitcode == SERVER_OUT_OF_DATE:
+			sys.exit(1)
+		elif exitcode == EXCEEDED_MAX_RETRIES:
+			sys.stderr.write(
+				">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
+			sys.exit(1)
+		elif (exitcode>0):
+			msg = []
+			if exitcode==1:
+				msg.append("Rsync has reported that there is a syntax error. Please ensure")
+				msg.append("that your SYNC statement is proper.")
+				msg.append("SYNC=" + settings["SYNC"])
+			elif exitcode==11:
+				msg.append("Rsync has reported that there is a File IO error. Normally")
+				msg.append("this means your disk is full, but can be caused by corruption")
+				msg.append("on the filesystem that contains PORTDIR. Please investigate")
+				msg.append("and try again after the problem has been fixed.")
+				msg.append("PORTDIR=" + settings["PORTDIR"])
+			elif exitcode==20:
+				msg.append("Rsync was killed before it finished.")
+			else:
+				msg.append("Rsync has not successfully finished. It is recommended that you keep")
+				msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
+				msg.append("to use rsync due to firewall or other restrictions. This should be a")
+				msg.append("temporary problem unless complications exist with your network")
+				msg.append("(and possibly your system's filesystem) configuration.")
+			for line in msg:
+				out.eerror(line)
+			sys.exit(exitcode)
+	elif syncuri[:6]=="cvs://":
+		if not os.path.exists("/usr/bin/cvs"):
+			print("!!! /usr/bin/cvs does not exist, so CVS support is disabled.")
+			print("!!! Type \"emerge dev-vcs/cvs\" to enable CVS support.")
+			sys.exit(1)
+		cvsroot=syncuri[6:]
+		cvsdir=os.path.dirname(myportdir)
+		if not os.path.exists(myportdir+"/CVS"):
+			#initial checkout
+			print(">>> Starting initial cvs checkout with "+syncuri+"...")
+			if os.path.exists(cvsdir+"/gentoo-x86"):
+				print("!!! existing",cvsdir+"/gentoo-x86 directory; exiting.")
+				sys.exit(1)
+			try:
+				os.rmdir(myportdir)
+			except OSError as e:
+				if e.errno != errno.ENOENT:
+					sys.stderr.write(
+						"!!! existing '%s' directory; exiting.\n" % myportdir)
+					sys.exit(1)
+				del e
+			if portage.process.spawn_bash(
+					"cd %s; exec cvs -z0 -d %s co -P gentoo-x86" % \
+					(portage._shell_quote(cvsdir), portage._shell_quote(cvsroot)),
+					**spawn_kwargs) != os.EX_OK:
+				print("!!! cvs checkout error; exiting.")
+				sys.exit(1)
+			os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
+		else:
+			#cvs update
+			print(">>> Starting cvs update with "+syncuri+"...")
+			retval = portage.process.spawn_bash(
+				"cd %s; exec cvs -z0 -q update -dP" % \
+				(portage._shell_quote(myportdir),), **spawn_kwargs)
+			if retval != os.EX_OK:
+				writemsg_level("!!! cvs update error; exiting.\n",
+					noiselevel=-1, level=logging.ERROR)
+				sys.exit(retval)
+		dosyncuri = syncuri
+	else:
+		writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
+			noiselevel=-1, level=logging.ERROR)
+		return 1
+
+	if updatecache_flg and  \
+		myaction != "metadata" and \
+		"metadata-transfer" not in settings.features:
+		updatecache_flg = False
+
+	# Reload the whole config from scratch.
+	settings, trees, mtimedb = load_emerge_config(trees=trees)
+	adjust_configs(myopts, trees)
+	root_config = trees[settings["ROOT"]]["root_config"]
+	portdb = trees[settings["ROOT"]]["porttree"].dbapi
+
+	if updatecache_flg and \
+		os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
+
+		# Only update cache for myportdir since that's
+		# the only one that's been synced here.
+		action_metadata(settings, portdb, myopts, porttrees=[myportdir])
+
+	if myopts.get('--package-moves') != 'n' and \
+		_global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
+		mtimedb.commit()
+		# Reload the whole config from scratch.
+		settings, trees, mtimedb = load_emerge_config(trees=trees)
+		adjust_configs(myopts, trees)
+		portdb = trees[settings["ROOT"]]["porttree"].dbapi
+		root_config = trees[settings["ROOT"]]["root_config"]
+
+	mybestpv = portdb.xmatch("bestmatch-visible",
+		portage.const.PORTAGE_PACKAGE_ATOM)
+	mypvs = portage.best(
+		trees[settings["ROOT"]]["vartree"].dbapi.match(
+		portage.const.PORTAGE_PACKAGE_ATOM))
+
+	chk_updated_cfg_files(settings["EROOT"],
+		portage.util.shlex_split(settings.get("CONFIG_PROTECT", "")))
+
+	if myaction != "metadata":
+		postsync = os.path.join(settings["PORTAGE_CONFIGROOT"],
+			portage.USER_CONFIG_PATH, "bin", "post_sync")
+		if os.access(postsync, os.X_OK):
+			retval = portage.process.spawn(
+				[postsync, dosyncuri], env=settings.environ())
+			if retval != os.EX_OK:
+				writemsg_level(
+					" %s spawn failed of %s\n" % (bad("*"), postsync,),
+					level=logging.ERROR, noiselevel=-1)
+
+	if mybestpv != mypvs and "--quiet" not in myopts:
+		print()
+		print(red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
+		print(red(" * ")+"that you update portage now, before any other packages are updated.")
+		print()
+		print(red(" * ")+"To update portage, run 'emerge portage' now.")
+		print()
+
+	display_news_notification(root_config, myopts)
+	return os.EX_OK
+
+def action_uninstall(settings, trees, ldpath_mtimes,
+	opts, action, files, spinner):
+	# For backward compat, some actions do not require leading '='.
+	ignore_missing_eq = action in ('clean', 'unmerge')
+	root = settings['ROOT']
+	vardb = trees[root]['vartree'].dbapi
+	valid_atoms = []
+	lookup_owners = []
+
+	# Ensure atoms are valid before calling unmerge().
+	# For backward compat, leading '=' is not required.
+	for x in files:
+		if is_valid_package_atom(x, allow_repo=True) or \
+			(ignore_missing_eq and is_valid_package_atom('=' + x)):
+
+			try:
+				atom = dep_expand(x, mydb=vardb, settings=settings)
+			except portage.exception.AmbiguousPackageName as e:
+				msg = "The short ebuild name \"" + x + \
+					"\" is ambiguous.  Please specify " + \
+					"one of the following " + \
+					"fully-qualified ebuild names instead:"
+				for line in textwrap.wrap(msg, 70):
+					writemsg_level("!!! %s\n" % (line,),
+						level=logging.ERROR, noiselevel=-1)
+				for i in e.args[0]:
+					writemsg_level("    %s\n" % colorize("INFORM", i),
+						level=logging.ERROR, noiselevel=-1)
+				writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
+				return 1
+			else:
+				if atom.use and atom.use.conditional:
+					writemsg_level(
+						("\n\n!!! '%s' contains a conditional " + \
+						"which is not allowed.\n") % (x,),
+						level=logging.ERROR, noiselevel=-1)
+					writemsg_level(
+						"!!! Please check ebuild(5) for full details.\n",
+						level=logging.ERROR)
+					return 1
+				valid_atoms.append(atom)
+
+		elif x.startswith(os.sep):
+			if not x.startswith(root):
+				writemsg_level(("!!! '%s' does not start with" + \
+					" $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
+				return 1
+			# Queue these up since it's most efficient to handle
+			# multiple files in a single iter_owners() call.
+			lookup_owners.append(x)
+
+		elif x.startswith(SETPREFIX) and action == "deselect":
+			valid_atoms.append(x)
+
+		elif "*" in x:
+			try:
+				ext_atom = Atom(x, allow_repo=True, allow_wildcard=True)
+			except InvalidAtom:
+				msg = []
+				msg.append("'%s' is not a valid package atom." % (x,))
+				msg.append("Please check ebuild(5) for full details.")
+				writemsg_level("".join("!!! %s\n" % line for line in msg),
+					level=logging.ERROR, noiselevel=-1)
+				return 1
+
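+			# Expand the wildcard against all installed packages,
+			# preserving any slot or repo restriction from the atom.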
+			for cp in vardb.cp_all():
+				if extended_cp_match(ext_atom.cp, cp):
+					atom = cp
+					if ext_atom.slot:
+						atom += ":" + ext_atom.slot
+					if ext_atom.repo:
+						atom += "::" + ext_atom.repo
+
+					if vardb.match(atom):
+						valid_atoms.append(Atom(atom, allow_repo=True))
+
+		else:
+			msg = []
+			msg.append("'%s' is not a valid package atom." % (x,))
+			msg.append("Please check ebuild(5) for full details.")
+			writemsg_level("".join("!!! %s\n" % line for line in msg),
+				level=logging.ERROR, noiselevel=-1)
+			return 1
+
+	if lookup_owners:
+		relative_paths = []
+		search_for_multiple = False
+		if len(lookup_owners) > 1:
+			search_for_multiple = True
+
+		for x in lookup_owners:
+			if not search_for_multiple and os.path.isdir(x):
+				search_for_multiple = True
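+			# Strip ROOT but keep the leading slash, since the owner
+			# lookup expects ROOT-relative paths starting with '/'.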
+			relative_paths.append(x[len(root)-1:])
+
+		owners = set()
+		for pkg, relative_path in \
+			vardb._owners.iter_owners(relative_paths):
+			owners.add(pkg.mycpv)
+			if not search_for_multiple:
+				break
+
+		if owners:
+			for cpv in owners:
+				slot = vardb.aux_get(cpv, ['SLOT'])[0]
+				if not slot:
+					# portage now masks packages with missing slot, but it's
+					# possible that one was installed by an older version
+					atom = portage.cpv_getkey(cpv)
+				else:
+					atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
+				valid_atoms.append(portage.dep.Atom(atom))
+		else:
+			writemsg_level(("!!! '%s' is not claimed " + \
+				"by any package.\n") % lookup_owners[0],
+				level=logging.WARNING, noiselevel=-1)
+
+	if files and not valid_atoms:
+		return 1
+
+	if action == 'unmerge' and \
+		'--quiet' not in opts and \
+		'--quiet-unmerge-warn' not in opts:
+		msg = "This action can remove important packages! " + \
+			"In order to be safer, use " + \
+			"`emerge -pv --depclean <atom>` to check for " + \
+			"reverse dependencies before removing packages."
+		out = portage.output.EOutput()
+		for line in textwrap.wrap(msg, 72):
+			out.ewarn(line)
+
+	if action == 'deselect':
+		return action_deselect(settings, trees, opts, valid_atoms)
+
+	# Create a Scheduler for calls to unmerge(), in order to cause
+	# redirection of ebuild phase output to logs as required for
+	# options such as --quiet.
+	sched = Scheduler(settings, trees, None, opts,
+		spinner)
+	sched._background = sched._background_mode()
+	sched._status_display.quiet = True
+
+	if sched._background:
+		sched.settings.unlock()
+		sched.settings["PORTAGE_BACKGROUND"] = "1"
+		sched.settings.backup_changes("PORTAGE_BACKGROUND")
+		sched.settings.lock()
+		sched.pkgsettings[root] = portage.config(clone=sched.settings)
+
+	if action in ('clean', 'unmerge') or \
+		(action == 'prune' and "--nodeps" in opts):
+		# When given a list of atoms, unmerge them in the order given.
+		ordered = action == 'unmerge'
+		unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
+			valid_atoms, ldpath_mtimes, ordered=ordered,
+			scheduler=sched._sched_iface)
+		rval = os.EX_OK
+	else:
+		rval = action_depclean(settings, trees, ldpath_mtimes,
+			opts, action, valid_atoms, spinner, scheduler=sched._sched_iface)
+
+	return rval
+
+def adjust_configs(myopts, trees):
+	for myroot in trees:
+		mysettings =  trees[myroot]["vartree"].settings
+		mysettings.unlock()
+		adjust_config(myopts, mysettings)
+		mysettings.lock()
+
+def adjust_config(myopts, settings):
+	"""Make emerge specific adjustments to the config."""
+
+	# Kill noauto as it will break merges otherwise.
+	if "noauto" in settings.features:
+		settings.features.remove('noauto')
+
+	fail_clean = myopts.get('--fail-clean')
+	if fail_clean is not None:
+		if fail_clean is True and \
+			'fail-clean' not in settings.features:
+			settings.features.add('fail-clean')
+		elif fail_clean == 'n' and \
+			'fail-clean' in settings.features:
+			settings.features.remove('fail-clean')
+
+	CLEAN_DELAY = 5
+	try:
+		CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
+	except ValueError as e:
+		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+		portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
+			settings["CLEAN_DELAY"], noiselevel=-1)
+	settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
+	settings.backup_changes("CLEAN_DELAY")
+
+	EMERGE_WARNING_DELAY = 10
+	try:
+		EMERGE_WARNING_DELAY = int(settings.get(
+			"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
+	except ValueError as e:
+		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+		portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
+			settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
+	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
+	settings.backup_changes("EMERGE_WARNING_DELAY")
+
+	if "--quiet" in myopts or "--quiet-build" in myopts:
+		settings["PORTAGE_QUIET"]="1"
+		settings.backup_changes("PORTAGE_QUIET")
+
+	if "--verbose" in myopts:
+		settings["PORTAGE_VERBOSE"] = "1"
+		settings.backup_changes("PORTAGE_VERBOSE")
+
+	# Set so that configs will be merged regardless of remembered status
+	if ("--noconfmem" in myopts):
+		settings["NOCONFMEM"]="1"
+		settings.backup_changes("NOCONFMEM")
+
+	# Set various debug markers... They should be merged somehow.
+	PORTAGE_DEBUG = 0
+	try:
+		PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
+		if PORTAGE_DEBUG not in (0, 1):
+			portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
+				PORTAGE_DEBUG, noiselevel=-1)
+			portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
+				noiselevel=-1)
+			PORTAGE_DEBUG = 0
+	except ValueError as e:
+		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+		portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
+			settings["PORTAGE_DEBUG"], noiselevel=-1)
+		del e
+	if "--debug" in myopts:
+		PORTAGE_DEBUG = 1
+	settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
+	settings.backup_changes("PORTAGE_DEBUG")
+
+	if settings.get("NOCOLOR") not in ("yes","true"):
+		portage.output.havecolor = 1
+
+	"""The explicit --color < y | n > option overrides the NOCOLOR environment
+	variable and stdout auto-detection."""
+	if "--color" in myopts:
+		if "y" == myopts["--color"]:
+			portage.output.havecolor = 1
+			settings["NOCOLOR"] = "false"
+		else:
+			portage.output.havecolor = 0
+			settings["NOCOLOR"] = "true"
+		settings.backup_changes("NOCOLOR")
+	elif settings.get('TERM') == 'dumb' or \
+		not sys.stdout.isatty():
+		portage.output.havecolor = 0
+		settings["NOCOLOR"] = "true"
+		settings.backup_changes("NOCOLOR")
+
+def display_missing_pkg_set(root_config, set_name):
+
+	msg = []
+	msg.append(("emerge: There are no sets to satisfy '%s'. " + \
+		"The following sets exist:") % \
+		colorize("INFORM", set_name))
+	msg.append("")
+
+	for s in sorted(root_config.sets):
+		msg.append("    %s" % s)
+	msg.append("")
+
+	writemsg_level("".join("%s\n" % l for l in msg),
+		level=logging.ERROR, noiselevel=-1)
+
+def relative_profile_path(portdir, abs_profile):
+	realpath = os.path.realpath(abs_profile)
+	basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
+	if realpath.startswith(basepath):
+		profilever = realpath[1 + len(basepath):]
+	else:
+		profilever = None
+	return profilever
+
+def getportageversion(portdir, target_root, profile, chost, vardb):
+	profilever = None
+	if profile:
+		profilever = relative_profile_path(portdir, profile)
+		if profilever is None:
+			try:
+				for parent in portage.grabfile(
+					os.path.join(profile, 'parent')):
+					profilever = relative_profile_path(portdir,
+						os.path.join(profile, parent))
+					if profilever is not None:
+						break
+			except portage.exception.PortageException:
+				pass
+
+			if profilever is None:
+				try:
+					profilever = "!" + os.readlink(profile)
+				except (OSError):
+					pass
+
+	if profilever is None:
+		profilever = "unavailable"
+
+	libcver = []
+	libclist = set()
+	for atom in expand_new_virt(vardb, portage.const.LIBC_PACKAGE_ATOM):
+		if not atom.blocker:
+			libclist.update(vardb.match(atom))
+	if libclist:
+		for cpv in sorted(libclist):
+			libc_split = portage.catpkgsplit(cpv)[1:]
+			if libc_split[-1] == "r0":
+				libc_split = libc_split[:-1]
+			libcver.append("-".join(libc_split))
+	else:
+		libcver = ["unavailable"]
+
+	gccver = getgccversion(chost)
+	unameout = platform.release() + " " + platform.machine()
+
+	return "Portage %s (%s, %s, %s, %s)" % \
+		(portage.VERSION, profilever, gccver, ",".join(libcver), unameout)
+
+def git_sync_timestamps(settings, portdir):
+	"""
+	Since git doesn't preserve timestamps, synchronize timestamps between
+	entries and ebuilds/eclasses. Assume the cache has the correct timestamp
+	for a given file as long as the file in the working tree is not modified
+	(relative to HEAD).
+	"""
+	cache_dir = os.path.join(portdir, "metadata", "cache")
+	if not os.path.isdir(cache_dir):
+		return os.EX_OK
+	writemsg_level(">>> Synchronizing timestamps...\n")
+
+	from portage.cache.cache_errors import CacheError
+	try:
+		cache_db = settings.load_best_module("portdbapi.metadbmodule")(
+			portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
+	except CacheError as e:
+		writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
+			level=logging.ERROR, noiselevel=-1)
+		return 1
+
+	ec_dir = os.path.join(portdir, "eclass")
+	try:
+		ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
+			if f.endswith(".eclass"))
+	except OSError as e:
+		writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
+			level=logging.ERROR, noiselevel=-1)
+		return 1
+
+	args = [portage.const.BASH_BINARY, "-c",
+		"cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
+		portage._shell_quote(portdir)]
+	import subprocess
+	proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+	modified_files = set(_unicode_decode(l).rstrip("\n") for l in proc.stdout)
+	rval = proc.wait()
+	if rval != os.EX_OK:
+		return rval
+
+	modified_eclasses = set(ec for ec in ec_names \
+		if os.path.join("eclass", ec + ".eclass") in modified_files)
+
+	updated_ec_mtimes = {}
+
+	for cpv in cache_db:
+		cpv_split = portage.catpkgsplit(cpv)
+		if cpv_split is None:
+			writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
+				level=logging.ERROR, noiselevel=-1)
+			continue
+
+		cat, pn, ver, rev = cpv_split
+		cat, pf = portage.catsplit(cpv)
+		relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
+		if relative_eb_path in modified_files:
+			continue
+
+		try:
+			cache_entry = cache_db[cpv]
+			eb_mtime = cache_entry.get("_mtime_")
+			ec_mtimes = cache_entry.get("_eclasses_")
+		except KeyError:
+			writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
+				level=logging.ERROR, noiselevel=-1)
+			continue
+		except CacheError as e:
+			writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
+				(cpv, e), level=logging.ERROR, noiselevel=-1)
+			continue
+
+		if eb_mtime is None:
+			writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
+				level=logging.ERROR, noiselevel=-1)
+			continue
+
+		try:
+			eb_mtime = long(eb_mtime)
+		except ValueError:
+			writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
+				(cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
+			continue
+
+		if ec_mtimes is None:
+			writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
+				level=logging.ERROR, noiselevel=-1)
+			continue
+
+		if modified_eclasses.intersection(ec_mtimes):
+			continue
+
+		missing_eclasses = set(ec_mtimes).difference(ec_names)
+		if missing_eclasses:
+			writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
+				(cpv, sorted(missing_eclasses)), level=logging.ERROR,
+				noiselevel=-1)
+			continue
+
+		eb_path = os.path.join(portdir, relative_eb_path)
+		try:
+			current_eb_mtime = os.stat(eb_path)[stat.ST_MTIME]
+		except OSError:
+			writemsg_level("!!! Missing ebuild: %s\n" % \
+				(cpv,), level=logging.ERROR, noiselevel=-1)
+			continue
+
+		inconsistent = False
+		for ec, (ec_path, ec_mtime) in ec_mtimes.items():
+			updated_mtime = updated_ec_mtimes.get(ec)
+			if updated_mtime is not None and updated_mtime != ec_mtime:
+				writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
+					(cpv, ec), level=logging.ERROR, noiselevel=-1)
+				inconsistent = True
+				break
+
+		if inconsistent:
+			continue
+
+		if current_eb_mtime != eb_mtime:
+			os.utime(eb_path, (eb_mtime, eb_mtime))
+
+		for ec, (ec_path, ec_mtime) in ec_mtimes.items():
+			if ec in updated_ec_mtimes:
+				continue
+			ec_path = os.path.join(ec_dir, ec + ".eclass")
+			current_mtime = os.stat(ec_path)[stat.ST_MTIME]
+			if current_mtime != ec_mtime:
+				os.utime(ec_path, (ec_mtime, ec_mtime))
+			updated_ec_mtimes[ec] = ec_mtime
+
+	return os.EX_OK
+
+def load_emerge_config(trees=None):
+	kwargs = {}
+	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
+		v = os.environ.get(envvar, None)
+		if v and v.strip():
+			kwargs[k] = v
+	trees = portage.create_trees(trees=trees, **kwargs)
+
+	for root, root_trees in trees.items():
+		settings = root_trees["vartree"].settings
+		settings._init_dirs()
+		setconfig = load_default_config(settings, root_trees)
+		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
+
+	settings = trees["/"]["vartree"].settings
+
+	for myroot in trees:
+		if myroot != "/":
+			settings = trees[myroot]["vartree"].settings
+			break
+
+	mtimedbfile = os.path.join(settings['EROOT'], portage.CACHE_PATH, "mtimedb")
+	mtimedb = portage.MtimeDB(mtimedbfile)
+	portage.output._init(config_root=settings['PORTAGE_CONFIGROOT'])
+	QueryCommand._db = trees
+	return settings, trees, mtimedb
+
+def chk_updated_cfg_files(eroot, config_protect):
+	target_root = eroot
+	result = list(
+		portage.util.find_updated_config_files(target_root, config_protect))
+
+	for x in result:
+		writemsg_level("\n %s " % (colorize("WARN", "* IMPORTANT:"),),
+			level=logging.INFO, noiselevel=-1)
+		if not x[1]: # it's a protected file
+			writemsg_level("config file '%s' needs updating.\n" % x[0],
+				level=logging.INFO, noiselevel=-1)
+		else: # it's a protected dir
+			if len(x[1]) == 1:
+				head, tail = os.path.split(x[1][0])
+				tail = tail[len("._cfg0000_"):]
+				fpath = os.path.join(head, tail)
+				writemsg_level("config file '%s' needs updating.\n" % fpath,
+					level=logging.INFO, noiselevel=-1)
+			else:
+				writemsg_level("%d config files in '%s' need updating.\n" % \
+					(len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
+
+	if result:
+		print(" "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")\
+				+ " section of the " + bold("emerge"))
+		print(" "+yellow("*")+" man page to learn how to update config files.")
+
+def display_news_notification(root_config, myopts):
+	target_root = root_config.settings['EROOT']
+	trees = root_config.trees
+	settings = trees["vartree"].settings
+	portdb = trees["porttree"].dbapi
+	vardb = trees["vartree"].dbapi
+	NEWS_PATH = os.path.join("metadata", "news")
+	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
+	newsReaderDisplay = False
+	update = "--pretend" not in myopts
+	if "news" not in settings.features:
+		return
+
+	for repo in portdb.getRepositories():
+		unreadItems = checkUpdatedNewsItems(
+			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
+		if unreadItems:
+			if not newsReaderDisplay:
+				newsReaderDisplay = True
+				print()
+			print(colorize("WARN", " * IMPORTANT:"), end=' ')
+			print("%s news items need reading for repository '%s'." % (unreadItems, repo))
+
+
+	if newsReaderDisplay:
+		print(colorize("WARN", " *"), end=' ')
+		print("Use " + colorize("GOOD", "eselect news") + " to read news items.")
+		print()
+
+def getgccversion(chost):
+	"""
+	@rtype: C{str}
+	@return: the current in-use gcc version
+	"""
+
+	gcc_ver_command = 'gcc -dumpversion'
+	gcc_ver_prefix = 'gcc-'
+
+	gcc_not_found_error = red(
+	"!!! No gcc found. You probably need to 'source /etc/profile'\n" +
+	"!!! to update the environment of this terminal and possibly\n" +
+	"!!! other terminals also.\n"
+	)
+
+	mystatus, myoutput = subprocess_getstatusoutput("gcc-config -c")
+	if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
+		return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
+
+	mystatus, myoutput = subprocess_getstatusoutput(
+		chost + "-" + gcc_ver_command)
+	if mystatus == os.EX_OK:
+		return gcc_ver_prefix + myoutput
+
+	mystatus, myoutput = subprocess_getstatusoutput(gcc_ver_command)
+	if mystatus == os.EX_OK:
+		return gcc_ver_prefix + myoutput
+
+	portage.writemsg(gcc_not_found_error, noiselevel=-1)
+	return "[unavailable]"
+
+def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
+	update=False):
+	"""
+	Examines news items in repodir + '/' + NEWS_PATH and attempts to find
+	unread items. Returns the number of unread (yet relevant) items.
+
+	@param portdb: a portage tree database
+	@type portdb: portdbapi
+	@param vardb: an installed package database
+	@type vardb: vardbapi
+	@param NEWS_PATH: path of the news directory, relative to the repository root
+	@type NEWS_PATH: String
+	@param UNREAD_PATH: path of the directory that tracks unread news items
+	@type UNREAD_PATH: String
+	@param repo_id: identifier of the repository to check
+	@type repo_id: String
+	@rtype: Integer
+	@returns: the number of unread but relevant news items
+
+	"""
+	from portage.news import NewsManager
+	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
+	return manager.getUnreadItems(repo_id, update=update)
+

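As an aside, the timestamp synchronization above hinges on a single git
plumbing call: ebuilds and eclasses reported as modified by `git diff-index`
keep their working-tree mtimes, while everything else is reset from the
metadata cache. A minimal standalone sketch of that detection step (assumes
Python 2.7+ for subprocess.check_output and a git checkout of the tree; not
part of the patch):

	import subprocess

	def modified_in_worktree(repo_dir):
		# List tracked files that differ from HEAD; --diff-filter=M
		# restricts the output to modified files, mirroring the
		# git_sync_timestamps() call above.
		out = subprocess.check_output(
			["git", "diff-index", "--name-only", "--diff-filter=M", "HEAD"],
			cwd=repo_dir)
		return set(out.decode("utf-8").splitlines())

	# Ebuilds in this set are skipped; all others get their mtime set
	# to the _mtime_ value recorded in metadata/cache.
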
diff --git a/portage_with_autodep/pym/_emerge/clear_caches.py b/portage_with_autodep/pym/_emerge/clear_caches.py
new file mode 100644
index 0000000..7b7c5ec
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/clear_caches.py
@@ -0,0 +1,19 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gc
+from portage.util.listdir import dircache
+
+def clear_caches(trees):
+	for d in trees.values():
+		d["porttree"].dbapi.melt()
+		d["porttree"].dbapi._aux_cache.clear()
+		d["bintree"].dbapi._aux_cache.clear()
+		d["bintree"].dbapi._clear_cache()
+		if d["vartree"].dbapi._linkmap is None:
+			# preserve-libs is entirely disabled
+			pass
+		else:
+			d["vartree"].dbapi._linkmap._clear_cache()
+	dircache.clear()
+	gc.collect()

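clear_caches() drops every memoized database cache so that the next resolver
pass starts cold. A hedged usage sketch (load_emerge_config() is the helper
added earlier in this patch; the driver code itself is hypothetical):

	settings, trees, mtimedb = load_emerge_config()
	clear_caches(trees)  # melt portdb, clear aux/binpkg caches and dircache
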
diff --git a/portage_with_autodep/pym/_emerge/countdown.py b/portage_with_autodep/pym/_emerge/countdown.py
new file mode 100644
index 0000000..5abdc8a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/countdown.py
@@ -0,0 +1,22 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+import time
+
+from portage.output import colorize
+
+def countdown(secs=5, doing="Starting"):
+	if secs:
+		print(">>> Waiting",secs,"seconds before starting...")
+		print(">>> (Control-C to abort)...\n"+doing+" in: ", end=' ')
+		ticks=list(range(secs))
+		ticks.reverse()
+		for sec in ticks:
+			sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
+			sys.stdout.flush()
+			time.sleep(1)
+		print()
+

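countdown() is the interruptible delay shown before destructive operations.
Calling it directly behaves as sketched below (output reconstructed from the
code above):

	from _emerge.countdown import countdown

	# Prints:
	#   >>> Waiting 3 seconds before starting...
	#   >>> (Control-C to abort)...
	#   Unmerging in: 3 2 1
	# with a one-second pause between ticks.
	countdown(secs=3, doing="Unmerging")
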
diff --git a/portage_with_autodep/pym/_emerge/create_depgraph_params.py b/portage_with_autodep/pym/_emerge/create_depgraph_params.py
new file mode 100644
index 0000000..44dceda
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/create_depgraph_params.py
@@ -0,0 +1,72 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+from portage.util import writemsg_level
+
+def create_depgraph_params(myopts, myaction):
+	#configure emerge engine parameters
+	#
+	# self:      include _this_ package regardless of if it is merged.
+	# selective: exclude the package if it is merged
+	# recurse:   go into the dependencies
+	# deep:      go into the dependencies of already merged packages
+	# empty:     pretend nothing is merged
+	# complete:  completely account for all known dependencies
+	# remove:    build graph for use in removing packages
+	# rebuilt_binaries: replace installed packages with rebuilt binaries
+	myparams = {"recurse" : True}
+
+	bdeps = myopts.get("--with-bdeps")
+	if bdeps is not None:
+		myparams["bdeps"] = bdeps
+
+	if myaction == "remove":
+		myparams["remove"] = True
+		myparams["complete"] = True
+		myparams["selective"] = True
+		return myparams
+
+	if "--update" in myopts or \
+		"--newuse" in myopts or \
+		"--reinstall" in myopts or \
+		"--noreplace" in myopts or \
+		myopts.get("--selective", "n") != "n":
+		myparams["selective"] = True
+
+	deep = myopts.get("--deep")
+	if deep is not None and deep != 0:
+		myparams["deep"] = deep
+	if ("--complete-graph" in myopts or "--rebuild-if-new-rev" in myopts or
+		"--rebuild-if-new-ver" in myopts or "--rebuild-if-unbuilt" in myopts):
+		myparams["complete"] = True
+	if "--emptytree" in myopts:
+		myparams["empty"] = True
+		myparams["deep"] = True
+		myparams.pop("selective", None)
+
+	if "--nodeps" in myopts:
+		myparams.pop("recurse", None)
+		myparams.pop("deep", None)
+		myparams.pop("complete", None)
+
+	rebuilt_binaries = myopts.get('--rebuilt-binaries')
+	if rebuilt_binaries is True or \
+		rebuilt_binaries != 'n' and \
+		'--usepkgonly' in myopts and \
+		myopts.get('--deep') is True and \
+		'--update' in myopts:
+		myparams['rebuilt_binaries'] = True
+
+	if myopts.get("--selective") == "n":
+		# --selective=n can be used to remove selective
+		# behavior that may have been implied by some
+		# other option like --update.
+		myparams.pop("selective", None)
+
+	if '--debug' in myopts:
+		writemsg_level('\n\nmyparams %s\n\n' % myparams,
+			noiselevel=-1, level=logging.DEBUG)
+
+	return myparams
+

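The parameter dictionary built by create_depgraph_params() is easiest to
follow with concrete inputs; this sketch traces two invocations through the
logic above (the option values are illustrative):

	from _emerge.create_depgraph_params import create_depgraph_params

	# An update run: --update implies selective, --deep passes through.
	params = create_depgraph_params({"--update": True, "--deep": 1}, None)
	# params == {"recurse": True, "selective": True, "deep": 1}

	# A removal run short-circuits the remaining option handling.
	params = create_depgraph_params({}, "remove")
	# params == {"recurse": True, "remove": True,
	#            "complete": True, "selective": True}
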
diff --git a/portage_with_autodep/pym/_emerge/create_world_atom.py b/portage_with_autodep/pym/_emerge/create_world_atom.py
new file mode 100644
index 0000000..fa7cffc
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/create_world_atom.py
@@ -0,0 +1,92 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.dep import _repo_separator
+
+def create_world_atom(pkg, args_set, root_config):
+	"""Create a new atom for the world file if one does not exist.  If the
+	argument atom is precise enough to identify a specific slot then a slot
+	atom will be returned. Atoms that are in the system set may also be stored
+	in world since system atoms can only match one slot while world atoms can
+	be greedy with respect to slots.  Unslotted system packages will not be
+	stored in world."""
+
+	arg_atom = args_set.findAtomForPackage(pkg)
+	if not arg_atom:
+		return None
+	cp = arg_atom.cp
+	new_world_atom = cp
+	if arg_atom.repo:
+		new_world_atom += _repo_separator + arg_atom.repo
+	sets = root_config.sets
+	portdb = root_config.trees["porttree"].dbapi
+	vardb = root_config.trees["vartree"].dbapi
+	available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
+		for cpv in portdb.match(cp))
+	slotted = len(available_slots) > 1 or \
+		(len(available_slots) == 1 and "0" not in available_slots)
+	if not slotted:
+		# check the vdb in case this is multislot
+		available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
+			for cpv in vardb.match(cp))
+		slotted = len(available_slots) > 1 or \
+			(len(available_slots) == 1 and "0" not in available_slots)
+	if slotted and arg_atom.without_repo != cp:
+		# If the user gave a specific atom, store it as a
+		# slot atom in the world file.
+		slot_atom = pkg.slot_atom
+
+		# For USE=multislot, there are a couple of cases to
+		# handle here:
+		#
+		# 1) SLOT="0", but the real SLOT spontaneously changed to some
+		#    unknown value, so just record an unslotted atom.
+		#
+		# 2) SLOT comes from an installed package and there is no
+		#    matching SLOT in the portage tree.
+		#
+		# Make sure that the slot atom is available in either the
+		# portdb or the vardb, since otherwise the user certainly
+		# doesn't want the SLOT atom recorded in the world file
+		# (case 1 above).  If it's only available in the vardb,
+		# the user may be trying to prevent a USE=multislot
+		# package from being removed by --depclean (case 2 above).
+
+		mydb = portdb
+		if not portdb.match(slot_atom):
+			# SLOT seems to come from an installed multislot package
+			mydb = vardb
+		# If there is no installed package matching the SLOT atom,
+		# it probably changed SLOT spontaneously due to USE=multislot,
+		# so just record an unslotted atom.
+		if vardb.match(slot_atom):
+			# Now verify that the argument is precise
+			# enough to identify a specific slot.
+			matches = mydb.match(arg_atom)
+			matched_slots = set()
+			for cpv in matches:
+				matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
+			if len(matched_slots) == 1:
+				new_world_atom = slot_atom
+				if arg_atom.repo:
+					new_world_atom += _repo_separator + arg_atom.repo
+
+	if new_world_atom == sets["selected"].findAtomForPackage(pkg):
+		# Both atoms would be identical, so there's nothing to add.
+		return None
+	if not slotted and not arg_atom.repo:
+		# Unlike world atoms, system atoms are not greedy for slots, so they
+		# can't be safely excluded from world if they are slotted.
+		system_atom = sets["system"].findAtomForPackage(pkg)
+		if system_atom:
+			if not system_atom.cp.startswith("virtual/"):
+				return None
+			# System virtuals aren't safe to exclude from world since they can
+			# match multiple old-style virtuals but only one of them will be
+			# pulled in by update or depclean.
+			providers = portdb.settings.getvirtuals().get(system_atom.cp)
+			if providers and len(providers) == 1 and \
+				providers[0].cp == arg_atom.cp:
+				return None
+	return new_world_atom
+

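To make the slot handling in create_world_atom() concrete: a slot atom is
recorded only when the argument atom unambiguously pins one slot. A
hypothetical illustration (package names and slots are made up):

	# Suppose dev-lang/python is visible in slots "2.7" and "3.2":
	#
	#   emerge dev-lang/python         -> world records "dev-lang/python"
	#                                     (the atom matches both slots)
	#   emerge =dev-lang/python-2.7.2  -> world records "dev-lang/python:2.7"
	#                                     (the atom matches exactly one slot)
	#
	# Unslotted system-set packages are not stored in world at all.
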
diff --git a/portage_with_autodep/pym/_emerge/depgraph.py b/portage_with_autodep/pym/_emerge/depgraph.py
new file mode 100644
index 0000000..5b48aca
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/depgraph.py
@@ -0,0 +1,7029 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import difflib
+import errno
+import io
+import logging
+import stat
+import sys
+import textwrap
+from collections import deque
+from itertools import chain
+
+import portage
+from portage import os, OrderedDict
+from portage import _unicode_decode, _unicode_encode, _encodings
+from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
+from portage.dbapi import dbapi
+from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use, _repo_separator
+from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
+from portage.exception import InvalidAtom, InvalidDependString, PortageException
+from portage.output import colorize, create_color_func, \
+	darkgreen, green
+bad = create_color_func("BAD")
+from portage.package.ebuild.getmaskingstatus import \
+	_getmaskingstatus, _MaskReason
+from portage._sets import SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import ConfigProtect, shlex_split, new_protect_filename
+from portage.util import cmp_sort_key, writemsg, writemsg_stdout
+from portage.util import ensure_dirs
+from portage.util import writemsg_level, write_atomic
+from portage.util.digraph import digraph
+from portage.util.listdir import _ignorecvs_dirs
+from portage.versions import catpkgsplit
+
+from _emerge.AtomArg import AtomArg
+from _emerge.Blocker import Blocker
+from _emerge.BlockerCache import BlockerCache
+from _emerge.BlockerDepPriority import BlockerDepPriority
+from _emerge.countdown import countdown
+from _emerge.create_world_atom import create_world_atom
+from _emerge.Dependency import Dependency
+from _emerge.DependencyArg import DependencyArg
+from _emerge.DepPriority import DepPriority
+from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.FakeVartree import FakeVartree
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
+from _emerge.is_valid_package_atom import insert_category_into_atom, \
+	is_valid_package_atom
+from _emerge.Package import Package
+from _emerge.PackageArg import PackageArg
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+from _emerge.RootConfig import RootConfig
+from _emerge.search import search
+from _emerge.SetArg import SetArg
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.UseFlagDisplay import pkg_use_display
+from _emerge.userquery import userquery
+
+from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
+from _emerge.resolver.slot_collision import slot_conflict_handler
+from _emerge.resolver.circular_dependency import circular_dependency_handler
+from _emerge.resolver.output import Display
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+	long = int
+
+class _scheduler_graph_config(object):
+	def __init__(self, trees, pkg_cache, graph, mergelist):
+		self.trees = trees
+		self.pkg_cache = pkg_cache
+		self.graph = graph
+		self.mergelist = mergelist
+
+def _wildcard_set(atoms):
+	pkgs = InternalPackageSet(allow_wildcard=True)
+	for x in atoms:
+		try:
+			x = Atom(x, allow_wildcard=True)
+		except portage.exception.InvalidAtom:
+			x = Atom("*/" + x, allow_wildcard=True)
+		pkgs.add(x)
+	return pkgs
+
+class _frozen_depgraph_config(object):
+
+	def __init__(self, settings, trees, myopts, spinner):
+		self.settings = settings
+		self.target_root = settings["ROOT"]
+		self.myopts = myopts
+		self.edebug = 0
+		if settings.get("PORTAGE_DEBUG", "") == "1":
+			self.edebug = 1
+		self.spinner = spinner
+		self._running_root = trees["/"]["root_config"]
+		self._opts_no_restart = frozenset(["--buildpkgonly",
+			"--fetchonly", "--fetch-all-uri", "--pretend"])
+		self.pkgsettings = {}
+		self.trees = {}
+		self._trees_orig = trees
+		self.roots = {}
+		# All Package instances
+		self._pkg_cache = {}
+		self._highest_license_masked = {}
+		for myroot in trees:
+			self.trees[myroot] = {}
+			# Create a RootConfig instance that references
+			# the FakeVartree instead of the real one.
+			self.roots[myroot] = RootConfig(
+				trees[myroot]["vartree"].settings,
+				self.trees[myroot],
+				trees[myroot]["root_config"].setconfig)
+			for tree in ("porttree", "bintree"):
+				self.trees[myroot][tree] = trees[myroot][tree]
+			self.trees[myroot]["vartree"] = \
+				FakeVartree(trees[myroot]["root_config"],
+					pkg_cache=self._pkg_cache,
+					pkg_root_config=self.roots[myroot])
+			self.pkgsettings[myroot] = portage.config(
+				clone=self.trees[myroot]["vartree"].settings)
+
+		self._required_set_names = set(["world"])
+
+		atoms = ' '.join(myopts.get("--exclude", [])).split()
+		self.excluded_pkgs = _wildcard_set(atoms)
+		atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
+		self.reinstall_atoms = _wildcard_set(atoms)
+		atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
+		self.usepkg_exclude = _wildcard_set(atoms)
+		atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
+		self.useoldpkg_atoms = _wildcard_set(atoms)
+		atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
+		self.rebuild_exclude = _wildcard_set(atoms)
+		atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
+		self.rebuild_ignore = _wildcard_set(atoms)
+
+		self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
+		self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
+		self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
+
+class _depgraph_sets(object):
+	def __init__(self):
+		# contains all sets added to the graph
+		self.sets = {}
+		# contains non-set atoms given as arguments
+		self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
+		# contains all atoms from all sets added to the graph, including
+		# atoms given as arguments
+		self.atoms = InternalPackageSet(allow_repo=True)
+		self.atom_arg_map = {}
+
+class _rebuild_config(object):
+	def __init__(self, frozen_config, backtrack_parameters):
+		self._graph = digraph()
+		self._frozen_config = frozen_config
+		self.rebuild_list = backtrack_parameters.rebuild_list.copy()
+		self.orig_rebuild_list = self.rebuild_list.copy()
+		self.reinstall_list = backtrack_parameters.reinstall_list.copy()
+		self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
+		self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
+		self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
+		self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
+			self.rebuild_if_unbuilt)
+
+	def add(self, dep_pkg, dep):
+		parent = dep.collapsed_parent
+		priority = dep.collapsed_priority
+		rebuild_exclude = self._frozen_config.rebuild_exclude
+		rebuild_ignore = self._frozen_config.rebuild_ignore
+		if (self.rebuild and isinstance(parent, Package) and
+			parent.built and (priority.buildtime or priority.runtime) and
+			isinstance(dep_pkg, Package) and
+			not rebuild_exclude.findAtomForPackage(parent) and
+			not rebuild_ignore.findAtomForPackage(dep_pkg)):
+			self._graph.add(dep_pkg, parent, priority)
+
+	def _needs_rebuild(self, dep_pkg):
+		"""Check whether packages that depend on dep_pkg need to be rebuilt."""
+		dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
+		if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
+			return False
+
+		if self.rebuild_if_unbuilt:
+			# dep_pkg is being installed from source, so binary
+			# packages for parents are invalid. Force rebuild
+			return True
+
+		trees = self._frozen_config.trees
+		vardb = trees[dep_pkg.root]["vartree"].dbapi
+		if self.rebuild_if_new_rev:
+			# Parent packages are valid if a package with the same
+			# cpv is already installed.
+			return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
+
+		# Otherwise, parent packages are valid if a package with the same
+		# version (excluding revision) is already installed.
+		assert self.rebuild_if_new_ver
+		cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
+		for inst_cpv in vardb.match(dep_pkg.slot_atom):
+			inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
+			if inst_cpv_norev == cpv_norev:
+				return False
+
+		return True
+
+	def _trigger_rebuild(self, parent, build_deps, runtime_deps):
+		root_slot = (parent.root, parent.slot_atom)
+		if root_slot in self.rebuild_list:
+			return False
+		trees = self._frozen_config.trees
+		children = set(build_deps).intersection(runtime_deps)
+		reinstall = False
+		for slot_atom in children:
+			kids = set([build_deps[slot_atom], runtime_deps[slot_atom]])
+			for dep_pkg in kids:
+				dep_root_slot = (dep_pkg.root, slot_atom)
+				if self._needs_rebuild(dep_pkg):
+					self.rebuild_list.add(root_slot)
+					return True
+				elif ("--usepkg" in self._frozen_config.myopts and
+					(dep_root_slot in self.reinstall_list or
+					dep_root_slot in self.rebuild_list or
+					not dep_pkg.installed)):
+
+					# A direct rebuild dependency is being installed. We
+					# should update the parent as well to the latest binary,
+					# if that binary is valid.
+					#
+					# To validate the binary, we check whether all of the
+					# rebuild dependencies are present on the same binhost.
+					#
+					# 1) If parent is present on the binhost, but one of its
+					#    rebuild dependencies is not, then the parent should
+					#    be rebuilt from source.
+					# 2) Otherwise, the parent binary is assumed to be valid,
+					#    because all of its rebuild dependencies are
+					#    consistent.
+					bintree = trees[parent.root]["bintree"]
+					uri = bintree.get_pkgindex_uri(parent.cpv)
+					dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
+					bindb = bintree.dbapi
+					if self.rebuild_if_new_ver and uri and uri != dep_uri:
+						cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
+						for cpv in bindb.match(dep_pkg.slot_atom):
+							if cpv_norev == catpkgsplit(cpv)[:-1]:
+								dep_uri = bintree.get_pkgindex_uri(cpv)
+								if uri == dep_uri:
+									break
+					if uri and uri != dep_uri:
+						# 1) Remote binary package is invalid because it was
+						#    built without dep_pkg. Force rebuild.
+						self.rebuild_list.add(root_slot)
+						return True
+					elif (parent.installed and
+						root_slot not in self.reinstall_list):
+						inst_build_time = parent.metadata.get("BUILD_TIME")
+						try:
+							bin_build_time, = bindb.aux_get(parent.cpv,
+								["BUILD_TIME"])
+						except KeyError:
+							continue
+						if bin_build_time != inst_build_time:
+							# 2) Remote binary package is valid, and local package
+							#    is not up to date. Force reinstall.
+							reinstall = True
+		if reinstall:
+			self.reinstall_list.add(root_slot)
+		return reinstall
+
+	def trigger_rebuilds(self):
+		"""
+		Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
+		depends on pkgA at both build-time and run-time, pkgB needs to be
+		rebuilt.
+		"""
+		need_restart = False
+		graph = self._graph
+		build_deps = {}
+		runtime_deps = {}
+		leaf_nodes = deque(graph.leaf_nodes())
+
+		def ignore_non_runtime(priority):
+			return not priority.runtime
+
+		def ignore_non_buildtime(priority):
+			return not priority.buildtime
+
+		# Trigger rebuilds bottom-up (starting with the leaves) so that parents
+		# will always know which children are being rebuilt.
+		while graph:
+			if not leaf_nodes:
+				# We're interested in intersection of buildtime and runtime,
+				# so ignore edges that do not contain both.
+				leaf_nodes.extend(graph.leaf_nodes(
+					ignore_priority=ignore_non_runtime))
+				if not leaf_nodes:
+					leaf_nodes.extend(graph.leaf_nodes(
+						ignore_priority=ignore_non_buildtime))
+					if not leaf_nodes:
+						# We'll have to drop an edge that is both
+						# buildtime and runtime. This should be
+						# quite rare.
+						leaf_nodes.append(graph.order[-1])
+
+			node = leaf_nodes.popleft()
+			if node not in graph:
+				# This can be triggered by circular dependencies.
+				continue
+			slot_atom = node.slot_atom
+
+			# Remove our leaf node from the graph, keeping track of deps.
+			parents = graph.nodes[node][1].items()
+			graph.remove(node)
+			node_build_deps = build_deps.get(node, {})
+			node_runtime_deps = runtime_deps.get(node, {})
+			for parent, priorities in parents:
+				if parent == node:
+					# Ignore a direct cycle.
+					continue
+				parent_bdeps = build_deps.setdefault(parent, {})
+				parent_rdeps = runtime_deps.setdefault(parent, {})
+				for priority in priorities:
+					if priority.buildtime:
+						parent_bdeps[slot_atom] = node
+					if priority.runtime:
+						parent_rdeps[slot_atom] = node
+				if slot_atom in parent_bdeps and slot_atom in parent_rdeps:
+					parent_rdeps.update(node_runtime_deps)
+				if not graph.child_nodes(parent):
+					leaf_nodes.append(parent)
+
+			# Trigger rebuilds for our leaf node. Because all of our children
+			# have been processed, build_deps and runtime_deps will be
+			# completely filled in, and self.rebuild_list / self.reinstall_list
+			# will tell us whether any of our children need to be rebuilt or
+			# reinstalled.
+			if self._trigger_rebuild(node, node_build_deps, node_runtime_deps):
+				need_restart = True
+
+		return need_restart
+
+
+class _dynamic_depgraph_config(object):
+
+	def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
+		self.myparams = myparams.copy()
+		self._vdb_loaded = False
+		self._allow_backtracking = allow_backtracking
+		# Maps slot atom to package for each Package added to the graph.
+		self._slot_pkg_map = {}
+		# Maps nodes to the reasons they were selected for reinstallation.
+		self._reinstall_nodes = {}
+		self.mydbapi = {}
+		# Contains a filtered view of preferred packages that are selected
+		# from available repositories.
+		self._filtered_trees = {}
+		# Contains installed packages and new packages that have been added
+		# to the graph.
+		self._graph_trees = {}
+		# Caches visible packages returned from _select_package, for use in
+		# depgraph._iter_atoms_for_pkg() SLOT logic.
+		self._visible_pkgs = {}
+		#contains the args created by select_files
+		self._initial_arg_list = []
+		self.digraph = portage.digraph()
+		# manages sets added to the graph
+		self.sets = {}
+		# contains all nodes pulled in by self.sets
+		self._set_nodes = set()
+		# Contains only Blocker -> Uninstall edges
+		self._blocker_uninstalls = digraph()
+		# Contains only Package -> Blocker edges
+		self._blocker_parents = digraph()
+		# Contains only irrelevant Package -> Blocker edges
+		self._irrelevant_blockers = digraph()
+		# Contains only unsolvable Package -> Blocker edges
+		self._unsolvable_blockers = digraph()
+		# Contains all Blocker -> Blocked Package edges
+		self._blocked_pkgs = digraph()
+		# Contains world packages that have been protected from
+		# uninstallation but may not have been added to the graph
+		# if the graph is not complete yet.
+		self._blocked_world_pkgs = {}
+		# Contains packages whose dependencies have been traversed.
+		# This is used to check whether we have accounted for blockers
+		# relevant to a package.
+		self._traversed_pkg_deps = set()
+		self._slot_collision_info = {}
+		# Slot collision nodes are not allowed to block other packages since
+		# blocker validation is only able to account for one package per slot.
+		self._slot_collision_nodes = set()
+		self._parent_atoms = {}
+		self._slot_conflict_parent_atoms = set()
+		self._slot_conflict_handler = None
+		self._circular_dependency_handler = None
+		self._serialized_tasks_cache = None
+		self._scheduler_graph = None
+		self._displayed_list = None
+		self._pprovided_args = []
+		self._missing_args = []
+		self._masked_installed = set()
+		self._masked_license_updates = set()
+		self._unsatisfied_deps_for_display = []
+		self._unsatisfied_blockers_for_display = None
+		self._circular_deps_for_display = None
+		self._dep_stack = []
+		self._dep_disjunctive_stack = []
+		self._unsatisfied_deps = []
+		self._initially_unsatisfied_deps = []
+		self._ignored_deps = []
+		self._highest_pkg_cache = {}
+
+		self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
+		self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
+		self._needed_license_changes = backtrack_parameters.needed_license_changes
+		self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
+		self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
+		self._need_restart = False
+		# For conditions that always require user intervention, such as
+		# unsatisfied REQUIRED_USE (currently has no autounmask support).
+		self._skip_restart = False
+		self._backtrack_infos = {}
+
+		self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
+		self._success_without_autounmask = False
+		self._traverse_ignored_deps = False
+
+		for myroot in depgraph._frozen_config.trees:
+			self.sets[myroot] = _depgraph_sets()
+			self._slot_pkg_map[myroot] = {}
+			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
+			# This dbapi instance will model the state that the vdb will
+			# have after new packages have been installed.
+			fakedb = PackageVirtualDbapi(vardb.settings)
+
+			self.mydbapi[myroot] = fakedb
+			def graph_tree():
+				pass
+			graph_tree.dbapi = fakedb
+			self._graph_trees[myroot] = {}
+			self._filtered_trees[myroot] = {}
+			# Substitute the graph tree for the vartree in dep_check() since we
+			# want atom selections to be consistent with package selections
+			# that have already been made.
+			self._graph_trees[myroot]["porttree"]   = graph_tree
+			self._graph_trees[myroot]["vartree"]    = graph_tree
+			self._graph_trees[myroot]["graph_db"]   = graph_tree.dbapi
+			self._graph_trees[myroot]["graph"]      = self.digraph
+			def filtered_tree():
+				pass
+			filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
+			self._filtered_trees[myroot]["porttree"] = filtered_tree
+			self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
+
+			# Passing in graph_tree as the vartree here could lead to better
+			# atom selections in some cases by causing atoms for packages that
+			# have been added to the graph to be preferred over other choices.
+			# However, it can trigger atom selections that result in
+			# unresolvable direct circular dependencies. For example, this
+			# happens with gwydion-dylan which depends on either itself or
+			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
+			# gwydion-dylan-bin needs to be selected in order to avoid
+			# an unresolvable direct circular dependency.
+			#
+			# To solve the problem described above, pass in "graph_db" so that
+			# packages that have been added to the graph are distinguishable
+			# from other available packages and installed packages. Also, pass
+			# the parent package into self._select_atoms() calls so that
+			# unresolvable direct circular dependencies can be detected and
+			# avoided when possible.
+			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
+			self._filtered_trees[myroot]["graph"]    = self.digraph
+			self._filtered_trees[myroot]["vartree"] = \
+				depgraph._frozen_config.trees[myroot]["vartree"]
+
+			dbs = []
+			#               (db, pkg_type, built, installed, db_keys)
+			if "remove" in self.myparams:
+				# For removal operations, use _dep_check_composite_db
+				# for availability and visibility checks. This provides
+				# consistency with install operations, so we don't
+				# get install/uninstall cycles like in bug #332719.
+				self._graph_trees[myroot]["porttree"] = filtered_tree
+			else:
+				if "--usepkgonly" not in depgraph._frozen_config.myopts:
+					portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
+					db_keys = list(portdb._aux_cache_keys)
+					dbs.append((portdb, "ebuild", False, False, db_keys))
+
+				if "--usepkg" in depgraph._frozen_config.myopts:
+					bindb  = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
+					db_keys = list(bindb._aux_cache_keys)
+					dbs.append((bindb,  "binary", True, False, db_keys))
+
+			vardb  = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
+			db_keys = list(depgraph._frozen_config._trees_orig[myroot
+				]["vartree"].dbapi._aux_cache_keys)
+			dbs.append((vardb, "installed", True, True, db_keys))
+			self._filtered_trees[myroot]["dbs"] = dbs
+
+class depgraph(object):
+
+	pkg_tree_map = RootConfig.pkg_tree_map
+
+	_dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
+	
+	def __init__(self, settings, trees, myopts, myparams, spinner,
+		frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
+		if frozen_config is None:
+			frozen_config = _frozen_depgraph_config(settings, trees,
+			myopts, spinner)
+		self._frozen_config = frozen_config
+		self._dynamic_config = _dynamic_depgraph_config(self, myparams,
+			allow_backtracking, backtrack_parameters)
+		self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
+
+		self._select_atoms = self._select_atoms_highest_available
+		self._select_package = self._select_pkg_highest_available
+
+	def _load_vdb(self):
+		"""
+		Load installed package metadata if appropriate. This used to be called
+		from the constructor, but that wasn't very nice since this procedure
+		is slow and it generates spinner output. So, now it's called on-demand
+		by various methods when necessary.
+		"""
+
+		if self._dynamic_config._vdb_loaded:
+			return
+
+		for myroot in self._frozen_config.trees:
+
+			preload_installed_pkgs = \
+				"--nodeps" not in self._frozen_config.myopts
+
+			fake_vartree = self._frozen_config.trees[myroot]["vartree"]
+			if not fake_vartree.dbapi:
+				# This needs to be called for the first depgraph, but not for
+				# backtracking depgraphs that share the same frozen_config.
+				fake_vartree.sync()
+
+				# FakeVartree.sync() populates virtuals, and we want
+				# self.pkgsettings to have them populated too.
+				self._frozen_config.pkgsettings[myroot] = \
+					portage.config(clone=fake_vartree.settings)
+
+			if preload_installed_pkgs:
+				vardb = fake_vartree.dbapi
+				fakedb = self._dynamic_config._graph_trees[
+					myroot]["vartree"].dbapi
+
+				for pkg in vardb:
+					self._spinner_update()
+					# This triggers metadata updates via FakeVartree.
+					vardb.aux_get(pkg.cpv, [])
+					fakedb.cpv_inject(pkg)
+
+		self._dynamic_config._vdb_loaded = True
+
+	def _spinner_update(self):
+		if self._frozen_config.spinner:
+			self._frozen_config.spinner.update()
+
+	def _show_missed_update(self):
+
+		# In order to minimize noise, show only the highest
+		# missed update from each SLOT.
+		missed_updates = {}
+		for pkg, mask_reasons in \
+			self._dynamic_config._runtime_pkg_mask.items():
+			if pkg.installed:
+				# Exclude installed here since we only
+				# want to show available updates.
+				continue
+			k = (pkg.root, pkg.slot_atom)
+			if k in missed_updates:
+				other_pkg, mask_type, parent_atoms = missed_updates[k]
+				if other_pkg > pkg:
+					continue
+			for mask_type, parent_atoms in mask_reasons.items():
+				if not parent_atoms:
+					continue
+				missed_updates[k] = (pkg, mask_type, parent_atoms)
+				break
+
+		if not missed_updates:
+			return
+
+		missed_update_types = {}
+		for pkg, mask_type, parent_atoms in missed_updates.values():
+			missed_update_types.setdefault(mask_type,
+				[]).append((pkg, parent_atoms))
+
+		if '--quiet' in self._frozen_config.myopts and \
+			'--debug' not in self._frozen_config.myopts:
+			missed_update_types.pop("slot conflict", None)
+			missed_update_types.pop("missing dependency", None)
+
+		self._show_missed_update_slot_conflicts(
+			missed_update_types.get("slot conflict"))
+
+		self._show_missed_update_unsatisfied_dep(
+			missed_update_types.get("missing dependency"))
+
+	def _show_missed_update_unsatisfied_dep(self, missed_updates):
+
+		if not missed_updates:
+			return
+
+		backtrack_masked = []
+
+		for pkg, parent_atoms in missed_updates:
+
+			try:
+				for parent, root, atom in parent_atoms:
+					self._show_unsatisfied_dep(root, atom, myparent=parent,
+						check_backtrack=True)
+			except self._backtrack_mask:
+				# This is displayed below in abbreviated form.
+				backtrack_masked.append((pkg, parent_atoms))
+				continue
+
+			writemsg("\n!!! The following update has been skipped " + \
+				"due to unsatisfied dependencies:\n\n", noiselevel=-1)
+
+			writemsg(str(pkg.slot_atom), noiselevel=-1)
+			if pkg.root != '/':
+				writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+			writemsg("\n", noiselevel=-1)
+
+			for parent, root, atom in parent_atoms:
+				self._show_unsatisfied_dep(root, atom, myparent=parent)
+				writemsg("\n", noiselevel=-1)
+
+		if backtrack_masked:
+			# These are shown in abbreviated form, in order to avoid terminal
+			# flooding from mask messages as reported in bug #285832.
+			writemsg("\n!!! The following update(s) have been skipped " + \
+				"due to unsatisfied dependencies\n" + \
+				"!!! triggered by backtracking:\n\n", noiselevel=-1)
+			for pkg, parent_atoms in backtrack_masked:
+				writemsg(str(pkg.slot_atom), noiselevel=-1)
+				if pkg.root != '/':
+					writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+				writemsg("\n", noiselevel=-1)
+
+	def _show_missed_update_slot_conflicts(self, missed_updates):
+
+		if not missed_updates:
+			return
+
+		msg = []
+		msg.append("\nWARNING: One or more updates have been " + \
+			"skipped due to a dependency conflict:\n\n")
+
+		indent = "  "
+		for pkg, parent_atoms in missed_updates:
+			msg.append(str(pkg.slot_atom))
+			if pkg.root != '/':
+				msg.append(" for %s" % (pkg.root,))
+			msg.append("\n\n")
+
+			for parent, atom in parent_atoms:
+				msg.append(indent)
+				msg.append(str(pkg))
+
+				msg.append(" conflicts with\n")
+				msg.append(2*indent)
+				if isinstance(parent,
+					(PackageArg, AtomArg)):
+					# For PackageArg and AtomArg types, it's
+					# redundant to display the atom attribute.
+					msg.append(str(parent))
+				else:
+					# Display the specific atom from SetArg or
+					# Package types.
+					msg.append("%s required by %s" % (atom, parent))
+				msg.append("\n")
+			msg.append("\n")
+
+		writemsg("".join(msg), noiselevel=-1)
+
+	def _show_slot_collision_notice(self):
+		"""Show an informational message advising the user to mask one of the
+		the packages. In some cases it may be possible to resolve this
+		automatically, but support for backtracking (removal nodes that have
+		already been selected) will be required in order to handle all possible
+		cases.
+		"""
+
+		if not self._dynamic_config._slot_collision_info:
+			return
+
+		self._show_merge_list()
+
+		self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
+		handler = self._dynamic_config._slot_conflict_handler
+
+		conflict = handler.get_conflict()
+		writemsg(conflict, noiselevel=-1)
+		
+		explanation = handler.get_explanation()
+		if explanation:
+			writemsg(explanation, noiselevel=-1)
+			return
+
+		if "--quiet" in self._frozen_config.myopts:
+			return
+
+		msg = []
+		msg.append("It may be possible to solve this problem ")
+		msg.append("by using package.mask to prevent one of ")
+		msg.append("those packages from being selected. ")
+		msg.append("However, it is also possible that conflicting ")
+		msg.append("dependencies exist such that they are impossible to ")
+		msg.append("satisfy simultaneously.  If such a conflict exists in ")
+		msg.append("the dependencies of two different packages, then those ")
+		msg.append("packages can not be installed simultaneously.")
+		backtrack_opt = self._frozen_config.myopts.get('--backtrack')
+		if not self._dynamic_config._allow_backtracking and \
+			(backtrack_opt is None or \
+			(backtrack_opt > 0 and backtrack_opt < 30)):
+			msg.append(" You may want to try a larger value of the ")
+			msg.append("--backtrack option, such as --backtrack=30, ")
+			msg.append("in order to see if that will solve this conflict ")
+			msg.append("automatically.")
+
+		for line in textwrap.wrap(''.join(msg), 70):
+			writemsg(line + '\n', noiselevel=-1)
+		writemsg('\n', noiselevel=-1)
+
+		msg = []
+		msg.append("For more information, see MASKED PACKAGES ")
+		msg.append("section in the emerge man page or refer ")
+		msg.append("to the Gentoo Handbook.")
+		for line in textwrap.wrap(''.join(msg), 70):
+			writemsg(line + '\n', noiselevel=-1)
+		writemsg('\n', noiselevel=-1)
+
+	def _process_slot_conflicts(self):
+		"""
+		Process slot conflict data to identify specific atoms which
+		lead to conflict. These atoms only match a subset of the
+		packages that have been pulled into a given slot.
+		"""
+		for (slot_atom, root), slot_nodes \
+			in self._dynamic_config._slot_collision_info.items():
+
+			all_parent_atoms = set()
+			for pkg in slot_nodes:
+				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+				if not parent_atoms:
+					continue
+				all_parent_atoms.update(parent_atoms)
+
+			for pkg in slot_nodes:
+				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+				if parent_atoms is None:
+					parent_atoms = set()
+					self._dynamic_config._parent_atoms[pkg] = parent_atoms
+				for parent_atom in all_parent_atoms:
+					if parent_atom in parent_atoms:
+						continue
+					# Use package set for matching since it will match via
+					# PROVIDE when necessary, while match_from_list does not.
+					parent, atom = parent_atom
+					atom_set = InternalPackageSet(
+						initial_atoms=(atom,), allow_repo=True)
+					if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
+						parent_atoms.add(parent_atom)
+					else:
+						self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
+
+	def _reinstall_for_flags(self, forced_flags,
+		orig_use, orig_iuse, cur_use, cur_iuse):
+		"""Return a set of flags that trigger reinstallation, or None if there
+		are no such flags."""
+		if "--newuse" in self._frozen_config.myopts or \
+			"--binpkg-respect-use" in self._frozen_config.myopts:
+			flags = set(orig_iuse.symmetric_difference(
+				cur_iuse).difference(forced_flags))
+			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
+				cur_iuse.intersection(cur_use)))
+			if flags:
+				return flags
+		elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
+			flags = orig_iuse.intersection(orig_use).symmetric_difference(
+				cur_iuse.intersection(cur_use))
+			if flags:
+				return flags
+		return None
+
+	def _create_graph(self, allow_unsatisfied=False):
+		dep_stack = self._dynamic_config._dep_stack
+		dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
+		while dep_stack or dep_disjunctive_stack:
+			self._spinner_update()
+			while dep_stack:
+				dep = dep_stack.pop()
+				if isinstance(dep, Package):
+					if not self._add_pkg_deps(dep,
+						allow_unsatisfied=allow_unsatisfied):
+						return 0
+					continue
+				if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
+					return 0
+			if dep_disjunctive_stack:
+				if not self._pop_disjunction(allow_unsatisfied):
+					return 0
+		return 1
+
+	def _expand_set_args(self, input_args, add_to_digraph=False):
+		"""
+		Iterate over a list of DependencyArg instances and yield all
+		instances given in the input together with additional SetArg
+		instances that are generated from nested sets.
+		@param input_args: An iterable of DependencyArg instances
+		@type input_args: Iterable
+		@param add_to_digraph: If True then add SetArg instances
+			to the digraph, in order to record parent -> child
+			relationships from nested sets
+		@type add_to_digraph: Boolean
+		@rtype: Iterable
+		@returns: All args given in the input together with additional
+			SetArg instances that are generated from nested sets
+		"""
+
+		traversed_set_args = set()
+
+		for arg in input_args:
+			if not isinstance(arg, SetArg):
+				yield arg
+				continue
+
+			root_config = arg.root_config
+			depgraph_sets = self._dynamic_config.sets[root_config.root]
+			arg_stack = [arg]
+			while arg_stack:
+				arg = arg_stack.pop()
+				if arg in traversed_set_args:
+					continue
+				traversed_set_args.add(arg)
+
+				if add_to_digraph:
+					self._dynamic_config.digraph.add(arg, None,
+						priority=BlockerDepPriority.instance)
+
+				yield arg
+
+				# Traverse nested sets and add them to the stack
+				# if they're not already in the graph. Also, graph
+				# edges between parent and nested sets.
+				for token in arg.pset.getNonAtoms():
+					if not token.startswith(SETPREFIX):
+						continue
+					s = token[len(SETPREFIX):]
+					nested_set = depgraph_sets.sets.get(s)
+					if nested_set is None:
+						nested_set = root_config.sets.get(s)
+					if nested_set is not None:
+						nested_arg = SetArg(arg=token, pset=nested_set,
+							root_config=root_config)
+						arg_stack.append(nested_arg)
+						if add_to_digraph:
+							self._dynamic_config.digraph.add(nested_arg, arg,
+								priority=BlockerDepPriority.instance)
+							depgraph_sets.sets[nested_arg.name] = nested_arg.pset
+
+	def _add_dep(self, dep, allow_unsatisfied=False):
+		debug = "--debug" in self._frozen_config.myopts
+		buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
+		nodeps = "--nodeps" in self._frozen_config.myopts
+		deep = self._dynamic_config.myparams.get("deep", 0)
+		recurse = deep is True or dep.depth <= deep
+		if dep.blocker:
+			if not buildpkgonly and \
+				not nodeps and \
+				not dep.collapsed_priority.ignored and \
+				not dep.collapsed_priority.optional and \
+				dep.parent not in self._dynamic_config._slot_collision_nodes:
+				if dep.parent.onlydeps:
+					# It's safe to ignore blockers if the
+					# parent is an --onlydeps node.
+					return 1
+				# The blocker applies to the root where
+				# the parent is or will be installed.
+				blocker = Blocker(atom=dep.atom,
+					eapi=dep.parent.metadata["EAPI"],
+					priority=dep.priority, root=dep.parent.root)
+				self._dynamic_config._blocker_parents.add(blocker, dep.parent)
+			return 1
+
+		if dep.child is None:
+			dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
+				onlydeps=dep.onlydeps)
+		else:
+			# The caller has selected a specific package
+			# via self._minimize_packages().
+			dep_pkg = dep.child
+			existing_node = self._dynamic_config._slot_pkg_map[
+				dep.root].get(dep_pkg.slot_atom)
+
+		if not dep_pkg:
+			if (dep.collapsed_priority.optional or
+				dep.collapsed_priority.ignored):
+				# This is an unnecessary build-time dep.
+				return 1
+			if allow_unsatisfied:
+				self._dynamic_config._unsatisfied_deps.append(dep)
+				return 1
+			self._dynamic_config._unsatisfied_deps_for_display.append(
+				((dep.root, dep.atom), {"myparent":dep.parent}))
+
+			# The parent node should not already be in
+			# runtime_pkg_mask, since that would trigger an
+			# infinite backtracking loop.
+			if self._dynamic_config._allow_backtracking:
+				if dep.parent in self._dynamic_config._runtime_pkg_mask:
+					if "--debug" in self._frozen_config.myopts:
+						writemsg(
+							"!!! backtracking loop detected: %s %s\n" % \
+							(dep.parent,
+							self._dynamic_config._runtime_pkg_mask[
+							dep.parent]), noiselevel=-1)
+				elif not self.need_restart():
+					# Do not backtrack if only USE flags have to be
+					# changed in order to satisfy the dependency.
+					dep_pkg, existing_node = \
+						self._select_package(dep.root, dep.atom.without_use,
+							onlydeps=dep.onlydeps)
+					if dep_pkg is None:
+						self._dynamic_config._backtrack_infos["missing dependency"] = dep
+						self._dynamic_config._need_restart = True
+						if "--debug" in self._frozen_config.myopts:
+							msg = []
+							msg.append("")
+							msg.append("")
+							msg.append("backtracking due to unsatisfied dep:")
+							msg.append("    parent: %s" % dep.parent)
+							msg.append("  priority: %s" % dep.priority)
+							msg.append("      root: %s" % dep.root)
+							msg.append("      atom: %s" % dep.atom)
+							msg.append("")
+							writemsg_level("".join("%s\n" % l for l in msg),
+								noiselevel=-1, level=logging.DEBUG)
+
+			return 0
+
+		self._rebuild.add(dep_pkg, dep)
+
+		ignore = dep.collapsed_priority.ignored and \
+			not self._dynamic_config._traverse_ignored_deps
+		if not ignore and not self._add_pkg(dep_pkg, dep):
+			return 0
+		return 1
+
+	def _check_slot_conflict(self, pkg, atom):
+		existing_node = self._dynamic_config._slot_pkg_map[pkg.root].get(pkg.slot_atom)
+		matches = None
+		if existing_node:
+			matches = pkg.cpv == existing_node.cpv
+			if pkg != existing_node and \
+				atom is not None:
+				# Use package set for matching since it will match via
+				# PROVIDE when necessary, while match_from_list does not.
+				matches = bool(InternalPackageSet(initial_atoms=(atom,),
+					allow_repo=True).findAtomForPackage(existing_node,
+					modified_use=self._pkg_use_enabled(existing_node)))
+
+		return (existing_node, matches)
+
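+	# Here "matches" is True when the slot's current occupant either is
+	# the same cpv or still satisfies the requesting atom (the package-set
+	# match is PROVIDE-aware, unlike match_from_list); an occupied slot
+	# with matches == False is what _add_pkg treats as a real slot
+	# conflict.
+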
+	def _add_pkg(self, pkg, dep):
+		"""
+		Adds a package to the depgraph, queues dependencies, and handles
+		slot conflicts.
+		"""
+		debug = "--debug" in self._frozen_config.myopts
+		myparent = None
+		priority = None
+		depth = 0
+		if dep is None:
+			dep = Dependency()
+		else:
+			myparent = dep.parent
+			priority = dep.priority
+			depth = dep.depth
+		if priority is None:
+			priority = DepPriority()
+
+		if debug:
+			writemsg_level(
+				"\n%s%s %s\n" % ("Child:".ljust(15), pkg,
+				pkg_use_display(pkg, self._frozen_config.myopts,
+				modified_use=self._pkg_use_enabled(pkg))),
+				level=logging.DEBUG, noiselevel=-1)
+			if isinstance(myparent,
+				(PackageArg, AtomArg)):
+				# For PackageArg and AtomArg types, it's
+				# redundant to display the atom attribute.
+				writemsg_level(
+					"%s%s\n" % ("Parent Dep:".ljust(15), myparent),
+					level=logging.DEBUG, noiselevel=-1)
+			else:
+				# Display the specific atom from SetArg or
+				# Package types.
+				writemsg_level(
+					"%s%s required by %s\n" %
+					("Parent Dep:".ljust(15), dep.atom, myparent),
+					level=logging.DEBUG, noiselevel=-1)
+
+		# Ensure that the dependencies of the same package
+		# are never processed more than once.
+		previously_added = pkg in self._dynamic_config.digraph
+
+		# select the correct /var database that we'll be checking against
+		vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
+		pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+
+		arg_atoms = None
+		try:
+			arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+		except portage.exception.InvalidDependString as e:
+			if not pkg.installed:
+				# should have been masked before it was selected
+				raise
+			del e
+
+		# NOTE: REQUIRED_USE checks are delayed until after
+		# package selection, since we want to prompt the user
+		# for USE adjustment rather than have REQUIRED_USE
+		# affect package selection and || dep choices.
+		if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
+			eapi_has_required_use(pkg.metadata["EAPI"]):
+			required_use_is_sat = check_required_use(
+				pkg.metadata["REQUIRED_USE"],
+				self._pkg_use_enabled(pkg),
+				pkg.iuse.is_valid_flag)
+			if not required_use_is_sat:
+				if dep.atom is not None and dep.parent is not None:
+					self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+				if arg_atoms:
+					for parent_atom in arg_atoms:
+						parent, atom = parent_atom
+						self._add_parent_atom(pkg, parent_atom)
+
+				atom = dep.atom
+				if atom is None:
+					atom = Atom("=" + pkg.cpv)
+				self._dynamic_config._unsatisfied_deps_for_display.append(
+					((pkg.root, atom), {"myparent":dep.parent}))
+				self._dynamic_config._skip_restart = True
+				return 0
+
+		if not pkg.onlydeps:
+
+			existing_node, existing_node_matches = \
+				self._check_slot_conflict(pkg, dep.atom)
+			slot_collision = False
+			if existing_node:
+				if existing_node_matches:
+					# The existing node can be reused.
+					if arg_atoms:
+						for parent_atom in arg_atoms:
+							parent, atom = parent_atom
+							self._dynamic_config.digraph.add(existing_node, parent,
+								priority=priority)
+							self._add_parent_atom(existing_node, parent_atom)
+					# If a direct circular dependency is not an unsatisfied
+					# buildtime dependency then drop it here since otherwise
+					# it can skew the merge order calculation in an unwanted
+					# way.
+					if existing_node != myparent or \
+						(priority.buildtime and not priority.satisfied):
+						self._dynamic_config.digraph.addnode(existing_node, myparent,
+							priority=priority)
+						if dep.atom is not None and dep.parent is not None:
+							self._add_parent_atom(existing_node,
+								(dep.parent, dep.atom))
+					return 1
+				else:
+					# A slot conflict has occurred. 
+					# The existing node should not already be in
+					# runtime_pkg_mask, since that would trigger an
+					# infinite backtracking loop.
+					if self._dynamic_config._allow_backtracking and \
+						existing_node in \
+						self._dynamic_config._runtime_pkg_mask:
+						if "--debug" in self._frozen_config.myopts:
+							writemsg(
+								"!!! backtracking loop detected: %s %s\n" % \
+								(existing_node,
+								self._dynamic_config._runtime_pkg_mask[
+								existing_node]), noiselevel=-1)
+					elif self._dynamic_config._allow_backtracking and \
+						not self._accept_blocker_conflicts() and \
+						not self.need_restart():
+
+						self._add_slot_conflict(pkg)
+						if dep.atom is not None and dep.parent is not None:
+							self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+						if arg_atoms:
+							for parent_atom in arg_atoms:
+								parent, atom = parent_atom
+								self._add_parent_atom(pkg, parent_atom)
+						self._process_slot_conflicts()
+
+						backtrack_data = []
+						fallback_data = []
+						all_parents = set()
+						# The ordering of backtrack_data can make
+						# a difference here, because both mask actions may lead
+						# to valid, but different, solutions and the one with
+						# 'existing_node' masked is usually the better one. Because
+						# of that, we choose an order such that
+						# the backtracker will first explore the choice with
+						# existing_node masked. The backtracker reverses the
+						# order, so the order it uses is the reverse of the
+						# order shown here. See bug #339606.
+						for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
+							# For missed update messages, find out which
+							# atoms matched to_be_selected that did not
+							# match to_be_masked.
+							parent_atoms = \
+								self._dynamic_config._parent_atoms.get(to_be_selected, set())
+							if parent_atoms:
+								conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
+								if conflict_atoms:
+									parent_atoms = conflict_atoms
+
+							all_parents.update(parent_atoms)
+
+							all_match = True
+							for parent, atom in parent_atoms:
+								i = InternalPackageSet(initial_atoms=(atom,),
+									allow_repo=True)
+								if not i.findAtomForPackage(to_be_masked):
+									all_match = False
+									break
+
+							if to_be_selected >= to_be_masked:
+								# We only care about the parent atoms
+								# when they trigger a downgrade.
+								parent_atoms = set()
+
+							fallback_data.append((to_be_masked, parent_atoms))
+
+							if all_match:
+								# 'to_be_masked' does not violate any parent atom, which means
+								# there is no point in masking it.
+								pass
+							else:
+								backtrack_data.append((to_be_masked, parent_atoms))
+
+						if not backtrack_data:
+							# This shouldn't happen, but fall back to the old
+							# behavior if this gets triggered somehow.
+							backtrack_data = fallback_data
+
+						if len(backtrack_data) > 1:
+							# NOTE: Generally, we prefer to mask the higher
+							# version since this solves common cases in which a
+							# lower version is needed so that all dependencies
+							# will be satisfied (bug #337178). However, if
+							# existing_node happens to be installed then we
+							# mask that since this is a common case that is
+							# triggered when --update is not enabled.
+							if existing_node.installed:
+								pass
+							elif pkg > existing_node:
+								backtrack_data.reverse()
+
+						to_be_masked = backtrack_data[-1][0]
+
+						self._dynamic_config._backtrack_infos["slot conflict"] = backtrack_data
+						self._dynamic_config._need_restart = True
+						if "--debug" in self._frozen_config.myopts:
+							msg = []
+							msg.append("")
+							msg.append("")
+							msg.append("backtracking due to slot conflict:")
+							if backtrack_data is fallback_data:
+								msg.append("!!! backtrack_data fallback")
+							msg.append("   first package:  %s" % existing_node)
+							msg.append("   second package: %s" % pkg)
+							msg.append("  package to mask: %s" % to_be_masked)
+							msg.append("      slot: %s" % pkg.slot_atom)
+							msg.append("   parents: %s" % ", ".join( \
+								"(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
+							msg.append("")
+							writemsg_level("".join("%s\n" % l for l in msg),
+								noiselevel=-1, level=logging.DEBUG)
+						return 0
+
+					# A slot collision has occurred.  Sometimes this coincides
+					# with unresolvable blockers, so the slot collision will be
+					# shown later if there are no unresolvable blockers.
+					self._add_slot_conflict(pkg)
+					slot_collision = True
+
+					if debug:
+						writemsg_level(
+							"%s%s %s\n" % ("Slot Conflict:".ljust(15),
+							existing_node, pkg_use_display(existing_node,
+							self._frozen_config.myopts,
+							modified_use=self._pkg_use_enabled(existing_node))),
+							level=logging.DEBUG, noiselevel=-1)
+
+			if slot_collision:
+				# Now add this node to the graph so that self.display()
+				# can show use flags and --tree portage.output.  This node is
+				# only being partially added to the graph.  It must not be
+				# allowed to interfere with the other nodes that have been
+				# added.  Do not overwrite data for existing nodes in
+				# self._dynamic_config.mydbapi since that data will be used for blocker
+				# validation.
+				# Even though the graph is now invalid, continue to process
+				# dependencies so that things like --fetchonly can still
+				# function despite collisions.
+				pass
+			elif not previously_added:
+				self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
+				self._dynamic_config.mydbapi[pkg.root].cpv_inject(pkg)
+				self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
+				self._dynamic_config._highest_pkg_cache.clear()
+				self._check_masks(pkg)
+
+			if not pkg.installed:
+				# Allow this package to satisfy old-style virtuals in case it
+				# doesn't already. Any pre-existing providers will be preferred
+				# over this one.
+				try:
+					pkgsettings.setinst(pkg.cpv, pkg.metadata)
+					# For consistency, also update the global virtuals.
+					settings = self._frozen_config.roots[pkg.root].settings
+					settings.unlock()
+					settings.setinst(pkg.cpv, pkg.metadata)
+					settings.lock()
+				except portage.exception.InvalidDependString as e:
+					if not pkg.installed:
+						# should have been masked before it was selected
+						raise
+
+		if arg_atoms:
+			self._dynamic_config._set_nodes.add(pkg)
+
+		# Do this even when addme is False (--onlydeps) so that the
+		# parent/child relationship is always known in case
+		# self._show_slot_collision_notice() needs to be called later.
+		self._dynamic_config.digraph.add(pkg, myparent, priority=priority)
+		if dep.atom is not None and dep.parent is not None:
+			self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+		if arg_atoms:
+			for parent_atom in arg_atoms:
+				parent, atom = parent_atom
+				self._dynamic_config.digraph.add(pkg, parent, priority=priority)
+				self._add_parent_atom(pkg, parent_atom)
+
+		""" This section determines whether we go deeper into dependencies or not.
+			We want to go deeper on a few occasions:
+			Installing package A, we need to make sure package A's deps are met.
+			emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
+			If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
+		"""
+		if arg_atoms:
+			depth = 0
+		pkg.depth = depth
+		deep = self._dynamic_config.myparams.get("deep", 0)
+		recurse = deep is True or depth + 1 <= deep
+		dep_stack = self._dynamic_config._dep_stack
+		if "recurse" not in self._dynamic_config.myparams:
+			return 1
+		elif pkg.installed and not recurse:
+			dep_stack = self._dynamic_config._ignored_deps
+
+		self._spinner_update()
+
+		if not previously_added:
+			dep_stack.append(pkg)
+		return 1
+
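+	# "deep" in myparams is either an integer depth bound or True for
+	# --deep given with no argument (unbounded), which is why the
+	# recursion test above is written as "deep is True or depth + 1 <=
+	# deep". A hedged sketch of the gate in isolation (hypothetical
+	# helper, not part of this patch):
+	#
+	#	def should_recurse(deep, child_depth):
+	#		# deep=True -> unbounded; deep=0 -> arguments only, etc.
+	#		return deep is True or child_depth <= deep
+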
+	def _check_masks(self, pkg):
+
+		slot_key = (pkg.root, pkg.slot_atom)
+
+		# Check for upgrades in the same slot that are
+		# masked due to a LICENSE change in a newer
+		# version that is not masked for any other reason.
+		other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
+		if other_pkg is not None and pkg < other_pkg:
+			self._dynamic_config._masked_license_updates.add(other_pkg)
+
+	def _add_parent_atom(self, pkg, parent_atom):
+		parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+		if parent_atoms is None:
+			parent_atoms = set()
+			self._dynamic_config._parent_atoms[pkg] = parent_atoms
+		parent_atoms.add(parent_atom)
+
+	def _add_slot_conflict(self, pkg):
+		self._dynamic_config._slot_collision_nodes.add(pkg)
+		slot_key = (pkg.slot_atom, pkg.root)
+		slot_nodes = self._dynamic_config._slot_collision_info.get(slot_key)
+		if slot_nodes is None:
+			slot_nodes = set()
+			slot_nodes.add(self._dynamic_config._slot_pkg_map[pkg.root][pkg.slot_atom])
+			self._dynamic_config._slot_collision_info[slot_key] = slot_nodes
+		slot_nodes.add(pkg)
+
+	def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
+
+		mytype = pkg.type_name
+		myroot = pkg.root
+		metadata = pkg.metadata
+		removal_action = "remove" in self._dynamic_config.myparams
+
+		edepend = {}
+		depkeys = ["DEPEND", "RDEPEND", "PDEPEND"]
+		for k in depkeys:
+			edepend[k] = metadata[k]
+
+		if not pkg.built and \
+			"--buildpkgonly" in self._frozen_config.myopts and \
+			"deep" not in self._dynamic_config.myparams:
+			edepend["RDEPEND"] = ""
+			edepend["PDEPEND"] = ""
+
+		ignore_build_time_deps = False
+		if pkg.built and not removal_action:
+			if self._dynamic_config.myparams.get("bdeps", "n") == "y":
+				# Pull in build-time deps as requested, but mark them as
+				# "optional" since they are not strictly required. This allows
+				# more freedom in the merge order calculation for solving
+				# circular dependencies. Don't convert to PDEPEND since that
+				# could make --with-bdeps=y less effective if it is used to
+				# adjust merge order to prevent built_with_use() calls from
+				# failing.
+				pass
+			else:
+				ignore_build_time_deps = True
+
+		if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
+			# Removal actions never traverse ignored buildtime
+			# dependencies, so it's safe to discard them early.
+			edepend["DEPEND"] = ""
+			ignore_build_time_deps = True
+
+		if removal_action:
+			depend_root = myroot
+		else:
+			depend_root = "/"
+			root_deps = self._frozen_config.myopts.get("--root-deps")
+			if root_deps is not None:
+				if root_deps is True:
+					depend_root = myroot
+				elif root_deps == "rdeps":
+					ignore_build_time_deps = True
+
+		# If rebuild mode is not enabled, it's safe to discard ignored
+		# build-time dependencies. If you want these deps to be traversed
+		# in "complete" mode then you need to specify --with-bdeps=y.
+		if ignore_build_time_deps and \
+			not self._rebuild.rebuild:
+			edepend["DEPEND"] = ""
+
+		deps = (
+			(depend_root, edepend["DEPEND"],
+				self._priority(buildtime=True,
+				optional=(pkg.built or ignore_build_time_deps),
+				ignored=ignore_build_time_deps)),
+			(myroot, edepend["RDEPEND"],
+				self._priority(runtime=True)),
+			(myroot, edepend["PDEPEND"],
+				self._priority(runtime_post=True))
+		)
+
+		debug = "--debug" in self._frozen_config.myopts
+		strict = mytype != "installed"
+
+		for dep_root, dep_string, dep_priority in deps:
+				if not dep_string:
+					continue
+				if debug:
+					writemsg_level("\nParent:    %s\n" % (pkg,),
+						noiselevel=-1, level=logging.DEBUG)
+					writemsg_level("Depstring: %s\n" % (dep_string,),
+						noiselevel=-1, level=logging.DEBUG)
+					writemsg_level("Priority:  %s\n" % (dep_priority,),
+						noiselevel=-1, level=logging.DEBUG)
+
+				try:
+					dep_string = portage.dep.use_reduce(dep_string,
+						uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
+				except portage.exception.InvalidDependString as e:
+					if not pkg.installed:
+						# should have been masked before it was selected
+						raise
+					del e
+
+					# Try again, but omit the is_valid_flag argument, since
+					# invalid USE conditionals are a common problem and it's
+					# practical to ignore this issue for installed packages.
+					try:
+						dep_string = portage.dep.use_reduce(dep_string,
+							uselist=self._pkg_use_enabled(pkg))
+					except portage.exception.InvalidDependString as e:
+						self._dynamic_config._masked_installed.add(pkg)
+						del e
+						continue
+
+				try:
+					dep_string = list(self._queue_disjunctive_deps(
+						pkg, dep_root, dep_priority, dep_string))
+				except portage.exception.InvalidDependString as e:
+					if pkg.installed:
+						self._dynamic_config._masked_installed.add(pkg)
+						del e
+						continue
+
+					# should have been masked before it was selected
+					raise
+
+				if not dep_string:
+					continue
+
+				dep_string = portage.dep.paren_enclose(dep_string,
+					unevaluated_atom=True)
+
+				if not self._add_pkg_dep_string(
+					pkg, dep_root, dep_priority, dep_string,
+					allow_unsatisfied):
+					return 0
+
+		self._dynamic_config._traversed_pkg_deps.add(pkg)
+		return 1
+
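+	# The deps tuple above fixes how each *DEPEND class is prioritized:
+	# DEPEND is build-time (and merely optional for built packages or
+	# when build-time deps are ignored), RDEPEND is runtime, and PDEPEND
+	# is post-merge runtime. Condensed into an illustrative mapping
+	# (hypothetical constant, not the DepPriority API):
+	#
+	#	DEP_CLASS_PRIORITY = {
+	#		"DEPEND": {"buildtime": True},
+	#		"RDEPEND": {"runtime": True},
+	#		"PDEPEND": {"runtime_post": True},
+	#	}
+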
+	def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
+		allow_unsatisfied):
+		_autounmask_backup = self._dynamic_config._autounmask
+		if dep_priority.optional or dep_priority.ignored:
+			# Temporarily disable autounmask for deps that
+			# don't necessarily need to be satisfied.
+			self._dynamic_config._autounmask = False
+		try:
+			return self._wrapped_add_pkg_dep_string(
+				pkg, dep_root, dep_priority, dep_string,
+				allow_unsatisfied)
+		finally:
+			self._dynamic_config._autounmask = _autounmask_backup
+
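+	# The wrapper above uses a save/restore pattern so that autounmask
+	# suggestions are never generated for deps that don't have to be
+	# satisfied; the finally clause restores the flag even when
+	# resolution raises. The same idiom in isolation (illustrative only):
+	#
+	#	backup = config.autounmask
+	#	config.autounmask = False
+	#	try:
+	#		return resolve()
+	#	finally:
+	#		config.autounmask = backup
+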
+	def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
+		dep_string, allow_unsatisfied):
+		depth = pkg.depth + 1
+		deep = self._dynamic_config.myparams.get("deep", 0)
+		recurse_satisfied = deep is True or depth <= deep
+		debug = "--debug" in self._frozen_config.myopts
+		strict = pkg.type_name != "installed"
+
+		if debug:
+			writemsg_level("\nParent:    %s\n" % (pkg,),
+				noiselevel=-1, level=logging.DEBUG)
+			writemsg_level("Depstring: %s\n" % (dep_string,),
+				noiselevel=-1, level=logging.DEBUG)
+			writemsg_level("Priority:  %s\n" % (dep_priority,),
+				noiselevel=-1, level=logging.DEBUG)
+
+		try:
+			selected_atoms = self._select_atoms(dep_root,
+				dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
+				strict=strict, priority=dep_priority)
+		except portage.exception.InvalidDependString as e:
+			if pkg.installed:
+				self._dynamic_config._masked_installed.add(pkg)
+				return 1
+
+			# should have been masked before it was selected
+			raise
+
+		if debug:
+			writemsg_level("Candidates: %s\n" % \
+				([str(x) for x in selected_atoms[pkg]],),
+				noiselevel=-1, level=logging.DEBUG)
+
+		root_config = self._frozen_config.roots[dep_root]
+		vardb = root_config.trees["vartree"].dbapi
+		traversed_virt_pkgs = set()
+
+		reinstall_atoms = self._frozen_config.reinstall_atoms
+		for atom, child in self._minimize_children(
+			pkg, dep_priority, root_config, selected_atoms[pkg]):
+
+			# If this was a specially generated virtual atom
+			# from dep_check, map it back to the original, in
+			# order to avoid distortion in places like display
+			# or conflict resolution code.
+			is_virt = hasattr(atom, '_orig_atom')
+			atom = getattr(atom, '_orig_atom', atom)
+
+			if atom.blocker and \
+				(dep_priority.optional or dep_priority.ignored):
+				# For --with-bdeps, ignore build-time only blockers
+				# that originate from built packages.
+				continue
+
+			mypriority = dep_priority.copy()
+			if not atom.blocker:
+				inst_pkgs = [inst_pkg for inst_pkg in
+					reversed(vardb.match_pkgs(atom))
+					if not reinstall_atoms.findAtomForPackage(inst_pkg,
+							modified_use=self._pkg_use_enabled(inst_pkg))]
+				if inst_pkgs:
+					for inst_pkg in inst_pkgs:
+						if self._pkg_visibility_check(inst_pkg):
+							# highest visible
+							mypriority.satisfied = inst_pkg
+							break
+					if not mypriority.satisfied:
+						# none visible, so use highest
+						mypriority.satisfied = inst_pkgs[0]
+
+			dep = Dependency(atom=atom,
+				blocker=atom.blocker, child=child, depth=depth, parent=pkg,
+				priority=mypriority, root=dep_root)
+
+			# In some cases, dep_check will return deps that shouldn't
+			# be processed any further, so they are identified and
+			# discarded here. Try to discard as few as possible since
+			# discarded dependencies reduce the amount of information
+			# available for optimization of merge order.
+			ignored = False
+			if not atom.blocker and \
+				not recurse_satisfied and \
+				mypriority.satisfied and \
+				mypriority.satisfied.visible and \
+				dep.child is not None and \
+				not dep.child.installed and \
+				self._dynamic_config._slot_pkg_map[dep.child.root].get(
+				dep.child.slot_atom) is None:
+				myarg = None
+				if dep.root == self._frozen_config.target_root:
+					try:
+						myarg = next(self._iter_atoms_for_pkg(dep.child))
+					except StopIteration:
+						pass
+					except InvalidDependString:
+						if not dep.child.installed:
+							# This shouldn't happen since the package
+							# should have been masked.
+							raise
+
+				if myarg is None:
+					# Existing child selection may not be valid unless
+					# it's added to the graph immediately, since "complete"
+					# mode may select a different child later.
+					ignored = True
+					dep.child = None
+					self._dynamic_config._ignored_deps.append(dep)
+
+			if not ignored:
+				if dep_priority.ignored and \
+					not self._dynamic_config._traverse_ignored_deps:
+					if is_virt and dep.child is not None:
+						traversed_virt_pkgs.add(dep.child)
+					dep.child = None
+					self._dynamic_config._ignored_deps.append(dep)
+				else:
+					if not self._add_dep(dep,
+						allow_unsatisfied=allow_unsatisfied):
+						return 0
+					if is_virt and dep.child is not None:
+						traversed_virt_pkgs.add(dep.child)
+
+		selected_atoms.pop(pkg)
+
+		# Add selected indirect virtual deps to the graph. This
+		# takes advantage of circular dependency avoidance that's done
+		# by dep_zapdeps. We preserve actual parent/child relationships
+		# here in order to avoid distorting the dependency graph like
+		# <=portage-2.1.6.x did.
+		for virt_dep, atoms in selected_atoms.items():
+
+			virt_pkg = virt_dep.child
+			if virt_pkg not in traversed_virt_pkgs:
+				continue
+
+			if debug:
+				writemsg_level("\nCandidates: %s: %s\n" % \
+					(virt_pkg.cpv, [str(x) for x in atoms]),
+					noiselevel=-1, level=logging.DEBUG)
+
+			if not dep_priority.ignored or \
+				self._dynamic_config._traverse_ignored_deps:
+
+				inst_pkgs = [inst_pkg for inst_pkg in
+					reversed(vardb.match_pkgs(virt_dep.atom))
+					if not reinstall_atoms.findAtomForPackage(inst_pkg,
+							modified_use=self._pkg_use_enabled(inst_pkg))]
+				if inst_pkgs:
+					for inst_pkg in inst_pkgs:
+						if self._pkg_visibility_check(inst_pkg):
+							# highest visible
+							virt_dep.priority.satisfied = inst_pkg
+							break
+					if not virt_dep.priority.satisfied:
+						# none visible, so use highest
+						virt_dep.priority.satisfied = inst_pkgs[0]
+
+				if not self._add_pkg(virt_pkg, virt_dep):
+					return 0
+
+			for atom, child in self._minimize_children(
+				pkg, self._priority(runtime=True), root_config, atoms):
+
+				# If this was a specially generated virtual atom
+				# from dep_check, map it back to the original, in
+				# order to avoid distortion in places like display
+				# or conflict resolution code.
+				is_virt = hasattr(atom, '_orig_atom')
+				atom = getattr(atom, '_orig_atom', atom)
+
+				# This is a GLEP 37 virtual, so its deps are all runtime.
+				mypriority = self._priority(runtime=True)
+				if not atom.blocker:
+					inst_pkgs = [inst_pkg for inst_pkg in
+						reversed(vardb.match_pkgs(atom))
+						if not reinstall_atoms.findAtomForPackage(inst_pkg,
+								modified_use=self._pkg_use_enabled(inst_pkg))]
+					if inst_pkgs:
+						for inst_pkg in inst_pkgs:
+							if self._pkg_visibility_check(inst_pkg):
+								# highest visible
+								mypriority.satisfied = inst_pkg
+								break
+						if not mypriority.satisfied:
+							# none visible, so use highest
+							mypriority.satisfied = inst_pkgs[0]
+
+				# Dependencies of virtuals are considered to have the
+				# same depth as the virtual itself.
+				dep = Dependency(atom=atom,
+					blocker=atom.blocker, child=child, depth=virt_dep.depth,
+					parent=virt_pkg, priority=mypriority, root=dep_root,
+					collapsed_parent=pkg, collapsed_priority=dep_priority)
+
+				ignored = False
+				if not atom.blocker and \
+					not recurse_satisfied and \
+					mypriority.satisfied and \
+					mypriority.satisfied.visible and \
+					dep.child is not None and \
+					not dep.child.installed and \
+					self._dynamic_config._slot_pkg_map[dep.child.root].get(
+					dep.child.slot_atom) is None:
+					myarg = None
+					if dep.root == self._frozen_config.target_root:
+						try:
+							myarg = next(self._iter_atoms_for_pkg(dep.child))
+						except StopIteration:
+							pass
+						except InvalidDependString:
+							if not dep.child.installed:
+								raise
+
+					if myarg is None:
+						ignored = True
+						dep.child = None
+						self._dynamic_config._ignored_deps.append(dep)
+
+				if not ignored:
+					if dep_priority.ignored and \
+						not self._dynamic_config._traverse_ignored_deps:
+						if is_virt and dep.child is not None:
+							traversed_virt_pkgs.add(dep.child)
+						dep.child = None
+						self._dynamic_config._ignored_deps.append(dep)
+					else:
+						if not self._add_dep(dep,
+							allow_unsatisfied=allow_unsatisfied):
+							return 0
+						if is_virt and dep.child is not None:
+							traversed_virt_pkgs.add(dep.child)
+
+		if debug:
+			writemsg_level("\nExiting... %s\n" % (pkg,),
+				noiselevel=-1, level=logging.DEBUG)
+
+		return 1
+
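+	# Both loops above choose mypriority.satisfied the same way: take the
+	# highest *visible* installed match, else fall back to the highest
+	# installed match. The selection in isolation, assuming a
+	# highest-first list and a hypothetical is_visible() (illustrative
+	# only):
+	#
+	#	def pick_satisfied(inst_pkgs, is_visible):
+	#		for inst_pkg in inst_pkgs:
+	#			if is_visible(inst_pkg):
+	#				return inst_pkg
+	#		return inst_pkgs[0] if inst_pkgs else None
+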
+	def _minimize_children(self, parent, priority, root_config, atoms):
+		"""
+		Selects packages to satisfy the given atoms, and minimizes the
+		number of selected packages. This serves to identify and eliminate
+		redundant package selections when multiple atoms happen to specify
+		a version range.
+		"""
+
+		atom_pkg_map = {}
+
+		for atom in atoms:
+			if atom.blocker:
+				yield (atom, None)
+				continue
+			dep_pkg, existing_node = self._select_package(
+				root_config.root, atom)
+			if dep_pkg is None:
+				yield (atom, None)
+				continue
+			atom_pkg_map[atom] = dep_pkg
+
+		if len(atom_pkg_map) < 2:
+			for item in atom_pkg_map.items():
+				yield item
+			return
+
+		cp_pkg_map = {}
+		pkg_atom_map = {}
+		for atom, pkg in atom_pkg_map.items():
+			pkg_atom_map.setdefault(pkg, set()).add(atom)
+			cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
+
+		for cp, pkgs in cp_pkg_map.items():
+			if len(pkgs) < 2:
+				for pkg in pkgs:
+					for atom in pkg_atom_map[pkg]:
+						yield (atom, pkg)
+				continue
+
+			# Use a digraph to identify and eliminate any
+			# redundant package selections.
+			atom_pkg_graph = digraph()
+			cp_atoms = set()
+			for pkg1 in pkgs:
+				for atom in pkg_atom_map[pkg1]:
+					cp_atoms.add(atom)
+					atom_pkg_graph.add(pkg1, atom)
+					atom_set = InternalPackageSet(initial_atoms=(atom,),
+						allow_repo=True)
+					for pkg2 in pkgs:
+						if pkg2 is pkg1:
+							continue
+						if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
+							atom_pkg_graph.add(pkg2, atom)
+
+			for pkg in pkgs:
+				eliminate_pkg = True
+				for atom in atom_pkg_graph.parent_nodes(pkg):
+					if len(atom_pkg_graph.child_nodes(atom)) < 2:
+						eliminate_pkg = False
+						break
+				if eliminate_pkg:
+					atom_pkg_graph.remove(pkg)
+
+			# Yield ~, =*, < and <= atoms first, since those are more likely to
+			# cause slot conflicts, and we want those atoms to be displayed
+			# in the resulting slot conflict message (see bug #291142).
+			conflict_atoms = []
+			normal_atoms = []
+			for atom in cp_atoms:
+				conflict = False
+				for child_pkg in atom_pkg_graph.child_nodes(atom):
+					existing_node, matches = \
+						self._check_slot_conflict(child_pkg, atom)
+					if existing_node and not matches:
+						conflict = True
+						break
+				if conflict:
+					conflict_atoms.append(atom)
+				else:
+					normal_atoms.append(atom)
+
+			for atom in chain(conflict_atoms, normal_atoms):
+				child_pkgs = atom_pkg_graph.child_nodes(atom)
+				# if more than one child, yield highest version
+				if len(child_pkgs) > 1:
+					child_pkgs.sort()
+				yield (atom, child_pkgs[-1])
+
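+	# The elimination rule above: a selected package is redundant when
+	# every atom it satisfies is also satisfied by at least one other
+	# selected package, so dropping it leaves all atoms covered. The same
+	# rule over plain sets (hypothetical data, not portage's digraph
+	# class):
+	#
+	#	def minimize(atom_to_pkgs):
+	#		# atom_to_pkgs: {atom: set of packages matching it}
+	#		for pkg in sorted(set().union(*atom_to_pkgs.values())):
+	#			matched_by = [m for m in atom_to_pkgs.values() if pkg in m]
+	#			if all(len(m) > 1 for m in matched_by):
+	#				for m in matched_by:
+	#					m.discard(pkg)
+	#		return atom_to_pkgs
+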
+	def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
+		"""
+		Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
+		Yields non-disjunctive deps. Raises InvalidDependString when 
+		necessary.
+		"""
+		i = 0
+		while i < len(dep_struct):
+			x = dep_struct[i]
+			if isinstance(x, list):
+				for y in self._queue_disjunctive_deps(
+					pkg, dep_root, dep_priority, x):
+					yield y
+			elif x == "||":
+				self._queue_disjunction(pkg, dep_root, dep_priority,
+					[ x, dep_struct[ i + 1 ] ] )
+				i += 1
+			else:
+				try:
+					x = portage.dep.Atom(x)
+				except portage.exception.InvalidAtom:
+					if not pkg.installed:
+						raise portage.exception.InvalidDependString(
+							"invalid atom: '%s'" % x)
+				else:
+					# Note: Eventually this will check for PROPERTIES=virtual
+					# or whatever other metadata gets implemented for this
+					# purpose.
+					if x.cp.startswith('virtual/'):
+						self._queue_disjunction( pkg, dep_root,
+							dep_priority, [ str(x) ] )
+					else:
+						yield str(x)
+			i += 1
+
+	def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
+		self._dynamic_config._dep_disjunctive_stack.append(
+			(pkg, dep_root, dep_priority, dep_struct))
+
+	def _pop_disjunction(self, allow_unsatisfied):
+		"""
+		Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
+		populate self._dynamic_config._dep_stack.
+		"""
+		pkg, dep_root, dep_priority, dep_struct = \
+			self._dynamic_config._dep_disjunctive_stack.pop()
+		dep_string = portage.dep.paren_enclose(dep_struct,
+			unevaluated_atom=True)
+		if not self._add_pkg_dep_string(
+			pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
+			return 0
+		return 1
+
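+	# Disjunctions are deliberately not resolved inline: each "|| ( ... )"
+	# group is pushed onto _dep_disjunctive_stack and re-serialized later,
+	# after the non-disjunctive deps are queued, so || choices can see a
+	# more complete graph. A flat (non-recursive) toy walk of a
+	# use_reduce()-style list (illustrative input and helper only):
+	#
+	#	def split_deps(dep_struct):
+	#		plain, deferred = [], []
+	#		i = 0
+	#		while i < len(dep_struct):
+	#			if dep_struct[i] == "||":
+	#				deferred.append(["||", dep_struct[i + 1]])
+	#				i += 1
+	#			else:
+	#				plain.append(dep_struct[i])
+	#			i += 1
+	#		return plain, deferred
+	#
+	#	# split_deps(["a/b", "||", ["c/d", "e/f"], "g/h"])
+	#	# -> (["a/b", "g/h"], [["||", ["c/d", "e/f"]]])
+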
+	def _priority(self, **kwargs):
+		if "remove" in self._dynamic_config.myparams:
+			priority_constructor = UnmergeDepPriority
+		else:
+			priority_constructor = DepPriority
+		return priority_constructor(**kwargs)
+
+	def _dep_expand(self, root_config, atom_without_category):
+		"""
+		@param root_config: a root config instance
+		@type root_config: RootConfig
+		@param atom_without_category: an atom without a category component
+		@type atom_without_category: String
+		@rtype: list
+		@returns: a list of atoms containing categories (possibly empty)
+		"""
+		null_cp = portage.dep_getkey(insert_category_into_atom(
+			atom_without_category, "null"))
+		cat, atom_pn = portage.catsplit(null_cp)
+
+		dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
+		categories = set()
+		for db, pkg_type, built, installed, db_keys in dbs:
+			for cat in db.categories:
+				if db.cp_list("%s/%s" % (cat, atom_pn)):
+					categories.add(cat)
+
+		deps = []
+		for cat in categories:
+			deps.append(Atom(insert_category_into_atom(
+				atom_without_category, cat), allow_repo=True))
+		return deps
+
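+	# Example of the expansion: "foo" is first made parsable as
+	# "null/foo", then every category that actually provides foo is
+	# substituted back, so the result might be [app-misc/foo,
+	# dev-util/foo] (illustrative category names). An empty list means no
+	# category contains foo; more than one entry means the name is
+	# ambiguous and the caller must disambiguate.
+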
+	def _have_new_virt(self, root, atom_cp):
+		ret = False
+		for db, pkg_type, built, installed, db_keys in \
+			self._dynamic_config._filtered_trees[root]["dbs"]:
+			if db.cp_list(atom_cp):
+				ret = True
+				break
+		return ret
+
+	def _iter_atoms_for_pkg(self, pkg):
+		depgraph_sets = self._dynamic_config.sets[pkg.root]
+		atom_arg_map = depgraph_sets.atom_arg_map
+		root_config = self._frozen_config.roots[pkg.root]
+		for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
+			if atom.cp != pkg.cp and \
+				self._have_new_virt(pkg.root, atom.cp):
+				continue
+			visible_pkgs = \
+				self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
+			visible_pkgs.reverse() # descending order
+			higher_slot = None
+			for visible_pkg in visible_pkgs:
+				if visible_pkg.cp != atom.cp:
+					continue
+				if pkg >= visible_pkg:
+					# This is descending order, and we're not
+					# interested in any versions <= pkg given.
+					break
+				if pkg.slot_atom != visible_pkg.slot_atom:
+					higher_slot = visible_pkg
+					break
+			if higher_slot is not None:
+				continue
+			for arg in atom_arg_map[(atom, pkg.root)]:
+				if isinstance(arg, PackageArg) and \
+					arg.package != pkg:
+					continue
+				yield arg, atom
+
+	def select_files(self, myfiles):
+		"""Given a list of .tbz2s, .ebuilds sets, and deps, populate
+		self._dynamic_config._initial_arg_list and call self._resolve to create the 
+		appropriate depgraph and return a favorite list."""
+		self._load_vdb()
+		debug = "--debug" in self._frozen_config.myopts
+		root_config = self._frozen_config.roots[self._frozen_config.target_root]
+		sets = root_config.sets
+		depgraph_sets = self._dynamic_config.sets[root_config.root]
+		myfavorites = []
+		myroot = self._frozen_config.target_root
+		dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
+		vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
+		real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
+		portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
+		bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
+		pkgsettings = self._frozen_config.pkgsettings[myroot]
+		args = []
+		onlydeps = "--onlydeps" in self._frozen_config.myopts
+		lookup_owners = []
+		for x in myfiles:
+			ext = os.path.splitext(x)[1]
+			if ext==".tbz2":
+				if not os.path.exists(x):
+					if os.path.exists(
+						os.path.join(pkgsettings["PKGDIR"], "All", x)):
+						x = os.path.join(pkgsettings["PKGDIR"], "All", x)
+					elif os.path.exists(
+						os.path.join(pkgsettings["PKGDIR"], x)):
+						x = os.path.join(pkgsettings["PKGDIR"], x)
+					else:
+						writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
+						writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
+						return 0, myfavorites
+				mytbz2 = portage.xpak.tbz2(x)
+				mykey = mytbz2.getelements("CATEGORY")[0] + "/" + os.path.splitext(os.path.basename(x))[0]
+				if os.path.realpath(x) != \
+					os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
+					writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
+					self._dynamic_config._skip_restart = True
+					return 0, myfavorites
+
+				pkg = self._pkg(mykey, "binary", root_config,
+					onlydeps=onlydeps)
+				args.append(PackageArg(arg=x, package=pkg,
+					root_config=root_config))
+			elif ext==".ebuild":
+				ebuild_path = portage.util.normalize_path(os.path.abspath(x))
+				pkgdir = os.path.dirname(ebuild_path)
+				tree_root = os.path.dirname(os.path.dirname(pkgdir))
+				cp = pkgdir[len(tree_root)+1:]
+				e = portage.exception.PackageNotFound(
+					("%s is not in a valid portage tree " + \
+					"hierarchy or does not exist") % x)
+				if not portage.isvalidatom(cp):
+					raise e
+				cat = portage.catsplit(cp)[0]
+				mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
+				if not portage.isvalidatom("="+mykey):
+					raise e
+				ebuild_path = portdb.findname(mykey)
+				if ebuild_path:
+					if ebuild_path != os.path.join(os.path.realpath(tree_root),
+						cp, os.path.basename(ebuild_path)):
+						writemsg(colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n\n"), noiselevel=-1)
+						self._dynamic_config._skip_restart = True
+						return 0, myfavorites
+					if mykey not in portdb.xmatch(
+						"match-visible", portage.cpv_getkey(mykey)):
+						writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
+						writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
+						writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
+						countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
+							"Continuing...")
+				else:
+					raise portage.exception.PackageNotFound(
+						"%s is not in a valid portage tree hierarchy or does not exist" % x)
+				pkg = self._pkg(mykey, "ebuild", root_config,
+					onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
+					os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
+				args.append(PackageArg(arg=x, package=pkg,
+					root_config=root_config))
+			elif x.startswith(os.path.sep):
+				if not x.startswith(myroot):
+					portage.writemsg(("\n\n!!! '%s' does not start with" + \
+						" $ROOT.\n") % x, noiselevel=-1)
+					self._dynamic_config._skip_restart = True
+					return 0, []
+				# Queue these up since it's most efficient to handle
+				# multiple files in a single iter_owners() call.
+				lookup_owners.append(x)
+			elif x.startswith("." + os.sep) or \
+				x.startswith(".." + os.sep):
+				f = os.path.abspath(x)
+				if not f.startswith(myroot):
+					portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
+						" $ROOT.\n") % (f, x), noiselevel=-1)
+					self._dynamic_config._skip_restart = True
+					return 0, []
+				lookup_owners.append(f)
+			else:
+				if x in ("system", "world"):
+					x = SETPREFIX + x
+				if x.startswith(SETPREFIX):
+					s = x[len(SETPREFIX):]
+					if s not in sets:
+						raise portage.exception.PackageSetNotFound(s)
+					if s in depgraph_sets.sets:
+						continue
+					pset = sets[s]
+					depgraph_sets.sets[s] = pset
+					args.append(SetArg(arg=x, pset=pset,
+						root_config=root_config))
+					continue
+				if not is_valid_package_atom(x, allow_repo=True):
+					portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
+						noiselevel=-1)
+					portage.writemsg("!!! Please check ebuild(5) for full details.\n")
+					portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
+					self._dynamic_config._skip_restart = True
+					return (0, [])
+				# Don't expand categories or old-style virtuals here unless
+				# necessary. Expansion of old-style virtuals here causes at
+				# least the following problems:
+				#   1) It's more difficult to determine which set(s) an atom
+				#      came from, if any.
+				#   2) It takes away freedom from the resolver to choose other
+				#      possible expansions when necessary.
+				if "/" in x:
+					args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
+						root_config=root_config))
+					continue
+				expanded_atoms = self._dep_expand(root_config, x)
+				installed_cp_set = set()
+				for atom in expanded_atoms:
+					if vardb.cp_list(atom.cp):
+						installed_cp_set.add(atom.cp)
+
+				if len(installed_cp_set) > 1:
+					non_virtual_cps = set()
+					for atom_cp in installed_cp_set:
+						if not atom_cp.startswith("virtual/"):
+							non_virtual_cps.add(atom_cp)
+					if len(non_virtual_cps) == 1:
+						installed_cp_set = non_virtual_cps
+
+				if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
+					installed_cp = next(iter(installed_cp_set))
+					for atom in expanded_atoms:
+						if atom.cp == installed_cp:
+							available = False
+							for pkg in self._iter_match_pkgs_any(
+								root_config, atom.without_use,
+								onlydeps=onlydeps):
+								if not pkg.installed:
+									available = True
+									break
+							if available:
+								expanded_atoms = [atom]
+								break
+
+				# If a non-virtual package and one or more virtual packages
+				# are in expanded_atoms, use the non-virtual package.
+				if len(expanded_atoms) > 1:
+					number_of_virtuals = 0
+					for expanded_atom in expanded_atoms:
+						if expanded_atom.cp.startswith("virtual/"):
+							number_of_virtuals += 1
+						else:
+							candidate = expanded_atom
+					if len(expanded_atoms) - number_of_virtuals == 1:
+						expanded_atoms = [ candidate ]
+
+				if len(expanded_atoms) > 1:
+					writemsg("\n\n", noiselevel=-1)
+					ambiguous_package_name(x, expanded_atoms, root_config,
+						self._frozen_config.spinner, self._frozen_config.myopts)
+					self._dynamic_config._skip_restart = True
+					return False, myfavorites
+				if expanded_atoms:
+					atom = expanded_atoms[0]
+				else:
+					null_atom = Atom(insert_category_into_atom(x, "null"),
+						allow_repo=True)
+					cat, atom_pn = portage.catsplit(null_atom.cp)
+					virts_p = root_config.settings.get_virts_p().get(atom_pn)
+					if virts_p:
+						# Allow the depgraph to choose which virtual.
+						atom = Atom(null_atom.replace('null/', 'virtual/', 1),
+							allow_repo=True)
+					else:
+						atom = null_atom
+
+				if atom.use and atom.use.conditional:
+					writemsg(
+						("\n\n!!! '%s' contains a conditional " + \
+						"which is not allowed.\n") % (x,), noiselevel=-1)
+					writemsg("!!! Please check ebuild(5) for full details.\n")
+					self._dynamic_config._skip_restart = True
+					return (0, [])
+
+				args.append(AtomArg(arg=x, atom=atom,
+					root_config=root_config))
+
+		if lookup_owners:
+			relative_paths = []
+			search_for_multiple = False
+			if len(lookup_owners) > 1:
+				search_for_multiple = True
+
+			for x in lookup_owners:
+				if not search_for_multiple and os.path.isdir(x):
+					search_for_multiple = True
+				relative_paths.append(x[len(myroot)-1:])
+
+			owners = set()
+			for pkg, relative_path in \
+				real_vardb._owners.iter_owners(relative_paths):
+				owners.add(pkg.mycpv)
+				if not search_for_multiple:
+					break
+
+			if not owners:
+				portage.writemsg(("\n\n!!! '%s' is not claimed " + \
+					"by any package.\n") % lookup_owners[0], noiselevel=-1)
+				self._dynamic_config._skip_restart = True
+				return 0, []
+
+			for cpv in owners:
+				slot = vardb.aux_get(cpv, ["SLOT"])[0]
+				if not slot:
+					# portage now masks packages with missing slot, but it's
+					# possible that one was installed by an older version
+					atom = Atom(portage.cpv_getkey(cpv))
+				else:
+					atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
+				args.append(AtomArg(arg=atom, atom=atom,
+					root_config=root_config))
+
+		if "--update" in self._frozen_config.myopts:
+			# In some cases, the greedy slots behavior can pull in a slot that
+			# the user would want to uninstall due to it being blocked by a
+			# newer version in a different slot. Therefore, it's necessary to
+			# detect and discard any that should be uninstalled. Each time
+			# that arguments are updated, package selections are repeated in
+			# order to ensure consistency with the current arguments:
+			#
+			#  1) Initialize args
+			#  2) Select packages and generate initial greedy atoms
+			#  3) Update args with greedy atoms
+			#  4) Select packages and generate greedy atoms again, while
+			#     accounting for any blockers between selected packages
+			#  5) Update args with revised greedy atoms
+
+			self._set_args(args)
+			greedy_args = []
+			for arg in args:
+				greedy_args.append(arg)
+				if not isinstance(arg, AtomArg):
+					continue
+				for atom in self._greedy_slots(arg.root_config, arg.atom):
+					greedy_args.append(
+						AtomArg(arg=arg.arg, atom=atom,
+							root_config=arg.root_config))
+
+			self._set_args(greedy_args)
+			del greedy_args
+
+			# Revise greedy atoms, accounting for any blockers
+			# between selected packages.
+			revised_greedy_args = []
+			for arg in args:
+				revised_greedy_args.append(arg)
+				if not isinstance(arg, AtomArg):
+					continue
+				for atom in self._greedy_slots(arg.root_config, arg.atom,
+					blocker_lookahead=True):
+					revised_greedy_args.append(
+						AtomArg(arg=arg.arg, atom=atom,
+							root_config=arg.root_config))
+			args = revised_greedy_args
+			del revised_greedy_args
+
+		self._set_args(args)
+
+		myfavorites = set(myfavorites)
+		for arg in args:
+			if isinstance(arg, (AtomArg, PackageArg)):
+				myfavorites.add(arg.atom)
+			elif isinstance(arg, SetArg):
+				myfavorites.add(arg.arg)
+		myfavorites = list(myfavorites)
+
+		if debug:
+			portage.writemsg("\n", noiselevel=-1)
+		# Order needs to be preserved since a feature of --nodeps
+		# is to allow the user to force a specific merge order.
+		self._dynamic_config._initial_arg_list = args[:]
+
+		return self._resolve(myfavorites)
+
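+	# select_files() classification, condensed: a .tbz2 suffix means a
+	# binary package, .ebuild means a raw ebuild path, a path under $ROOT
+	# is resolved through the vardb owner index, "@set" (and the bare
+	# "system"/"world" aliases) name package sets, and anything else is
+	# parsed as an atom. A hedged sketch of just the dispatch
+	# (hypothetical helper, ignores error handling):
+	#
+	#	import os
+	#	def classify(x):
+	#		if x.endswith(".tbz2"):
+	#			return "binary"
+	#		if x.endswith(".ebuild"):
+	#			return "ebuild"
+	#		if x.startswith(os.sep) or x.startswith("." + os.sep) \
+	#			or x.startswith(".." + os.sep):
+	#			return "owner lookup"
+	#		if x.startswith("@") or x in ("system", "world"):
+	#			return "set"
+	#		return "atom"
+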
+	def _resolve(self, myfavorites):
+		"""Given self._dynamic_config._initial_arg_list, pull in the root nodes, 
+		call self._creategraph to process theier deps and return 
+		a favorite list."""
+		debug = "--debug" in self._frozen_config.myopts
+		onlydeps = "--onlydeps" in self._frozen_config.myopts
+		myroot = self._frozen_config.target_root
+		pkgsettings = self._frozen_config.pkgsettings[myroot]
+		pprovideddict = pkgsettings.pprovideddict
+		virtuals = pkgsettings.getvirtuals()
+		args = self._dynamic_config._initial_arg_list[:]
+		for root, atom in chain(self._rebuild.rebuild_list,
+			self._rebuild.reinstall_list):
+			args.append(AtomArg(arg=atom, atom=atom,
+				root_config=self._frozen_config.roots[root]))
+		for arg in self._expand_set_args(args, add_to_digraph=True):
+			for atom in arg.pset.getAtoms():
+				self._spinner_update()
+				dep = Dependency(atom=atom, onlydeps=onlydeps,
+					root=myroot, parent=arg)
+				try:
+					pprovided = pprovideddict.get(atom.cp)
+					if pprovided and portage.match_from_list(atom, pprovided):
+						# A provided package has been specified on the command line.
+						self._dynamic_config._pprovided_args.append((arg, atom))
+						continue
+					if isinstance(arg, PackageArg):
+						if not self._add_pkg(arg.package, dep) or \
+							not self._create_graph():
+							if not self.need_restart():
+								sys.stderr.write(("\n\n!!! Problem " + \
+									"resolving dependencies for %s\n") % \
+									arg.arg)
+							return 0, myfavorites
+						continue
+					if debug:
+						writemsg_level("\n      Arg: %s\n     Atom: %s\n" %
+							(arg, atom), noiselevel=-1, level=logging.DEBUG)
+					pkg, existing_node = self._select_package(
+						myroot, atom, onlydeps=onlydeps)
+					if not pkg:
+						pprovided_match = False
+						for virt_choice in virtuals.get(atom.cp, []):
+							expanded_atom = portage.dep.Atom(
+								atom.replace(atom.cp, virt_choice.cp, 1))
+							pprovided = pprovideddict.get(expanded_atom.cp)
+							if pprovided and \
+								portage.match_from_list(expanded_atom, pprovided):
+								# A provided package has been
+								# specified on the command line.
+								self._dynamic_config._pprovided_args.append((arg, atom))
+								pprovided_match = True
+								break
+						if pprovided_match:
+							continue
+
+						if not (isinstance(arg, SetArg) and \
+							arg.name in ("selected", "system", "world")):
+							self._dynamic_config._unsatisfied_deps_for_display.append(
+								((myroot, atom), {"myparent" : arg}))
+							return 0, myfavorites
+
+						self._dynamic_config._missing_args.append((arg, atom))
+						continue
+					if atom.cp != pkg.cp:
+						# For old-style virtuals, we need to repeat the
+						# package.provided check against the selected package.
+						expanded_atom = atom.replace(atom.cp, pkg.cp)
+						pprovided = pprovideddict.get(pkg.cp)
+						if pprovided and \
+							portage.match_from_list(expanded_atom, pprovided):
+							# A provided package has been
+							# specified on the command line.
+							self._dynamic_config._pprovided_args.append((arg, atom))
+							continue
+					if pkg.installed and \
+						"selective" not in self._dynamic_config.myparams and \
+						not self._frozen_config.excluded_pkgs.findAtomForPackage(
+						pkg, modified_use=self._pkg_use_enabled(pkg)):
+						self._dynamic_config._unsatisfied_deps_for_display.append(
+							((myroot, atom), {"myparent" : arg}))
+						# Previous behavior was to bail out in this case, but
+						# since the dep is satisfied by the installed package,
+						# it's more friendly to continue building the graph
+						# and just show a warning message. Therefore, only bail
+						# out here if the atom is not from either the system or
+						# world set.
+						if not (isinstance(arg, SetArg) and \
+							arg.name in ("selected", "system", "world")):
+							return 0, myfavorites
+
+					# Add the selected package to the graph as soon as possible
+					# so that later dep_check() calls can use it as feedback
+					# for making more consistent atom selections.
+					if not self._add_pkg(pkg, dep):
+						if self.need_restart():
+							pass
+						elif isinstance(arg, SetArg):
+							writemsg(("\n\n!!! Problem resolving " + \
+								"dependencies for %s from %s\n") % \
+								(atom, arg.arg), noiselevel=-1)
+						else:
+							writemsg(("\n\n!!! Problem resolving " + \
+								"dependencies for %s\n") % \
+								(atom,), noiselevel=-1)
+						return 0, myfavorites
+
+				except SystemExit as e:
+					raise # Needed else can't exit
+				except Exception as e:
+					writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
+					writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
+					raise
+
+		# Now that the root packages have been added to the graph,
+		# process the dependencies.
+		if not self._create_graph():
+			return 0, myfavorites
+
+		try:
+			self.altlist()
+		except self._unknown_internal_error:
+			return False, myfavorites
+
+		digraph_set = frozenset(self._dynamic_config.digraph)
+
+		if digraph_set.intersection(
+			self._dynamic_config._needed_unstable_keywords) or \
+			digraph_set.intersection(
+			self._dynamic_config._needed_p_mask_changes) or \
+			digraph_set.intersection(
+			self._dynamic_config._needed_use_config_changes) or \
+			digraph_set.intersection(
+			self._dynamic_config._needed_license_changes):
+			# We failed if the user needs to change the configuration
+			self._dynamic_config._success_without_autounmask = True
+			return False, myfavorites
+
+		digraph_set = None
+
+		if self._rebuild.trigger_rebuilds():
+			backtrack_infos = self._dynamic_config._backtrack_infos
+			config = backtrack_infos.setdefault("config", {})
+			config["rebuild_list"] = self._rebuild.rebuild_list
+			config["reinstall_list"] = self._rebuild.reinstall_list
+			self._dynamic_config._need_restart = True
+			return False, myfavorites
+
+		# We're true here unless we are missing binaries.
+		return (True, myfavorites)
+
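+	# Note the ordering above: if anything that actually landed in the
+	# digraph needed an autounmask change (keywords, package.mask, USE or
+	# license), the resolve deliberately reports failure so the required
+	# configuration changes are displayed to the user rather than
+	# silently assumed.
+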
+	def _set_args(self, args):
+		"""
+		Create the "__non_set_args__" package set from atoms and packages given as
+		arguments. This method can be called multiple times if necessary.
+		The package selection cache is automatically invalidated, since
+		arguments influence package selections.
+		"""
+
+		set_atoms = {}
+		non_set_atoms = {}
+		for root in self._dynamic_config.sets:
+			depgraph_sets = self._dynamic_config.sets[root]
+			depgraph_sets.sets.setdefault('__non_set_args__',
+				InternalPackageSet(allow_repo=True)).clear()
+			depgraph_sets.atoms.clear()
+			depgraph_sets.atom_arg_map.clear()
+			set_atoms[root] = []
+			non_set_atoms[root] = []
+
+		# We don't add set args to the digraph here since that
+		# happens at a later stage and we don't want to make
+		# any state changes here that aren't reversed by
+		# another call to this method.
+		for arg in self._expand_set_args(args, add_to_digraph=False):
+			atom_arg_map = self._dynamic_config.sets[
+				arg.root_config.root].atom_arg_map
+			if isinstance(arg, SetArg):
+				atom_group = set_atoms[arg.root_config.root]
+			else:
+				atom_group = non_set_atoms[arg.root_config.root]
+
+			for atom in arg.pset.getAtoms():
+				atom_group.append(atom)
+				atom_key = (atom, arg.root_config.root)
+				refs = atom_arg_map.get(atom_key)
+				if refs is None:
+					refs = []
+					atom_arg_map[atom_key] = refs
+				if arg not in refs:
+					refs.append(arg)
+
+		for root in self._dynamic_config.sets:
+			depgraph_sets = self._dynamic_config.sets[root]
+			depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
+				non_set_atoms.get(root, [])))
+			depgraph_sets.sets['__non_set_args__'].update(
+				non_set_atoms.get(root, []))
+
+		# Invalidate the package selection cache, since
+		# arguments influence package selections.
+		self._dynamic_config._highest_pkg_cache.clear()
+		for trees in self._dynamic_config._filtered_trees.values():
+			trees["porttree"].dbapi._clear_cache()
+
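+	# atom_arg_map is a reverse index, (atom, root) -> [args], so a
+	# matched package can later be traced back to the command-line
+	# argument(s) that pulled it in (see _iter_atoms_for_pkg). Minimal
+	# sketch of the shape with hypothetical data:
+	#
+	#	atom_arg_map = {}
+	#	for arg, atoms in (("@world", ["a/b"]), ("a/b", ["a/b"])):
+	#		for atom in atoms:
+	#			atom_arg_map.setdefault((atom, "/"), []).append(arg)
+	#	# atom_arg_map == {("a/b", "/"): ["@world", "a/b"]}
+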
+	def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
+		"""
+		Return a list of slot atoms corresponding to installed slots that
+		differ from the slot of the highest visible match. When
+		blocker_lookahead is True, slot atoms that would trigger a blocker
+		conflict are automatically discarded, potentially allowing automatic
+		uninstallation of older slots when appropriate.
+		"""
+		highest_pkg, in_graph = self._select_package(root_config.root, atom)
+		if highest_pkg is None:
+			return []
+		vardb = root_config.trees["vartree"].dbapi
+		slots = set()
+		for cpv in vardb.match(atom):
+			# don't mix new virtuals with old virtuals
+			if portage.cpv_getkey(cpv) == highest_pkg.cp:
+				slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
+
+		slots.add(highest_pkg.metadata["SLOT"])
+		if len(slots) == 1:
+			return []
+		greedy_pkgs = []
+		slots.remove(highest_pkg.metadata["SLOT"])
+		while slots:
+			slot = slots.pop()
+			slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
+			pkg, in_graph = self._select_package(root_config.root, slot_atom)
+			if pkg is not None and \
+				pkg.cp == highest_pkg.cp and pkg < highest_pkg:
+				greedy_pkgs.append(pkg)
+		if not greedy_pkgs:
+			return []
+		if not blocker_lookahead:
+			return [pkg.slot_atom for pkg in greedy_pkgs]
+
+		blockers = {}
+		blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
+		for pkg in greedy_pkgs + [highest_pkg]:
+			dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
+			try:
+				selected_atoms = self._select_atoms(
+					pkg.root, dep_str, self._pkg_use_enabled(pkg),
+					parent=pkg, strict=True)
+			except portage.exception.InvalidDependString:
+				continue
+			blocker_atoms = []
+			for atoms in selected_atoms.values():
+				blocker_atoms.extend(x for x in atoms if x.blocker)
+			blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
+
+		if highest_pkg not in blockers:
+			return []
+
+		# filter packages with invalid deps
+		greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
+
+		# filter packages that conflict with highest_pkg
+		greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
+			(blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
+			blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
+
+		if not greedy_pkgs:
+			return []
+
+		# If two packages conflict, discard the lower version.
+		discard_pkgs = set()
+		greedy_pkgs.sort(reverse=True)
+		for i in range(len(greedy_pkgs) - 1):
+			pkg1 = greedy_pkgs[i]
+			if pkg1 in discard_pkgs:
+				continue
+			for j in range(i + 1, len(greedy_pkgs)):
+				pkg2 = greedy_pkgs[j]
+				if pkg2 in discard_pkgs:
+					continue
+				if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
+					blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
+					# pkg1 > pkg2, so discard the lower version (pkg2)
+					discard_pkgs.add(pkg2)
+
+		return [pkg.slot_atom for pkg in greedy_pkgs \
+			if pkg not in discard_pkgs]
+
+	def _select_atoms_from_graph(self, *pargs, **kwargs):
+		"""
+		Prefer atoms matching packages that have already been
+		added to the graph or those that are installed and have
+		not been scheduled for replacement.
+		"""
+		kwargs["trees"] = self._dynamic_config._graph_trees
+		return self._select_atoms_highest_available(*pargs, **kwargs)
+
+	def _select_atoms_highest_available(self, root, depstring,
+		myuse=None, parent=None, strict=True, trees=None, priority=None):
+		"""This will raise InvalidDependString if necessary. If trees is
+		None then self._dynamic_config._filtered_trees is used."""
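+		# Illustrative note (not part of the original code): the return
+		# value maps a parent node, or a synthetic Dependency created for
+		# traversed virtuals, to the list of atoms chosen for it, e.g.
+		# (hypothetical): {<Package app-misc/foo-1.0>: [Atom("dev-libs/bar")]}.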
+
+		pkgsettings = self._frozen_config.pkgsettings[root]
+		if trees is None:
+			trees = self._dynamic_config._filtered_trees
+		mytrees = trees[root]
+		atom_graph = digraph()
+		# Temporarily disable autounmask so that || preferences
+		# account for masking and USE settings.
+		_autounmask_backup = self._dynamic_config._autounmask
+		self._dynamic_config._autounmask = False
+		mytrees["pkg_use_enabled"] = self._pkg_use_enabled
+		try:
+			if parent is not None:
+				trees[root]["parent"] = parent
+				trees[root]["atom_graph"] = atom_graph
+			if priority is not None:
+				trees[root]["priority"] = priority
+			mycheck = portage.dep_check(depstring, None,
+				pkgsettings, myuse=myuse,
+				myroot=root, trees=trees)
+		finally:
+			self._dynamic_config._autounmask = _autounmask_backup
+			del mytrees["pkg_use_enabled"]
+			if parent is not None:
+				trees[root].pop("parent")
+				trees[root].pop("atom_graph")
+			if priority is not None:
+				trees[root].pop("priority")
+		if not mycheck[0]:
+			raise portage.exception.InvalidDependString(mycheck[1])
+		if parent is None:
+			selected_atoms = mycheck[1]
+		elif parent not in atom_graph:
+			selected_atoms = {parent : mycheck[1]}
+		else:
+			# Recursively traversed virtual dependencies, and their
+			# direct dependencies, are considered to have the same
+			# depth as direct dependencies.
+			if parent.depth is None:
+				virt_depth = None
+			else:
+				virt_depth = parent.depth + 1
+			chosen_atom_ids = frozenset(id(atom) for atom in mycheck[1])
+			selected_atoms = OrderedDict()
+			node_stack = [(parent, None, None)]
+			traversed_nodes = set()
+			while node_stack:
+				node, node_parent, parent_atom = node_stack.pop()
+				traversed_nodes.add(node)
+				if node is parent:
+					k = parent
+				else:
+					if node_parent is parent:
+						if priority is None:
+							node_priority = None
+						else:
+							node_priority = priority.copy()
+					else:
+						# virtuals only have runtime deps
+						node_priority = self._priority(runtime=True)
+
+					k = Dependency(atom=parent_atom,
+						blocker=parent_atom.blocker, child=node,
+						depth=virt_depth, parent=node_parent,
+						priority=node_priority, root=node.root)
+
+				child_atoms = []
+				selected_atoms[k] = child_atoms
+				for atom_node in atom_graph.child_nodes(node):
+					child_atom = atom_node[0]
+					if id(child_atom) not in chosen_atom_ids:
+						continue
+					child_atoms.append(child_atom)
+					for child_node in atom_graph.child_nodes(atom_node):
+						if child_node in traversed_nodes:
+							continue
+						if not portage.match_from_list(
+							child_atom, [child_node]):
+							# Typically this means that the atom
+							# specifies USE deps that are unsatisfied
+							# by the selected package. The caller will
+							# record this as an unsatisfied dependency
+							# when necessary.
+							continue
+						node_stack.append((child_node, node, child_atom))
+
+		return selected_atoms
+
+	def _expand_virt_from_graph(self, root, atom):
+		if not isinstance(atom, Atom):
+			atom = Atom(atom)
+		graphdb = self._dynamic_config.mydbapi[root]
+		match = graphdb.match_pkgs(atom)
+		if not match:
+			yield atom
+			return
+		pkg = match[-1]
+		if not pkg.cpv.startswith("virtual/"):
+			yield atom
+			return
+		try:
+			rdepend = self._select_atoms_from_graph(
+				pkg.root, pkg.metadata.get("RDEPEND", ""),
+				myuse=self._pkg_use_enabled(pkg),
+				parent=pkg, strict=False)
+		except InvalidDependString as e:
+			writemsg_level("!!! Invalid RDEPEND in " + \
+				"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+				(pkg.root, pkg.cpv, e),
+				noiselevel=-1, level=logging.ERROR)
+			yield atom
+			return
+
+		for atoms in rdepend.values():
+			for atom in atoms:
+				if hasattr(atom, "_orig_atom"):
+					# Ignore virtual atoms since we're only
+					# interested in expanding the real atoms.
+					continue
+				yield atom
+
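+	# Illustrative note (not part of the original code): for a hypothetical
+	# atom "virtual/linux-sources" that is matched in the graph by a
+	# new-style virtual package, _expand_virt_from_graph above yields the
+	# real atoms from the virtual's RDEPEND; a non-virtual or unmatched
+	# atom is yielded back unchanged.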
+	def _get_dep_chain(self, start_node, target_atom=None,
+		unsatisfied_dependency=False):
+		"""
+		Returns a list of (atom, node_type) pairs that represent a dep chain.
+		If target_atom is None, the first package shown is pkg's parent.
+		If target_atom is not None the first package shown is pkg.
+		If unsatisfied_dependency is True, the first parent selected is one
+		whose dependency is not satisfied by 'pkg'. This is needed for USE
+		changes.
+		(Does not support target_atom.)
+		"""
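+		# A hypothetical return value:
+		#   [("app-misc/foo-1.0[bar]", "ebuild"),
+		#    ("sys-apps/baz-2.3", "installed"),
+		#    ("@world", "set")]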
+		traversed_nodes = set()
+		dep_chain = []
+		node = start_node
+		child = None
+		all_parents = self._dynamic_config._parent_atoms
+
+		if target_atom is not None and isinstance(node, Package):
+			affecting_use = set()
+			for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
+				try:
+					affecting_use.update(extract_affecting_use(
+						node.metadata[dep_str], target_atom,
+						eapi=node.metadata["EAPI"]))
+				except InvalidDependString:
+					if not node.installed:
+						raise
+			affecting_use.difference_update(node.use.mask, node.use.force)
+			pkg_name = _unicode_decode("%s") % (node.cpv,)
+			if affecting_use:
+				usedep = []
+				for flag in affecting_use:
+					if flag in self._pkg_use_enabled(node):
+						usedep.append(flag)
+					else:
+						usedep.append("-"+flag)
+				pkg_name += "[%s]" % ",".join(usedep)
+
+			dep_chain.append((pkg_name, node.type_name))
+
+		while node is not None:
+			traversed_nodes.add(node)
+
+			if isinstance(node, DependencyArg):
+				if self._dynamic_config.digraph.parent_nodes(node):
+					node_type = "set"
+				else:
+					node_type = "argument"
+				dep_chain.append((_unicode_decode("%s") % (node,), node_type))
+
+			elif node is not start_node:
+				for ppkg, patom in all_parents[child]:
+					if ppkg == node:
+						atom = patom.unevaluated_atom
+						break
+
+				dep_strings = set()
+				for priority in self._dynamic_config.digraph.nodes[node][0][child]:
+					if priority.buildtime:
+						dep_strings.add(node.metadata["DEPEND"])
+					if priority.runtime:
+						dep_strings.add(node.metadata["RDEPEND"])
+					if priority.runtime_post:
+						dep_strings.add(node.metadata["PDEPEND"])
+
+				affecting_use = set()
+				for dep_str in dep_strings:
+					try:
+						affecting_use.update(extract_affecting_use(
+							dep_str, atom, eapi=node.metadata["EAPI"]))
+					except InvalidDependString:
+						if not node.installed:
+							raise
+
+				#Don't show flags as 'affecting' if the user can't change them.
+				affecting_use.difference_update(node.use.mask, \
+					node.use.force)
+
+				pkg_name = _unicode_decode("%s") % (node.cpv,)
+				if affecting_use:
+					usedep = []
+					for flag in affecting_use:
+						if flag in self._pkg_use_enabled(node):
+							usedep.append(flag)
+						else:
+							usedep.append("-"+flag)
+					pkg_name += "[%s]" % ",".join(usedep)
+
+				dep_chain.append((pkg_name, node.type_name))
+
+			if node not in self._dynamic_config.digraph:
+				# The parent is not in the graph due to backtracking.
+				break
+
+			# When traversing to parents, prefer arguments over packages
+			# since arguments are root nodes. Never traverse the same
+			# package twice, in order to prevent an infinite loop.
+			child = node
+			selected_parent = None
+			parent_arg = None
+			parent_merge = None
+			parent_unsatisfied = None
+
+			for parent in self._dynamic_config.digraph.parent_nodes(node):
+				if parent in traversed_nodes:
+					continue
+				if isinstance(parent, DependencyArg):
+					parent_arg = parent
+				else:
+					if isinstance(parent, Package) and \
+						parent.operation == "merge":
+						parent_merge = parent
+					if unsatisfied_dependency and node is start_node:
+						# Make sure that pkg doesn't satisfy parent's dependency.
+						# This ensures that we select the correct parent for use
+						# flag changes.
+						for ppkg, atom in all_parents[start_node]:
+							if parent is ppkg:
+								atom_set = InternalPackageSet(initial_atoms=(atom,))
+								if not atom_set.findAtomForPackage(start_node):
+									parent_unsatisfied = parent
+								break
+					else:
+						selected_parent = parent
+
+			if parent_unsatisfied is not None:
+				selected_parent = parent_unsatisfied
+			elif parent_merge is not None:
+				# Prefer parent in the merge list (bug #354747).
+				selected_parent = parent_merge
+			elif parent_arg is not None:
+				if self._dynamic_config.digraph.parent_nodes(parent_arg):
+					selected_parent = parent_arg
+				else:
+					dep_chain.append(
+						(_unicode_decode("%s") % (parent_arg,), "argument"))
+					selected_parent = None
+
+			node = selected_parent
+		return dep_chain
+
+	def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
+		dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
+		display_list = []
+		for node, node_type in dep_chain:
+			if node_type == "argument":
+				display_list.append("required by %s (argument)" % node)
+			else:
+				display_list.append("required by %s" % node)
+
+		msg = "#" + ", ".join(display_list) + "\n"
+		return msg
+
+
+	def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
+		check_backtrack=False, check_autounmask_breakage=False):
+		"""
+		When check_backtrack=True, no output is produced and
+		the method either returns or raises _backtrack_mask if
+		a matching package has been masked by backtracking.
+		"""
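+		# Illustrative output (hypothetical atom) when nothing matches and
+		# a close name exists:
+		#   emerge: there are no ebuilds to satisfy "dev-lang/pithon".
+		#   emerge: searching for similar names...
+		#   emerge: Maybe you meant dev-lang/python?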
+		backtrack_mask = False
+		autounmask_broke_use_dep = False
+		atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
+			allow_repo=True)
+		atom_set_with_use = InternalPackageSet(initial_atoms=(atom,),
+			allow_repo=True)
+		xinfo = '"%s"' % atom.unevaluated_atom
+		if arg:
+			xinfo = '"%s"' % arg
+		if isinstance(myparent, AtomArg):
+			xinfo = _unicode_decode('"%s"') % (myparent,)
+		# Discard null/ from failed cpv_expand category expansion.
+		xinfo = xinfo.replace("null/", "")
+		if root != "/":
+			xinfo = "%s for %s" % (xinfo, root)
+		masked_packages = []
+		missing_use = []
+		missing_use_adjustable = set()
+		required_use_unsatisfied = []
+		masked_pkg_instances = set()
+		missing_licenses = []
+		have_eapi_mask = False
+		pkgsettings = self._frozen_config.pkgsettings[root]
+		root_config = self._frozen_config.roots[root]
+		portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
+		vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
+		bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
+		dbs = self._dynamic_config._filtered_trees[root]["dbs"]
+		for db, pkg_type, built, installed, db_keys in dbs:
+			if installed:
+				continue
+			if hasattr(db, "xmatch"):
+				cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
+			else:
+				cpv_list = db.match(atom.without_use)
+
+			if atom.repo is None and hasattr(db, "getRepositories"):
+				repo_list = db.getRepositories()
+			else:
+				repo_list = [atom.repo]
+
+			# descending order
+			cpv_list.reverse()
+			for cpv in cpv_list:
+				for repo in repo_list:
+					if not db.cpv_exists(cpv, myrepo=repo):
+						continue
+
+					metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
+						built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
+					if metadata is not None and \
+						portage.eapi_is_supported(metadata["EAPI"]):
+						if not repo:
+							repo = metadata.get('repository')
+						pkg = self._pkg(cpv, pkg_type, root_config,
+							installed=installed, myrepo=repo)
+						if not atom_set.findAtomForPackage(pkg,
+							modified_use=self._pkg_use_enabled(pkg)):
+							continue
+						# pkg.metadata contains calculated USE for ebuilds,
+						# required later for getMissingLicenses.
+						metadata = pkg.metadata
+						if pkg in self._dynamic_config._runtime_pkg_mask:
+							backtrack_reasons = \
+								self._dynamic_config._runtime_pkg_mask[pkg]
+							mreasons.append('backtracking: %s' % \
+								', '.join(sorted(backtrack_reasons)))
+							backtrack_mask = True
+						if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
+							modified_use=self._pkg_use_enabled(pkg)):
+							mreasons = ["exclude option"]
+						if mreasons:
+							masked_pkg_instances.add(pkg)
+						if atom.unevaluated_atom.use:
+							try:
+								if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
+									or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
+									missing_use.append(pkg)
+									if atom_set_with_use.findAtomForPackage(pkg):
+										autounmask_broke_use_dep = True
+									if not mreasons:
+										continue
+							except InvalidAtom:
+								writemsg("violated_conditionals raised " + \
+									"InvalidAtom: '%s' parent: %s" % \
+									(atom, myparent), noiselevel=-1)
+								raise
+						if not mreasons and \
+							not pkg.built and \
+							pkg.metadata["REQUIRED_USE"] and \
+							eapi_has_required_use(pkg.metadata["EAPI"]):
+							if not check_required_use(
+								pkg.metadata["REQUIRED_USE"],
+								self._pkg_use_enabled(pkg),
+								pkg.iuse.is_valid_flag):
+								required_use_unsatisfied.append(pkg)
+								continue
+						root_slot = (pkg.root, pkg.slot_atom)
+						if pkg.built and root_slot in self._rebuild.rebuild_list:
+							mreasons = ["need to rebuild from source"]
+						elif pkg.installed and root_slot in self._rebuild.reinstall_list:
+							mreasons = ["need to rebuild from source"]
+						elif pkg.built and not mreasons:
+							mreasons = ["use flag configuration mismatch"]
+					masked_packages.append(
+						(root_config, pkgsettings, cpv, repo, metadata, mreasons))
+
+		if check_backtrack:
+			if backtrack_mask:
+				raise self._backtrack_mask()
+			else:
+				return
+
+		if check_autounmask_breakage:
+			if autounmask_broke_use_dep:
+				raise self._autounmask_breakage()
+			else:
+				return
+
+		missing_use_reasons = []
+		missing_iuse_reasons = []
+		for pkg in missing_use:
+			use = self._pkg_use_enabled(pkg)
+			missing_iuse = []
+			#Use the unevaluated atom here, because some flags might have
+			#been lost during evaluation.
+			required_flags = atom.unevaluated_atom.use.required
+			missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
+
+			mreasons = []
+			if missing_iuse:
+				mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
+				missing_iuse_reasons.append((pkg, mreasons))
+			else:
+				need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
+				need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
+
+				untouchable_flags = \
+					frozenset(chain(pkg.use.mask, pkg.use.force))
+				if untouchable_flags.intersection(
+					chain(need_enable, need_disable)):
+					continue
+
+				missing_use_adjustable.add(pkg)
+				required_use = pkg.metadata["REQUIRED_USE"]
+				required_use_warning = ""
+				if required_use:
+					old_use = self._pkg_use_enabled(pkg)
+					new_use = set(self._pkg_use_enabled(pkg))
+					for flag in need_enable:
+						new_use.add(flag)
+					for flag in need_disable:
+						new_use.discard(flag)
+					if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
+						not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
+							required_use_warning = ", this change violates use flag constraints " + \
+								"defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
+
+				if need_enable or need_disable:
+					changes = []
+					changes.extend(colorize("red", "+" + x) \
+						for x in need_enable)
+					changes.extend(colorize("blue", "-" + x) \
+						for x in need_disable)
+					mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
+					missing_use_reasons.append((pkg, mreasons))
+
+			if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
+				# Let's see if the violated use deps are conditional.
+				# If so, suggest to change them on the parent.
+
+				# If the child package is masked then a change to
+				# parent USE is not a valid solution (a normal mask
+				# message should be displayed instead).
+				if pkg in masked_pkg_instances:
+					continue
+
+				mreasons = []
+				violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
+					pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
+				if not (violated_atom.use.enabled or violated_atom.use.disabled):
+					#all violated use deps are conditional
+					changes = []
+					conditional = violated_atom.use.conditional
+					involved_flags = set(chain(conditional.equal, conditional.not_equal, \
+						conditional.enabled, conditional.disabled))
+
+					untouchable_flags = \
+						frozenset(chain(myparent.use.mask, myparent.use.force))
+					if untouchable_flags.intersection(involved_flags):
+						continue
+
+					required_use = myparent.metadata["REQUIRED_USE"]
+					required_use_warning = ""
+					if required_use:
+						old_use = self._pkg_use_enabled(myparent)
+						new_use = set(self._pkg_use_enabled(myparent))
+						for flag in involved_flags:
+							if flag in old_use:
+								new_use.discard(flag)
+							else:
+								new_use.add(flag)
+						if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
+							not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
+								required_use_warning = ", this change violates use flag constraints " + \
+									"defined by %s: '%s'" % (myparent.cpv, \
+									human_readable_required_use(required_use))
+
+					for flag in involved_flags:
+						if flag in self._pkg_use_enabled(myparent):
+							changes.append(colorize("blue", "-" + flag))
+						else:
+							changes.append(colorize("red", "+" + flag))
+					mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
+					if (myparent, mreasons) not in missing_use_reasons:
+						missing_use_reasons.append((myparent, mreasons))
+
+		unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
+			in missing_use_reasons if pkg not in masked_pkg_instances]
+
+		unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
+			in missing_iuse_reasons if pkg not in masked_pkg_instances]
+
+		show_missing_use = False
+		if unmasked_use_reasons:
+			# Only show the latest version.
+			show_missing_use = []
+			pkg_reason = None
+			parent_reason = None
+			for pkg, mreasons in unmasked_use_reasons:
+				if pkg is myparent:
+					if parent_reason is None:
+						#This happens if a use change on the parent
+						#leads to a satisfied conditional use dep.
+						parent_reason = (pkg, mreasons)
+				elif pkg_reason is None:
+					#Don't rely on the first pkg in unmasked_use_reasons
+					#being the highest version of the dependency.
+					pkg_reason = (pkg, mreasons)
+			if pkg_reason:
+				show_missing_use.append(pkg_reason)
+			if parent_reason:
+				show_missing_use.append(parent_reason)
+
+		elif unmasked_iuse_reasons:
+			masked_with_iuse = False
+			for pkg in masked_pkg_instances:
+				#Use atom.unevaluated_atom here, because some flags might
+				#have been lost during evaluation.
+				if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
+					# Package(s) with required IUSE are masked,
+					# so display a normal masking message.
+					masked_with_iuse = True
+					break
+			if not masked_with_iuse:
+				show_missing_use = unmasked_iuse_reasons
+
+		if required_use_unsatisfied:
+			# If there's a higher unmasked version in missing_use_adjustable
+			# then we want to show that instead.
+			for pkg in missing_use_adjustable:
+				if pkg not in masked_pkg_instances and \
+					pkg > required_use_unsatisfied[0]:
+					required_use_unsatisfied = False
+					break
+
+		mask_docs = False
+
+		if required_use_unsatisfied:
+			# We have an unmasked package that only requires USE adjustment
+			# in order to satisfy REQUIRED_USE, and nothing more. We assume
+			# that the user wants the latest version, so only the first
+			# instance is displayed.
+			pkg = required_use_unsatisfied[0]
+			output_cpv = pkg.cpv + _repo_separator + pkg.repo
+			writemsg_stdout("\n!!! " + \
+				colorize("BAD", "The ebuild selected to satisfy ") + \
+				colorize("INFORM", xinfo) + \
+				colorize("BAD", " has unmet requirements.") + "\n",
+				noiselevel=-1)
+			use_display = pkg_use_display(pkg, self._frozen_config.myopts)
+			writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
+				noiselevel=-1)
+			writemsg_stdout("\n  The following REQUIRED_USE flag constraints " + \
+				"are unsatisfied:\n", noiselevel=-1)
+			reduced_noise = check_required_use(
+				pkg.metadata["REQUIRED_USE"],
+				self._pkg_use_enabled(pkg),
+				pkg.iuse.is_valid_flag).tounicode()
+			writemsg_stdout("    %s\n" % \
+				human_readable_required_use(reduced_noise),
+				noiselevel=-1)
+			normalized_required_use = \
+				" ".join(pkg.metadata["REQUIRED_USE"].split())
+			if reduced_noise != normalized_required_use:
+				writemsg_stdout("\n  The above constraints " + \
+					"are a subset of the following complete expression:\n",
+					noiselevel=-1)
+				writemsg_stdout("    %s\n" % \
+					human_readable_required_use(normalized_required_use),
+					noiselevel=-1)
+			writemsg_stdout("\n", noiselevel=-1)
+
+		elif show_missing_use:
+			writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+			writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
+			for pkg, mreasons in show_missing_use:
+				writemsg_stdout("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
+
+		elif masked_packages:
+			writemsg_stdout("\n!!! " + \
+				colorize("BAD", "All ebuilds that could satisfy ") + \
+				colorize("INFORM", xinfo) + \
+				colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
+			writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
+			have_eapi_mask = show_masked_packages(masked_packages)
+			if have_eapi_mask:
+				writemsg_stdout("\n", noiselevel=-1)
+				msg = ("The current version of portage supports " + \
+					"EAPI '%s'. You must upgrade to a newer version" + \
+					" of portage before EAPI masked packages can" + \
+					" be installed.") % portage.const.EAPI
+				writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
+			writemsg_stdout("\n", noiselevel=-1)
+			mask_docs = True
+		else:
+			cp_exists = False
+			if not atom.cp.startswith("null/"):
+				for pkg in self._iter_match_pkgs_any(
+					root_config, Atom(atom.cp)):
+					cp_exists = True
+					break
+
+			writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+			if isinstance(myparent, AtomArg) and \
+				not cp_exists and \
+				self._frozen_config.myopts.get(
+				"--misspell-suggestions", "y") != "n":
+				cp = myparent.atom.cp.lower()
+				cat, pkg = portage.catsplit(cp)
+				if cat == "null":
+					cat = None
+
+				writemsg_stdout("\nemerge: searching for similar names..."
+					, noiselevel=-1)
+
+				all_cp = set()
+				all_cp.update(vardb.cp_all())
+				all_cp.update(portdb.cp_all())
+				if "--usepkg" in self._frozen_config.myopts:
+					all_cp.update(bindb.cp_all())
+				# discard dir containing no ebuilds
+				all_cp.discard(cp)
+
+				orig_cp_map = {}
+				for cp_orig in all_cp:
+					orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
+				all_cp = set(orig_cp_map)
+
+				if cat:
+					matches = difflib.get_close_matches(cp, all_cp)
+				else:
+					pkg_to_cp = {}
+					for other_cp in list(all_cp):
+						other_pkg = portage.catsplit(other_cp)[1]
+						if other_pkg == pkg:
+							# discard dir containing no ebuilds
+							all_cp.discard(other_cp)
+							continue
+						pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
+					pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
+					matches = []
+					for pkg_match in pkg_matches:
+						matches.extend(pkg_to_cp[pkg_match])
+
+				matches_orig_case = []
+				for cp in matches:
+					matches_orig_case.extend(orig_cp_map[cp])
+				matches = matches_orig_case
+
+				if len(matches) == 1:
+					writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
+						, noiselevel=-1)
+				elif len(matches) > 1:
+					writemsg_stdout(
+						"\nemerge: Maybe you meant any of these: %s?\n" % \
+						(", ".join(matches),), noiselevel=-1)
+				else:
+					# Generally, this would only happen if
+					# all dbapis are empty.
+					writemsg_stdout(" nothing similar found.\n"
+						, noiselevel=-1)
+		msg = []
+		if not isinstance(myparent, AtomArg):
+			# It's redundant to show parent for AtomArg since
+			# it's the same as 'xinfo' displayed above.
+			dep_chain = self._get_dep_chain(myparent, atom)
+			for node, node_type in dep_chain:
+				msg.append('(dependency required by "%s" [%s])' % \
+						(colorize('INFORM', _unicode_decode("%s") % \
+						(node)), node_type))
+
+		if msg:
+			writemsg_stdout("\n".join(msg), noiselevel=-1)
+			writemsg_stdout("\n", noiselevel=-1)
+
+		if mask_docs:
+			show_mask_docs()
+			writemsg_stdout("\n", noiselevel=-1)
+
+	def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
+		for db, pkg_type, built, installed, db_keys in \
+			self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
+			for pkg in self._iter_match_pkgs(root_config,
+				pkg_type, atom, onlydeps=onlydeps):
+				yield pkg
+
+	def _iter_match_pkgs(self, root_config, pkg_type, atom, onlydeps=False):
+		"""
+		Iterate over Package instances of pkg_type matching the given atom.
+		This does not check visibility and it also does not match USE for
+		unbuilt ebuilds since USE are lazily calculated after visibility
+		checks (to avoid the expense when possible).
+		"""
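+		# Illustrative (hypothetical) usage:
+		#   for pkg in self._iter_match_pkgs(root_config, "binary",
+		#       Atom("dev-libs/foo")):
+		#       ...  # Package instances, newest versions first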
+
+		db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
+
+		if hasattr(db, "xmatch"):
+			# For portdbapi we match only against the cpv, in order
+			# to bypass unnecessary cache access for things like IUSE
+			# and SLOT. Later, we cache the metadata in a Package
+			# instance, and use that for further matching. This
+			# optimization is especially relevant since
+			# portdbapi.aux_get() does not cache calls that have
+			# myrepo or mytree arguments.
+			cpv_list = db.xmatch("match-all-cpv-only", atom)
+		else:
+			cpv_list = db.match(atom)
+
+		# USE=multislot can make an installed package appear as if
+		# it doesn't satisfy a slot dependency. Rebuilding the ebuild
+		# won't do any good as long as USE=multislot is enabled since
+		# the newly built package still won't have the expected slot.
+		# Therefore, assume that such SLOT dependencies are already
+		# satisfied rather than forcing a rebuild.
+		installed = pkg_type == 'installed'
+		if installed and not cpv_list and atom.slot:
+			for cpv in db.match(atom.cp):
+				slot_available = False
+				for other_db, other_type, other_built, \
+					other_installed, other_keys in \
+					self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
+					try:
+						if atom.slot == \
+							other_db.aux_get(cpv, ["SLOT"])[0]:
+							slot_available = True
+							break
+					except KeyError:
+						pass
+				if not slot_available:
+					continue
+				inst_pkg = self._pkg(cpv, "installed",
+					root_config, installed=installed, myrepo = atom.repo)
+				# Remove the slot from the atom and verify that
+				# the package matches the resulting atom.
+				if portage.match_from_list(
+					atom.without_slot, [inst_pkg]):
+					yield inst_pkg
+					return
+
+		if cpv_list:
+			atom_set = InternalPackageSet(initial_atoms=(atom,),
+				allow_repo=True)
+			if atom.repo is None and hasattr(db, "getRepositories"):
+				repo_list = db.getRepositories()
+			else:
+				repo_list = [atom.repo]
+
+			# descending order
+			cpv_list.reverse()
+			for cpv in cpv_list:
+				for repo in repo_list:
+
+					try:
+						pkg = self._pkg(cpv, pkg_type, root_config,
+							installed=installed, onlydeps=onlydeps, myrepo=repo)
+					except portage.exception.PackageNotFound:
+						pass
+					else:
+						# A cpv can be returned from dbapi.match() as an
+						# old-style virtual match even in cases when the
+						# package does not actually PROVIDE the virtual.
+						# Filter out any such false matches here.
+
+						# Make sure that cpv from the current repo satisfies the atom.
+						# This might not be the case if there are several repos with
+						# the same cpv, but different metadata keys, like SLOT.
+						# Also, for portdbapi, parts of the match that require
+						# metadata access are deferred until we have cached the
+						# metadata in a Package instance.
+						if not atom_set.findAtomForPackage(pkg,
+							modified_use=self._pkg_use_enabled(pkg)):
+							continue
+						yield pkg
+
+	def _select_pkg_highest_available(self, root, atom, onlydeps=False):
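+		# Results are memoized per (root, atom, onlydeps) key in
+		# _highest_pkg_cache; _set_args() clears this cache whenever the
+		# argument set changes, since arguments influence selections.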
+		cache_key = (root, atom, onlydeps)
+		ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
+		if ret is not None:
+			pkg, existing = ret
+			if pkg and not existing:
+				existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+				if existing and existing == pkg:
+					# Update the cache to reflect that the
+					# package has been added to the graph.
+					ret = pkg, pkg
+					self._dynamic_config._highest_pkg_cache[cache_key] = ret
+			return ret
+		ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
+		self._dynamic_config._highest_pkg_cache[cache_key] = ret
+		pkg, existing = ret
+		if pkg is not None:
+			if self._pkg_visibility_check(pkg) and \
+				not (pkg.installed and pkg.masks):
+				self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
+		return ret
+
+	def _want_installed_pkg(self, pkg):
+		"""
+		Given an installed package returned from select_pkg, return
+		True if the user has not explicitly requested that this package
+		be replaced (typically via an atom on the command line).
+		"""
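+		# Example (hypothetical): if `emerge dev-libs/foo` was run and pkg
+		# is the installed dev-libs/foo, an argument atom matches it, so
+		# this returns False and the package becomes a replacement
+		# candidate; with "selective" in myparams it always returns True.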
+		if "selective" not in self._dynamic_config.myparams and \
+			pkg.root == self._frozen_config.target_root:
+			if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+				modified_use=self._pkg_use_enabled(pkg)):
+				return True
+			try:
+				next(self._iter_atoms_for_pkg(pkg))
+			except StopIteration:
+				pass
+			except portage.exception.InvalidDependString:
+				pass
+			else:
+				return False
+		return True
+
+	def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
+		pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
+
+		default_selection = (pkg, existing)
+
+		if self._dynamic_config._autounmask is True:
+			if pkg is not None and \
+				pkg.installed and \
+				not self._want_installed_pkg(pkg):
+				pkg = None
+
+			for only_use_changes in True, False:
+				if pkg is not None:
+					break
+
+				for allow_unmasks in (False, True):
+					if only_use_changes and allow_unmasks:
+						continue
+
+					if pkg is not None:
+						break
+
+					pkg, existing = \
+						self._wrapped_select_pkg_highest_available_imp(
+							root, atom, onlydeps=onlydeps,
+							allow_use_changes=True,
+							allow_unstable_keywords=(not only_use_changes),
+							allow_license_changes=(not only_use_changes),
+							allow_unmasks=allow_unmasks)
+
+					if pkg is not None and \
+						pkg.installed and \
+						not self._want_installed_pkg(pkg):
+						pkg = None
+
+			if self._dynamic_config._need_restart:
+				return None, None
+
+		if pkg is None:
+			# This ensures that we can fall back to an installed package
+			# that may have been rejected in the autounmask path above.
+			return default_selection
+
+		return pkg, existing
+
+	def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
+
+		if pkg.visible:
+			return True
+
+		if pkg in self._dynamic_config.digraph:
+			# Sometimes we need to temporarily disable
+			# dynamic_config._autounmask, but for overall
+			# consistency in dependency resolution, in any
+			# case we want to respect autounmask visibility
+			# for packages that have already been added to
+			# the dependency graph.
+			return True
+
+		if not self._dynamic_config._autounmask:
+			return False
+
+		pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+		root_config = self._frozen_config.roots[pkg.root]
+		mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
+
+		masked_by_unstable_keywords = False
+		masked_by_missing_keywords = False
+		missing_licenses = None
+		masked_by_something_else = False
+		masked_by_p_mask = False
+
+		for reason in mreasons:
+			hint = reason.unmask_hint
+
+			if hint is None:
+				masked_by_something_else = True
+			elif hint.key == "unstable keyword":
+				masked_by_unstable_keywords = True
+				if hint.value == "**":
+					masked_by_missing_keywords = True
+			elif hint.key == "p_mask":
+				masked_by_p_mask = True
+			elif hint.key == "license":
+				missing_licenses = hint.value
+			else:
+				masked_by_something_else = True
+
+		if masked_by_something_else:
+			return False
+
+		if pkg in self._dynamic_config._needed_unstable_keywords:
+			#If the package is already keyworded, remove the mask.
+			masked_by_unstable_keywords = False
+			masked_by_missing_keywords = False
+
+		if pkg in self._dynamic_config._needed_p_mask_changes:
+			#If the p_mask change is already scheduled, remove the mask.
+			masked_by_p_mask = False
+
+		if missing_licenses:
+			#If the needed licenses are already unmasked, remove the mask.
+			missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
+
+		if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
+			#Package has already been unmasked.
+			return True
+
+		#We treat missing keywords in the same way as masks.
+		if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
+			(masked_by_missing_keywords and not allow_unmasks) or \
+			(masked_by_p_mask and not allow_unmasks) or \
+			(missing_licenses and not allow_license_changes):
+			#We are not allowed to do the needed changes.
+			return False
+
+		if masked_by_unstable_keywords:
+			self._dynamic_config._needed_unstable_keywords.add(pkg)
+			backtrack_infos = self._dynamic_config._backtrack_infos
+			backtrack_infos.setdefault("config", {})
+			backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
+			backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
+
+		if masked_by_p_mask:
+			self._dynamic_config._needed_p_mask_changes.add(pkg)
+			backtrack_infos = self._dynamic_config._backtrack_infos
+			backtrack_infos.setdefault("config", {})
+			backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
+			backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)
+
+		if missing_licenses:
+			self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
+			backtrack_infos = self._dynamic_config._backtrack_infos
+			backtrack_infos.setdefault("config", {})
+			backtrack_infos["config"].setdefault("needed_license_changes", set())
+			backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
+
+		return True
+
+	def _pkg_use_enabled(self, pkg, target_use=None):
+		"""
+		If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
+		If target_use is given, the needed changes are computed to make the package usable.
+		Example: target_use = { "foo": True, "bar": False }
+		The flags in target_use must be in the pkg's IUSE.
+		"""
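+		# Sketch (hypothetical flag names): with target_use = {"foo": True}
+		# and "foo" disabled in pkg's USE, the change is recorded in
+		# _needed_use_config_changes and the new USE set is returned;
+		# if "foo" is masked/forced or the change would violate
+		# REQUIRED_USE, the old USE set is returned unchanged.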
+		if pkg.built:
+			return pkg.use.enabled
+		needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
+
+		if target_use is None:
+			if needed_use_config_change is None:
+				return pkg.use.enabled
+			else:
+				return needed_use_config_change[0]
+
+		if needed_use_config_change is not None:
+			old_use = needed_use_config_change[0]
+			new_use = set()
+			old_changes = needed_use_config_change[1]
+			new_changes = old_changes.copy()
+		else:
+			old_use = pkg.use.enabled
+			new_use = set()
+			old_changes = {}
+			new_changes = {}
+
+		for flag, state in target_use.items():
+			if state:
+				if flag not in old_use:
+					if new_changes.get(flag) is False:
+						return old_use
+					new_changes[flag] = True
+				new_use.add(flag)
+			else:
+				if flag in old_use:
+					if new_changes.get(flag) is True:
+						return old_use
+					new_changes[flag] = False
+		new_use.update(old_use.difference(target_use))
+
+		def want_restart_for_use_change(pkg, new_use):
+			if pkg not in self._dynamic_config.digraph.nodes:
+				return False
+
+			for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
+				dep = pkg.metadata[key]
+				old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
+				new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
+
+				if old_val != new_val:
+					return True
+
+			parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+			if not parent_atoms:
+				return False
+
+			new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
+			for ppkg, atom in parent_atoms:
+				if atom.use and \
+					atom.use.required.intersection(changes):
+					return True
+
+			return False
+
+		if new_changes != old_changes:
+			#Don't do the change if it violates REQUIRED_USE.
+			required_use = pkg.metadata["REQUIRED_USE"]
+			if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
+				not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
+				return old_use
+
+			if pkg.use.mask.intersection(new_changes) or \
+				pkg.use.force.intersection(new_changes):
+				return old_use
+
+			self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
+			backtrack_infos = self._dynamic_config._backtrack_infos
+			backtrack_infos.setdefault("config", {})
+			backtrack_infos["config"].setdefault("needed_use_config_changes", [])
+			backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
+			if want_restart_for_use_change(pkg, new_use):
+				self._dynamic_config._need_restart = True
+		return new_use
+
+	def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
+		allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
+		root_config = self._frozen_config.roots[root]
+		pkgsettings = self._frozen_config.pkgsettings[root]
+		dbs = self._dynamic_config._filtered_trees[root]["dbs"]
+		vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
+		portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
+		# List of acceptable packages, ordered by type preference.
+		matched_packages = []
+		matched_pkgs_ignore_use = []
+		highest_version = None
+		if not isinstance(atom, portage.dep.Atom):
+			atom = portage.dep.Atom(atom)
+		atom_cp = atom.cp
+		atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
+		existing_node = None
+		myeb = None
+		rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
+		usepkg = "--usepkg" in self._frozen_config.myopts
+		usepkgonly = "--usepkgonly" in self._frozen_config.myopts
+		empty = "empty" in self._dynamic_config.myparams
+		selective = "selective" in self._dynamic_config.myparams
+		reinstall = False
+		avoid_update = "--update" not in self._frozen_config.myopts
+		dont_miss_updates = "--update" in self._frozen_config.myopts
+		use_ebuild_visibility = self._frozen_config.myopts.get(
+			'--use-ebuild-visibility', 'n') != 'n'
+		reinstall_atoms = self._frozen_config.reinstall_atoms
+		usepkg_exclude = self._frozen_config.usepkg_exclude
+		useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
+		matched_oldpkg = []
+		# Behavior of the "selective" parameter depends on
+		# whether or not a package matches an argument atom.
+		# If an installed package provides an old-style
+		# virtual that is no longer provided by an available
+		# package, the installed package may match an argument
+		# atom even though none of the available packages do.
+		# Therefore, "selective" logic does not consider
+		# whether or not an installed package matches an
+		# argument atom. It only considers whether or not
+		# available packages match argument atoms, which is
+		# represented by the found_available_arg flag.
+		found_available_arg = False
+		packages_with_invalid_use_config = []
+		for find_existing_node in True, False:
+			if existing_node:
+				break
+			for db, pkg_type, built, installed, db_keys in dbs:
+				if existing_node:
+					break
+				if installed and not find_existing_node:
+					want_reinstall = reinstall or empty or \
+						(found_available_arg and not selective)
+					if want_reinstall and matched_packages:
+						continue
+
+				# Ignore USE deps for the initial match since we want to
+				# ensure that updates aren't missed solely due to the user's
+				# USE configuration.
+				for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use, 
+					onlydeps=onlydeps):
+					if pkg in self._dynamic_config._runtime_pkg_mask:
+						# The package has been masked by the backtracking logic
+						continue
+					root_slot = (pkg.root, pkg.slot_atom)
+					if pkg.built and root_slot in self._rebuild.rebuild_list:
+						continue
+					if (pkg.installed and
+						root_slot in self._rebuild.reinstall_list):
+						continue
+
+					if not pkg.installed and \
+						self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
+							modified_use=self._pkg_use_enabled(pkg)):
+						continue
+
+					if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
+						modified_use=self._pkg_use_enabled(pkg)):
+						break
+
+					useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
+						modified_use=self._pkg_use_enabled(pkg))
+
+					if packages_with_invalid_use_config and (not built or not useoldpkg) and \
+						(not pkg.installed or dont_miss_updates):
+						# Check if a higher version was rejected due to user
+						# USE configuration. The packages_with_invalid_use_config
+						# list only contains unbuilt ebuilds since USE can't
+						# be changed for built packages.
+						higher_version_rejected = False
+						repo_priority = pkg.repo_priority
+						for rejected in packages_with_invalid_use_config:
+							if rejected.cp != pkg.cp:
+								continue
+							if rejected > pkg:
+								higher_version_rejected = True
+								break
+							if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
+								# If version is identical then compare
+								# repo priority (see bug #350254).
+								rej_repo_priority = rejected.repo_priority
+								if rej_repo_priority is not None and \
+									(repo_priority is None or
+									rej_repo_priority > repo_priority):
+									higher_version_rejected = True
+									break
+						if higher_version_rejected:
+							continue
+
+					cpv = pkg.cpv
+					reinstall_for_flags = None
+
+					if not pkg.installed or \
+						(matched_packages and not avoid_update):
+						# Only enforce visibility on installed packages
+						# if there is at least one other visible package
+						# available. By filtering installed masked packages
+						# here, packages that have been masked since they
+						# were installed can be automatically downgraded
+						# to an unmasked version. NOTE: This code needs to
+						# be consistent with masking behavior inside
+						# _dep_check_composite_db, in order to prevent
+						# incorrect choices in || deps like bug #351828.
+
+						if not self._pkg_visibility_check(pkg, \
+							allow_unstable_keywords=allow_unstable_keywords,
+							allow_license_changes=allow_license_changes,
+							allow_unmasks=allow_unmasks):
+							continue
+
+						# Enable upgrade or downgrade to a version
+						# with visible KEYWORDS when the installed
+						# version is masked by KEYWORDS, but never
+						# reinstall the same exact version only due
+						# to a KEYWORDS mask. See bug #252167.
+
+						if pkg.type_name != "ebuild" and matched_packages:
+							# Don't re-install a binary package that is
+							# identical to the currently installed package
+							# (see bug #354441).
+							identical_binary = False
+							if usepkg and pkg.installed:
+								for selected_pkg in matched_packages:
+									if selected_pkg.type_name == "binary" and \
+										selected_pkg.cpv == pkg.cpv and \
+										selected_pkg.metadata.get('BUILD_TIME') == \
+										pkg.metadata.get('BUILD_TIME'):
+										identical_binary = True
+										break
+
+							if not identical_binary:
+								# If the ebuild no longer exists or its
+								# keywords have been dropped, reject built
+								# instances (installed or binary).
+								# If --usepkgonly is enabled, assume that
+								# the ebuild status should be ignored.
+								if not use_ebuild_visibility and (usepkgonly or useoldpkg):
+									if pkg.installed and pkg.masks:
+										continue
+								else:
+									try:
+										pkg_eb = self._pkg(
+											pkg.cpv, "ebuild", root_config, myrepo=pkg.repo)
+									except portage.exception.PackageNotFound:
+										pkg_eb_visible = False
+										for pkg_eb in self._iter_match_pkgs(pkg.root_config,
+											"ebuild", Atom("=%s" % (pkg.cpv,))):
+											if self._pkg_visibility_check(pkg_eb, \
+												allow_unstable_keywords=allow_unstable_keywords,
+												allow_license_changes=allow_license_changes,
+												allow_unmasks=allow_unmasks):
+												pkg_eb_visible = True
+												break
+										if not pkg_eb_visible:
+											continue
+									else:
+										if not self._pkg_visibility_check(pkg_eb, \
+											allow_unstable_keywords=allow_unstable_keywords,
+											allow_license_changes=allow_license_changes,
+											allow_unmasks=allow_unmasks):
+											continue
+
+					# Calculation of USE for unbuilt ebuilds is relatively
+					# expensive, so it is only performed lazily, after the
+					# above visibility checks are complete.
+
+					myarg = None
+					if root == self._frozen_config.target_root:
+						try:
+							myarg = next(self._iter_atoms_for_pkg(pkg))
+						except StopIteration:
+							pass
+						except portage.exception.InvalidDependString:
+							if not installed:
+								# masked by corruption
+								continue
+					if not installed and myarg:
+						found_available_arg = True
+
+					if atom.unevaluated_atom.use:
+						#Make sure we don't miss a 'missing IUSE'.
+						if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
+							# Don't add this to packages_with_invalid_use_config
+							# since IUSE cannot be adjusted by the user.
+							continue
+
+					if atom.use:
+
+						matched_pkgs_ignore_use.append(pkg)
+						if allow_use_changes and not pkg.built:
+							target_use = {}
+							for flag in atom.use.enabled:
+								target_use[flag] = True
+							for flag in atom.use.disabled:
+								target_use[flag] = False
+							use = self._pkg_use_enabled(pkg, target_use)
+						else:
+							use = self._pkg_use_enabled(pkg)
+
+						use_match = True
+						can_adjust_use = not pkg.built
+						missing_enabled = atom.use.missing_enabled.difference(pkg.iuse.all)
+						missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
+
+						if atom.use.enabled:
+							if atom.use.enabled.intersection(missing_disabled):
+								use_match = False
+								can_adjust_use = False
+							need_enabled = atom.use.enabled.difference(use)
+							if need_enabled:
+								need_enabled = need_enabled.difference(missing_enabled)
+								if need_enabled:
+									use_match = False
+									if can_adjust_use:
+										if pkg.use.mask.intersection(need_enabled):
+											can_adjust_use = False
+
+						if atom.use.disabled:
+							if atom.use.disabled.intersection(missing_enabled):
+								use_match = False
+								can_adjust_use = False
+							need_disabled = atom.use.disabled.intersection(use)
+							if need_disabled:
+								need_disabled = need_disabled.difference(missing_disabled)
+								if need_disabled:
+									use_match = False
+									if can_adjust_use:
+										if pkg.use.force.difference(
+											pkg.use.mask).intersection(need_disabled):
+											can_adjust_use = False
+
+						if not use_match:
+							if can_adjust_use:
+								# Above we must ensure that this package has
+								# absolutely no use.force, use.mask, or IUSE
+								# issues that the user typically can't make
+								# adjustments to solve (see bug #345979).
+								# FIXME: Conditional USE deps complicate
+								# issues. This code currently excludes cases
+								# in which the user can adjust the parent
+								# package's USE in order to satisfy the dep.
+								packages_with_invalid_use_config.append(pkg)
+							continue
+
+					if pkg.cp == atom_cp:
+						if highest_version is None:
+							highest_version = pkg
+						elif pkg > highest_version:
+							highest_version = pkg
+					# At this point, we've found the highest visible
+					# match from the current repo. Any lower versions
+					# from this repo are ignored, so the loop
+					# will always end with a break statement below
+					# this point.
+					if find_existing_node:
+						e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+						if not e_pkg:
+							break
+						# Use PackageSet.findAtomForPackage()
+						# for PROVIDE support.
+						if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
+							if highest_version and \
+								e_pkg.cp == atom_cp and \
+								e_pkg < highest_version and \
+								e_pkg.slot_atom != highest_version.slot_atom:
+								# There is a higher version available in a
+								# different slot, so this existing node is
+								# irrelevant.
+								pass
+							else:
+								matched_packages.append(e_pkg)
+								existing_node = e_pkg
+						break
+					# Compare built package to current config and
+					# reject the built package if necessary.
+					if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
+						("--newuse" in self._frozen_config.myopts or \
+						"--reinstall" in self._frozen_config.myopts or \
+						"--binpkg-respect-use" in self._frozen_config.myopts):
+						iuses = pkg.iuse.all
+						old_use = self._pkg_use_enabled(pkg)
+						if myeb:
+							pkgsettings.setcpv(myeb)
+						else:
+							pkgsettings.setcpv(pkg)
+						now_use = pkgsettings["PORTAGE_USE"].split()
+						forced_flags = set()
+						forced_flags.update(pkgsettings.useforce)
+						forced_flags.update(pkgsettings.usemask)
+						cur_iuse = iuses
+						if myeb and not usepkgonly and not useoldpkg:
+							cur_iuse = myeb.iuse.all
+						if self._reinstall_for_flags(forced_flags,
+							old_use, iuses,
+							now_use, cur_iuse):
+							break
+					# Compare current config to installed package
+					# and do not reinstall if possible.
+					if not installed and not useoldpkg and \
+						("--newuse" in self._frozen_config.myopts or \
+						"--reinstall" in self._frozen_config.myopts) and \
+						cpv in vardb.match(atom):
+						forced_flags = set()
+						forced_flags.update(pkg.use.force)
+						forced_flags.update(pkg.use.mask)
+						inst_pkg = vardb.match_pkgs('=' + pkg.cpv)[0]
+						old_use = inst_pkg.use.enabled
+						old_iuse = inst_pkg.iuse.all
+						cur_use = self._pkg_use_enabled(pkg)
+						cur_iuse = pkg.iuse.all
+						reinstall_for_flags = \
+							self._reinstall_for_flags(
+							forced_flags, old_use, old_iuse,
+							cur_use, cur_iuse)
+						if reinstall_for_flags:
+							reinstall = True
+					if reinstall_atoms.findAtomForPackage(pkg, \
+							modified_use=self._pkg_use_enabled(pkg)):
+						reinstall = True
+					if not built:
+						myeb = pkg
+					elif useoldpkg:
+						matched_oldpkg.append(pkg)
+					matched_packages.append(pkg)
+					if reinstall_for_flags:
+						self._dynamic_config._reinstall_nodes[pkg] = \
+							reinstall_for_flags
+					break
+
+		if not matched_packages:
+			return None, None
+
+		if "--debug" in self._frozen_config.myopts:
+			for pkg in matched_packages:
+				portage.writemsg("%s %s%s%s\n" % \
+					((pkg.type_name + ":").rjust(10),
+					pkg.cpv, _repo_separator, pkg.repo), noiselevel=-1)
+
+		# Filter out any old-style virtual matches if they are
+		# mixed with new-style virtual matches.
+		cp = atom.cp
+		if len(matched_packages) > 1 and \
+			"virtual" == portage.catsplit(cp)[0]:
+			for pkg in matched_packages:
+				if pkg.cp != cp:
+					continue
+				# Got a new-style virtual, so filter
+				# out any old-style virtuals.
+				matched_packages = [pkg for pkg in matched_packages \
+					if pkg.cp == cp]
+				break
+
+		if existing_node is not None and \
+			existing_node in matched_packages:
+			return existing_node, existing_node
+
+		if len(matched_packages) > 1:
+			if rebuilt_binaries:
+				inst_pkg = None
+				built_pkg = None
+				unbuilt_pkg = None
+				for pkg in matched_packages:
+					if pkg.installed:
+						inst_pkg = pkg
+					elif pkg.built:
+						built_pkg = pkg
+					else:
+						if unbuilt_pkg is None or pkg > unbuilt_pkg:
+							unbuilt_pkg = pkg
+				if built_pkg is not None and inst_pkg is not None:
+					# Only reinstall if binary package BUILD_TIME is
+					# non-empty, in order to avoid cases like
+					# bug #306659 where BUILD_TIME fields are missing
+					# in local and/or remote Packages file.
+					try:
+						built_timestamp = int(built_pkg.metadata['BUILD_TIME'])
+					except (KeyError, ValueError):
+						built_timestamp = 0
+
+					try:
+						installed_timestamp = int(inst_pkg.metadata['BUILD_TIME'])
+					except (KeyError, ValueError):
+						installed_timestamp = 0
+
+					if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
+						pass
+					elif "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
+						minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
+						if built_timestamp and \
+							built_timestamp > installed_timestamp and \
+							built_timestamp >= minimal_timestamp:
+							return built_pkg, existing_node
+					else:
+						#Don't care if the binary has an older BUILD_TIME than the installed
+						#package. This is for closely tracking a binhost.
+						#Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
+						#pulled in here.
+						if built_timestamp and \
+							built_timestamp != installed_timestamp:
+							return built_pkg, existing_node
+
+			for pkg in matched_packages:
+				if pkg.installed and pkg.invalid:
+					matched_packages = [x for x in \
+						matched_packages if x is not pkg]
+
+			if avoid_update:
+				for pkg in matched_packages:
+					if pkg.installed and self._pkg_visibility_check(pkg, \
+						allow_unstable_keywords=allow_unstable_keywords,
+						allow_license_changes=allow_license_changes,
+						allow_unmasks=allow_unmasks):
+						return pkg, existing_node
+
+			visible_matches = []
+			if matched_oldpkg:
+				visible_matches = [pkg.cpv for pkg in matched_oldpkg \
+					if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
+						allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
+			if not visible_matches:
+				visible_matches = [pkg.cpv for pkg in matched_packages \
+					if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
+						allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
+			if visible_matches:
+				bestmatch = portage.best(visible_matches)
+			else:
+				# all are masked, so ignore visibility
+				bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
+			matched_packages = [pkg for pkg in matched_packages \
+				if portage.dep.cpvequal(pkg.cpv, bestmatch)]
+
+		# ordered by type preference ("ebuild" type is the last resort)
+		return matched_packages[-1], existing_node
+
+	def _select_pkg_from_graph(self, root, atom, onlydeps=False):
+		"""
+		Select packages that have already been added to the graph or
+		those that are installed and have not been scheduled for
+		replacement.
+		"""
+		graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
+		matches = graph_db.match_pkgs(atom)
+		if not matches:
+			return None, None
+		pkg = matches[-1] # highest match
+		in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+		return pkg, in_graph
+
+	def _select_pkg_from_installed(self, root, atom, onlydeps=False):
+		"""
+		Select packages that are installed.
+		"""
+		vardb = self._dynamic_config._graph_trees[root]["vartree"].dbapi
+		matches = vardb.match_pkgs(atom)
+		if not matches:
+			return None, None
+		if len(matches) > 1:
+			unmasked = [pkg for pkg in matches if \
+				self._pkg_visibility_check(pkg)]
+			if unmasked:
+				if len(unmasked) == 1:
+					matches = unmasked
+				else:
+					# Account for packages with masks (like KEYWORDS masks)
+					# that are usually ignored in visibility checks for
+					# installed packages, in order to handle cases like
+					# bug #350285.
+					unmasked = [pkg for pkg in matches if not pkg.masks]
+					if unmasked:
+						matches = unmasked
+		pkg = matches[-1] # highest match
+		in_graph = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
+		return pkg, in_graph
+
+	def _complete_graph(self, required_sets=None):
+		"""
+		Add any deep dependencies of required sets (args, system, world) that
+		have not been pulled into the graph yet. This ensures that the graph
+		is consistent such that initially satisfied deep dependencies are not
+		broken in the new graph. Initially unsatisfied dependencies are
+		irrelevant since we only want to avoid breaking dependencies that are
+		initially satisfied.
+
+		Since this method can consume enough time to disturb users, it is
+		currently only enabled by the --complete-graph option.
+
+		@param required_sets: contains required sets (currently only used
+			for depclean and prune removal operations)
+		@type required_sets: dict
+		"""
+		if "--buildpkgonly" in self._frozen_config.myopts or \
+			"recurse" not in self._dynamic_config.myparams:
+			return 1
+
+		if "complete" not in self._dynamic_config.myparams:
+			# Automatically enable complete mode if there are any
+			# downgrades, since they often break dependencies
+			# (like in bug #353613).
+			have_downgrade = False
+			for node in self._dynamic_config.digraph:
+				if not isinstance(node, Package) or \
+					node.operation != "merge":
+					continue
+				vardb = self._frozen_config.roots[
+					node.root].trees["vartree"].dbapi
+				inst_pkg = vardb.match_pkgs(node.slot_atom)
+				if inst_pkg and inst_pkg[0] > node:
+					have_downgrade = True
+					break
+
+			if have_downgrade:
+				self._dynamic_config.myparams["complete"] = True
+			else:
+				# Skip complete graph mode, in order to avoid consuming
+				# enough time to disturb users.
+				return 1
+
+		self._load_vdb()
+
+		# Put the depgraph into a mode that causes it to only
+		# select packages that have already been added to the
+		# graph or those that are installed and have not been
+		# scheduled for replacement. Also, toggle the "deep"
+		# parameter so that all dependencies are traversed and
+		# accounted for.
+		self._select_atoms = self._select_atoms_from_graph
+		if "remove" in self._dynamic_config.myparams:
+			self._select_package = self._select_pkg_from_installed
+		else:
+			self._select_package = self._select_pkg_from_graph
+			self._dynamic_config._traverse_ignored_deps = True
+		already_deep = self._dynamic_config.myparams.get("deep") is True
+		if not already_deep:
+			self._dynamic_config.myparams["deep"] = True
+
+		# Invalidate the package selection cache, since
+		# _select_package has just changed implementations.
+		for trees in self._dynamic_config._filtered_trees.values():
+			trees["porttree"].dbapi._clear_cache()
+
+		args = self._dynamic_config._initial_arg_list[:]
+		for root in self._frozen_config.roots:
+			if root != self._frozen_config.target_root and \
+				"remove" in self._dynamic_config.myparams:
+				# Only pull in deps for the relevant root.
+				continue
+			depgraph_sets = self._dynamic_config.sets[root]
+			required_set_names = self._frozen_config._required_set_names.copy()
+			remaining_args = required_set_names.copy()
+			if required_sets is None or root not in required_sets:
+				pass
+			else:
+				# Removal actions may override sets with temporary
+				# replacements that have had atoms removed in order
+				# to implement --deselect behavior.
+				required_set_names = set(required_sets[root])
+				depgraph_sets.sets.clear()
+				depgraph_sets.sets.update(required_sets[root])
+			if "remove" not in self._dynamic_config.myparams and \
+				root == self._frozen_config.target_root and \
+				already_deep:
+				remaining_args.difference_update(depgraph_sets.sets)
+			if not remaining_args and \
+				not self._dynamic_config._ignored_deps and \
+				not self._dynamic_config._dep_stack:
+				continue
+			root_config = self._frozen_config.roots[root]
+			for s in required_set_names:
+				pset = depgraph_sets.sets.get(s)
+				if pset is None:
+					pset = root_config.sets[s]
+				atom = SETPREFIX + s
+				args.append(SetArg(arg=atom, pset=pset,
+					root_config=root_config))
+
+		self._set_args(args)
+		for arg in self._expand_set_args(args, add_to_digraph=True):
+			for atom in arg.pset.getAtoms():
+				self._dynamic_config._dep_stack.append(
+					Dependency(atom=atom, root=arg.root_config.root,
+						parent=arg))
+
+		if self._dynamic_config._ignored_deps:
+			self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
+			self._dynamic_config._ignored_deps = []
+		if not self._create_graph(allow_unsatisfied=True):
+			return 0
+		# Check the unsatisfied deps to see if any initially satisfied deps
+		# will become unsatisfied due to an upgrade. Initially unsatisfied
+		# deps are irrelevant since we only want to avoid breaking deps
+		# that are initially satisfied.
+		while self._dynamic_config._unsatisfied_deps:
+			dep = self._dynamic_config._unsatisfied_deps.pop()
+			vardb = self._frozen_config.roots[
+				dep.root].trees["vartree"].dbapi
+			matches = vardb.match_pkgs(dep.atom)
+			if not matches:
+				self._dynamic_config._initially_unsatisfied_deps.append(dep)
+				continue
+			# A scheduled installation broke a deep dependency.
+			# Add the installed package to the graph so that it
+			# will be appropriately reported as a slot collision
+			# (possibly solvable via backtracking).
+			pkg = matches[-1] # highest match
+			if not self._add_pkg(pkg, dep):
+				return 0
+			if not self._create_graph(allow_unsatisfied=True):
+				return 0
+		return 1
+
+	def _pkg(self, cpv, type_name, root_config, installed=False,
+		onlydeps=False, myrepo=None):
+		"""
+		Get a package instance from the cache, or create a new
+		one if necessary. Raises PackageNotFound from aux_get if it
+		fails for some reason (package does not exist or is
+		corrupt).
+		"""
+
+		# Ensure that we use the specially optimized RootConfig instance
+		# that refers to FakeVartree instead of the real vartree.
+		root_config = self._frozen_config.roots[root_config.root]
+		pkg = self._frozen_config._pkg_cache.get(
+			Package._gen_hash_key(cpv=cpv, type_name=type_name,
+			repo_name=myrepo, root_config=root_config,
+			installed=installed, onlydeps=onlydeps))
+		if pkg is None and onlydeps and not installed:
+			# Maybe it already got pulled in as a "merge" node.
+			pkg = self._dynamic_config.mydbapi[root_config.root].get(
+				Package._gen_hash_key(cpv=cpv, type_name=type_name,
+				repo_name=myrepo, root_config=root_config,
+				installed=installed, onlydeps=False))
+
+		if pkg is None:
+			tree_type = self.pkg_tree_map[type_name]
+			db = root_config.trees[tree_type].dbapi
+			db_keys = list(self._frozen_config._trees_orig[root_config.root][
+				tree_type].dbapi._aux_cache_keys)
+
+			try:
+				metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
+			except KeyError:
+				raise portage.exception.PackageNotFound(cpv)
+
+			pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
+				installed=installed, metadata=metadata, onlydeps=onlydeps,
+				root_config=root_config, type_name=type_name)
+
+			self._frozen_config._pkg_cache[pkg] = pkg
+
+			if not self._pkg_visibility_check(pkg) and \
+				'LICENSE' in pkg.masks and len(pkg.masks) == 1:
+				slot_key = (pkg.root, pkg.slot_atom)
+				other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
+				if other_pkg is None or pkg > other_pkg:
+					self._frozen_config._highest_license_masked[slot_key] = pkg
+
+		return pkg
+
+	def _validate_blockers(self):
+		"""Remove any blockers from the digraph that do not match any of the
+		packages within the graph.  If necessary, create hard deps to ensure
+		correct merge order such that mutually blocking packages are never
+		installed simultaneously. Also add runtime blockers from all installed
+		packages if any of them haven't been added already (bug 128809)."""
+
+		if "--buildpkgonly" in self._frozen_config.myopts or \
+			"--nodeps" in self._frozen_config.myopts:
+			return True
+
+		complete = "complete" in self._dynamic_config.myparams
+		deep = "deep" in self._dynamic_config.myparams
+
+		if True:
+			# Pull in blockers from all installed packages that haven't already
+			# been pulled into the depgraph, in order to ensure that they are
+			# respected (bug 128809). Due to the performance penalty that is
+			# incurred by all the additional dep_check calls that are required,
+			# blockers returned from dep_check are cached on disk by the
+			# BlockerCache class.
+
+			# For installed packages, always ignore blockers from DEPEND since
+			# only runtime dependencies should be relevant for packages that
+			# are already built.
+			dep_keys = ["RDEPEND", "PDEPEND"]
+			for myroot in self._frozen_config.trees:
+				vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
+				portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
+				pkgsettings = self._frozen_config.pkgsettings[myroot]
+				root_config = self._frozen_config.roots[myroot]
+				dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
+				final_db = self._dynamic_config.mydbapi[myroot]
+
+				blocker_cache = BlockerCache(myroot, vardb)
+				stale_cache = set(blocker_cache)
+				for pkg in vardb:
+					cpv = pkg.cpv
+					stale_cache.discard(cpv)
+					pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
+					pkg_deps_added = \
+						pkg in self._dynamic_config._traversed_pkg_deps
+
+					# Check for masked installed packages. Only warn about
+					# packages that are in the graph in order to avoid warning
+					# about those that will be automatically uninstalled during
+					# the merge process or by --depclean. Always warn about
+					# packages masked by license, since the user likely wants
+					# to adjust ACCEPT_LICENSE.
+					if pkg in final_db:
+						if not self._pkg_visibility_check(pkg) and \
+							(pkg_in_graph or 'LICENSE' in pkg.masks):
+							self._dynamic_config._masked_installed.add(pkg)
+						else:
+							self._check_masks(pkg)
+
+					blocker_atoms = None
+					blockers = None
+					if pkg_deps_added:
+						blockers = []
+						try:
+							blockers.extend(
+								self._dynamic_config._blocker_parents.child_nodes(pkg))
+						except KeyError:
+							pass
+						try:
+							blockers.extend(
+								self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
+						except KeyError:
+							pass
+						if blockers:
+							# Select just the runtime blockers.
+							blockers = [blocker for blocker in blockers \
+								if blocker.priority.runtime or \
+								blocker.priority.runtime_post]
+					if blockers is not None:
+						blockers = set(blocker.atom for blocker in blockers)
+
+					# If this node has any blockers, create a "nomerge"
+					# node for it so that they can be enforced.
+					self._spinner_update()
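+					# The installed package's COUNTER should change whenever
+					# it is reinstalled, so a mismatch indicates that the
+					# cached blocker data is stale.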
+					blocker_data = blocker_cache.get(cpv)
+					if blocker_data is not None and \
+						blocker_data.counter != long(pkg.metadata["COUNTER"]):
+						blocker_data = None
+
+					# If blocker data from the graph is available, use
+					# it to validate the cache and update the cache if
+					# it seems invalid.
+					if blocker_data is not None and \
+						blockers is not None:
+						if not blockers.symmetric_difference(
+							blocker_data.atoms):
+							continue
+						blocker_data = None
+
+					if blocker_data is None and \
+						blockers is not None:
+						# Re-use the blockers from the graph.
+						blocker_atoms = sorted(blockers)
+						counter = long(pkg.metadata["COUNTER"])
+						blocker_data = \
+							blocker_cache.BlockerData(counter, blocker_atoms)
+						blocker_cache[pkg.cpv] = blocker_data
+						continue
+
+					if blocker_data:
+						blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
+					else:
+						# Use aux_get() to trigger FakeVartree global
+						# updates on *DEPEND when appropriate.
+						depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
+						# It is crucial to pass in final_db here in order to
+						# optimize dep_check calls by eliminating atoms via
+						# dep_wordreduce and dep_eval calls.
+						try:
+							success, atoms = portage.dep_check(depstr,
+								final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
+								trees=self._dynamic_config._graph_trees, myroot=myroot)
+						except SystemExit:
+							raise
+						except Exception as e:
+							# This is helpful, for example, if a ValueError
+							# is thrown from cpv_expand due to multiple
+							# matches (this can happen if an atom lacks a
+							# category).
+							show_invalid_depstring_notice(
+								pkg, depstr, str(e))
+							del e
+							raise
+						if not success:
+							replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
+							if replacement_pkg and \
+								replacement_pkg[0].operation == "merge":
+								# This package is being replaced anyway, so
+								# ignore invalid dependencies so as not to
+								# annoy the user too much (otherwise they'd be
+								# forced to manually unmerge it first).
+								continue
+							show_invalid_depstring_notice(pkg, depstr, atoms)
+							return False
+						blocker_atoms = [myatom for myatom in atoms \
+							if myatom.blocker]
+						blocker_atoms.sort()
+						counter = long(pkg.metadata["COUNTER"])
+						blocker_cache[cpv] = \
+							blocker_cache.BlockerData(counter, blocker_atoms)
+					if blocker_atoms:
+						try:
+							for atom in blocker_atoms:
+								blocker = Blocker(atom=atom,
+									eapi=pkg.metadata["EAPI"],
+									priority=self._priority(runtime=True),
+									root=myroot)
+								self._dynamic_config._blocker_parents.add(blocker, pkg)
+						except portage.exception.InvalidAtom as e:
+							depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
+							show_invalid_depstring_notice(
+								pkg, depstr, "Invalid Atom: %s" % (e,))
+							return False
+				for cpv in stale_cache:
+					del blocker_cache[cpv]
+				blocker_cache.flush()
+				del blocker_cache
+
+		# Discard any "uninstall" tasks scheduled by previous calls
+		# to this method, since those tasks may not make sense given
+		# the current graph state.
+		previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
+		if previous_uninstall_tasks:
+			self._dynamic_config._blocker_uninstalls = digraph()
+			self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
+
+		for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
+			self._spinner_update()
+			root_config = self._frozen_config.roots[blocker.root]
+			virtuals = root_config.settings.getvirtuals()
+			myroot = blocker.root
+			initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
+			final_db = self._dynamic_config.mydbapi[myroot]
+
+			provider_virtual = False
+			if blocker.cp in virtuals and \
+				not self._have_new_virt(blocker.root, blocker.cp):
+				provider_virtual = True
+
+			# Use this to check PROVIDE for each matched package
+			# when necessary.
+			atom_set = InternalPackageSet(
+				initial_atoms=[blocker.atom])
+
+			if provider_virtual:
+				atoms = []
+				for provider_entry in virtuals[blocker.cp]:
+					atoms.append(Atom(blocker.atom.replace(
+						blocker.cp, provider_entry.cp, 1)))
+			else:
+				atoms = [blocker.atom]
+
+			blocked_initial = set()
+			for atom in atoms:
+				for pkg in initial_db.match_pkgs(atom):
+					if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
+						blocked_initial.add(pkg)
+
+			blocked_final = set()
+			for atom in atoms:
+				for pkg in final_db.match_pkgs(atom):
+					if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
+						blocked_final.add(pkg)
+
+			if not blocked_initial and not blocked_final:
+				parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
+				self._dynamic_config._blocker_parents.remove(blocker)
+				# Discard any parents that don't have any more blockers.
+				for pkg in parent_pkgs:
+					self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
+					if not self._dynamic_config._blocker_parents.child_nodes(pkg):
+						self._dynamic_config._blocker_parents.remove(pkg)
+				continue
+			for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
+				unresolved_blocks = False
+				depends_on_order = set()
+				for pkg in blocked_initial:
+					if pkg.slot_atom == parent.slot_atom and \
+						not blocker.atom.blocker.overlap.forbid:
+						# New !!atom blockers do not allow temporary
+						# simultaneous installation, so unlike !atom
+						# blockers, !!atom blockers aren't ignored
+						# when they match other packages occupying
+						# the same slot.
+						continue
+					if parent.installed:
+						# Two currently installed packages conflict with
+						# each other. Ignore this case since the damage
+						# is already done and this would be likely to
+						# confuse users if displayed like a normal blocker.
+						continue
+
+					self._dynamic_config._blocked_pkgs.add(pkg, blocker)
+
+					if parent.operation == "merge":
+						# Maybe the blocked package can be replaced or simply
+						# unmerged to resolve this block.
+						depends_on_order.add((pkg, parent))
+						continue
+					# None of the above blocker resolution techniques apply,
+					# so apparently this one is unresolvable.
+					unresolved_blocks = True
+				for pkg in blocked_final:
+					if pkg.slot_atom == parent.slot_atom and \
+						not blocker.atom.blocker.overlap.forbid:
+						# New !!atom blockers do not allow temporary
+						# simultaneous installation, so unlike !atom
+						# blockers, !!atom blockers aren't ignored
+						# when they match other packages occupying
+						# the same slot.
+						continue
+					if parent.operation == "nomerge" and \
+						pkg.operation == "nomerge":
+						# This blocker will be handled the next time that a
+						# merge of either package is triggered.
+						continue
+
+					self._dynamic_config._blocked_pkgs.add(pkg, blocker)
+
+					# Maybe the blocking package can be
+					# unmerged to resolve this block.
+					if parent.operation == "merge" and pkg.installed:
+						depends_on_order.add((pkg, parent))
+						continue
+					elif parent.operation == "nomerge":
+						depends_on_order.add((parent, pkg))
+						continue
+					# None of the above blocker resolution techniques apply,
+					# so apparently this one is unresolvable.
+					unresolved_blocks = True
+
+				# Make sure we don't unmerge any packages that have been
+				# pulled into the graph.
+				if not unresolved_blocks and depends_on_order:
+					for inst_pkg, inst_task in depends_on_order:
+						if self._dynamic_config.digraph.contains(inst_pkg) and \
+							self._dynamic_config.digraph.parent_nodes(inst_pkg):
+							unresolved_blocks = True
+							break
+
+				if not unresolved_blocks and depends_on_order:
+					for inst_pkg, inst_task in depends_on_order:
+						uninst_task = Package(built=inst_pkg.built,
+							cpv=inst_pkg.cpv, installed=inst_pkg.installed,
+							metadata=inst_pkg.metadata,
+							operation="uninstall",
+							root_config=inst_pkg.root_config,
+							type_name=inst_pkg.type_name)
+						# Enforce correct merge order with a hard dep.
+						self._dynamic_config.digraph.addnode(uninst_task, inst_task,
+							priority=BlockerDepPriority.instance)
+						# Count references to this blocker so that it can be
+						# invalidated after nodes referencing it have been
+						# merged.
+						self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
+				if not unresolved_blocks and not depends_on_order:
+					self._dynamic_config._irrelevant_blockers.add(blocker, parent)
+					self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
+					if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
+						self._dynamic_config._blocker_parents.remove(blocker)
+					if not self._dynamic_config._blocker_parents.child_nodes(parent):
+						self._dynamic_config._blocker_parents.remove(parent)
+				if unresolved_blocks:
+					self._dynamic_config._unsolvable_blockers.add(blocker, parent)
+
+		return True
+
+	def _accept_blocker_conflicts(self):
+		acceptable = False
+		for x in ("--buildpkgonly", "--fetchonly",
+			"--fetch-all-uri", "--nodeps"):
+			if x in self._frozen_config.myopts:
+				acceptable = True
+				break
+		return acceptable
+
+	def _merge_order_bias(self, mygraph):
+		"""
+		For optimal leaf node selection, promote deep system runtime deps and
+		order nodes from highest to lowest overall reference count.
+		"""
+
+		node_info = {}
+		for node in mygraph.order:
+			node_info[node] = len(mygraph.parent_nodes(node))
+		deep_system_deps = _find_deep_system_runtime_deps(mygraph)
+
+		def cmp_merge_preference(node1, node2):
+
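+			# cmp-style comparator: a negative result places node1 earlier
+			# in the merge order. Uninstalls sink to the end, while deep
+			# system deps and highly referenced nodes rise to the front.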
+			if node1.operation == 'uninstall':
+				if node2.operation == 'uninstall':
+					return 0
+				return 1
+
+			if node2.operation == 'uninstall':
+				# node1 is not an uninstall at this point, so node2 sorts later.
+				return -1
+
+			node1_sys = node1 in deep_system_deps
+			node2_sys = node2 in deep_system_deps
+			if node1_sys != node2_sys:
+				if node1_sys:
+					return -1
+				return 1
+
+			return node_info[node2] - node_info[node1]
+
+		mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
+
+	def altlist(self, reversed=False):
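+		# Note: the "reversed" parameter shadows the builtin of the same name.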
+
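+		# Keep trying until serialization succeeds; _serialize_tasks_retry is
+		# raised when the graph first has to be completed (e.g. when uninstall
+		# tasks enable "complete" mode).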
+		while self._dynamic_config._serialized_tasks_cache is None:
+			self._resolve_conflicts()
+			try:
+				self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
+					self._serialize_tasks()
+			except self._serialize_tasks_retry:
+				pass
+
+		retlist = self._dynamic_config._serialized_tasks_cache[:]
+		if reversed:
+			retlist.reverse()
+		return retlist
+
+	def _implicit_libc_deps(self, mergelist, graph):
+		"""
+		Create implicit dependencies on libc, in order to ensure that libc
+		is installed as early as possible (see bug #303567).
+		"""
+		libc_pkgs = {}
+		implicit_libc_roots = (self._frozen_config._running_root.root,)
+		for root in implicit_libc_roots:
+			graphdb = self._dynamic_config.mydbapi[root]
+			vardb = self._frozen_config.trees[root]["vartree"].dbapi
+			for atom in self._expand_virt_from_graph(root,
+				portage.const.LIBC_PACKAGE_ATOM):
+				if atom.blocker:
+					continue
+				match = graphdb.match_pkgs(atom)
+				if not match:
+					continue
+				pkg = match[-1]
+				if pkg.operation == "merge" and \
+					not vardb.cpv_exists(pkg.cpv):
+					libc_pkgs.setdefault(pkg.root, set()).add(pkg)
+
+		if not libc_pkgs:
+			return
+
+		earlier_libc_pkgs = set()
+
+		for pkg in mergelist:
+			if not isinstance(pkg, Package):
+				# a satisfied blocker
+				continue
+			root_libc_pkgs = libc_pkgs.get(pkg.root)
+			if root_libc_pkgs is not None and \
+				pkg.operation == "merge":
+				if pkg in root_libc_pkgs:
+					earlier_libc_pkgs.add(pkg)
+				else:
+					for libc_pkg in root_libc_pkgs:
+						if libc_pkg in earlier_libc_pkgs:
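+							# Add a buildtime dep edge so that libc is
+							# merged before this package.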
+							graph.add(libc_pkg, pkg,
+								priority=DepPriority(buildtime=True))
+
+	def schedulerGraph(self):
+		"""
+		The scheduler graph is identical to the normal one except that
+		uninstall edges are reversed in specific cases that require
+		conflicting packages to be temporarily installed simultaneously.
+		This is intended for use by the Scheduler in its parallelization
+		logic. It ensures that temporary simultaneous installation of
+		conflicting packages is avoided when appropriate (especially for
+		!!atom blockers), but allowed in specific cases that require it.
+
+		Note that this method calls break_refs() which alters the state of
+		internal Package instances such that this depgraph instance should
+		not be used to perform any more calculations.
+		"""
+
+		# NOTE: altlist initializes self._dynamic_config._scheduler_graph
+		mergelist = self.altlist()
+		self._implicit_libc_deps(mergelist,
+			self._dynamic_config._scheduler_graph)
+
+		# Break DepPriority.satisfied attributes which reference
+		# installed Package instances.
+		for parents, children, node in \
+			self._dynamic_config._scheduler_graph.nodes.values():
+			for priorities in chain(parents.values(), children.values()):
+				for priority in priorities:
+					if priority.satisfied:
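+						# Replace the Package reference with a plain boolean.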
+						priority.satisfied = True
+
+		pkg_cache = self._frozen_config._pkg_cache
+		graph = self._dynamic_config._scheduler_graph
+		trees = self._frozen_config.trees
+		pruned_pkg_cache = {}
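+		# Prune the package cache down to packages that are in the graph or
+		# installed in a vartree, so the rest can be garbage collected.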
+		for key, pkg in pkg_cache.items():
+			if pkg in graph or \
+				(pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
+				pruned_pkg_cache[key] = pkg
+
+		for root in trees:
+			trees[root]['vartree']._pkg_cache = pruned_pkg_cache
+
+		self.break_refs()
+		sched_config = \
+			_scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
+
+		return sched_config
+
+	def break_refs(self):
+		"""
+		Break any references in Package instances that lead back to the depgraph.
+		This is useful if you want to hold references to packages without also
+		holding the depgraph on the heap. It should only be called once the
+		depgraph and _frozen_config are no longer needed for any calculations.
+		"""
+		for root_config in self._frozen_config.roots.values():
+			root_config.update(self._frozen_config._trees_orig[
+				root_config.root]["root_config"])
+			# Both instances are now identical, so discard the
+			# original which should have no other references.
+			self._frozen_config._trees_orig[
+				root_config.root]["root_config"] = root_config
+
+	def _resolve_conflicts(self):
+		if not self._complete_graph():
+			raise self._unknown_internal_error()
+
+		if not self._validate_blockers():
+			self._dynamic_config._skip_restart = True
+			raise self._unknown_internal_error()
+
+		if self._dynamic_config._slot_collision_info:
+			self._process_slot_conflicts()
+
+	def _serialize_tasks(self):
+
+		debug = "--debug" in self._frozen_config.myopts
+
+		if debug:
+			writemsg("\ndigraph:\n\n", noiselevel=-1)
+			self._dynamic_config.digraph.debug_print()
+			writemsg("\n", noiselevel=-1)
+
+		scheduler_graph = self._dynamic_config.digraph.copy()
+
+		if '--nodeps' in self._frozen_config.myopts:
+			# Preserve the package order given on the command line.
+			return ([node for node in scheduler_graph \
+				if isinstance(node, Package) \
+				and node.operation == 'merge'], scheduler_graph)
+
+		mygraph = self._dynamic_config.digraph.copy()
+
+		removed_nodes = set()
+
+		# Prune off all DependencyArg instances since they aren't
+		# needed, and because of nested sets this is faster than doing
+		# it with multiple digraph.root_nodes() calls below. This also
+		# takes care of nested sets that have circular references,
+		# which wouldn't be matched by digraph.root_nodes().
+		for node in mygraph:
+			if isinstance(node, DependencyArg):
+				removed_nodes.add(node)
+		if removed_nodes:
+			mygraph.difference_update(removed_nodes)
+			removed_nodes.clear()
+
+		# Prune "nomerge" root nodes if nothing depends on them, since
+		# otherwise they slow down merge order calculation. Don't remove
+		# non-root nodes since they help optimize merge order in some cases
+		# such as revdep-rebuild.
+
+		while True:
+			for node in mygraph.root_nodes():
+				if not isinstance(node, Package) or \
+					node.installed or node.onlydeps:
+					removed_nodes.add(node)
+			if removed_nodes:
+				self._spinner_update()
+				mygraph.difference_update(removed_nodes)
+			if not removed_nodes:
+				break
+			removed_nodes.clear()
+		self._merge_order_bias(mygraph)
+		def cmp_circular_bias(n1, n2):
+			"""
+			RDEPEND is stronger than PDEPEND and this function
+			measures such a strength bias within a circular
+			dependency relationship.
+			"""
+			n1_n2_medium = n2 in mygraph.child_nodes(n1,
+				ignore_priority=priority_range.ignore_medium_soft)
+			n2_n1_medium = n1 in mygraph.child_nodes(n2,
+				ignore_priority=priority_range.ignore_medium_soft)
+			if n1_n2_medium == n2_n1_medium:
+				return 0
+			elif n1_n2_medium:
+				return 1
+			return -1
+		myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
+		retlist = []
+		# Contains uninstall tasks that have been scheduled to
+		# occur after overlapping blockers have been installed.
+		scheduled_uninstalls = set()
+		# Contains any Uninstall tasks that have been ignored
+		# in order to avoid the circular deps code path. These
+		# correspond to blocker conflicts that could not be
+		# resolved.
+		ignored_uninstall_tasks = set()
+		have_uninstall_task = False
+		complete = "complete" in self._dynamic_config.myparams
+		asap_nodes = []
+
+		def get_nodes(**kwargs):
+			"""
+			Returns leaf nodes excluding Uninstall instances
+			since those should be executed as late as possible.
+			"""
+			return [node for node in mygraph.leaf_nodes(**kwargs) \
+				if isinstance(node, Package) and \
+					(node.operation != "uninstall" or \
+					node in scheduled_uninstalls)]
+
+		# sys-apps/portage needs special treatment if ROOT="/"
+		running_root = self._frozen_config._running_root.root
+		runtime_deps = InternalPackageSet(
+			initial_atoms=[PORTAGE_PACKAGE_ATOM])
+		running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
+			PORTAGE_PACKAGE_ATOM)
+		replacement_portage = self._dynamic_config.mydbapi[running_root].match_pkgs(
+			PORTAGE_PACKAGE_ATOM)
+
+		if running_portage:
+			running_portage = running_portage[0]
+		else:
+			running_portage = None
+
+		if replacement_portage:
+			replacement_portage = replacement_portage[0]
+		else:
+			replacement_portage = None
+
+		if replacement_portage == running_portage:
+			replacement_portage = None
+
+		if replacement_portage is not None and \
+			(running_portage is None or \
+			running_portage.cpv != replacement_portage.cpv or \
+			'9999' in replacement_portage.cpv or \
+			'git' in replacement_portage.inherited or \
+			'git-2' in replacement_portage.inherited):
+			# update from running_portage to replacement_portage asap
+			asap_nodes.append(replacement_portage)
+
+		if running_portage is not None:
+			try:
+				portage_rdepend = self._select_atoms_highest_available(
+					running_root, running_portage.metadata["RDEPEND"],
+					myuse=self._pkg_use_enabled(running_portage),
+					parent=running_portage, strict=False)
+			except portage.exception.InvalidDependString as e:
+				portage.writemsg("!!! Invalid RDEPEND in " + \
+					"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+					(running_root, running_portage.cpv, e), noiselevel=-1)
+				del e
+				portage_rdepend = {running_portage : []}
+			for atoms in portage_rdepend.values():
+				runtime_deps.update(atom for atom in atoms \
+					if not atom.blocker)
+
+		# Merge libc asap, in order to account for implicit
+		# dependencies. See bug #303567.
+		implicit_libc_roots = (running_root,)
+		for root in implicit_libc_roots:
+			libc_pkgs = set()
+			vardb = self._frozen_config.trees[root]["vartree"].dbapi
+			graphdb = self._dynamic_config.mydbapi[root]
+			for atom in self._expand_virt_from_graph(root,
+				portage.const.LIBC_PACKAGE_ATOM):
+				if atom.blocker:
+					continue
+				match = graphdb.match_pkgs(atom)
+				if not match:
+					continue
+				pkg = match[-1]
+				if pkg.operation == "merge" and \
+					not vardb.cpv_exists(pkg.cpv):
+					libc_pkgs.add(pkg)
+
+			if libc_pkgs:
+				# If there's also an os-headers upgrade, we need to
+				# pull that in first. See bug #328317.
+				for atom in self._expand_virt_from_graph(root,
+					portage.const.OS_HEADERS_PACKAGE_ATOM):
+					if atom.blocker:
+						continue
+					match = graphdb.match_pkgs(atom)
+					if not match:
+						continue
+					pkg = match[-1]
+					if pkg.operation == "merge" and \
+						not vardb.cpv_exists(pkg.cpv):
+						asap_nodes.append(pkg)
+
+				asap_nodes.extend(libc_pkgs)
+
+		def gather_deps(ignore_priority, mergeable_nodes,
+			selected_nodes, node):
+			"""
+			Recursively gather a group of nodes that RDEPEND on
+			each other. This ensures that they are merged as a group
+			and get their RDEPENDs satisfied as soon as possible.
+			"""
+			if node in selected_nodes:
+				return True
+			if node not in mergeable_nodes:
+				return False
+			if node == replacement_portage and \
+				mygraph.child_nodes(node,
+				ignore_priority=priority_range.ignore_medium_soft):
+				# Make sure that portage always has all of its
+				# RDEPENDs installed first.
+				return False
+			selected_nodes.add(node)
+			for child in mygraph.child_nodes(node,
+				ignore_priority=ignore_priority):
+				if not gather_deps(ignore_priority,
+					mergeable_nodes, selected_nodes, child):
+					return False
+			return True
+
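+		# Priority filters that additionally ignore hard blocker-ordering
+		# edges (BlockerDepPriority) when searching for mergeable nodes.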
+		def ignore_uninst_or_med(priority):
+			if priority is BlockerDepPriority.instance:
+				return True
+			return priority_range.ignore_medium(priority)
+
+		def ignore_uninst_or_med_soft(priority):
+			if priority is BlockerDepPriority.instance:
+				return True
+			return priority_range.ignore_medium_soft(priority)
+
+		tree_mode = "--tree" in self._frozen_config.myopts
+		# Tracks whether or not the current iteration should prefer asap_nodes
+		# if available.  This is set to False when the previous iteration
+		# failed to select any nodes.  It is reset whenever nodes are
+		# successfully selected.
+		prefer_asap = True
+
+		# Controls whether or not the current iteration should drop edges that
+		# are "satisfied" by installed packages, in order to solve circular
+		# dependencies. The deep runtime dependencies of installed packages are
+		# not checked in this case (bug #199856), so it must be avoided
+		# whenever possible.
+		drop_satisfied = False
+
+		# State of variables for successive iterations that loosen the
+		# criteria for node selection.
+		#
+		# iteration   prefer_asap   drop_satisfied
+		# 1           True          False
+		# 2           False         False
+		# 3           False         True
+		#
+		# If no nodes are selected on the last iteration, it is due to
+		# unresolved blockers or circular dependencies.
+
+		while mygraph:
+			self._spinner_update()
+			selected_nodes = None
+			ignore_priority = None
+			if drop_satisfied or (prefer_asap and asap_nodes):
+				priority_range = DepPrioritySatisfiedRange
+			else:
+				priority_range = DepPriorityNormalRange
+			if prefer_asap and asap_nodes:
+				# ASAP nodes are merged before their soft deps. Go ahead and
+				# select root nodes here if necessary, since it's typical for
+				# the parent to have been removed from the graph already.
+				asap_nodes = [node for node in asap_nodes \
+					if mygraph.contains(node)]
+				for i in range(priority_range.SOFT,
+					priority_range.MEDIUM_SOFT + 1):
+					ignore_priority = priority_range.ignore_priority[i]
+					for node in asap_nodes:
+						if not mygraph.child_nodes(node,
+							ignore_priority=ignore_priority):
+							selected_nodes = [node]
+							asap_nodes.remove(node)
+							break
+					if selected_nodes:
+						break
+
+			if not selected_nodes and \
+				not (prefer_asap and asap_nodes):
+				for i in range(priority_range.NONE,
+					priority_range.MEDIUM_SOFT + 1):
+					ignore_priority = priority_range.ignore_priority[i]
+					nodes = get_nodes(ignore_priority=ignore_priority)
+					if nodes:
+						# If there is a mixture of merges and uninstalls,
+						# do the uninstalls first.
+						good_uninstalls = None
+						if len(nodes) > 1:
+							good_uninstalls = []
+							for node in nodes:
+								if node.operation == "uninstall":
+									good_uninstalls.append(node)
+
+							if good_uninstalls:
+								nodes = good_uninstalls
+
+						if good_uninstalls or len(nodes) == 1 or \
+							(ignore_priority is None and \
+							not asap_nodes and not tree_mode):
+							# Greedily pop all of these nodes since no
+							# relationship has been ignored. This optimization
+							# destroys --tree output, so it's disabled in tree
+							# mode.
+							selected_nodes = nodes
+						else:
+							# For optimal merge order:
+							#  * Only pop one node.
+							#  * Removing a root node (node without a parent)
+							#    will not produce a leaf node, so avoid it.
+							#  * It's normal for a selected uninstall to be a
+							#    root node, so don't check them for parents.
+							if asap_nodes:
+								prefer_asap_parents = (True, False)
+							else:
+								prefer_asap_parents = (False,)
+							for check_asap_parent in prefer_asap_parents:
+								if check_asap_parent:
+									for node in nodes:
+										parents = mygraph.parent_nodes(node,
+											ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
+										if parents and set(parents).intersection(asap_nodes):
+											selected_nodes = [node]
+											break
+								else:
+									for node in nodes:
+										if mygraph.parent_nodes(node):
+											selected_nodes = [node]
+											break
+								if selected_nodes:
+									break
+						if selected_nodes:
+							break
+
+			if not selected_nodes:
+				nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
+				if nodes:
+					mergeable_nodes = set(nodes)
+					if prefer_asap and asap_nodes:
+						nodes = asap_nodes
+					# When gathering the nodes belonging to a runtime cycle,
+					# we want to minimize the number of nodes gathered, since
+					# this tends to produce a more optimal merge order.
+					# Ignoring all medium_soft deps serves this purpose.
+					# In the case of multiple runtime cycles, where some cycles
+					# may depend on smaller independent cycles, it's optimal
+					# to merge smaller independent cycles before other cycles
+					# that depend on them. Therefore, we search for the
+					# smallest cycle in order to identify and prefer
+					# these smaller independent cycles.
+					ignore_priority = priority_range.ignore_medium_soft
+					smallest_cycle = None
+					for node in nodes:
+						if not mygraph.parent_nodes(node):
+							continue
+						selected_nodes = set()
+						if gather_deps(ignore_priority,
+							mergeable_nodes, selected_nodes, node):
+							# When selecting asap_nodes, we need to ensure
+							# that we haven't selected a large runtime cycle
+							# that is obviously sub-optimal. This will be
+							# obvious if any of the non-asap selected_nodes
+							# is a leaf node when medium_soft deps are
+							# ignored.
+							if prefer_asap and asap_nodes and \
+								len(selected_nodes) > 1:
+								for node in selected_nodes.difference(
+									asap_nodes):
+									if not mygraph.child_nodes(node,
+										ignore_priority =
+										DepPriorityNormalRange.ignore_medium_soft):
+										selected_nodes = None
+										break
+							if selected_nodes:
+								if smallest_cycle is None or \
+									len(selected_nodes) < len(smallest_cycle):
+									smallest_cycle = selected_nodes
+
+					selected_nodes = smallest_cycle
+
+					if selected_nodes and debug:
+						writemsg("\nruntime cycle digraph (%s nodes):\n\n" %
+							(len(selected_nodes),), noiselevel=-1)
+						cycle_digraph = mygraph.copy()
+						cycle_digraph.difference_update([x for x in
+							cycle_digraph if x not in selected_nodes])
+						cycle_digraph.debug_print()
+						writemsg("\n", noiselevel=-1)
+
+					if prefer_asap and asap_nodes and not selected_nodes:
+						# We failed to find any asap nodes to merge, so ignore
+						# them for the next iteration.
+						prefer_asap = False
+						continue
+
+			if selected_nodes and ignore_priority is not None:
+				# Try to merge ignored medium_soft deps as soon as possible
+				# if they're not satisfied by installed packages.
+				for node in selected_nodes:
+					children = set(mygraph.child_nodes(node))
+					soft = children.difference(
+						mygraph.child_nodes(node,
+						ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
+					medium_soft = children.difference(
+						mygraph.child_nodes(node,
+							ignore_priority = \
+							DepPrioritySatisfiedRange.ignore_medium_soft))
+					medium_soft.difference_update(soft)
+					for child in medium_soft:
+						if child in selected_nodes:
+							continue
+						if child in asap_nodes:
+							continue
+						# Merge PDEPEND asap for bug #180045.
+						asap_nodes.append(child)
+
+			if selected_nodes and len(selected_nodes) > 1:
+				if not isinstance(selected_nodes, list):
+					selected_nodes = list(selected_nodes)
+				selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
+
+			if not selected_nodes and myblocker_uninstalls:
+				# An Uninstall task needs to be executed in order to
+				# avoid conflict if possible.
+
+				if drop_satisfied:
+					priority_range = DepPrioritySatisfiedRange
+				else:
+					priority_range = DepPriorityNormalRange
+
+				mergeable_nodes = get_nodes(
+					ignore_priority=ignore_uninst_or_med)
+
+				min_parent_deps = None
+				uninst_task = None
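+				# Track the candidate whose parents have the fewest outstanding
+				# deps; uninstalling it is most likely to produce a leaf node.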
+
+				for task in myblocker_uninstalls.leaf_nodes():
+					# Do some sanity checks so that system or world packages
+					# don't get uninstalled inappropriately here (only really
+					# necessary when --complete-graph has not been enabled).
+
+					if task in ignored_uninstall_tasks:
+						continue
+
+					if task in scheduled_uninstalls:
+						# It's been scheduled but it hasn't
+						# been executed yet due to dependence
+						# on installation of blocking packages.
+						continue
+
+					root_config = self._frozen_config.roots[task.root]
+					inst_pkg = self._pkg(task.cpv, "installed", root_config,
+						installed=True)
+
+					if self._dynamic_config.digraph.contains(inst_pkg):
+						continue
+
+					forbid_overlap = False
+					heuristic_overlap = False
+					for blocker in myblocker_uninstalls.parent_nodes(task):
+						if not eapi_has_strong_blocks(blocker.eapi):
+							heuristic_overlap = True
+						elif blocker.atom.blocker.overlap.forbid:
+							forbid_overlap = True
+							break
+					if forbid_overlap and running_root == task.root:
+						continue
+
+					if heuristic_overlap and running_root == task.root:
+						# Never uninstall sys-apps/portage or its essential
+						# dependencies, except through replacement.
+						try:
+							runtime_dep_atoms = \
+								list(runtime_deps.iterAtomsForPackage(task))
+						except portage.exception.InvalidDependString as e:
+							portage.writemsg("!!! Invalid PROVIDE in " + \
+								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+								(task.root, task.cpv, e), noiselevel=-1)
+							del e
+							continue
+
+						# Don't uninstall a runtime dep if it appears
+						# to be the only suitable one installed.
+						skip = False
+						vardb = root_config.trees["vartree"].dbapi
+						for atom in runtime_dep_atoms:
+							other_version = None
+							for pkg in vardb.match_pkgs(atom):
+								if pkg.cpv == task.cpv and \
+									pkg.metadata["COUNTER"] == \
+									task.metadata["COUNTER"]:
+									continue
+								other_version = pkg
+								break
+							if other_version is None:
+								skip = True
+								break
+						if skip:
+							continue
+
+						# For packages in the system set, don't take
+						# any chances. If the conflict can't be resolved
+						# by a normal replacement operation then abort.
+						skip = False
+						try:
+							for atom in root_config.sets[
+								"system"].iterAtomsForPackage(task):
+								skip = True
+								break
+						except portage.exception.InvalidDependString as e:
+							portage.writemsg("!!! Invalid PROVIDE in " + \
+								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+								(task.root, task.cpv, e), noiselevel=-1)
+							del e
+							skip = True
+						if skip:
+							continue
+
+					# Note that the world check isn't always
+					# necessary since self._complete_graph() will
+					# add all packages from the system and world sets to the
+					# graph. This just allows unresolved conflicts to be
+					# detected as early as possible, which makes it possible
+					# to avoid calling self._complete_graph() when it is
+					# unnecessary due to blockers triggering an abort.
+					if not complete:
+						# For packages in the world set, go ahead and uninstall
+						# when necessary, as long as the atom will be satisfied
+						# in the final state.
+						graph_db = self._dynamic_config.mydbapi[task.root]
+						skip = False
+						try:
+							for atom in root_config.sets[
+								"selected"].iterAtomsForPackage(task):
+								satisfied = False
+								for pkg in graph_db.match_pkgs(atom):
+									if pkg == inst_pkg:
+										continue
+									satisfied = True
+									break
+								if not satisfied:
+									skip = True
+									self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
+									break
+						except portage.exception.InvalidDependString as e:
+							portage.writemsg("!!! Invalid PROVIDE in " + \
+								"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+								(task.root, task.cpv, e), noiselevel=-1)
+							del e
+							skip = True
+						if skip:
+							continue
+
+					# Check the deps of parent nodes to ensure that
+					# the chosen task produces a leaf node. Maybe
+					# this can be optimized some more to make the
+					# best possible choice, but the current algorithm
+					# is simple and should be near optimal for most
+					# common cases.
+					self._spinner_update()
+					mergeable_parent = False
+					parent_deps = set()
+					parent_deps.add(task)
+					for parent in mygraph.parent_nodes(task):
+						parent_deps.update(mygraph.child_nodes(parent,
+							ignore_priority=priority_range.ignore_medium_soft))
+						if min_parent_deps is not None and \
+							len(parent_deps) >= min_parent_deps:
+							# This task is no better than a previously selected
+							# task, so abort search now in order to avoid wasting
+							# any more cpu time on this task. This increases
+							# performance dramatically in cases when there are
+							# hundreds of blockers to solve, like when
+							# upgrading to a new slot of kde-meta.
+							mergeable_parent = None
+							break
+						if parent in mergeable_nodes and \
+							gather_deps(ignore_uninst_or_med_soft,
+							mergeable_nodes, set(), parent):
+							mergeable_parent = True
+
+					if not mergeable_parent:
+						continue
+
+					if min_parent_deps is None or \
+						len(parent_deps) < min_parent_deps:
+						min_parent_deps = len(parent_deps)
+						uninst_task = task
+
+					if uninst_task is not None and min_parent_deps == 1:
+						# This is the best possible result, so abort the search
+						# now in order to avoid wasting any more CPU time.
+						break
+
+				if uninst_task is not None:
+					# The uninstall is performed only after blocking
+					# packages have been merged on top of it. File
+					# collisions between blocking packages are detected
+					# and removed from the list of files to be uninstalled.
+					scheduled_uninstalls.add(uninst_task)
+					parent_nodes = mygraph.parent_nodes(uninst_task)
+
+					# Reverse the parent -> uninstall edges since we want
+					# to do the uninstall after blocking packages have
+					# been merged on top of it.
+					mygraph.remove(uninst_task)
+					for blocked_pkg in parent_nodes:
+						mygraph.add(blocked_pkg, uninst_task,
+							priority=BlockerDepPriority.instance)
+						scheduler_graph.remove_edge(uninst_task, blocked_pkg)
+						scheduler_graph.add(blocked_pkg, uninst_task,
+							priority=BlockerDepPriority.instance)
+
+					# Sometimes a merge node will render an uninstall
+					# node unnecessary (due to occupying the same SLOT),
+					# and we want to avoid executing a separate uninstall
+					# task in that case.
+					slot_node = self._dynamic_config.mydbapi[uninst_task.root
+						].match_pkgs(uninst_task.slot_atom)
+					if slot_node and \
+						slot_node[0].operation == "merge":
+						mygraph.add(slot_node[0], uninst_task,
+							priority=BlockerDepPriority.instance)
+
+					# Reset the state variables for leaf node selection and
+					# continue trying to select leaf nodes.
+					prefer_asap = True
+					drop_satisfied = False
+					continue
+
+			if not selected_nodes:
+				# Only select root nodes as a last resort. This case should
+				# only trigger when the graph is nearly empty and the only
+				# remaining nodes are isolated (no parents or children). Since
+				# the nodes must be isolated, ignore_priority is not needed.
+				selected_nodes = get_nodes()
+
+			if not selected_nodes and not drop_satisfied:
+				drop_satisfied = True
+				continue
+
+			if not selected_nodes and myblocker_uninstalls:
+				# If possible, drop an uninstall task here in order to avoid
+				# the circular deps code path. The corresponding blocker will
+				# still be counted as an unresolved conflict.
+				uninst_task = None
+				for node in myblocker_uninstalls.leaf_nodes():
+					try:
+						mygraph.remove(node)
+					except KeyError:
+						pass
+					else:
+						uninst_task = node
+						ignored_uninstall_tasks.add(node)
+						break
+
+				if uninst_task is not None:
+					# Reset the state variables for leaf node selection and
+					# continue trying to select leaf nodes.
+					prefer_asap = True
+					drop_satisfied = False
+					continue
+
+			if not selected_nodes:
+				self._dynamic_config._circular_deps_for_display = mygraph
+				self._dynamic_config._skip_restart = True
+				raise self._unknown_internal_error()
+
+			# At this point, we've succeeded in selecting one or more nodes, so
+			# reset state variables for leaf node selection.
+			prefer_asap = True
+			drop_satisfied = False
+
+			mygraph.difference_update(selected_nodes)
+
+			for node in selected_nodes:
+				if isinstance(node, Package) and \
+					node.operation == "nomerge":
+					continue
+
+				# Handle interactions between blockers
+				# and uninstallation tasks.
+				solved_blockers = set()
+				uninst_task = None
+				if isinstance(node, Package) and \
+					"uninstall" == node.operation:
+					have_uninstall_task = True
+					uninst_task = node
+				else:
+					vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
+					inst_pkg = vardb.match_pkgs(node.slot_atom)
+					if inst_pkg:
+						# The package will be replaced by this one, so remove
+						# the corresponding Uninstall task if necessary.
+						inst_pkg = inst_pkg[0]
+						uninst_task = Package(built=inst_pkg.built,
+							cpv=inst_pkg.cpv, installed=inst_pkg.installed,
+							metadata=inst_pkg.metadata,
+							operation="uninstall",
+							root_config=inst_pkg.root_config,
+							type_name=inst_pkg.type_name)
+						try:
+							mygraph.remove(uninst_task)
+						except KeyError:
+							pass
+
+				if uninst_task is not None and \
+					uninst_task not in ignored_uninstall_tasks and \
+					myblocker_uninstalls.contains(uninst_task):
+					blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
+					myblocker_uninstalls.remove(uninst_task)
+					# Discard any blockers that this Uninstall solves.
+					for blocker in blocker_nodes:
+						if not myblocker_uninstalls.child_nodes(blocker):
+							myblocker_uninstalls.remove(blocker)
+							if blocker not in \
+								self._dynamic_config._unsolvable_blockers:
+								solved_blockers.add(blocker)
+
+				retlist.append(node)
+
+				if (isinstance(node, Package) and \
+					"uninstall" == node.operation) or \
+					(uninst_task is not None and \
+					uninst_task in scheduled_uninstalls):
+					# Include satisfied blockers in the merge list
+					# since the user might be interested and also
+					# it serves as an indicator that blocking packages
+					# will be temporarily installed simultaneously.
+					for blocker in solved_blockers:
+						retlist.append(blocker)
+
+		unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
+		for node in myblocker_uninstalls.root_nodes():
+			unsolvable_blockers.add(node)
+
+		# If any Uninstall tasks need to be executed in order
+		# to avoid a conflict, complete the graph with any
+		# dependencies that may have been initially
+		# neglected (to ensure that unsafe Uninstall tasks
+		# are properly identified and blocked from execution).
+		if have_uninstall_task and \
+			not complete and \
+			not unsolvable_blockers:
+			self._dynamic_config.myparams["complete"] = True
+			if '--debug' in self._frozen_config.myopts:
+				msg = []
+				msg.append("enabling 'complete' depgraph mode " + \
+					"due to uninstall task(s):")
+				msg.append("")
+				for node in retlist:
+					if isinstance(node, Package) and \
+						node.operation == 'uninstall':
+						msg.append("\t%s" % (node,))
+				writemsg_level("\n%s\n" % \
+					"".join("%s\n" % line for line in msg),
+					level=logging.DEBUG, noiselevel=-1)
+			raise self._serialize_tasks_retry("")
+
+		# Set satisfied state on blockers, but not before the
+		# above retry path, since we don't want to modify the
+		# state in that case.
+		for node in retlist:
+			if isinstance(node, Blocker):
+				node.satisfied = True
+
+		for blocker in unsolvable_blockers:
+			retlist.append(blocker)
+
+		if unsolvable_blockers and \
+			not self._accept_blocker_conflicts():
+			self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
+			self._dynamic_config._serialized_tasks_cache = retlist[:]
+			self._dynamic_config._scheduler_graph = scheduler_graph
+			self._dynamic_config._skip_restart = True
+			raise self._unknown_internal_error()
+
+		if self._dynamic_config._slot_collision_info and \
+			not self._accept_blocker_conflicts():
+			self._dynamic_config._serialized_tasks_cache = retlist[:]
+			self._dynamic_config._scheduler_graph = scheduler_graph
+			raise self._unknown_internal_error()
+
+		return retlist, scheduler_graph
+
+	def _show_circular_deps(self, mygraph):
+		self._dynamic_config._circular_dependency_handler = \
+			circular_dependency_handler(self, mygraph)
+		handler = self._dynamic_config._circular_dependency_handler
+
+		self._frozen_config.myopts.pop("--quiet", None)
+		self._frozen_config.myopts["--verbose"] = True
+		self._frozen_config.myopts["--tree"] = True
+		portage.writemsg("\n\n", noiselevel=-1)
+		self.display(handler.merge_list)
+		prefix = colorize("BAD", " * ")
+		portage.writemsg("\n", noiselevel=-1)
+		portage.writemsg(prefix + "Error: circular dependencies:\n",
+			noiselevel=-1)
+		portage.writemsg("\n", noiselevel=-1)
+
+		if handler.circular_dep_message is None:
+			handler.debug_print()
+			portage.writemsg("\n", noiselevel=-1)
+
+		if handler.circular_dep_message is not None:
+			portage.writemsg(handler.circular_dep_message, noiselevel=-1)
+
+		suggestions = handler.suggestions
+		if suggestions:
+			writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
+			if len(suggestions) == 1:
+				writemsg("by applying the following change:\n", noiselevel=-1)
+			else:
+				writemsg("by applying " + colorize("bold", "any of") + \
+					" the following changes:\n", noiselevel=-1)
+			writemsg("".join(suggestions), noiselevel=-1)
+			writemsg("\nNote that this change can be reverted, once the package has" + \
+				" been installed.\n", noiselevel=-1)
+			if handler.large_cycle_count:
+				writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
+					"Several changes might be required to resolve all cycles.\n" + \
+					"Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)
+		else:
+			writemsg("\n\n", noiselevel=-1)
+			writemsg(prefix + "Note that circular dependencies " + \
+				"can often be avoided by temporarily\n", noiselevel=-1)
+			writemsg(prefix + "disabling USE flags that trigger " + \
+				"optional dependencies.\n", noiselevel=-1)
+
+	def _show_merge_list(self):
+		if self._dynamic_config._serialized_tasks_cache is not None and \
+			not (self._dynamic_config._displayed_list is not None and \
+			(self._dynamic_config._displayed_list == self._dynamic_config._serialized_tasks_cache or \
+			self._dynamic_config._displayed_list == \
+				list(reversed(self._dynamic_config._serialized_tasks_cache)))):
+			display_list = self._dynamic_config._serialized_tasks_cache[:]
+			if "--tree" in self._frozen_config.myopts:
+				display_list.reverse()
+			self.display(display_list)
+
+	def _show_unsatisfied_blockers(self, blockers):
+		self._show_merge_list()
+		msg = "Error: The above package list contains " + \
+			"packages which cannot be installed " + \
+			"at the same time on the same system."
+		prefix = colorize("BAD", " * ")
+		portage.writemsg("\n", noiselevel=-1)
+		for line in textwrap.wrap(msg, 70):
+			portage.writemsg(prefix + line + "\n", noiselevel=-1)
+
+		# Display the conflicting packages along with the packages
+		# that pulled them in. This is helpful for troubleshooting
+		# cases in which blockers don't solve automatically and
+		# the reasons are not apparent from the normal merge list
+		# display.
+
+		conflict_pkgs = {}
+		for blocker in blockers:
+			for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
+				self._dynamic_config._blocker_parents.parent_nodes(blocker)):
+				parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+				if not parent_atoms:
+					atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
+					if atom is not None:
+						parent_atoms = set([("@selected", atom)])
+				if parent_atoms:
+					conflict_pkgs[pkg] = parent_atoms
+
+		if conflict_pkgs:
+			# Reduce noise by pruning packages that are only
+			# pulled in by other conflict packages.
+			pruned_pkgs = set()
+			for pkg, parent_atoms in conflict_pkgs.items():
+				relevant_parent = False
+				for parent, atom in parent_atoms:
+					if parent not in conflict_pkgs:
+						relevant_parent = True
+						break
+				if not relevant_parent:
+					pruned_pkgs.add(pkg)
+			for pkg in pruned_pkgs:
+				del conflict_pkgs[pkg]
+
+		if conflict_pkgs:
+			msg = []
+			msg.append("\n")
+			indent = "  "
+			for pkg, parent_atoms in conflict_pkgs.items():
+
+				# Prefer packages that are not directly involved in a conflict.
+				# It can be essential to see all the packages here, so don't
+				# omit any. If the list is long, people can simply use a pager.
+				preferred_parents = set()
+				for parent_atom in parent_atoms:
+					parent, atom = parent_atom
+					if parent not in conflict_pkgs:
+						preferred_parents.add(parent_atom)
+
+				ordered_list = list(preferred_parents)
+				if len(parent_atoms) > len(ordered_list):
+					for parent_atom in parent_atoms:
+						if parent_atom not in preferred_parents:
+							ordered_list.append(parent_atom)
+
+				msg.append(indent + "%s pulled in by\n" % pkg)
+
+				for parent_atom in ordered_list:
+					parent, atom = parent_atom
+					msg.append(2*indent)
+					if isinstance(parent,
+						(PackageArg, AtomArg)):
+						# For PackageArg and AtomArg types, it's
+						# redundant to display the atom attribute.
+						msg.append(str(parent))
+					else:
+						# Display the specific atom from SetArg or
+						# Package types.
+						msg.append("%s required by %s" % (atom, parent))
+					msg.append("\n")
+
+				msg.append("\n")
+
+			writemsg("".join(msg), noiselevel=-1)
+
+		if "--quiet" not in self._frozen_config.myopts:
+			show_blocker_docs_link()
+
+	def display(self, mylist, favorites=[], verbosity=None):
+
+		# This is used to prevent display_problems() from
+		# redundantly displaying this exact same merge list
+		# again via _show_merge_list().
+		self._dynamic_config._displayed_list = mylist
+		display = Display()
+
+		return display(self, mylist, favorites, verbosity)
+
+	def _display_autounmask(self):
+		"""
+		Display --autounmask message and optionally write it to config files
+		(using CONFIG_PROTECT). The message includes the comments and the changes.
+		"""
+
+		autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
+		quiet = "--quiet" in self._frozen_config.myopts
+		pretend = "--pretend" in self._frozen_config.myopts
+		ask = "--ask" in self._frozen_config.myopts
+		enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts
+
+		def check_if_latest(pkg):
+			is_latest = True
+			is_latest_in_slot = True
+			dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
+			root_config = self._frozen_config.roots[pkg.root]
+
+			for db, pkg_type, built, installed, db_keys in dbs:
+				for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
+					if other_pkg.cp != pkg.cp:
+						# old-style PROVIDE virtual means there are no
+						# normal matches for this pkg_type
+						break
+					if other_pkg > pkg:
+						is_latest = False
+						if other_pkg.slot_atom == pkg.slot_atom:
+							is_latest_in_slot = False
+							break
+					else:
+						# iter_match_pkgs yields highest version first, so
+						# there's no need to search this pkg_type any further
+						break
+
+				if not is_latest_in_slot:
+					break
+
+			return is_latest, is_latest_in_slot
+
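+		# Illustrative summary: the two flags returned by
+		# check_if_latest() decide how specific each written atom must
+		# be, e.g. for a hypothetical cat/pkg-1.2:
+		#   latest overall       ->  >=cat/pkg-1.2
+		#   latest in slot only  ->  >=cat/pkg-1.2:SLOT
+		#   neither              ->  =cat/pkg-1.2
+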
+		# Set of roots we have autounmask changes for.
+		roots = set()
+
+		unstable_keyword_msg = {}
+		for pkg in self._dynamic_config._needed_unstable_keywords:
+			self._show_merge_list()
+			if pkg in self._dynamic_config.digraph:
+				root = pkg.root
+				roots.add(root)
+				unstable_keyword_msg.setdefault(root, [])
+				is_latest, is_latest_in_slot = check_if_latest(pkg)
+				pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+				mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
+					use=self._pkg_use_enabled(pkg))
+				for reason in mreasons:
+					if reason.unmask_hint and \
+						reason.unmask_hint.key == 'unstable keyword':
+						keyword = reason.unmask_hint.value
+
+						unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
+						if is_latest:
+							unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
+						elif is_latest_in_slot:
+							unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
+						else:
+							unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
+
+		p_mask_change_msg = {}
+		for pkg in self._dynamic_config._needed_p_mask_changes:
+			self._show_merge_list()
+			if pkg in self._dynamic_config.digraph:
+				root = pkg.root
+				roots.add(root)
+				p_mask_change_msg.setdefault(root, [])
+				is_latest, is_latest_in_slot = check_if_latest(pkg)
+				pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+				mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
+					use=self._pkg_use_enabled(pkg))
+				for reason in mreasons:
+					if reason.unmask_hint and \
+						reason.unmask_hint.key == 'p_mask':
+						keyword = reason.unmask_hint.value
+
+						comment, filename = portage.getmaskingreason(
+							pkg.cpv, metadata=pkg.metadata,
+							settings=pkgsettings,
+							portdb=pkg.root_config.trees["porttree"].dbapi,
+							return_location=True)
+
+						p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
+						if filename:
+							p_mask_change_msg[root].append("# %s:\n" % filename)
+						if comment:
+							comment = [line for line in
+								comment.splitlines() if line]
+							for line in comment:
+								p_mask_change_msg[root].append("%s\n" % line)
+						if is_latest:
+							p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
+						elif is_latest_in_slot:
+							p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
+						else:
+							p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
+
+		use_changes_msg = {}
+		for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
+			self._show_merge_list()
+			if pkg in self._dynamic_config.digraph:
+				root = pkg.root
+				roots.add(root)
+				use_changes_msg.setdefault(root, [])
+				is_latest, is_latest_in_slot = check_if_latest(pkg)
+				changes = needed_use_config_change[1]
+				adjustments = []
+				for flag, state in changes.items():
+					if state:
+						adjustments.append(flag)
+					else:
+						adjustments.append("-" + flag)
+				use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
+				if is_latest:
+					use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
+				elif is_latest_in_slot:
+					use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
+				else:
+					use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
+
+		license_msg = {}
+		for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
+			self._show_merge_list()
+			if pkg in self._dynamic_config.digraph:
+				root = pkg.root
+				roots.add(root)
+				license_msg.setdefault(root, [])
+				is_latest, is_latest_in_slot = check_if_latest(pkg)
+
+				license_msg[root].append(self._get_dep_chain_as_comment(pkg))
+				if is_latest:
+					license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
+				elif is_latest_in_slot:
+					license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
+				else:
+					license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
+
+		def find_config_file(abs_user_config, file_name):
+			"""
+			Searches /etc/portage for an appropriate file to append changes to.
+			If file_name is a file, it is returned; if it is a directory, the
+			last file in it is returned. The order of traversal is identical to
+			portage.util.grablines(recursive=True).
+
+			file_name - String containing a file name like "package.use"
+			return value - String. Absolute path of file to write to. None if
+			no suitable file exists.
+			"""
+			file_path = os.path.join(abs_user_config, file_name)
+
+			try:
+				os.lstat(file_path)
+			except OSError as e:
+				if e.errno == errno.ENOENT:
+					# The file doesn't exist, so we'll
+					# simply create it.
+					return file_path
+
+				# Disk or file system trouble?
+				return None
+
+			last_file_path = None
+			stack = [file_path]
+			while stack:
+				p = stack.pop()
+				try:
+					st = os.stat(p)
+				except OSError:
+					pass
+				else:
+					if stat.S_ISREG(st.st_mode):
+						last_file_path = p
+					elif stat.S_ISDIR(st.st_mode):
+						if os.path.basename(p) in _ignorecvs_dirs:
+							continue
+						try:
+							contents = os.listdir(p)
+						except OSError:
+							pass
+						else:
+							contents.sort(reverse=True)
+							for child in contents:
+								if child.startswith(".") or \
+									child.endswith("~"):
+									continue
+								stack.append(os.path.join(p, child))
+
+			return last_file_path
+
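+		# Illustrative example: given a directory-style config such as
+		#   /etc/portage/package.use/01-base
+		#   /etc/portage/package.use/99-local
+		# find_config_file(abs_user_config, "package.use") returns the
+		# path of "99-local", the last file in traversal order, so the
+		# appended changes are read last.
+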
+		write_to_file = autounmask_write and not pretend
+		# Make sure we have a file to write to before doing any write.
+		file_to_write_to = {}
+		problems = []
+		if write_to_file:
+			for root in roots:
+				settings = self._frozen_config.roots[root].settings
+				abs_user_config = os.path.join(
+					settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+
+				if root in unstable_keyword_msg:
+					if not os.path.exists(os.path.join(abs_user_config,
+						"package.keywords")):
+						filename = "package.accept_keywords"
+					else:
+						filename = "package.keywords"
+					file_to_write_to[(abs_user_config, "package.keywords")] = \
+						find_config_file(abs_user_config, filename)
+
+				if root in p_mask_change_msg:
+					file_to_write_to[(abs_user_config, "package.unmask")] = \
+						find_config_file(abs_user_config, "package.unmask")
+
+				if root in use_changes_msg:
+					file_to_write_to[(abs_user_config, "package.use")] = \
+						find_config_file(abs_user_config, "package.use")
+
+				if root in license_msg:
+					file_to_write_to[(abs_user_config, "package.license")] = \
+						find_config_file(abs_user_config, "package.license")
+
+			for (abs_user_config, f), path in file_to_write_to.items():
+				if path is None:
+					problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))
+
+			write_to_file = not problems
+
+		for root in roots:
+			settings = self._frozen_config.roots[root].settings
+			abs_user_config = os.path.join(
+				settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+
+			if len(roots) > 1:
+				writemsg_stdout("\nFor %s:\n" % abs_user_config, noiselevel=-1)
+
+			if root in unstable_keyword_msg:
+				writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
+					" are necessary to proceed:\n", noiselevel=-1)
+				writemsg_stdout("".join(unstable_keyword_msg[root]), noiselevel=-1)
+
+			if root in p_mask_change_msg:
+				writemsg_stdout("\nThe following " + colorize("BAD", "mask changes") + \
+					" are necessary to proceed:\n", noiselevel=-1)
+				writemsg_stdout("".join(p_mask_change_msg[root]), noiselevel=-1)
+
+			if root in use_changes_msg:
+				writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
+					" are necessary to proceed:\n", noiselevel=-1)
+				writemsg_stdout("".join(use_changes_msg[root]), noiselevel=-1)
+
+			if root in license_msg:
+				writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
+					" are necessary to proceed:\n", noiselevel=-1)
+				writemsg_stdout("".join(license_msg[root]), noiselevel=-1)
+
+		protect_obj = {}
+		if write_to_file:
+			for root in roots:
+				settings = self._frozen_config.roots[root].settings
+				protect_obj[root] = ConfigProtect(settings["EROOT"], \
+					shlex_split(settings.get("CONFIG_PROTECT", "")),
+					shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))
+
+		def write_changes(root, changes, file_to_write_to):
+			file_contents = None
+			try:
+				file_contents = io.open(
+					_unicode_encode(file_to_write_to,
+					encoding=_encodings['fs'], errors='strict'),
+					mode='r', encoding=_encodings['content'],
+					errors='replace').readlines()
+			except IOError as e:
+				if e.errno == errno.ENOENT:
+					file_contents = []
+				else:
+					problems.append("!!! Failed to read '%s': %s\n" % \
+						(file_to_write_to, e))
+			if file_contents is not None:
+				file_contents.extend(changes)
+				if protect_obj[root].isprotected(file_to_write_to):
+					# We want to force new_protect_filename to ensure
+					# that the user will see all our changes via
+					# etc-update, even if file_to_write_to doesn't
+					# exist yet, so we specify force=True.
+					file_to_write_to = new_protect_filename(file_to_write_to,
+						force=True)
+				try:
+					write_atomic(file_to_write_to, "".join(file_contents))
+				except PortageException:
+					problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
+
+		if not quiet and \
+			(unstable_keyword_msg or \
+			p_mask_change_msg or \
+			use_changes_msg or \
+			license_msg):
+			msg = [
+				"",
+				"NOTE: This --autounmask behavior can be disabled by setting",
+				"      EMERGE_DEFAULT_OPTS=\"--autounmask=n\" in make.conf."
+			]
+			for line in msg:
+				if line:
+					line = colorize("INFORM", line)
+				writemsg_stdout(line + "\n", noiselevel=-1)
+
+		if ask and write_to_file and file_to_write_to:
+			prompt = "\nWould you like to add these " + \
+				"changes to your config files?"
+			if userquery(prompt, enter_invalid) == 'No':
+				write_to_file = False
+
+		if write_to_file and file_to_write_to:
+			for root in roots:
+				settings = self._frozen_config.roots[root].settings
+				abs_user_config = os.path.join(
+					settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+				ensure_dirs(abs_user_config)
+
+				if root in unstable_keyword_msg:
+					write_changes(root, unstable_keyword_msg[root],
+						file_to_write_to.get((abs_user_config, "package.keywords")))
+
+				if root in p_mask_change_msg:
+					write_changes(root, p_mask_change_msg[root],
+						file_to_write_to.get((abs_user_config, "package.unmask")))
+
+				if root in use_changes_msg:
+					write_changes(root, use_changes_msg[root],
+						file_to_write_to.get((abs_user_config, "package.use")))
+
+				if root in license_msg:
+					write_changes(root, license_msg[root],
+						file_to_write_to.get((abs_user_config, "package.license")))
+
+		if problems:
+			writemsg_stdout("\nThe following problems occurred while writing autounmask changes:\n", \
+				noiselevel=-1)
+			writemsg_stdout("".join(problems), noiselevel=-1)
+		elif write_to_file and roots:
+			writemsg_stdout("\nAutounmask changes successfully written. Remember to run etc-update.\n", \
+				noiselevel=-1)
+		elif not pretend and not autounmask_write and roots:
+			writemsg_stdout("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
+				noiselevel=-1)
+
+
+	def display_problems(self):
+		"""
+		Display problems with the dependency graph such as slot collisions.
+		This is called internally by display() to show the problems _after_
+		the merge list where it is most likely to be seen, but if display()
+		is not going to be called then this method should be called explicitly
+		to ensure that the user is notified of problems with the graph.
+
+		All output goes to stderr, except for unsatisfied dependencies which
+		go to stdout for parsing by programs such as autounmask.
+		"""
+
+		# Note that show_masked_packages() sends its output to
+		# stdout, and some programs such as autounmask parse the
+		# output in cases when emerge bails out. However, when
+		# show_masked_packages() is called for installed packages
+		# here, the message is a warning that is more appropriate
+		# to send to stderr, so temporarily redirect stdout to
+		# stderr. TODO: Fix output code so there's a cleaner way
+		# to redirect everything to stderr.
+		sys.stdout.flush()
+		sys.stderr.flush()
+		stdout = sys.stdout
+		try:
+			sys.stdout = sys.stderr
+			self._display_problems()
+		finally:
+			sys.stdout = stdout
+			sys.stdout.flush()
+			sys.stderr.flush()
+
+		# This goes to stdout for parsing by programs like autounmask.
+		for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
+			self._show_unsatisfied_dep(*pargs, **kwargs)
+
+	def _display_problems(self):
+		if self._dynamic_config._circular_deps_for_display is not None:
+			self._show_circular_deps(
+				self._dynamic_config._circular_deps_for_display)
+
+		# The user is only notified of a slot conflict if
+		# there are no unresolvable blocker conflicts.
+		if self._dynamic_config._unsatisfied_blockers_for_display is not None:
+			self._show_unsatisfied_blockers(
+				self._dynamic_config._unsatisfied_blockers_for_display)
+		elif self._dynamic_config._slot_collision_info:
+			self._show_slot_collision_notice()
+		else:
+			self._show_missed_update()
+
+		self._display_autounmask()
+
+		# TODO: Add generic support for "set problem" handlers so that
+		# the below warnings aren't special cases for world only.
+
+		if self._dynamic_config._missing_args:
+			world_problems = False
+			if "world" in self._dynamic_config.sets[
+				self._frozen_config.target_root].sets:
+				# Filter out indirect members of world (from nested sets)
+				# since only direct members of world are desired here.
+				world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
+				for arg, atom in self._dynamic_config._missing_args:
+					if arg.name in ("selected", "world") and atom in world_set:
+						world_problems = True
+						break
+
+			if world_problems:
+				sys.stderr.write("\n!!! Problems have been " + \
+					"detected with your world file\n")
+				sys.stderr.write("!!! Please run " + \
+					green("emaint --check world")+"\n\n")
+
+		if self._dynamic_config._missing_args:
+			sys.stderr.write("\n" + colorize("BAD", "!!!") + \
+				" Ebuilds for the following packages are either all\n")
+			sys.stderr.write(colorize("BAD", "!!!") + \
+				" masked or don't exist:\n")
+			sys.stderr.write(" ".join(str(atom) for arg, atom in \
+				self._dynamic_config._missing_args) + "\n")
+
+		if self._dynamic_config._pprovided_args:
+			arg_refs = {}
+			for arg, atom in self._dynamic_config._pprovided_args:
+				if isinstance(arg, SetArg):
+					parent = arg.name
+					arg_atom = (atom, atom)
+				else:
+					parent = "args"
+					arg_atom = (arg.arg, atom)
+				refs = arg_refs.setdefault(arg_atom, [])
+				if parent not in refs:
+					refs.append(parent)
+			msg = []
+			msg.append(bad("\nWARNING: "))
+			if len(self._dynamic_config._pprovided_args) > 1:
+				msg.append("Requested packages will not be " + \
+					"merged because they are listed in\n")
+			else:
+				msg.append("A requested package will not be " + \
+					"merged because it is listed in\n")
+			msg.append("package.provided:\n\n")
+			problems_sets = set()
+			for (arg, atom), refs in arg_refs.items():
+				ref_string = ""
+				if refs:
+					problems_sets.update(refs)
+					refs.sort()
+					ref_string = ", ".join(["'%s'" % name for name in refs])
+					ref_string = " pulled in by " + ref_string
+				msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
+			msg.append("\n")
+			if "selected" in problems_sets or "world" in problems_sets:
+				msg.append("This problem can be solved in one of the following ways:\n\n")
+				msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
+				msg.append("  B) Uninstall offending packages (cleans them from world).\n")
+				msg.append("  C) Remove offending entries from package.provided.\n\n")
+				msg.append("The best course of action depends on the reason that an offending\n")
+				msg.append("package.provided entry exists.\n\n")
+			sys.stderr.write("".join(msg))
+
+		masked_packages = []
+		for pkg in self._dynamic_config._masked_license_updates:
+			root_config = pkg.root_config
+			pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+			mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
+			masked_packages.append((root_config, pkgsettings,
+				pkg.cpv, pkg.repo, pkg.metadata, mreasons))
+		if masked_packages:
+			writemsg("\n" + colorize("BAD", "!!!") + \
+				" The following updates are masked by LICENSE changes:\n",
+				noiselevel=-1)
+			show_masked_packages(masked_packages)
+			show_mask_docs()
+			writemsg("\n", noiselevel=-1)
+
+		masked_packages = []
+		for pkg in self._dynamic_config._masked_installed:
+			root_config = pkg.root_config
+			pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+			mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
+			masked_packages.append((root_config, pkgsettings,
+				pkg.cpv, pkg.repo, pkg.metadata, mreasons))
+		if masked_packages:
+			writemsg("\n" + colorize("BAD", "!!!") + \
+				" The following installed packages are masked:\n",
+				noiselevel=-1)
+			show_masked_packages(masked_packages)
+			show_mask_docs()
+			writemsg("\n", noiselevel=-1)
+
+	def saveNomergeFavorites(self):
+		"""Find atoms in favorites that are not in the mergelist and add them
+		to the world file if necessary."""
+		for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
+			"--oneshot", "--onlydeps", "--pretend"):
+			if x in self._frozen_config.myopts:
+				return
+		root_config = self._frozen_config.roots[self._frozen_config.target_root]
+		world_set = root_config.sets["selected"]
+
+		world_locked = False
+		if hasattr(world_set, "lock"):
+			world_set.lock()
+			world_locked = True
+
+		if hasattr(world_set, "load"):
+			world_set.load() # maybe it's changed on disk
+
+		args_set = self._dynamic_config.sets[
+			self._frozen_config.target_root].sets['__non_set_args__']
+		portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
+		added_favorites = set()
+		for x in self._dynamic_config._set_nodes:
+			if x.operation != "nomerge":
+				continue
+
+			if x.root != root_config.root:
+				continue
+
+			try:
+				myfavkey = create_world_atom(x, args_set, root_config)
+				if myfavkey:
+					if myfavkey in added_favorites:
+						continue
+					added_favorites.add(myfavkey)
+			except portage.exception.InvalidDependString as e:
+				writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
+					(x.cpv, e), noiselevel=-1)
+				writemsg("!!! see '%s'\n\n" % os.path.join(
+					x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
+				del e
+		all_added = []
+		for arg in self._dynamic_config._initial_arg_list:
+			if not isinstance(arg, SetArg):
+				continue
+			if arg.root_config.root != root_config.root:
+				continue
+			k = arg.name
+			if k in ("selected", "world") or \
+				not root_config.sets[k].world_candidate:
+				continue
+			s = SETPREFIX + k
+			if s in world_set:
+				continue
+			all_added.append(SETPREFIX + k)
+		all_added.extend(added_favorites)
+		all_added.sort()
+		for a in all_added:
+			writemsg(">>> Recording %s in \"world\" favorites file...\n" % \
+				colorize("INFORM", str(a)), noiselevel=-1)
+		if all_added:
+			world_set.update(all_added)
+
+		if world_locked:
+			world_set.unlock()
+
+	def _loadResumeCommand(self, resume_data, skip_masked=True,
+		skip_missing=True):
+		"""
+		Add a resume command to the graph and validate it in the process.  This
+		will raise a PackageNotFound exception if a package is not available.
+		"""
+
+		self._load_vdb()
+
+		if not isinstance(resume_data, dict):
+			return False
+
+		mergelist = resume_data.get("mergelist")
+		if not isinstance(mergelist, list):
+			mergelist = []
+
+		favorites = resume_data.get("favorites")
+		args_set = self._dynamic_config.sets[
+			self._frozen_config.target_root].sets['__non_set_args__']
+		if isinstance(favorites, list):
+			args = self._load_favorites(favorites)
+		else:
+			args = []
+
+		fakedb = self._dynamic_config.mydbapi
+		trees = self._frozen_config.trees
+		serialized_tasks = []
+		masked_tasks = []
+		for x in mergelist:
+			if not (isinstance(x, list) and len(x) == 4):
+				continue
+			pkg_type, myroot, pkg_key, action = x
+			if pkg_type not in self.pkg_tree_map:
+				continue
+			if action != "merge":
+				continue
+			root_config = self._frozen_config.roots[myroot]
+
+			# Use the resume "favorites" list to see if a repo was specified
+			# for this package.
+			depgraph_sets = self._dynamic_config.sets[root_config.root]
+			repo = None
+			for atom in depgraph_sets.atoms.getAtoms():
+				if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
+					repo = atom.repo
+					break
+
+			atom = "=" + pkg_key
+			if repo:
+				atom = atom + _repo_separator + repo
+
+			try:
+				atom = Atom(atom, allow_repo=True)
+			except InvalidAtom:
+				continue
+
+			pkg = None
+			for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
+				if not self._pkg_visibility_check(pkg) or \
+					self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+						modified_use=self._pkg_use_enabled(pkg)):
+					continue
+				break
+
+			if pkg is None:
+				# It does not exist or it is corrupt.
+				if skip_missing:
+					# TODO: log these somewhere
+					continue
+				raise portage.exception.PackageNotFound(pkg_key)
+
+			if "merge" == pkg.operation and \
+				self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
+					modified_use=self._pkg_use_enabled(pkg)):
+				continue
+
+			if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
+				if skip_masked:
+					masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
+				else:
+					self._dynamic_config._unsatisfied_deps_for_display.append(
+						((pkg.root, "="+pkg.cpv), {"myparent":None}))
+
+			fakedb[myroot].cpv_inject(pkg)
+			serialized_tasks.append(pkg)
+			self._spinner_update()
+
+		if self._dynamic_config._unsatisfied_deps_for_display:
+			return False
+
+		if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
+			self._dynamic_config._serialized_tasks_cache = serialized_tasks
+			self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
+		else:
+			self._select_package = self._select_pkg_from_graph
+			self._dynamic_config.myparams["selective"] = True
+			# Always traverse deep dependencies in order to account for
+			# potentially unsatisfied dependencies of installed packages.
+			# This is necessary for correct --keep-going or --resume operation
+			# in case a package from a group of circularly dependent packages
+			# fails. In this case, a package which has recently been installed
+			# may have an unsatisfied circular dependency (pulled in by
+			# PDEPEND, for example). So, even though a package is already
+			# installed, it may not have all of its dependencies satisfied, so
+			# it may not be usable. If such a package is in the subgraph of
+			# deep dependencies of a scheduled build, that build needs to
+			# be cancelled. In order for this type of situation to be
+			# recognized, deep traversal of dependencies is required.
+			self._dynamic_config.myparams["deep"] = True
+
+			for task in serialized_tasks:
+				if isinstance(task, Package) and \
+					task.operation == "merge":
+					if not self._add_pkg(task, None):
+						return False
+
+			# Packages for argument atoms need to be explicitly
+			# added via _add_pkg() so that they are included in the
+			# digraph (needed at least for --tree display).
+			for arg in self._expand_set_args(args, add_to_digraph=True):
+				for atom in arg.pset.getAtoms():
+					pkg, existing_node = self._select_package(
+						arg.root_config.root, atom)
+					if existing_node is None and \
+						pkg is not None:
+						if not self._add_pkg(pkg, Dependency(atom=atom,
+							root=pkg.root, parent=arg)):
+							return False
+
+			# Allow unsatisfied deps here to avoid showing a masking
+			# message for an unsatisfied dep that isn't necessarily
+			# masked.
+			if not self._create_graph(allow_unsatisfied=True):
+				return False
+
+			unsatisfied_deps = []
+			for dep in self._dynamic_config._unsatisfied_deps:
+				if not isinstance(dep.parent, Package):
+					continue
+				if dep.parent.operation == "merge":
+					unsatisfied_deps.append(dep)
+					continue
+
+				# For unsatisfied deps of installed packages, only account for
+				# them if they are in the subgraph of dependencies of a package
+				# which is scheduled to be installed.
+				unsatisfied_install = False
+				traversed = set()
+				dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
+				while dep_stack:
+					node = dep_stack.pop()
+					if not isinstance(node, Package):
+						continue
+					if node.operation == "merge":
+						unsatisfied_install = True
+						break
+					if node in traversed:
+						continue
+					traversed.add(node)
+					dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
+
+				if unsatisfied_install:
+					unsatisfied_deps.append(dep)
+
+			if masked_tasks or unsatisfied_deps:
+				# This probably means that a required package
+				# was dropped via --skipfirst. It makes the
+				# resume list invalid, so convert it to a
+				# UnsatisfiedResumeDep exception.
+				raise self.UnsatisfiedResumeDep(self,
+					masked_tasks + unsatisfied_deps)
+			self._dynamic_config._serialized_tasks_cache = None
+			try:
+				self.altlist()
+			except self._unknown_internal_error:
+				return False
+
+		return True
+
+	def _load_favorites(self, favorites):
+		"""
+		Use a list of favorites to resume state from a
+		previous select_files() call. This creates similar
+		DependencyArg instances to those that would have
+		been created by the original select_files() call.
+		This allows Package instances to be matched with
+		DependencyArg instances during graph creation.
+		"""
+		root_config = self._frozen_config.roots[self._frozen_config.target_root]
+		sets = root_config.sets
+		depgraph_sets = self._dynamic_config.sets[root_config.root]
+		args = []
+		for x in favorites:
+			if not isinstance(x, basestring):
+				continue
+			if x in ("system", "world"):
+				x = SETPREFIX + x
+			if x.startswith(SETPREFIX):
+				s = x[len(SETPREFIX):]
+				if s not in sets:
+					continue
+				if s in depgraph_sets.sets:
+					continue
+				pset = sets[s]
+				depgraph_sets.sets[s] = pset
+				args.append(SetArg(arg=x, pset=pset,
+					root_config=root_config))
+			else:
+				try:
+					x = Atom(x, allow_repo=True)
+				except portage.exception.InvalidAtom:
+					continue
+				args.append(AtomArg(arg=x, atom=x,
+					root_config=root_config))
+
+		self._set_args(args)
+		return args
+
+	class UnsatisfiedResumeDep(portage.exception.PortageException):
+		"""
+		A dependency of a resume list is not installed. This
+		can occur when a required package is dropped from the
+		merge list via --skipfirst.
+		"""
+		def __init__(self, depgraph, value):
+			portage.exception.PortageException.__init__(self, value)
+			self.depgraph = depgraph
+
+	class _internal_exception(portage.exception.PortageException):
+		def __init__(self, value=""):
+			portage.exception.PortageException.__init__(self, value)
+
+	class _unknown_internal_error(_internal_exception):
+		"""
+		Used by the depgraph internally to terminate graph creation.
+		The specific reason for the failure should have been dumped
+		to stderr; unfortunately, the exact reason may not be known.
+		"""
+
+	class _serialize_tasks_retry(_internal_exception):
+		"""
+		This is raised by the _serialize_tasks() method when it needs to
+		be called again for some reason. The only case that it's currently
+		used for is when neglected dependencies need to be added to the
+		graph in order to avoid making a potentially unsafe decision.
+		"""
+
+	class _backtrack_mask(_internal_exception):
+		"""
+		This is raised by _show_unsatisfied_dep() when it's called with
+		check_backtrack=True and a matching package has been masked by
+		backtracking.
+		"""
+
+	class _autounmask_breakage(_internal_exception):
+		"""
+		This is raised by _show_unsatisfied_dep() when it's called with
+		check_autounmask_breakage=True and a matching package has
+		been disqualified due to autounmask changes.
+		"""
+
+	def need_restart(self):
+		return self._dynamic_config._need_restart and \
+			not self._dynamic_config._skip_restart
+
+	def success_without_autounmask(self):
+		return self._dynamic_config._success_without_autounmask
+
+	def autounmask_breakage_detected(self):
+		try:
+			for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
+				self._show_unsatisfied_dep(
+					*pargs, check_autounmask_breakage=True, **kwargs)
+		except self._autounmask_breakage:
+			return True
+		return False
+
+	def get_backtrack_infos(self):
+		return self._dynamic_config._backtrack_infos
+
+
+class _dep_check_composite_db(dbapi):
+	"""
+	A dbapi-like interface that is optimized for use in dep_check() calls.
+	This is built on top of the existing depgraph package selection logic.
+	Some packages that have been added to the graph may be masked from this
+	view in order to influence the atom preference selection that occurs
+	via dep_check().
+	"""
+	def __init__(self, depgraph, root):
+		dbapi.__init__(self)
+		self._depgraph = depgraph
+		self._root = root
+		self._match_cache = {}
+		self._cpv_pkg_map = {}
+
+	def _clear_cache(self):
+		self._match_cache.clear()
+		self._cpv_pkg_map.clear()
+
+	def cp_list(self, cp):
+		"""
+		Emulate cp_list just so it can be used to check for existence
+		of new-style virtuals. Since it's a waste of time to return
+		more than one cpv for this use case, a maximum of one cpv will
+		be returned.
+		"""
+		if isinstance(cp, Atom):
+			atom = cp
+		else:
+			atom = Atom(cp)
+		ret = []
+		for pkg in self._depgraph._iter_match_pkgs_any(
+			self._depgraph._frozen_config.roots[self._root], atom):
+			if pkg.cp == cp:
+				ret.append(pkg.cpv)
+				break
+
+		return ret
+
+	def match(self, atom):
+		ret = self._match_cache.get(atom)
+		if ret is not None:
+			return ret[:]
+		pkg, existing = self._depgraph._select_package(self._root, atom)
+		if not pkg:
+			ret = []
+		else:
+			# Return the highest available from select_package() as well as
+			# any matching slots in the graph db.
+			slots = set()
+			slots.add(pkg.metadata["SLOT"])
+			if pkg.cp.startswith("virtual/"):
+				# For new-style virtual lookahead that occurs inside
+				# dep_check(), examine all slots. This is needed
+				# so that newer slots will not unnecessarily be pulled in
+				# when a satisfying lower slot is already installed. For
+				# example, if virtual/jdk-1.4 is satisfied via kaffe then
+				# there's no need to pull in a newer slot to satisfy a
+				# virtual/jdk dependency.
+				for db, pkg_type, built, installed, db_keys in \
+					self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
+					for cpv in db.match(atom):
+						if portage.cpv_getkey(cpv) != pkg.cp:
+							continue
+						slots.add(db.aux_get(cpv, ["SLOT"])[0])
+			ret = []
+			if self._visible(pkg):
+				self._cpv_pkg_map[pkg.cpv] = pkg
+				ret.append(pkg.cpv)
+			slots.remove(pkg.metadata["SLOT"])
+			while slots:
+				slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
+				pkg, existing = self._depgraph._select_package(
+					self._root, slot_atom)
+				if not pkg:
+					continue
+				if not self._visible(pkg):
+					continue
+				self._cpv_pkg_map[pkg.cpv] = pkg
+				ret.append(pkg.cpv)
+			if ret:
+				self._cpv_sort_ascending(ret)
+		self._match_cache[atom] = ret
+		return ret[:]
+
+	def _visible(self, pkg):
+		if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
+			try:
+				arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
+			except (StopIteration, portage.exception.InvalidDependString):
+				arg = None
+			if arg:
+				return False
+		if pkg.installed and \
+			(pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
+			# Account for packages with masks (like KEYWORDS masks)
+			# that are usually ignored in visibility checks for
+			# installed packages, in order to handle cases like
+			# bug #350285.
+			myopts = self._depgraph._frozen_config.myopts
+			use_ebuild_visibility = myopts.get(
+				'--use-ebuild-visibility', 'n') != 'n'
+			avoid_update = "--update" not in myopts and \
+				"remove" not in self._depgraph._dynamic_config.myparams
+			usepkgonly = "--usepkgonly" in myopts
+			if not avoid_update:
+				if not use_ebuild_visibility and usepkgonly:
+					return False
+				else:
+					try:
+						pkg_eb = self._depgraph._pkg(
+							pkg.cpv, "ebuild", pkg.root_config,
+							myrepo=pkg.repo)
+					except portage.exception.PackageNotFound:
+						pkg_eb_visible = False
+						for pkg_eb in self._depgraph._iter_match_pkgs(
+							pkg.root_config, "ebuild",
+							Atom("=%s" % (pkg.cpv,))):
+							if self._depgraph._pkg_visibility_check(pkg_eb):
+								pkg_eb_visible = True
+								break
+						if not pkg_eb_visible:
+							return False
+					else:
+						if not self._depgraph._pkg_visibility_check(pkg_eb):
+							return False
+
+		in_graph = self._depgraph._dynamic_config._slot_pkg_map[
+			self._root].get(pkg.slot_atom)
+		if in_graph is None:
+			# Mask choices for packages which are not the highest visible
+			# version within their slot (since they usually trigger slot
+			# conflicts).
+			highest_visible, in_graph = self._depgraph._select_package(
+				self._root, pkg.slot_atom)
+			# Note: highest_visible is not necessarily the real highest
+			# visible, especially when --update is not enabled, so use
+			# < operator instead of !=.
+			if pkg < highest_visible:
+				return False
+		elif in_graph != pkg:
+			# Mask choices for packages that would trigger a slot
+			# conflict with a previously selected package.
+			return False
+		return True
+
+	def aux_get(self, cpv, wants):
+		metadata = self._cpv_pkg_map[cpv].metadata
+		return [metadata.get(x, "") for x in wants]
+
+	def match_pkgs(self, atom):
+		return [self._cpv_pkg_map[cpv] for cpv in self.match(atom)]
+
+def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
+
+	if "--quiet" in myopts:
+		writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
+		writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
+		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
+			writemsg("    " + colorize("INFORM", cp) + "\n", noiselevel=-1)
+		return
+
+	s = search(root_config, spinner, "--searchdesc" in myopts,
+		"--quiet" not in myopts, "--usepkg" in myopts,
+		"--usepkgonly" in myopts)
+	null_cp = portage.dep_getkey(insert_category_into_atom(
+		arg, "null"))
+	cat, atom_pn = portage.catsplit(null_cp)
+	s.searchkey = atom_pn
+	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
+		s.addCP(cp)
+	s.output()
+	writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
+	writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
+
+def _spinner_start(spinner, myopts):
+	if spinner is None:
+		return
+	if "--quiet" not in myopts and \
+		("--pretend" in myopts or "--ask" in myopts or \
+		"--tree" in myopts or "--verbose" in myopts):
+		action = ""
+		if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
+			action = "fetched"
+		elif "--buildpkgonly" in myopts:
+			action = "built"
+		else:
+			action = "merged"
+		if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
+			if "--unordered-display" in myopts:
+				portage.writemsg_stdout("\n" + \
+					darkgreen("These are the packages that " + \
+					"would be %s:" % action) + "\n\n")
+			else:
+				portage.writemsg_stdout("\n" + \
+					darkgreen("These are the packages that " + \
+					"would be %s, in reverse order:" % action) + "\n\n")
+		else:
+			portage.writemsg_stdout("\n" + \
+				darkgreen("These are the packages that " + \
+				"would be %s, in order:" % action) + "\n\n")
+
+	show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
+	if not show_spinner:
+		spinner.update = spinner.update_quiet
+
+	if show_spinner:
+		portage.writemsg_stdout("Calculating dependencies  ")
+
+def _spinner_stop(spinner):
+	if spinner is None or \
+		spinner.update == spinner.update_quiet:
+		return
+
+	if spinner.update != spinner.update_basic:
+		# update_basic is used for non-tty output,
+		# so don't output backspaces in that case.
+		portage.writemsg_stdout("\b\b")
+
+	portage.writemsg_stdout("... done!\n")
+
+def backtrack_depgraph(settings, trees, myopts, myparams, 
+	myaction, myfiles, spinner):
+	"""
+	Raises PackageSetNotFound if myfiles contains a missing package set.
+	"""
+	_spinner_start(spinner, myopts)
+	try:
+		return _backtrack_depgraph(settings, trees, myopts, myparams, 
+			myaction, myfiles, spinner)
+	finally:
+		_spinner_stop(spinner)
+
+
+def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
+
+	debug = "--debug" in myopts
+	mydepgraph = None
+	max_retries = myopts.get('--backtrack', 10)
+	max_depth = max(1, (max_retries + 1) // 2)
+	allow_backtracking = max_retries > 0
+	backtracker = Backtracker(max_depth)
+	backtracked = 0
+
+	frozen_config = _frozen_depgraph_config(settings, trees,
+		myopts, spinner)
+
+	while backtracker:
+
+		if debug and mydepgraph is not None:
+			writemsg_level(
+				"\n\nbacktracking try %s \n\n" % \
+				backtracked, noiselevel=-1, level=logging.DEBUG)
+			mydepgraph.display_problems()
+
+		backtrack_parameters = backtracker.get()
+
+		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
+			frozen_config=frozen_config,
+			allow_backtracking=allow_backtracking,
+			backtrack_parameters=backtrack_parameters)
+		success, favorites = mydepgraph.select_files(myfiles)
+
+		if success or mydepgraph.success_without_autounmask():
+			break
+		elif not allow_backtracking:
+			break
+		elif backtracked >= max_retries:
+			break
+		elif mydepgraph.need_restart():
+			backtracked += 1
+			backtracker.feedback(mydepgraph.get_backtrack_infos())
+		else:
+			break
+
+	if not (success or mydepgraph.success_without_autounmask()) and backtracked:
+
+		if debug:
+			writemsg_level(
+				"\n\nbacktracking aborted after %s tries\n\n" % \
+				backtracked, noiselevel=-1, level=logging.DEBUG)
+			mydepgraph.display_problems()
+
+		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
+			frozen_config=frozen_config,
+			allow_backtracking=False,
+			backtrack_parameters=backtracker.get_best_run())
+		success, favorites = mydepgraph.select_files(myfiles)
+
+	if not success and mydepgraph.autounmask_breakage_detected():
+		if debug:
+			writemsg_level(
+				"\n\nautounmask breakage detected\n\n",
+				noiselevel=-1, level=logging.DEBUG)
+			mydepgraph.display_problems()
+		myopts["--autounmask"] = "n"
+		mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
+			frozen_config=frozen_config, allow_backtracking=False)
+		success, favorites = mydepgraph.select_files(myfiles)
+
+	return (success, mydepgraph, favorites)
+
+
+def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
+	"""
+	Raises PackageSetNotFound if myfiles contains a missing package set.
+	"""
+	_spinner_start(spinner, myopts)
+	try:
+		return _resume_depgraph(settings, trees, mtimedb, myopts,
+			myparams, spinner)
+	finally:
+		_spinner_stop(spinner)
+
+def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
+	"""
+	Construct a depgraph for the given resume list. This will raise
+	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
+	TODO: Return reasons for dropped_tasks, for display/logging.
+	@rtype: tuple
+	@returns: (success, depgraph, dropped_tasks)
+	"""
+	skip_masked = True
+	skip_unsatisfied = True
+	mergelist = mtimedb["resume"]["mergelist"]
+	dropped_tasks = set()
+	frozen_config = _frozen_depgraph_config(settings, trees,
+		myopts, spinner)
+	while True:
+		mydepgraph = depgraph(settings, trees,
+			myopts, myparams, spinner, frozen_config=frozen_config)
+		try:
+			success = mydepgraph._loadResumeCommand(mtimedb["resume"],
+				skip_masked=skip_masked)
+		except depgraph.UnsatisfiedResumeDep as e:
+			if not skip_unsatisfied:
+				raise
+
+			graph = mydepgraph._dynamic_config.digraph
+			unsatisfied_parents = dict((dep.parent, dep.parent) \
+				for dep in e.value)
+			traversed_nodes = set()
+			unsatisfied_stack = list(unsatisfied_parents)
+			while unsatisfied_stack:
+				pkg = unsatisfied_stack.pop()
+				if pkg in traversed_nodes:
+					continue
+				traversed_nodes.add(pkg)
+
+				# If this package was pulled in by a parent
+				# package scheduled for merge, removing this
+				# package may cause the parent package's
+				# dependency to become unsatisfied.
+				for parent_node in graph.parent_nodes(pkg):
+					if not isinstance(parent_node, Package) \
+						or parent_node.operation not in ("merge", "nomerge"):
+						continue
+					unsatisfied = \
+						graph.child_nodes(parent_node,
+						ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
+					if pkg in unsatisfied:
+						unsatisfied_parents[parent_node] = parent_node
+						unsatisfied_stack.append(parent_node)
+
+			unsatisfied_tuples = frozenset(tuple(parent_node)
+				for parent_node in unsatisfied_parents
+				if isinstance(parent_node, Package))
+			pruned_mergelist = []
+			for x in mergelist:
+				if isinstance(x, list) and \
+					tuple(x) not in unsatisfied_tuples:
+					pruned_mergelist.append(x)
+
+			# If the mergelist doesn't shrink then this loop is infinite.
+			if len(pruned_mergelist) == len(mergelist):
+				# This happens if a package can't be dropped because
+				# it's already installed, but it has unsatisfied PDEPEND.
+				raise
+			mergelist[:] = pruned_mergelist
+
+			# Exclude installed packages that have been removed from the graph due
+			# to failure to build/install runtime dependencies after the dependent
+			# package has already been installed.
+			dropped_tasks.update(pkg for pkg in \
+				unsatisfied_parents if pkg.operation != "nomerge")
+
+			del e, graph, traversed_nodes, \
+				unsatisfied_parents, unsatisfied_stack
+			continue
+		else:
+			break
+	return (success, mydepgraph, dropped_tasks)
+
+def get_mask_info(root_config, cpv, pkgsettings,
+	db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
+	eapi_masked = False
+	try:
+		metadata = dict(zip(db_keys,
+			db.aux_get(cpv, db_keys, myrepo=myrepo)))
+	except KeyError:
+		metadata = None
+
+	if metadata is None:
+		mreasons = ["corruption"]
+	else:
+		eapi = metadata['EAPI']
+		if eapi[:1] == '-':
+			eapi = eapi[1:]
+		if not portage.eapi_is_supported(eapi):
+			mreasons = ['EAPI %s' % eapi]
+		else:
+			pkg = Package(type_name=pkg_type, root_config=root_config,
+				cpv=cpv, built=built, installed=installed, metadata=metadata)
+
+			modified_use = None
+			if _pkg_use_enabled is not None:
+				modified_use = _pkg_use_enabled(pkg)
+
+			mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
+
+	return metadata, mreasons
+
+def show_masked_packages(masked_packages):
+	shown_licenses = set()
+	shown_comments = set()
+	# There may be both an ebuild and a binary package. Only
+	# show one of them to avoid a redundant listing.
+	shown_cpvs = set()
+	have_eapi_mask = False
+	for (root_config, pkgsettings, cpv, repo,
+		metadata, mreasons) in masked_packages:
+		output_cpv = cpv
+		if repo:
+			output_cpv += _repo_separator + repo
+		if output_cpv in shown_cpvs:
+			continue
+		shown_cpvs.add(output_cpv)
+		eapi_masked = metadata is not None and \
+			not portage.eapi_is_supported(metadata["EAPI"])
+		if eapi_masked:
+			have_eapi_mask = True
+			# When masked by EAPI, metadata is mostly useless since
+			# it doesn't contain essential things like SLOT.
+			metadata = None
+		comment, filename = None, None
+		if not eapi_masked and \
+			"package.mask" in mreasons:
+			comment, filename = \
+				portage.getmaskingreason(
+				cpv, metadata=metadata,
+				settings=pkgsettings,
+				portdb=root_config.trees["porttree"].dbapi,
+				return_location=True)
+		missing_licenses = []
+		if not eapi_masked and metadata is not None:
+			try:
+				missing_licenses = \
+					pkgsettings._getMissingLicenses(
+						cpv, metadata)
+			except portage.exception.InvalidDependString:
+				# This will have already been reported
+				# above via mreasons.
+				pass
+
+		writemsg_stdout("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)
+
+		if comment and comment not in shown_comments:
+			writemsg_stdout(filename + ":\n" + comment + "\n",
+				noiselevel=-1)
+			shown_comments.add(comment)
+		portdb = root_config.trees["porttree"].dbapi
+		for l in missing_licenses:
+			l_path = portdb.findLicensePath(l)
+			if l in shown_licenses:
+				continue
+			msg = ("A copy of the '%s' license" + \
+			" is located at '%s'.\n\n") % (l, l_path)
+			writemsg_stdout(msg, noiselevel=-1)
+			shown_licenses.add(l)
+	return have_eapi_mask
+
+def show_mask_docs():
+	writemsg_stdout("For more information, see the MASKED PACKAGES section in the emerge\n", noiselevel=-1)
+	writemsg_stdout("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
+
+def show_blocker_docs_link():
+	writemsg("\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n", noiselevel=-1)
+	writemsg("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n", noiselevel=-1)
+	writemsg("http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n", noiselevel=-1)
+
+def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
+	return [mreason.message for \
+		mreason in _get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=use)]
+
+def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
+	mreasons = _getmaskingstatus(
+		pkg, settings=pkgsettings,
+		portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
+
+	if not pkg.installed:
+		if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
+			mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
+				pkg.metadata["CHOST"]))
+
+	if pkg.invalid:
+		for msg_type, msgs in pkg.invalid.items():
+			for msg in msgs:
+				mreasons.append(
+					_MaskReason("invalid", "invalid: %s" % (msg,)))
+
+	if not pkg.metadata["SLOT"]:
+		mreasons.append(
+			_MaskReason("invalid", "SLOT: undefined"))
+
+	return mreasons

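The backtracking driver above reduces to a bounded retry loop: ask the
Backtracker for a parameter set, run a full dependency calculation, feed
failure details back, and stop on success, exhaustion, or a hard failure.
A minimal sketch of that control flow, using hypothetical solve() and
backtracker stand-ins rather than the real depgraph API:

    # Sketch only: the retry-with-feedback shape of _backtrack_depgraph().
    # solve() and backtracker are hypothetical stand-ins, not portage APIs.
    def solve_with_backtracking(solve, backtracker, max_retries=10):
        tries = 0
        result = None
        while backtracker:  # truthy while parameter sets remain to try
            result = solve(backtracker.get())
            if result.success or not result.need_restart:
                break       # solved, or failed in a way retries cannot help
            if tries >= max_retries:
                break       # retry budget exhausted
            tries += 1
            backtracker.feedback(result.backtrack_infos)
        if result is not None and not result.success and tries:
            # Replay the best parameter set seen with backtracking
            # disabled, so the reported failure is deterministic.
            result = solve(backtracker.get_best_run())
        return result

As in the patch, the final get_best_run() pass exists only to produce a
stable error report once the retry budget is spent.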
diff --git a/portage_with_autodep/pym/_emerge/emergelog.py b/portage_with_autodep/pym/_emerge/emergelog.py
new file mode 100644
index 0000000..d6ef1b4
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/emergelog.py
@@ -0,0 +1,63 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import io
+import sys
+import time
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import secpass
+from portage.output import xtermTitle
+
+# We disable emergelog by default, since it's called from
+# dblink.merge() and we don't want that to trigger log writes
+# unless it's really called via emerge.
+_disable = True
+_emerge_log_dir = '/var/log'
+
+# Coerce to unicode, in order to prevent TypeError when writing
+# raw bytes to TextIOWrapper with python2.
+_log_fmt = _unicode_decode("%.0f: %s\n")
+
+def emergelog(xterm_titles, mystr, short_msg=None):
+
+	if _disable:
+		return
+
+	mystr = _unicode_decode(mystr)
+
+	if short_msg is not None:
+		short_msg = _unicode_decode(short_msg)
+
+	if xterm_titles and short_msg:
+		if "HOSTNAME" in os.environ:
+			short_msg = os.environ["HOSTNAME"]+": "+short_msg
+		xtermTitle(short_msg)
+	try:
+		file_path = os.path.join(_emerge_log_dir, 'emerge.log')
+		existing_log = os.path.isfile(file_path)
+		mylogfile = io.open(_unicode_encode(file_path,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='a', encoding=_encodings['content'],
+			errors='backslashreplace')
+		if not existing_log:
+			portage.util.apply_secpass_permissions(file_path,
+				uid=portage.portage_uid, gid=portage.portage_gid,
+				mode=0o660)
+		mylock = None
+		try:
+			mylock = portage.locks.lockfile(mylogfile)
+			mylogfile.write(_log_fmt % (time.time(), mystr))
+			mylogfile.flush()
+		finally:
+			if mylock:
+				portage.locks.unlockfile(mylock)
+			mylogfile.close()
+	except (IOError,OSError,portage.exception.PortageException) as e:
+		if secpass >= 1:
+			print("emergelog():",e, file=sys.stderr)

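Since emergelog() writes one "%.0f: %s" record per line under a lock, the
resulting log is easy to post-process. A minimal reader, assuming the
default /var/log/emerge.log location and the UTF-8 content encoding
configured above:

    # Sketch only: parse the "%.0f: %s\n" records written by emergelog().
    from __future__ import print_function

    import io
    import time

    def read_emerge_log(path='/var/log/emerge.log'):
        with io.open(path, mode='r', encoding='utf_8', errors='replace') as log:
            for line in log:
                stamp, sep, msg = line.partition(': ')
                if not sep:
                    continue  # skip malformed lines
                try:
                    when = float(stamp)
                except ValueError:
                    continue
                yield when, msg.rstrip('\n')

    for when, msg in read_emerge_log():
        print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(when)), msg)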
diff --git a/portage_with_autodep/pym/_emerge/getloadavg.py b/portage_with_autodep/pym/_emerge/getloadavg.py
new file mode 100644
index 0000000..e9babf1
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/getloadavg.py
@@ -0,0 +1,27 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+
+getloadavg = getattr(os, "getloadavg", None)
+if getloadavg is None:
+	def getloadavg():
+		"""
+		Uses /proc/loadavg to emulate os.getloadavg().
+		Raises OSError if the load average was unobtainable.
+		"""
+		try:
+			loadavg_str = open('/proc/loadavg').readline()
+		except IOError:
+			# getloadavg() is only supposed to raise OSError, so convert
+			raise OSError('unknown')
+		loadavg_split = loadavg_str.split()
+		if len(loadavg_split) < 3:
+			raise OSError('unknown')
+		loadavg_floats = []
+		for i in range(3):
+			try:
+				loadavg_floats.append(float(loadavg_split[i]))
+			except ValueError:
+				raise OSError('unknown')
+		return tuple(loadavg_floats)

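The fallback above matters for emerge's --load-average option on Python
builds that lack os.getloadavg(). A small sketch of how a scheduler might
consume it to gate new work, in the spirit of that option:

    # Sketch only: wait until the 1-minute load average drops below a
    # ceiling before starting more work.
    import time

    from _emerge.getloadavg import getloadavg

    def wait_for_load(max_load=4.0, poll_interval=5):
        while True:
            try:
                one_minute = getloadavg()[0]
            except OSError:
                return  # load unobtainable; do not block forever
            if one_minute < max_load:
                return
            time.sleep(poll_interval)

    wait_for_load()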
diff --git a/portage_with_autodep/pym/_emerge/help.py b/portage_with_autodep/pym/_emerge/help.py
new file mode 100644
index 0000000..c978ce2
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/help.py
@@ -0,0 +1,815 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from portage.const import _ENABLE_DYN_LINK_MAP
+from portage.output import bold, turquoise, green
+
+def shorthelp():
+	print(bold("emerge:")+" the other white meat (command-line interface to the Portage system)")
+	print(bold("Usage:"))
+	print("   "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] [ "+turquoise("ebuild")+" | "+turquoise("tbz2")+" | "+turquoise("file")+" | "+turquoise("@set")+" | "+turquoise("atom")+" ] [ ... ]")
+	print("   "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("system")+" | "+turquoise("world")+" >")
+	print("   "+turquoise("emerge")+" < "+turquoise("--sync")+" | "+turquoise("--metadata")+" | "+turquoise("--info")+" >")
+	print("   "+turquoise("emerge")+" "+turquoise("--resume")+" [ "+green("--pretend")+" | "+green("--ask")+" | "+green("--skipfirst")+" ]")
+	print("   "+turquoise("emerge")+" "+turquoise("--help")+" [ "+green("--verbose")+" ] ")
+	print(bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhjkKlnNoOpPqrsStuvV")+"]")
+	print("          [ " + green("--color")+" < " + turquoise("y") + " | "+ turquoise("n")+" >            ] [ "+green("--columns")+"    ]")
+	print("          [ "+green("--complete-graph")+"             ] [ "+green("--deep")+"       ]")
+	print("          [ "+green("--jobs") + " " + turquoise("JOBS")+" ] [ "+green("--keep-going")+" ] [ " + green("--load-average")+" " + turquoise("LOAD") + "            ]")
+	print("          [ "+green("--newuse")+"    ] [ "+green("--noconfmem")+"  ] [ "+green("--nospinner")+"  ]")
+	print("          [ "+green("--oneshot")+"   ] [ "+green("--onlydeps")+"   ]")
+	print("          [ "+green("--reinstall ")+turquoise("changed-use")+"      ] [ " + green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" >         ]")
+	print(bold("Actions:")+"  [ "+green("--depclean")+" | "+green("--list-sets")+" | "+green("--search")+" | "+green("--sync")+" | "+green("--version")+"        ]")
+
+def help(myopts, havecolor=1):
+	# TODO: Implement a wrap() that accounts for console color escape codes.
+	from textwrap import wrap
+	desc_left_margin = 14
+	desc_indent = desc_left_margin * " "
+	desc_width = 80 - desc_left_margin - 5
+	if "--verbose" not in myopts:
+		shorthelp()
+		print()
+		print("   For more help try 'emerge --help --verbose' or consult the man page.")
+	else:
+		shorthelp()
+		print()
+		print(turquoise("Help (this screen):"))
+		print("       "+green("--help")+" ("+green("-h")+" short option)")
+		print("              Displays this help; an additional argument (see above) will tell")
+		print("              emerge to display detailed help.")
+		print()
+		print(turquoise("Actions:"))
+		print("       "+green("--clean"))
+		print("              Cleans the system by removing outdated packages which will not")
+		print("              remove functionalities or prevent your system from working.")
+		print("              The arguments can be in several different formats :")
+		print("              * world ")
+		print("              * system or")
+		print("              * 'dependency specification' (in single quotes is best.)")
+		print("              Here are a few examples of the dependency specification format:")
+		print("              "+bold("binutils")+" matches")
+		print("                  binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
+		print("              "+bold("sys-devel/binutils")+" matches")
+		print("                  binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
+		print("              "+bold(">sys-devel/binutils-2.11.90.0.7")+" matches")
+		print("                  binutils-2.11.92.0.12.3-r1")
+		print("              "+bold(">=sys-devel/binutils-2.11.90.0.7")+" matches")
+		print("                  binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
+		print("              "+bold("<=sys-devel/binutils-2.11.92.0.12.3-r1")+" matches")
+		print("                  binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
+		print()
+		print("       "+green("--config"))
+		print("              Runs package-specific operations that must be executed after an")
+		print("              emerge process has completed.  This usually entails configuration")
+		print("              file setup or other similar setups that the user may wish to run.")
+		print()
+		print("       "+green("--depclean")+" ("+green("-c")+" short option)")
+
+		paragraph = "Cleans the system by removing packages that are " + \
+		"not associated with explicitly merged packages. Depclean works " + \
+		"by creating the full dependency tree from the " + \
+		"@world set, then comparing it to installed packages. Packages " + \
+		"installed, but not part of the dependency tree, will be " + \
+		"uninstalled by depclean. See --with-bdeps for behavior with " + \
+		"respect to build time dependencies that are not strictly " + \
+		"required. Packages that are part of the world set will " + \
+		"always be kept. They can be manually added to this set with " + \
+		"emerge --noreplace <atom>. As a safety measure, depclean " + \
+		"will not remove any packages unless *all* required dependencies " + \
+		"have been resolved. As a consequence, it is often necessary to " + \
+		"run emerge --update --newuse --deep @world " + \
+		"prior to depclean."
+
+		for line in wrap(paragraph, desc_width):
+			print(desc_indent + line)
+		print()
+
+		paragraph =  "WARNING: Inexperienced users are advised to use " + \
+		"--pretend with this option in order to see a preview of which " + \
+		"packages will be uninstalled. Always study the list of packages " + \
+		"to be cleaned for any obvious mistakes. Note that packages " + \
+		"listed in package.provided (see portage(5)) may be removed by " + \
+		"depclean, even if they are part of the world set."
+
+		paragraph += " Also note that " + \
+			"depclean may break link level dependencies"
+
+		if _ENABLE_DYN_LINK_MAP:
+			paragraph += ", especially when the " + \
+				"--depclean-lib-check option is disabled"
+
+		paragraph += ". Thus, it is " + \
+			"recommended to use a tool such as revdep-rebuild(1) " + \
+			"in order to detect such breakage."
+
+		for line in wrap(paragraph, desc_width):
+			print(desc_indent + line)
+		print()
+
+		paragraph = "Depclean serves as a dependency aware version of " + \
+			"--unmerge. When given one or more atoms, it will unmerge " + \
+			"matched packages that have no reverse dependencies. Use " + \
+			"--depclean together with --verbose to show reverse dependencies."
+
+		for line in wrap(paragraph, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--deselect") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+
+		paragraph = \
+			"Remove atoms and/or sets from the world file. This action is implied " + \
+			"by uninstall actions, including --depclean, " + \
+			"--prune and --unmerge. Use --deselect=n " + \
+			"in order to prevent uninstall actions from removing " + \
+			"atoms from the world file."
+
+		for line in wrap(paragraph, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--ignore-default-opts"))
+
+		paragraph = \
+			"Causes EMERGE_DEFAULT_OPTS (see make.conf(5)) to be ignored."
+
+		for line in wrap(paragraph, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--info"))
+		print("              Displays important portage variables that will be exported to")
+		print("              ebuild.sh when performing merges. This information is useful")
+		print("              for bug reports and verification of settings. All settings in")
+		print("              make.{conf,globals,defaults} and the environment show up if")
+		print("              run with the '--verbose' flag.")
+		print()
+		print("       " + green("--list-sets"))
+		paragraph = "Displays a list of available package sets."
+
+		for line in wrap(paragraph, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--metadata"))
+		print("              Transfers metadata cache from ${PORTDIR}/metadata/cache/ to")
+		print("              /var/cache/edb/dep/ as is normally done on the tail end of an")
+		print("              rsync update using " + bold("emerge --sync") + ". This process populates the")
+		print("              cache database that portage uses for pre-parsed lookups of")
+		print("              package data.  It does not populate cache for the overlays")
+		print("              listed in PORTDIR_OVERLAY.  In order to generate cache for")
+		print("              overlays, use " + bold("--regen") + ".")
+		print()
+		print("       "+green("--prune")+" ("+green("-P")+" short option)")
+		print("              "+turquoise("WARNING: This action can remove important packages!"))
+		paragraph = "Removes all but the highest installed version of a " + \
+			"package from your system. Use --prune together with " + \
+			"--verbose to show reverse dependencies or with --nodeps " + \
+			"to ignore all dependencies. "
+
+		for line in wrap(paragraph, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--regen"))
+		print("              Causes portage to check and update the dependency cache of all")
+		print("              ebuilds in the portage tree. This is not recommended for rsync")
+		print("              users as rsync updates the cache using server-side caches.")
+		print("              Rsync users should simply 'emerge --sync' to regenerate.")
+		desc = "In order to specify parallel --regen behavior, use "+ \
+			"the ---jobs and --load-average options. If you would like to " + \
+			"generate and distribute cache for use by others, use egencache(1)."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--resume")+" ("+green("-r")+" short option)")
+		print("              Resumes the most recent merge list that has been aborted due to an")
+		print("              error. Please note that this operation will only return an error")
+		print("              on failure. If there is nothing for portage to do, then portage")
+		print("              will exit with a message and a success condition. A resume list")
+		print("              will persist until it has been completed in entirety or until")
+		print("              another aborted merge list replaces it. The resume history is")
+		print("              capable of storing two merge lists. After one resume list")
+		print("              completes, it is possible to invoke --resume once again in order")
+		print("              to resume an older list.")
+		print()
+		print("       "+green("--search")+" ("+green("-s")+" short option)")
+		print("              Searches for matches of the supplied string in the current local")
+		print("              portage tree. By default emerge uses a case-insensitive simple ")
+		print("              search, but you can enable a regular expression search by ")
+		print("              prefixing the search string with %.")
+		print("              Prepending the expression with a '@' will cause the category to")
+		print("              be included in the search.")
+		print("              A few examples:")
+		print("              "+bold("emerge --search libc"))
+		print("                  list all packages that contain libc in their name")
+		print("              "+bold("emerge --search '%^kde'"))
+		print("                  list all packages starting with kde")
+		print("              "+bold("emerge --search '%gcc$'"))
+		print("                  list all packages ending with gcc")
+		print("              "+bold("emerge --search '%@^dev-java.*jdk'"))
+		print("                  list all available Java JDKs")
+		print()
+		print("       "+green("--searchdesc")+" ("+green("-S")+" short option)")
+		print("              Matches the search string against the description field as well")
+		print("              the package's name. Take caution as the descriptions are also")
+		print("              matched as regular expressions.")
+		print("                emerge -S html")
+		print("                emerge -S applet")
+		print("                emerge -S 'perl.*module'")
+		print()
+		print("       "+green("--sync"))
+		desc = "This updates the portage tree that is located in the " + \
+			"directory that the PORTDIR variable refers to (default " + \
+			"location is /usr/portage). The SYNC variable specifies " + \
+			"the remote URI from which files will be synchronized. " + \
+			"The PORTAGE_SYNC_STALE variable configures " + \
+			"warnings that are shown when emerge --sync has not " + \
+			"been executed recently."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print(desc_indent + turquoise("WARNING:"))
+		desc = "The emerge --sync action will modify and/or delete " + \
+			"files located inside the directory that the PORTDIR " + \
+			"variable refers to (default location is /usr/portage). " + \
+			"For more information, see the PORTDIR documentation in " + \
+			"the make.conf(5) man page."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print(desc_indent + green("NOTE:"))
+		desc = "The emerge-webrsync program will download the entire " + \
+			"portage tree as a tarball, which is much faster than emerge " + \
+			"--sync for first time syncs."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--unmerge")+" ("+green("-C")+" short option)")
+		print("              "+turquoise("WARNING: This action can remove important packages!"))
+		print("              Removes all matching packages. This does no checking of")
+		print("              dependencies, so it may remove packages necessary for the proper")
+		print("              operation of your system. Its arguments can be atoms or")
+		print("              ebuilds. For a dependency aware version of --unmerge, use")
+		print("              --depclean or --prune.")
+		print()
+		print("       "+green("--version")+" ("+green("-V")+" short option)")
+		print("              Displays the currently installed version of portage along with")
+		print("              other information useful for quick reference on a system. See")
+		print("              "+bold("emerge info")+" for more advanced information.")
+		print()
+		print(turquoise("Options:"))
+		print("       "+green("--accept-properties=ACCEPT_PROPERTIES"))
+		desc = "This option temporarily overrides the ACCEPT_PROPERTIES " + \
+			"variable. The ACCEPT_PROPERTIES variable is incremental, " + \
+			"which means that the specified setting is appended to the " + \
+			"existing value from your configuration. The special -* " + \
+			"token can be used to discard the existing configuration " + \
+			"value and start fresh. See the MASKED PACKAGES section " + \
+			"and make.conf(5) for more information about " + \
+			"ACCEPT_PROPERTIES. A typical usage example for this option " + \
+			"would be to use --accept-properties=-interactive to " + \
+			"temporarily mask interactive packages. With default " + \
+			"configuration, this would result in an effective " + \
+			"ACCEPT_PROPERTIES value of \"* -interactive\"."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--alphabetical"))
+		print("              When displaying USE and other flag output, combines the enabled")
+		print("              and disabled flags into a single list and sorts it alphabetically.")
+		print("              With this option, output such as USE=\"dar -bar -foo\" will instead")
+		print("              be displayed as USE=\"-bar dar -foo\"")
+		print()
+		print("       " + green("--ask") + \
+			" [ %s | %s ] (%s short option)" % \
+			(turquoise("y"), turquoise("n"), green("-a")))
+		desc = "Before performing the action, display what will take place (server info for " + \
+			"--sync, --pretend output for merge, and so forth), then ask " + \
+			"whether to proceed with the action or abort.  Using --ask is more " + \
+			"efficient than using --pretend and then executing the same command " + \
+			"without --pretend, as dependencies will only need to be calculated once. " + \
+			"WARNING: If the \"Enter\" key is pressed at the prompt (with no other input), " + \
+			"it is interpreted as acceptance of the first choice.  Note that the input " + \
+			"buffer is not cleared prior to the prompt, so an accidental press of the " + \
+			"\"Enter\" key at any time prior to the prompt will be interpreted as a choice! " + \
+			"Use the --ask-enter-invalid option if you want a single \"Enter\" key " + \
+			"press to be interpreted as invalid input."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("        " + green("--ask-enter-invalid"))
+		desc = "When used together with the --ask option, " + \
+			"interpret a single \"Enter\" key press as " + \
+			"invalid input. This helps prevent accidental " + \
+			"acceptance of the first choice. This option is " + \
+			"intended to be set in the make.conf(5) " + \
+			"EMERGE_DEFAULT_OPTS variable."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print() 
+		print("       " + green("--autounmask") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "Automatically unmask packages and generate package.use " + \
+			"settings as necessary to satisfy dependencies. This " + \
+			"option is enabled by default. If any configuration " + \
+			"changes are required, then they will be displayed " + \
+			"after the merge list and emerge will immediately " + \
+			"abort. If the displayed configuration changes are " + \
+			"satisfactory, you should copy and paste them into " + \
+			"the specified configuration file(s), or enable the " + \
+			"--autounmask-write option. The " + \
+			"EMERGE_DEFAULT_OPTS variable may be used to " + \
+			"disable this option by default in make.conf(5)."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--autounmask-write") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "If --autounmask is enabled, changes are written " + \
+			"to config files, respecting CONFIG_PROTECT and --ask."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--backtrack") + " " + turquoise("COUNT"))
+		desc = "Specifies an integer number of times to backtrack if " + \
+			"dependency calculation fails due to a conflict or an " + \
+			"unsatisfied dependency (default: '10')."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--binpkg-respect-use") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "Tells emerge to ignore binary packages if their use flags" + \
+			" don't match the current configuration. (default: 'n')"
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--buildpkg") + \
+			" [ %s | %s ] (%s short option)" % \
+			(turquoise("y"), turquoise("n"), green("-b")))
+		desc = "Tells emerge to build binary packages for all ebuilds processed in" + \
+			" addition to actually merging the packages. Useful for maintainers" + \
+			" or if you administrate multiple Gentoo Linux systems (build once," + \
+			" emerge tbz2s everywhere) as well as disaster recovery. The package" + \
+			" will be created in the" + \
+			" ${PKGDIR}/All directory. An alternative for already-merged" + \
+			" packages is to use quickpkg(1) which creates a tbz2 from the" + \
+			" live filesystem."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--buildpkgonly")+" ("+green("-B")+" short option)")
+		print("              Creates a binary package, but does not merge it to the")
+		print("              system. This has the restriction that unsatisfied dependencies")
+		print("              must not exist for the desired package as they cannot be used if")
+		print("              they do not exist on the system.")
+		print()
+		print("       " + green("--changed-use"))
+		desc = "This is an alias for --reinstall=changed-use."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--changelog")+" ("+green("-l")+" short option)")
+		print("              When pretending, also display the ChangeLog entries for packages")
+		print("              that will be upgraded.")
+		print()
+		print("       "+green("--color") + " < " + turquoise("y") + " | "+ turquoise("n")+" >")
+		print("              Enable or disable color output. This option will override NOCOLOR")
+		print("              (see make.conf(5)) and may also be used to force color output when")
+		print("              stdout is not a tty (by default, color is disabled unless stdout")
+		print("              is a tty).")
+		print()
+		print("       "+green("--columns"))
+		print("              Display the pretend output in a tabular form. Versions are")
+		print("              aligned vertically.")
+		print()
+		print("       "+green("--complete-graph") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "This causes emerge to consider the deep dependencies of all" + \
+			" packages from the world set. With this option enabled," + \
+			" emerge will bail out if it determines that the given operation will" + \
+			" break any dependencies of the packages that have been added to the" + \
+			" graph. Like the --deep option, the --complete-graph" + \
+			" option will significantly increase the time taken for dependency" + \
+			" calculations. Note that, unlike the --deep option, the" + \
+			" --complete-graph option does not cause any more packages to" + \
+			" be updated than would have otherwise " + \
+			"been updated with the option disabled. " + \
+			"Using --with-bdeps=y together with --complete-graph makes " + \
+			"the graph as complete as possible."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--config-root=DIR"))
+		desc = "Set the PORTAGE_CONFIGROOT environment variable " + \
+			"which is documented in the emerge(1) man page."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--debug")+" ("+green("-d")+" short option)")
+		print("              Tell emerge to run the ebuild command in --debug mode. In this")
+		print("              mode, the bash build environment will run with the -x option,")
+		print("              causing it to output verbose debug information print to stdout.")
+		print("              --debug is great for finding bash syntax errors as providing")
+		print("              very verbose information about the dependency and build process.")
+		print()
+		print("       "+green("--deep") + " " + turquoise("[DEPTH]") + \
+			" (" + green("-D") + " short option)")
+		print("              This flag forces emerge to consider the entire dependency tree of")
+		print("              packages, instead of checking only the immediate dependencies of")
+		print("              the packages. As an example, this catches updates in libraries")
+		print("              that are not directly listed in the dependencies of a package.")
+		print("              Also see --with-bdeps for behavior with respect to build time")
+		print("              dependencies that are not strictly required.")
+		print()
+
+		if _ENABLE_DYN_LINK_MAP:
+			print("       " + green("--depclean-lib-check") + " [ %s | %s ]" % \
+				(turquoise("y"), turquoise("n")))
+			desc = "Account for library link-level dependencies during " + \
+				"--depclean and --prune actions. This " + \
+				"option is enabled by default. In some cases this can " + \
+				"be somewhat time-consuming. This option is ignored " + \
+				"when FEATURES=\"preserve-libs\" is enabled in " + \
+				"make.conf(5), since any libraries that have " + \
+				"consumers will simply be preserved."
+			for line in wrap(desc, desc_width):
+				print(desc_indent + line)
+			print()
+
+		print("       "+green("--emptytree")+" ("+green("-e")+" short option)")
+		desc = "Reinstalls target atoms and their entire deep " + \
+			"dependency tree, as though no packages are currently " + \
+			"installed. You should run this with --pretend " + \
+			"first to make sure the result is what you expect."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--exclude") + " " + turquoise("ATOMS"))
+		desc = "A space separated list of package names or slot atoms. " + \
+			"Emerge won't  install any ebuild or binary package that " + \
+			"matches any of the given package atoms."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--fail-clean") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "Clean up temporary files after a build failure. This is " + \
+			"particularly useful if you have PORTAGE_TMPDIR on " + \
+			"tmpfs. If this option is enabled, you probably also want " + \
+			"to enable PORT_LOGDIR (see make.conf(5)) in " + \
+			"order to save the build log."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--fetchonly")+" ("+green("-f")+" short option)")
+		print("              Instead of doing any package building, just perform fetches for")
+		print("              all packages (main package as well as all dependencies.) When")
+		print("              used in combination with --pretend all the SRC_URIs will be")
+		print("              displayed multiple mirrors per line, one line per file.")
+		print()
+		print("       "+green("--fetch-all-uri")+" ("+green("-F")+" short option)")
+		print("              Same as --fetchonly except that all package files, including those")
+		print("              not required to build the package, will be processed.")
+		print()
+		print("       " + green("--getbinpkg") + \
+			" [ %s | %s ] (%s short option)" % \
+			(turquoise("y"), turquoise("n"), green("-g")))
+		print("              Using the server and location defined in PORTAGE_BINHOST, portage")
+		print("              will download the information from each binary file there and it")
+		print("              will use that information to help build the dependency list. This")
+		print("              option implies '-k'. (Use -gK for binary-only merging.)")
+		print()
+		print("       " + green("--getbinpkgonly") + \
+			" [ %s | %s ] (%s short option)" % \
+			(turquoise("y"), turquoise("n"), green("-G")))
+		print("              This option is identical to -g, as above, except it will not use")
+		print("              ANY information from the local machine. All binaries will be")
+		print("              downloaded from the remote server without consulting packages")
+		print("              existing in the packages directory.")
+		print()
+		print("       " + green("--jobs") + " " + turquoise("[JOBS]") + " ("+green("-j")+" short option)")
+		desc = "Specifies the number of packages " + \
+			"to build simultaneously. If this option is " + \
+			"given without an argument, emerge will not " + \
+			"limit the number of jobs that " + \
+			"can run simultaneously. Also see " + \
+			"the related --load-average option. " + \
+			"Note that interactive packages currently force a setting " + \
+			"of --jobs=1. This issue can be temporarily avoided " + \
+			"by specifying --accept-properties=-interactive."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--keep-going") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "Continue as much as possible after " + \
+			"an error. When an error occurs, " + \
+			"dependencies are recalculated for " + \
+			"remaining packages and any with " + \
+			"unsatisfied dependencies are " + \
+			"automatically dropped. Also see " + \
+			"the related --skipfirst option."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--load-average") + " " + turquoise("LOAD"))
+		desc = "Specifies that no new builds should " + \
+			"be started if there are other builds " + \
+			"running and the load average is at " + \
+			"least LOAD (a floating-point number). " + \
+			"This option is recommended for use " + \
+			"in combination with --jobs in " + \
+			"order to avoid excess load. See " + \
+			"make(1) for information about " + \
+			"analogous options that should be " + \
+			"configured via MAKEOPTS in " + \
+			"make.conf(5)."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--misspell-suggestions") + " < %s | %s >" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "Enable or disable misspell suggestions. By default, " + \
+			"emerge will show a list of packages with similar names " + \
+			"when a package doesn't exist. The EMERGE_DEFAULT_OPTS " + \
+			"variable may be used to disable this option by default"
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--newuse")+" ("+green("-N")+" short option)")
+		desc = "Tells emerge to include installed packages where USE " + \
+			"flags have changed since compilation. This option " + \
+			"also implies the --selective option. If you would " + \
+			"like to skip rebuilds for which disabled flags have " + \
+			"been added to or removed from IUSE, see the related " + \
+			"--reinstall=changed-use option."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--noconfmem"))
+		print("              Portage keeps track of files that have been placed into")
+		print("              CONFIG_PROTECT directories, and normally it will not merge the")
+		print("              same file more than once, as that would become annoying. This")
+		print("              can lead to problems when the user wants the file in the case")
+		print("              of accidental deletion. With this option, files will always be")
+		print("              merged to the live fs instead of silently dropped.")
+		print()
+		print("       "+green("--nodeps")+" ("+green("-O")+" short option)")
+		print("              Merge specified packages, but don't merge any dependencies.")
+		print("              Note that the build may fail if deps aren't satisfied.")
+		print() 
+		print("       "+green("--noreplace")+" ("+green("-n")+" short option)")
+		print("              Skip the packages specified on the command-line that have")
+		print("              already been installed.  Without this option, any packages,")
+		print("              ebuilds, or deps you specify on the command-line *will* cause")
+		print("              Portage to remerge the package, even if it is already installed.")
+		print("              Note that Portage won't remerge dependencies by default.")
+		print() 
+		print("       "+green("--nospinner"))
+		print("              Disables the spinner regardless of terminal type.")
+		print()
+		print("       " + green("--usepkg-exclude") + " " + turquoise("ATOMS"))
+		desc = "A space separated list of package names or slot atoms." + \
+			" Emerge will ignore matching binary packages."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--rebuild-exclude") + " " + turquoise("ATOMS"))
+		desc = "A space separated list of package names or slot atoms." + \
+			" Emerge will not rebuild matching packages due to --rebuild."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--rebuild-ignore") + " " + turquoise("ATOMS"))
+		desc = "A space separated list of package names or slot atoms." + \
+			" Emerge will not rebuild packages that depend on matching " + \
+			" packages due to --rebuild."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--oneshot")+" ("+green("-1")+" short option)")
+		print("              Emerge as normal, but don't add packages to the world profile.")
+		print("              This package will only be updated if it is depended upon by")
+		print("              another package.")
+		print()
+		print("       "+green("--onlydeps")+" ("+green("-o")+" short option)")
+		print("              Only merge (or pretend to merge) the dependencies of the")
+		print("              specified packages, not the packages themselves.")
+		print()
+		print("       " + green("--package-moves") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "Perform package moves when necessary. This option " + \
+			"is enabled by default. WARNING: This option " + \
+			"should remain enabled under normal circumstances. " + \
+			"Do not disable it unless you know what you are " + \
+			"doing."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--pretend")+" ("+green("-p")+" short option)")
+		print("              Instead of actually performing the merge, simply display what")
+		print("              ebuilds and tbz2s *would* have been installed if --pretend")
+		print("              weren't used.  Using --pretend is strongly recommended before")
+		print("              installing an unfamiliar package.  In the printout, N = new,")
+		print("              U = updating, R = replacing, F = fetch  restricted, B = blocked")
+		print("              by an already installed package, D = possible downgrading,")
+		print("              S = slotted install. --verbose causes affecting use flags to be")
+		print("              printed out accompanied by a '+' for enabled and a '-' for")
+		print("              disabled USE flags.")
+		print()
+		print("       " + green("--quiet") + \
+			" [ %s | %s ] (%s short option)" % \
+			(turquoise("y"), turquoise("n"), green("-q")))
+		print("              Effects vary, but the general outcome is a reduced or condensed")
+		print("              output from portage's displays.")
+		print()
+		print("       " + green("--quiet-build") + \
+			" [ %s | %s ]" % (turquoise("y"), turquoise("n")))
+		desc = "Redirect all build output to logs alone, and do not " + \
+			"display it on stdout."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--quiet-unmerge-warn"))
+		desc = "Disable the warning message that's shown prior to " + \
+			"--unmerge actions. This option is intended " + \
+			"to be set in the make.conf(5) " + \
+			"EMERGE_DEFAULT_OPTS variable."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--rebuild-if-new-rev") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "Rebuild packages when dependencies that are " + \
+			"used at both build-time and run-time are built, " + \
+			"if the dependency is not already installed with the " + \
+			"same version and revision."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--rebuild-if-new-ver") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "Rebuild packages when dependencies that are " + \
+			"used at both build-time and run-time are built, " + \
+			"if the dependency is not already installed with the " + \
+			"same version. Revision numbers are ignored."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--rebuild-if-unbuilt") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "Rebuild packages when dependencies that are " + \
+			"used at both build-time and run-time are built."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--rebuilt-binaries") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "Replace installed packages with binary packages that have " + \
+			"been rebuilt. Rebuilds are detected by comparison of " + \
+			"BUILD_TIME package metadata. This option is enabled " + \
+			"automatically when using binary packages " + \
+			"(--usepkgonly or --getbinpkgonly) together with " + \
+			"--update and --deep."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--rebuilt-binaries-timestamp") + "=%s" % turquoise("TIMESTAMP"))
+		desc = "This option modifies emerge's behaviour only if " + \
+			"--rebuilt-binaries is given. Only binaries that " + \
+			"have a BUILD_TIME that is larger than the given TIMESTAMP " + \
+			"and that is larger than that of the installed package will " + \
+			"be considered by the rebuilt-binaries logic."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--reinstall ") + turquoise("changed-use"))
+		print("              Tells emerge to include installed packages where USE flags have")
+		print("              changed since installation.  Unlike --newuse, this option does")
+		print("              not trigger reinstallation when flags that the user has not")
+		print("              enabled are added or removed.")
+		print()
+		print("       " + green("--reinstall-atoms") + " " + turquoise("ATOMS"))
+		desc = "A space separated list of package names or slot atoms. " + \
+			"Emerge will treat matching packages as if they are not " + \
+			"installed, and reinstall them if necessary."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--root=DIR"))
+		desc = "Set the ROOT environment variable " + \
+			"which is documented in the emerge(1) man page."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--root-deps[=rdeps]"))
+		desc = "If no argument is given then build-time dependencies of packages for " + \
+			"ROOT are installed to " + \
+			"ROOT instead of /. If the rdeps argument is given then discard " + \
+			"all build-time dependencies of packages for ROOT. This option is " + \
+			"only meaningful when used together with ROOT and it should not " + \
+			"be enabled under normal circumstances. For currently supported " + \
+			"EAPI values, the build-time dependencies are specified in the " + \
+			"DEPEND variable. However, behavior may change for new " + \
+			"EAPIs when related extensions are added in the future."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--select") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "Add specified packages to the world set (inverse of " + \
+			"--oneshot). This is useful if you want to " + \
+			"use EMERGE_DEFAULT_OPTS to make " + \
+			"--oneshot behavior default."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--selective") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "This identical to the --noreplace option. " + \
+			"Some options, such as --update, imply --selective. " + \
+			"Use --selective=n if you want to forcefully disable " + \
+			"--selective, regardless of options like --update."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--skipfirst"))
+		desc = "This option is only valid when " + \
+			"used with --resume.  It removes the " + \
+			"first package in the resume list. " + \
+			"Dependencies are recalculated for " + \
+			"remaining packages and any that " + \
+			"have unsatisfied dependencies or are " + \
+			"masked will be automatically dropped. " + \
+			"Also see the related " + \
+			"--keep-going option."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--tree")+" ("+green("-t")+" short option)")
+		print("              Shows the dependency tree using indentation for dependencies.")
+		print("              The packages are also listed in reverse merge order so that")
+		print("              a package's dependencies follow the package. Only really useful")
+		print("              in combination with --emptytree, --update or --deep.")
+		print()
+		print("       " + green("--unordered-display"))
+		desc = "By default the displayed merge list is sorted using the " + \
+			"order in which the packages will be merged. When " + \
+			"--tree is used together with this option, this " + \
+			"constraint is removed, hopefully leading to a more " + \
+			"readable dependency tree."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       "+green("--update")+" ("+green("-u")+" short option)")
+		desc = "Updates packages to the best version available, which may " + \
+			"not always be the  highest version number due to masking " + \
+			"for testing and development. Package atoms specified on " + \
+			"the command line are greedy, meaning that unspecific " + \
+			"atoms may match multiple versions of slotted packages."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--use-ebuild-visibility") + " [ %s | %s ]" % \
+			(turquoise("y"), turquoise("n")))
+		desc = "Use unbuilt ebuild metadata for visibility " + \
+			"checks on built packages."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--useoldpkg-atoms") + " " + turquoise("ATOMS"))
+		desc = "A space separated list of package names or slot atoms." + \
+			" Emerge will prefer matching binary packages over newer" + \
+			" unbuilt packages."
+		for line in wrap(desc, desc_width):
+			print(desc_indent + line)
+		print()
+		print("       " + green("--usepkg") + \
+			" [ %s | %s ] (%s short option)" % \
+			(turquoise("y"), turquoise("n"), green("-k")))
+		print("              Tell emerge to use binary packages (from $PKGDIR) if they are")
+		print("              available, thus possibly avoiding some time-consuming compiles.")
+		print("              This option is useful for CD installs; you can export")
+		print("              PKGDIR=/mnt/cdrom/packages and then use this option to have")
+		print("              emerge \"pull\" binary packages from the CD in order to satisfy") 
+		print("              dependencies.")
+		print()
+		print("       " + green("--usepkgonly") + \
+			" [ %s | %s ] (%s short option)" % \
+			(turquoise("y"), turquoise("n"), green("-K")))
+		print("              Like --usepkg above, except this only allows the use of binary")
+		print("              packages, and it will abort the emerge if the package is not")
+		print("              available at the time of dependency calculation.")
+		print()
+		print("       "+green("--verbose")+" ("+green("-v")+" short option)")
+		print("              Effects vary, but the general outcome is an increased or expanded")
+		print("              display of content in portage's displays.")
+		print()
+		print("       "+green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" >")
+		print("              In dependency calculations, pull in build time dependencies that")
+		print("              are not strictly required. This defaults to 'n' for installation")
+		print("              actions and 'y' for the --depclean action. This setting can be")
+		print("              added to EMERGE_DEFAULT_OPTS (see make.conf(5)) and later")
+		print("              overridden via the command line.")
+		print()

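The TODO at the top of help() is easy to demonstrate: textwrap counts the
invisible ANSI escape bytes toward the line width, so colorized text wraps
earlier than it visually should. A small standalone sketch (escape codes
hard-coded here instead of using portage.output):

    from textwrap import wrap

    GREEN = "\033[32m"
    RESET = "\033[0m"

    plain = "emerge --update --deep @world"
    colored = GREEN + "emerge" + RESET + " --update --deep @world"

    # Same visible text, but the colored string is 9 bytes longer
    # (29 vs 38), so wrap() breaks it at different points.
    print(wrap(plain, 20))
    print(wrap(colored, 20))
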
diff --git a/portage_with_autodep/pym/_emerge/is_valid_package_atom.py b/portage_with_autodep/pym/_emerge/is_valid_package_atom.py
new file mode 100644
index 0000000..7cb2a5b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/is_valid_package_atom.py
@@ -0,0 +1,21 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import re
+from portage.dep import isvalidatom
+
+def insert_category_into_atom(atom, category):
+	alphanum = re.search(r'\w', atom)
+	if alphanum:
+		ret = atom[:alphanum.start()] + "%s/" % category + \
+			atom[alphanum.start():]
+	else:
+		ret = None
+	return ret
+
+def is_valid_package_atom(x, allow_repo=False):
+	if "/" not in x:
+		x2 = insert_category_into_atom(x, 'cat')
+		if x2 is not None:
+			x = x2
+	return isvalidatom(x, allow_blockers=False, allow_repo=allow_repo)

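Illustrative behavior of these helpers, assuming the usual portage.dep
semantics where isvalidatom() returns a true value for valid non-blocker
atoms ('cat' is the dummy category used by is_valid_package_atom itself):

    from _emerge.is_valid_package_atom import (
        insert_category_into_atom, is_valid_package_atom)

    # A bare name gets the dummy category spliced in before the first
    # word character; operators stay in front of it.
    print(insert_category_into_atom('binutils', 'cat'))         # cat/binutils
    print(insert_category_into_atom('>=binutils-2.22', 'cat'))  # >=cat/binutils-2.22
    print(insert_category_into_atom('>=', 'cat'))               # None (no \w char)

    print(bool(is_valid_package_atom('>=sys-devel/binutils-2.22')))  # True
    print(bool(is_valid_package_atom('!sys-devel/binutils')))  # False, blockers rejected
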
diff --git a/portage_with_autodep/pym/_emerge/main.py b/portage_with_autodep/pym/_emerge/main.py
new file mode 100644
index 0000000..2830214
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/main.py
@@ -0,0 +1,1910 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+import signal
+import stat
+import sys
+import textwrap
+import platform
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+import _emerge.help
+import portage.xpak, errno, re, time
+from portage.output import colorize, xtermTitle, xtermTitleReset
+from portage.output import create_color_func
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
+
+from portage.const import _ENABLE_DYN_LINK_MAP
+import portage.elog
+import portage.util
+import portage.locks
+import portage.exception
+from portage.data import secpass
+from portage.dbapi.dep_expand import dep_expand
+from portage.util import normalize_path as normpath
+from portage.util import shlex_split, writemsg_level, writemsg_stdout
+from portage._sets import SETPREFIX
+from portage._global_updates import _global_updates
+
+from _emerge.actions import action_config, action_sync, action_metadata, \
+	action_regen, action_search, action_uninstall, action_info, action_build, \
+	adjust_configs, chk_updated_cfg_files, display_missing_pkg_set, \
+	display_news_notification, getportageversion, load_emerge_config
+import _emerge
+from _emerge.emergelog import emergelog
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
+from _emerge.is_valid_package_atom import is_valid_package_atom
+from _emerge.stdout_spinner import stdout_spinner
+from _emerge.userquery import userquery
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+options=[
+"--alphabetical",
+"--ask-enter-invalid",
+"--buildpkgonly",
+"--changed-use",
+"--changelog",    "--columns",
+"--debug",
+"--digest",
+"--emptytree",
+"--fetchonly",    "--fetch-all-uri",
+"--ignore-default-opts",
+"--noconfmem",
+"--newuse",
+"--nodeps",       "--noreplace",
+"--nospinner",    "--oneshot",
+"--onlydeps",     "--pretend",
+"--quiet-unmerge-warn",
+"--resume",
+"--searchdesc",
+"--skipfirst",
+"--tree",
+"--unordered-display",
+"--update",
+"--verbose",
+]
+
+shortmapping={
+"1":"--oneshot",
+"B":"--buildpkgonly",
+"c":"--depclean",
+"C":"--unmerge",
+"d":"--debug",
+"e":"--emptytree",
+"f":"--fetchonly", "F":"--fetch-all-uri",
+"h":"--help",
+"l":"--changelog",
+"n":"--noreplace", "N":"--newuse",
+"o":"--onlydeps",  "O":"--nodeps",
+"p":"--pretend",   "P":"--prune",
+"r":"--resume",
+"s":"--search",    "S":"--searchdesc",
+"t":"--tree",
+"u":"--update",
+"v":"--verbose",   "V":"--version"
+}
+
+def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
+
+	if os.path.exists("/usr/bin/install-info"):
+		out = portage.output.EOutput()
+		regen_infodirs=[]
+		for z in infodirs:
+			if z=='':
+				continue
+			inforoot=normpath(root+z)
+			if os.path.isdir(inforoot) and \
+				not [x for x in os.listdir(inforoot) \
+				if x.startswith('.keepinfodir')]:
+					infomtime = os.stat(inforoot)[stat.ST_MTIME]
+					if inforoot not in prev_mtimes or \
+						prev_mtimes[inforoot] != infomtime:
+							regen_infodirs.append(inforoot)
+
+		if not regen_infodirs:
+			portage.writemsg_stdout("\n")
+			if portage.util.noiselimit >= 0:
+				out.einfo("GNU info directory index is up-to-date.")
+		else:
+			portage.writemsg_stdout("\n")
+			if portage.util.noiselimit >= 0:
+				out.einfo("Regenerating GNU info directory index...")
+
+			dir_extensions = ("", ".gz", ".bz2")
+			icount=0
+			badcount=0
+			errmsg = ""
+			for inforoot in regen_infodirs:
+				if inforoot=='':
+					continue
+
+				if not os.path.isdir(inforoot) or \
+					not os.access(inforoot, os.W_OK):
+					continue
+
+				file_list = os.listdir(inforoot)
+				file_list.sort()
+				dir_file = os.path.join(inforoot, "dir")
+				moved_old_dir = False
+				processed_count = 0
+				for x in file_list:
+					if x.startswith(".") or \
+						os.path.isdir(os.path.join(inforoot, x)):
+						continue
+					if x.startswith("dir"):
+						skip = False
+						for ext in dir_extensions:
+							if x == "dir" + ext or \
+								x == "dir" + ext + ".old":
+								skip = True
+								break
+						if skip:
+							continue
+					if processed_count == 0:
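+						# Move any existing dir file(s) aside before the first
+						# install-info call below regenerates the index.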
+						for ext in dir_extensions:
+							try:
+								os.rename(dir_file + ext, dir_file + ext + ".old")
+								moved_old_dir = True
+							except EnvironmentError as e:
+								if e.errno != errno.ENOENT:
+									raise
+								del e
+					processed_count += 1
+					myso = portage.subprocess_getstatusoutput(
+						"LANG=C LANGUAGE=C /usr/bin/install-info " +
+						"--dir-file=%s/dir %s/%s" % (inforoot, inforoot, x))[1]
+					existsstr="already exists, for file `"
+					if myso!="":
+						if re.search(existsstr,myso):
+							# Already exists... Don't increment the count for this.
+							pass
+						elif myso[:44]=="install-info: warning: no info dir entry in ":
+							# This info file doesn't contain a DIR-header: install-info produces this
+							# (harmless) warning (the --quiet switch doesn't seem to work).
+							# Don't increment the count for this.
+							pass
+						else:
+							badcount=badcount+1
+							errmsg += myso + "\n"
+					icount=icount+1
+
+				if moved_old_dir and not os.path.exists(dir_file):
+					# We didn't generate a new dir file, so put the old file
+					# back where it was originally found.
+					for ext in dir_extensions:
+						try:
+							os.rename(dir_file + ext + ".old", dir_file + ext)
+						except EnvironmentError as e:
+							if e.errno != errno.ENOENT:
+								raise
+							del e
+
+				# Clean dir.old cruft so that they don't prevent
+				# unmerge of otherwise empty directories.
+				for ext in dir_extensions:
+					try:
+						os.unlink(dir_file + ext + ".old")
+					except EnvironmentError as e:
+						if e.errno != errno.ENOENT:
+							raise
+						del e
+
+				#update mtime so we can potentially avoid regenerating.
+				prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]
+
+			if badcount:
+				out.eerror("Processed %d info files; %d errors." % \
+					(icount, badcount))
+				writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
+			else:
+				if icount > 0 and portage.util.noiselimit >= 0:
+					out.einfo("Processed %d info files." % (icount,))
+
+def display_preserved_libs(vardbapi, myopts):
+	MAX_DISPLAY = 3
+
+	if vardbapi._linkmap is None or \
+		vardbapi._plib_registry is None:
+		# preserve-libs is entirely disabled
+		return
+
+	# Explicitly load and prune the PreservedLibsRegistry in order
+	# to ensure that we do not display stale data.
+	vardbapi._plib_registry.load()
+
+	if vardbapi._plib_registry.hasEntries():
+		if "--quiet" in myopts:
+			print()
+			print(colorize("WARN", "!!!") + " existing preserved libs found")
+			return
+		else:
+			print()
+			print(colorize("WARN", "!!!") + " existing preserved libs:")
+
+		plibdata = vardbapi._plib_registry.getPreservedLibs()
+		linkmap = vardbapi._linkmap
+		consumer_map = {}
+		owners = {}
+		linkmap_broken = False
+
+		try:
+			linkmap.rebuild()
+		except portage.exception.CommandNotFound as e:
+			writemsg_level("!!! Command Not Found: %s\n" % (e,),
+				level=logging.ERROR, noiselevel=-1)
+			del e
+			linkmap_broken = True
+		else:
+			search_for_owners = set()
+			for cpv in plibdata:
+				internal_plib_keys = set(linkmap._obj_key(f) \
+					for f in plibdata[cpv])
+				for f in plibdata[cpv]:
+					if f in consumer_map:
+						continue
+					consumers = []
+					for c in linkmap.findConsumers(f):
+						# Filter out any consumers that are also preserved libs
+						# belonging to the same package as the provider.
+						if linkmap._obj_key(c) not in internal_plib_keys:
+							consumers.append(c)
+					consumers.sort()
+					consumer_map[f] = consumers
+					search_for_owners.update(consumers[:MAX_DISPLAY+1])
+
+			owners = {}
+			for f in search_for_owners:
+				owner_set = set()
+				for owner in linkmap.getOwners(f):
+					owner_dblink = vardbapi._dblink(owner)
+					if owner_dblink.exists():
+						owner_set.add(owner_dblink)
+				if owner_set:
+					owners[f] = owner_set
+
+		for cpv in plibdata:
+			print(colorize("WARN", ">>>") + " package: %s" % cpv)
+			samefile_map = {}
+			for f in plibdata[cpv]:
+				obj_key = linkmap._obj_key(f)
+				alt_paths = samefile_map.get(obj_key)
+				if alt_paths is None:
+					alt_paths = set()
+					samefile_map[obj_key] = alt_paths
+				alt_paths.add(f)
+
+			for alt_paths in samefile_map.values():
+				alt_paths = sorted(alt_paths)
+				for p in alt_paths:
+					print(colorize("WARN", " * ") + " - %s" % (p,))
+				f = alt_paths[0]
+				consumers = consumer_map.get(f, [])
+				for c in consumers[:MAX_DISPLAY]:
+					print(colorize("WARN", " * ") + "     used by %s (%s)" % \
+						(c, ", ".join(x.mycpv for x in owners.get(c, []))))
+				if len(consumers) == MAX_DISPLAY + 1:
+					print(colorize("WARN", " * ") + "     used by %s (%s)" % \
+						(consumers[MAX_DISPLAY], ", ".join(x.mycpv \
+						for x in owners.get(consumers[MAX_DISPLAY], []))))
+				elif len(consumers) > MAX_DISPLAY:
+					print(colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY))
+		print("Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries")
+
+def post_emerge(myaction, myopts, myfiles,
+	target_root, trees, mtimedb, retval):
+	"""
+	Misc. things to run at the end of a merge session.
+
+	Update Info Files
+	Update Config Files
+	Update News Items
+	Commit mtimeDB
+	Display preserved libs warnings
+
+	@param myaction: The action returned from parse_opts()
+	@type myaction: String
+	@param myopts: emerge options
+	@type myopts: dict
+	@param myfiles: emerge arguments
+	@type myfiles: list
+	@param target_root: The target ROOT for myaction
+	@type target_root: String
+	@param trees: A dictionary mapping each ROOT to its package databases
+	@type trees: dict
+	@param mtimedb: The mtimeDB to store data needed across merge invocations
+	@type mtimedb: MtimeDB class instance
+	@param retval: Emerge's return value
+	@type retval: Int
+	"""
+
+	root_config = trees[target_root]["root_config"]
+	vardbapi = trees[target_root]["vartree"].dbapi
+	settings = vardbapi.settings
+	info_mtimes = mtimedb["info"]
+
+	# Load the most current variables from ${ROOT}/etc/profile.env
+	settings.unlock()
+	settings.reload()
+	settings.regenerate()
+	settings.lock()
+
+	config_protect = shlex_split(settings.get("CONFIG_PROTECT", ""))
+	infodirs = settings.get("INFOPATH","").split(":") + \
+		settings.get("INFODIR","").split(":")
+
+	os.chdir("/")
+
+	if retval == os.EX_OK:
+		exit_msg = " *** exiting successfully."
+	else:
+		exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
+	emergelog("notitles" not in settings.features, exit_msg)
+
+	_flush_elog_mod_echo()
+
+	if not vardbapi._pkgs_changed:
+		display_news_notification(root_config, myopts)
+		# If vdb state has not changed then there's nothing else to do.
+		return
+
+	vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
+	portage.util.ensure_dirs(vdb_path)
+	vdb_lock = None
+	if os.access(vdb_path, os.W_OK) and "--pretend" not in myopts:
+		vardbapi.lock()
+		vdb_lock = True
+
+	if vdb_lock:
+		try:
+			if "noinfo" not in settings.features:
+				chk_updated_info_files(target_root,
+					infodirs, info_mtimes, retval)
+			mtimedb.commit()
+		finally:
+			if vdb_lock:
+				vardbapi.unlock()
+
+	chk_updated_cfg_files(settings['EROOT'], config_protect)
+
+	display_news_notification(root_config, myopts)
+	if retval in (None, os.EX_OK) or ("--pretend" not in myopts):
+		display_preserved_libs(vardbapi, myopts)
+
+	postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
+		portage.USER_CONFIG_PATH, "bin", "post_emerge")
+	if os.access(postemerge, os.X_OK):
+		hook_retval = portage.process.spawn(
+						[postemerge], env=settings.environ())
+		if hook_retval != os.EX_OK:
+			writemsg_level(
+				" %s spawn failed of %s\n" % (bad("*"), postemerge,),
+				level=logging.ERROR, noiselevel=-1)
+
+	if "--quiet" not in myopts and \
+		myaction is None and "@world" in myfiles:
+		show_depclean_suggestion()
+
+def show_depclean_suggestion():
+	out = portage.output.EOutput()
+	msg = "After world updates, it is important to remove " + \
+		"obsolete packages with emerge --depclean. Refer " + \
+		"to `man emerge` for more information."
+	for line in textwrap.wrap(msg, 72):
+		out.ewarn(line)
+
+def multiple_actions(action1, action2):
+	sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
+	sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
+	sys.exit(1)
+
+def insert_optional_args(args):
+	"""
+	Parse optional arguments and insert a value if one has
+	not been provided. This is done before feeding the args
+	to the optparse parser since that parser does not support
+	this feature natively.
+	"""
+
+	class valid_integers(object):
+		def __contains__(self, s):
+			try:
+				return int(s) >= 0
+			except (ValueError, OverflowError):
+				return False
+
+	valid_integers = valid_integers()
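+	# Instantiated so that membership tests such as "4" in valid_integers
+	# hold for any non-negative integer string.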
+	y_or_n = ('y', 'n',)
+
+	new_args = []
+
+	default_arg_opts = {
+		'--ask'                  : y_or_n,
+		'--autounmask'           : y_or_n,
+		'--autounmask-write'     : y_or_n,
+		'--buildpkg'             : y_or_n,
+		'--complete-graph'       : y_or_n,
+		'--deep'       : valid_integers,
+		'--deselect'             : y_or_n,
+		'--binpkg-respect-use'   : y_or_n,
+		'--fail-clean'           : y_or_n,
+		'--getbinpkg'            : y_or_n,
+		'--getbinpkgonly'        : y_or_n,
+		'--jobs'       : valid_integers,
+		'--keep-going'           : y_or_n,
+		'--package-moves'        : y_or_n,
+		'--quiet'                : y_or_n,
+		'--quiet-build'          : y_or_n,
+		'--rebuild-if-new-rev'   : y_or_n,
+		'--rebuild-if-new-ver'   : y_or_n,
+		'--rebuild-if-unbuilt'   : y_or_n,
+		'--rebuilt-binaries'     : y_or_n,
+		'--root-deps'  : ('rdeps',),
+		'--select'               : y_or_n,
+		'--selective'            : y_or_n,
+		"--use-ebuild-visibility": y_or_n,
+		'--usepkg'               : y_or_n,
+		'--usepkgonly'           : y_or_n,
+	}
+
+	if _ENABLE_DYN_LINK_MAP:
+		default_arg_opts['--depclean-lib-check'] = y_or_n
+
+	short_arg_opts = {
+		'D' : valid_integers,
+		'j' : valid_integers,
+	}
+
+	# Don't make things like "-kn" expand to "-k n"
+	# since the existence of -n makes it too ambiguous.
+	short_arg_opts_n = {
+		'a' : y_or_n,
+		'b' : y_or_n,
+		'g' : y_or_n,
+		'G' : y_or_n,
+		'k' : y_or_n,
+		'K' : y_or_n,
+		'q' : y_or_n,
+	}
+
+	arg_stack = args[:]
+	arg_stack.reverse()
+	while arg_stack:
+		arg = arg_stack.pop()
+
+		default_arg_choices = default_arg_opts.get(arg)
+		if default_arg_choices is not None:
+			new_args.append(arg)
+			if arg_stack and arg_stack[-1] in default_arg_choices:
+				new_args.append(arg_stack.pop())
+			else:
+				# insert default argument
+				new_args.append('True')
+			continue
+
+		if arg[:1] != "-" or arg[:2] == "--":
+			new_args.append(arg)
+			continue
+
+		match = None
+		for k, arg_choices in short_arg_opts.items():
+			if k in arg:
+				match = k
+				break
+
+		if match is None:
+			for k, arg_choices in short_arg_opts_n.items():
+				if k in arg:
+					match = k
+					break
+
+		if match is None:
+			new_args.append(arg)
+			continue
+
+		if len(arg) == 2:
+			new_args.append(arg)
+			if arg_stack and arg_stack[-1] in arg_choices:
+				new_args.append(arg_stack.pop())
+			else:
+				# insert default argument
+				new_args.append('True')
+			continue
+
+		# Insert an empty placeholder in order to
+		# satisfy the requirements of optparse.
+
+		new_args.append("-" + match)
+		opt_arg = None
+		saved_opts = None
+
+		if arg[1:2] == match:
+			if match not in short_arg_opts_n and arg[2:] in arg_choices:
+				opt_arg = arg[2:]
+			else:
+				saved_opts = arg[2:]
+				opt_arg = "True"
+		else:
+			saved_opts = arg[1:].replace(match, "")
+			opt_arg = "True"
+
+		if opt_arg is None and arg_stack and \
+			arg_stack[-1] in arg_choices:
+			opt_arg = arg_stack.pop()
+
+		if opt_arg is None:
+			new_args.append("True")
+		else:
+			new_args.append(opt_arg)
+
+		if saved_opts is not None:
+			# Recycle these on arg_stack since they
+			# might contain another match.
+			arg_stack.append("-" + saved_opts)
+
+	return new_args
+
+def _find_bad_atoms(atoms):
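+	# Accepts plain package names and slot atoms, optionally with wildcards,
+	# e.g. "python" or "dev-lang/python"; flags anything carrying an
+	# operator, blocker or USE dependency, e.g. ">=dev-lang/python-2.7"
+	# or "dev-lang/python[sqlite]" (illustrative atoms).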
+	bad_atoms = []
+	for x in ' '.join(atoms).split():
+		bad_atom = False
+		try:
+			atom = portage.dep.Atom(x, allow_wildcard=True)
+		except portage.exception.InvalidAtom:
+			try:
+				atom = portage.dep.Atom("*/"+x, allow_wildcard=True)
+			except portage.exception.InvalidAtom:
+				bad_atom = True
+
+		if bad_atom or atom.operator or atom.blocker or atom.use:
+			bad_atoms.append(x)
+	return bad_atoms
+
+
+def parse_opts(tmpcmdline, silent=False):
+	myaction=None
+	myopts = {}
+	myfiles=[]
+
+	global options, shortmapping
+
+	actions = frozenset([
+		"clean", "config", "depclean", "help",
+		"info", "list-sets", "metadata",
+		"prune", "regen",  "search",
+		"sync",  "unmerge", "version",
+	])
+
+	longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
+	true_y_or_n = ("True", "y", "n")
+	true_y = ("True", "y")
+	argument_options = {
+
+		"--ask": {
+			"shortopt" : "-a",
+			"help"    : "prompt before performing any actions",
+			"type"    : "choice",
+			"choices" : true_y_or_n
+		},
+
+		"--autounmask": {
+			"help"    : "automatically unmask packages",
+			"type"    : "choice",
+			"choices" : true_y_or_n
+		},
+
+		"--autounmask-write": {
+			"help"    : "write changes made by --autounmask to disk",
+			"type"    : "choice",
+			"choices" : true_y_or_n
+		},
+
+		"--accept-properties": {
+			"help":"temporarily override ACCEPT_PROPERTIES",
+			"action":"store"
+		},
+
+		"--backtrack": {
+
+			"help"   : "Specifies how many times to backtrack if dependency " + \
+				"calculation fails ",
+
+			"action" : "store"
+		},
+
+		"--buildpkg": {
+			"shortopt" : "-b",
+			"help"     : "build binary packages",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
+
+		"--config-root": {
+			"help":"specify the location for portage configuration files",
+			"action":"store"
+		},
+		"--color": {
+			"help":"enable or disable color output",
+			"type":"choice",
+			"choices":("y", "n")
+		},
+
+		"--complete-graph": {
+			"help"    : "completely account for all known dependencies",
+			"type"    : "choice",
+			"choices" : true_y_or_n
+		},
+
+		"--deep": {
+
+			"shortopt" : "-D",
+
+			"help"   : "Specifies how deep to recurse into dependencies " + \
+				"of packages given as arguments. If no argument is given, " + \
+				"depth is unlimited. Default behavior is to skip " + \
+				"dependencies of installed packages.",
+
+			"action" : "store"
+		},
+
+		"--deselect": {
+			"help"    : "remove atoms/sets from the world file",
+			"type"    : "choice",
+			"choices" : true_y_or_n
+		},
+
+		"--exclude": {
+			"help"   :"A space separated list of package names or slot atoms. " + \
+				"Emerge won't  install any ebuild or binary package that " + \
+				"matches any of the given package atoms.",
+
+			"action" : "append"
+		},
+
+		"--fail-clean": {
+			"help"    : "clean temp files after build failure",
+			"type"    : "choice",
+			"choices" : true_y_or_n
+		},
+
+		"--jobs": {
+
+			"shortopt" : "-j",
+
+			"help"   : "Specifies the number of packages to build " + \
+				"simultaneously.",
+
+			"action" : "store"
+		},
+
+		"--keep-going": {
+			"help"    : "continue as much as possible after an error",
+			"type"    : "choice",
+			"choices" : true_y_or_n
+		},
+
+		"--load-average": {
+
+			"help"   :"Specifies that no new builds should be started " + \
+				"if there are other builds running and the load average " + \
+				"is at least LOAD (a floating-point number).",
+
+			"action" : "store"
+		},
+
+		"--misspell-suggestions": {
+			"help"    : "enable package name misspell suggestions",
+			"type"    : "choice",
+			"choices" : ("y", "n")
+		},
+
+		"--with-bdeps": {
+			"help":"include unnecessary build time dependencies",
+			"type":"choice",
+			"choices":("y", "n")
+		},
+		"--reinstall": {
+			"help":"specify conditions to trigger package reinstallation",
+			"type":"choice",
+			"choices":["changed-use"]
+		},
+
+		"--reinstall-atoms": {
+			"help"   :"A space separated list of package names or slot atoms. " + \
+				"Emerge will treat matching packages as if they are not " + \
+				"installed, and reinstall them if necessary. Implies --deep.",
+
+			"action" : "append",
+		},
+
+		"--binpkg-respect-use": {
+			"help"    : "discard binary packages if their use flags \
+				don't match the current configuration",
+			"type"    : "choice",
+			"choices" : true_y_or_n
+		},
+
+		"--getbinpkg": {
+			"shortopt" : "-g",
+			"help"     : "fetch binary packages",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
+
+		"--getbinpkgonly": {
+			"shortopt" : "-G",
+			"help"     : "fetch binary packages only",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
+
+		"--usepkg-exclude": {
+			"help"   :"A space separated list of package names or slot atoms. " + \
+				"Emerge will ignore matching binary packages. ",
+
+			"action" : "append",
+		},
+
+		"--rebuild-exclude": {
+			"help"   :"A space separated list of package names or slot atoms. " + \
+				"Emerge will not rebuild these packages due to the " + \
+				"--rebuild flag. ",
+
+			"action" : "append",
+		},
+
+		"--rebuild-ignore": {
+			"help"   :"A space separated list of package names or slot atoms. " + \
+				"Emerge will not rebuild packages that depend on matching " + \
+				"packages due to the --rebuild flag. ",
+
+			"action" : "append",
+		},
+
+		"--package-moves": {
+			"help"     : "perform package moves when necessary",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
+
+		"--quiet": {
+			"shortopt" : "-q",
+			"help"     : "reduced or condensed output",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
+
+		"--quiet-build": {
+			"help"     : "redirect build output to logs",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
+
+		"--rebuild-if-new-rev": {
+			"help"     : "Rebuild packages when dependencies that are " + \
+				"used at both build-time and run-time are built, " + \
+				"if the dependency is not already installed with the " + \
+				"same version and revision.",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
+
+		"--rebuild-if-new-ver": {
+			"help"     : "Rebuild packages when dependencies that are " + \
+				"used at both build-time and run-time are built, " + \
+				"if the dependency is not already installed with the " + \
+				"same version. Revision numbers are ignored.",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
+
+		"--rebuild-if-unbuilt": {
+			"help"     : "Rebuild packages when dependencies that are " + \
+				"used at both build-time and run-time are built.",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
+
+		"--rebuilt-binaries": {
+			"help"     : "replace installed packages with binary " + \
+			             "packages that have been rebuilt",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
+		
+		"--rebuilt-binaries-timestamp": {
+			"help"   : "use only binaries that are newer than this " + \
+			           "timestamp for --rebuilt-binaries",
+			"action" : "store"
+		},
+
+		"--root": {
+		 "help"   : "specify the target root filesystem for merging packages",
+		 "action" : "store"
+		},
+
+		"--root-deps": {
+			"help"    : "modify interpretation of depedencies",
+			"type"    : "choice",
+			"choices" :("True", "rdeps")
+		},
+
+		"--select": {
+			"help"    : "add specified packages to the world set " + \
+			            "(inverse of --oneshot)",
+			"type"    : "choice",
+			"choices" : true_y_or_n
+		},
+
+		"--selective": {
+			"help"    : "identical to --noreplace",
+			"type"    : "choice",
+			"choices" : true_y_or_n
+		},
+
+		"--use-ebuild-visibility": {
+			"help"     : "use unbuilt ebuild metadata for visibility checks on built packages",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
+
+		"--useoldpkg-atoms": {
+			"help"   :"A space separated list of package names or slot atoms. " + \
+				"Emerge will prefer matching binary packages over newer unbuilt packages. ",
+
+			"action" : "append",
+		},
+
+		"--usepkg": {
+			"shortopt" : "-k",
+			"help"     : "use binary packages",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
+
+		"--usepkgonly": {
+			"shortopt" : "-K",
+			"help"     : "use only binary packages",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
+
+	}
+
+	if _ENABLE_DYN_LINK_MAP:
+		argument_options["--depclean-lib-check"] = {
+			"help"    : "check for consumers of libraries before removing them",
+			"type"    : "choice",
+			"choices" : true_y_or_n
+		}
+
+	from optparse import OptionParser
+	parser = OptionParser()
+	if parser.has_option("--help"):
+		parser.remove_option("--help")
+
+	for action_opt in actions:
+		parser.add_option("--" + action_opt, action="store_true",
+			dest=action_opt.replace("-", "_"), default=False)
+	for myopt in options:
+		parser.add_option(myopt, action="store_true",
+			dest=myopt.lstrip("--").replace("-", "_"), default=False)
+	for shortopt, longopt in shortmapping.items():
+		parser.add_option("-" + shortopt, action="store_true",
+			dest=longopt.lstrip("--").replace("-", "_"), default=False)
+	for myalias, myopt in longopt_aliases.items():
+		parser.add_option(myalias, action="store_true",
+			dest=myopt.lstrip("--").replace("-", "_"), default=False)
+
+	for myopt, kwargs in argument_options.items():
+		shortopt = kwargs.pop("shortopt", None)
+		args = [myopt]
+		if shortopt is not None:
+			args.append(shortopt)
+		parser.add_option(dest=myopt.lstrip("--").replace("-", "_"),
+			*args, **kwargs)
+
+	tmpcmdline = insert_optional_args(tmpcmdline)
+
+	myoptions, myargs = parser.parse_args(args=tmpcmdline)
+
+	if myoptions.ask in true_y:
+		myoptions.ask = True
+	else:
+		myoptions.ask = None
+
+	if myoptions.autounmask in true_y:
+		myoptions.autounmask = True
+
+	if myoptions.autounmask_write in true_y:
+		myoptions.autounmask_write = True
+
+	if myoptions.buildpkg in true_y:
+		myoptions.buildpkg = True
+	else:
+		myoptions.buildpkg = None
+
+	if myoptions.changed_use is not False:
+		myoptions.reinstall = "changed-use"
+		myoptions.changed_use = False
+
+	if myoptions.deselect in true_y:
+		myoptions.deselect = True
+
+	if myoptions.binpkg_respect_use in true_y:
+		myoptions.binpkg_respect_use = True
+	else:
+		myoptions.binpkg_respect_use = None
+
+	if myoptions.complete_graph in true_y:
+		myoptions.complete_graph = True
+	else:
+		myoptions.complete_graph = None
+
+	if _ENABLE_DYN_LINK_MAP:
+		if myoptions.depclean_lib_check in true_y:
+			myoptions.depclean_lib_check = True
+
+	if myoptions.exclude:
+		bad_atoms = _find_bad_atoms(myoptions.exclude)
+		if bad_atoms and not silent:
+			parser.error("Invalid Atom(s) in --exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+				(",".join(bad_atoms),))
+
+	if myoptions.reinstall_atoms:
+		bad_atoms = _find_bad_atoms(myoptions.reinstall_atoms)
+		if bad_atoms and not silent:
+			parser.error("Invalid Atom(s) in --reinstall-atoms parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+				(",".join(bad_atoms),))
+
+	if myoptions.rebuild_exclude:
+		bad_atoms = _find_bad_atoms(myoptions.rebuild_exclude)
+		if bad_atoms and not silent:
+			parser.error("Invalid Atom(s) in --rebuild-exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+				(",".join(bad_atoms),))
+
+	if myoptions.rebuild_ignore:
+		bad_atoms = _find_bad_atoms(myoptions.rebuild_ignore)
+		if bad_atoms and not silent:
+			parser.error("Invalid Atom(s) in --rebuild-ignore parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+				(",".join(bad_atoms),))
+
+	if myoptions.usepkg_exclude:
+		bad_atoms = _find_bad_atoms(myoptions.usepkg_exclude)
+		if bad_atoms and not silent:
+			parser.error("Invalid Atom(s) in --usepkg-exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+				(",".join(bad_atoms),))
+
+	if myoptions.useoldpkg_atoms:
+		bad_atoms = _find_bad_atoms(myoptions.useoldpkg_atoms)
+		if bad_atoms and not silent:
+			parser.error("Invalid Atom(s) in --useoldpkg-atoms parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+				(",".join(bad_atoms),))
+
+	if myoptions.fail_clean in true_y:
+		myoptions.fail_clean = True
+
+	if myoptions.getbinpkg in true_y:
+		myoptions.getbinpkg = True
+	else:
+		myoptions.getbinpkg = None
+
+	if myoptions.getbinpkgonly in true_y:
+		myoptions.getbinpkgonly = True
+	else:
+		myoptions.getbinpkgonly = None
+
+	if myoptions.keep_going in true_y:
+		myoptions.keep_going = True
+	else:
+		myoptions.keep_going = None
+
+	if myoptions.package_moves in true_y:
+		myoptions.package_moves = True
+
+	if myoptions.quiet in true_y:
+		myoptions.quiet = True
+	else:
+		myoptions.quiet = None
+
+	if myoptions.quiet_build in true_y:
+		myoptions.quiet_build = True
+	else:
+		myoptions.quiet_build = None
+
+	if myoptions.rebuild_if_new_ver in true_y:
+		myoptions.rebuild_if_new_ver = True
+	else:
+		myoptions.rebuild_if_new_ver = None
+
+	if myoptions.rebuild_if_new_rev in true_y:
+		myoptions.rebuild_if_new_rev = True
+		myoptions.rebuild_if_new_ver = None
+	else:
+		myoptions.rebuild_if_new_rev = None
+
+	if myoptions.rebuild_if_unbuilt in true_y:
+		myoptions.rebuild_if_unbuilt = True
+		myoptions.rebuild_if_new_rev = None
+		myoptions.rebuild_if_new_ver = None
+	else:
+		myoptions.rebuild_if_unbuilt = None
+
+	if myoptions.rebuilt_binaries in true_y:
+		myoptions.rebuilt_binaries = True
+
+	if myoptions.root_deps in true_y:
+		myoptions.root_deps = True
+
+	if myoptions.select in true_y:
+		myoptions.select = True
+		myoptions.oneshot = False
+	elif myoptions.select == "n":
+		myoptions.oneshot = True
+
+	if myoptions.selective in true_y:
+		myoptions.selective = True
+
+	if myoptions.backtrack is not None:
+
+		try:
+			backtrack = int(myoptions.backtrack)
+		except (OverflowError, ValueError):
+			backtrack = -1
+
+		if backtrack < 0:
+			backtrack = None
+			if not silent:
+				parser.error("Invalid --backtrack parameter: '%s'\n" % \
+					(myoptions.backtrack,))
+
+		myoptions.backtrack = backtrack
+
+	if myoptions.deep is not None:
+		deep = None
+		if myoptions.deep == "True":
+			deep = True
+		else:
+			try:
+				deep = int(myoptions.deep)
+			except (OverflowError, ValueError):
+				deep = -1
+
+		if deep is not True and deep < 0:
+			deep = None
+			if not silent:
+				parser.error("Invalid --deep parameter: '%s'\n" % \
+					(myoptions.deep,))
+
+		myoptions.deep = deep
+
+	if myoptions.jobs:
+		jobs = None
+		if myoptions.jobs == "True":
+			jobs = True
+		else:
+			try:
+				jobs = int(myoptions.jobs)
+			except ValueError:
+				jobs = -1
+
+		if jobs is not True and \
+			jobs < 1:
+			jobs = None
+			if not silent:
+				parser.error("Invalid --jobs parameter: '%s'\n" % \
+					(myoptions.jobs,))
+
+		myoptions.jobs = jobs
+
+	if myoptions.load_average:
+		try:
+			load_average = float(myoptions.load_average)
+		except ValueError:
+			load_average = 0.0
+
+		if load_average <= 0.0:
+			load_average = None
+			if not silent:
+				parser.error("Invalid --load-average parameter: '%s'\n" % \
+					(myoptions.load_average,))
+
+		myoptions.load_average = load_average
+	
+	if myoptions.rebuilt_binaries_timestamp:
+		try:
+			rebuilt_binaries_timestamp = int(myoptions.rebuilt_binaries_timestamp)
+		except ValueError:
+			rebuilt_binaries_timestamp = -1
+
+		if rebuilt_binaries_timestamp < 0:
+			rebuilt_binaries_timestamp = 0
+			if not silent:
+				parser.error("Invalid --rebuilt-binaries-timestamp parameter: '%s'\n" % \
+					(myoptions.rebuilt_binaries_timestamp,))
+
+		myoptions.rebuilt_binaries_timestamp = rebuilt_binaries_timestamp
+
+	if myoptions.use_ebuild_visibility in true_y:
+		myoptions.use_ebuild_visibility = True
+	else:
+		# None or "n"
+		pass
+
+	if myoptions.usepkg in true_y:
+		myoptions.usepkg = True
+	else:
+		myoptions.usepkg = None
+
+	if myoptions.usepkgonly in true_y:
+		myoptions.usepkgonly = True
+	else:
+		myoptions.usepkgonly = None
+
+	for myopt in options:
+		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
+		if v:
+			myopts[myopt] = True
+
+	for myopt in argument_options:
+		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
+		if v is not None:
+			myopts[myopt] = v
+
+	if myoptions.searchdesc:
+		myoptions.search = True
+
+	for action_opt in actions:
+		v = getattr(myoptions, action_opt.replace("-", "_"))
+		if v:
+			if myaction:
+				multiple_actions(myaction, action_opt)
+				sys.exit(1)
+			myaction = action_opt
+
+	if myaction is None and myoptions.deselect is True:
+		myaction = 'deselect'
+
+	if myargs and sys.hexversion < 0x3000000 and \
+		not isinstance(myargs[0], unicode):
+		for i in range(len(myargs)):
+			myargs[i] = portage._unicode_decode(myargs[i])
+
+	myfiles += myargs
+
+	return myaction, myopts, myfiles
+
+# Warn about features that may confuse users and
+# lead them to report invalid bugs.
+_emerge_features_warn = frozenset(['keeptemp', 'keepwork'])
+
+def validate_ebuild_environment(trees):
+	features_warn = set()
+	for myroot in trees:
+		settings = trees[myroot]["vartree"].settings
+		settings.validate()
+		features_warn.update(
+			_emerge_features_warn.intersection(settings.features))
+
+	if features_warn:
+		msg = "WARNING: The FEATURES variable contains one " + \
+			"or more values that should be disabled under " + \
+			"normal circumstances: %s" % " ".join(features_warn)
+		out = portage.output.EOutput()
+		for line in textwrap.wrap(msg, 65):
+			out.ewarn(line)
+
+def apply_priorities(settings):
+	ionice(settings)
+	nice(settings)
+
+def nice(settings):
+	try:
+		os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
+	except (OSError, ValueError) as e:
+		out = portage.output.EOutput()
+		out.eerror("Failed to change nice value to '%s'" % \
+			settings["PORTAGE_NICENESS"])
+		out.eerror("%s\n" % str(e))
+
+def ionice(settings):
+
+	ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
+	if ionice_cmd:
+		ionice_cmd = portage.util.shlex_split(ionice_cmd)
+	if not ionice_cmd:
+		return
+
+	from portage.util import varexpand
+	variables = {"PID" : str(os.getpid())}
+	cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
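+	# For example, PORTAGE_IONICE_COMMAND="ionice -c 3 -p \${PID}" expands
+	# here to ["ionice", "-c", "3", "-p", "<current pid>"] (illustrative).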
+
+	try:
+		rval = portage.process.spawn(cmd, env=os.environ)
+	except portage.exception.CommandNotFound:
+		# The OS kernel probably doesn't support ionice,
+		# so return silently.
+		return
+
+	if rval != os.EX_OK:
+		out = portage.output.EOutput()
+		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
+		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
+
+def setconfig_fallback(root_config):
+	from portage._sets.base import DummyPackageSet
+	from portage._sets.files import WorldSelectedSet
+	from portage._sets.profiles import PackagesSystemSet
+	setconfig = root_config.setconfig
+	setconfig.psets['world'] = DummyPackageSet(atoms=['@selected', '@system'])
+	setconfig.psets['selected'] = WorldSelectedSet(root_config.settings['EROOT'])
+	setconfig.psets['system'] = \
+		PackagesSystemSet(root_config.settings.profiles)
+	root_config.sets = setconfig.getSets()
+
+def get_missing_sets(root_config):
+	# emerge requires existence of "world", "selected", and "system"
+	missing_sets = []
+
+	for s in ("selected", "system", "world",):
+		if s not in root_config.sets:
+			missing_sets.append(s)
+
+	return missing_sets
+
+def missing_sets_warning(root_config, missing_sets):
+	if len(missing_sets) > 2:
+		missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
+		missing_sets_str += ', and "%s"' % missing_sets[-1]
+	elif len(missing_sets) == 2:
+		missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
+	else:
+		missing_sets_str = '"%s"' % missing_sets[-1]
+	msg = ["emerge: incomplete set configuration, " + \
+		"missing set(s): %s" % missing_sets_str]
+	if root_config.sets:
+		msg.append("        sets defined: %s" % ", ".join(root_config.sets))
+	global_config_path = portage.const.GLOBAL_CONFIG_PATH
+	if root_config.settings['EPREFIX']:
+		global_config_path = os.path.join(root_config.settings['EPREFIX'],
+				portage.const.GLOBAL_CONFIG_PATH.lstrip(os.sep))
+	msg.append("        This usually means that '%s'" % \
+		(os.path.join(global_config_path, "sets/portage.conf"),))
+	msg.append("        is missing or corrupt.")
+	msg.append("        Falling back to default world and system set configuration!!!")
+	for line in msg:
+		writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
+
+def ensure_required_sets(trees):
+	warning_shown = False
+	for root_trees in trees.values():
+		missing_sets = get_missing_sets(root_trees["root_config"])
+		if missing_sets and not warning_shown:
+			warning_shown = True
+			missing_sets_warning(root_trees["root_config"], missing_sets)
+		if missing_sets:
+			setconfig_fallback(root_trees["root_config"])
+
+def expand_set_arguments(myfiles, myaction, root_config):
+	retval = os.EX_OK
+	setconfig = root_config.setconfig
+
+	sets = setconfig.getSets()
+
+	# In order to know exactly which atoms/sets should be added to the
+	# world file, the depgraph performs set expansion later. It will get
+	# confused about where the atoms came from if it's not allowed to
+	# expand them itself.
+	do_not_expand = (None, )
+	newargs = []
+	for a in myfiles:
+		if a in ("system", "world"):
+			newargs.append(SETPREFIX+a)
+		else:
+			newargs.append(a)
+	myfiles = newargs
+	del newargs
+	newargs = []
+
+	# separators for set arguments
+	ARG_START = "{"
+	ARG_END = "}"
+
+	for i in range(0, len(myfiles)):
+		if myfiles[i].startswith(SETPREFIX):
+			start = 0
+			end = 0
+			x = myfiles[i][len(SETPREFIX):]
+			newset = ""
+			while x:
+				start = x.find(ARG_START)
+				end = x.find(ARG_END)
+				if start > 0 and start < end:
+					namepart = x[:start]
+					argpart = x[start+1:end]
+
+					# TODO: implement proper quoting
+					args = argpart.split(",")
+					options = {}
+					for a in args:
+						if "=" in a:
+							k, v  = a.split("=", 1)
+							options[k] = v
+						else:
+							options[a] = "True"
+					setconfig.update(namepart, options)
+					newset += (x[:start-len(namepart)]+namepart)
+					x = x[end+len(ARG_END):]
+				else:
+					newset += x
+					x = ""
+			myfiles[i] = SETPREFIX+newset
+
+	sets = setconfig.getSets()
+
+	# display errors that occurred while loading the SetConfig instance
+	for e in setconfig.errors:
+		print(colorize("BAD", "Error during set creation: %s" % e))
+
+	unmerge_actions = ("unmerge", "prune", "clean", "depclean")
+
+	for a in myfiles:
+		if a.startswith(SETPREFIX):
+			s = a[len(SETPREFIX):]
+			if s not in sets:
+				display_missing_pkg_set(root_config, s)
+				return (None, 1)
+			setconfig.active.append(s)
+			try:
+				set_atoms = setconfig.getSetAtoms(s)
+			except portage.exception.PackageSetNotFound as e:
+				writemsg_level(("emerge: the given set '%s' " + \
+					"contains a non-existent set named '%s'.\n") % \
+					(s, e), level=logging.ERROR, noiselevel=-1)
+				return (None, 1)
+			if myaction in unmerge_actions and \
+					not sets[s].supportsOperation("unmerge"):
+				sys.stderr.write("emerge: the given set '%s' does " % s + \
+					"not support unmerge operations\n")
+				retval = 1
+			elif not set_atoms:
+				print("emerge: '%s' is an empty set" % s)
+			elif myaction not in do_not_expand:
+				newargs.extend(set_atoms)
+			else:
+				newargs.append(SETPREFIX+s)
+			for e in sets[s].errors:
+				print(e)
+		else:
+			newargs.append(a)
+	return (newargs, retval)
+
+def repo_name_check(trees):
+	missing_repo_names = set()
+	for root_trees in trees.values():
+		porttree = root_trees.get("porttree")
+		if porttree:
+			portdb = porttree.dbapi
+			missing_repo_names.update(portdb.getMissingRepoNames())
+			if portdb.porttree_root in missing_repo_names and \
+				not os.path.exists(os.path.join(
+				portdb.porttree_root, "profiles")):
+				# This is normal if $PORTDIR happens to be empty,
+				# so don't warn about it.
+				missing_repo_names.remove(portdb.porttree_root)
+
+	if missing_repo_names:
+		msg = []
+		msg.append("WARNING: One or more repositories " + \
+			"have missing repo_name entries:")
+		msg.append("")
+		for p in missing_repo_names:
+			msg.append("\t%s/profiles/repo_name" % (p,))
+		msg.append("")
+		msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
+			"should be a plain text file containing a unique " + \
+			"name for the repository on the first line.", 70))
+		msg.append("\n")
+		writemsg_level("".join("%s\n" % l for l in msg),
+			level=logging.WARNING, noiselevel=-1)
+
+	return bool(missing_repo_names)
+
+def repo_name_duplicate_check(trees):
+	ignored_repos = {}
+	for root, root_trees in trees.items():
+		if 'porttree' in root_trees:
+			portdb = root_trees['porttree'].dbapi
+			if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
+				for repo_name, paths in portdb.getIgnoredRepos():
+					k = (root, repo_name, portdb.getRepositoryPath(repo_name))
+					ignored_repos.setdefault(k, []).extend(paths)
+
+	if ignored_repos:
+		msg = []
+		msg.append('WARNING: One or more repositories ' + \
+			'have been ignored due to duplicate')
+		msg.append('  profiles/repo_name entries:')
+		msg.append('')
+		for k in sorted(ignored_repos):
+			msg.append('  %s overrides' % ", ".join(k))
+			for path in ignored_repos[k]:
+				msg.append('    %s' % (path,))
+			msg.append('')
+		msg.extend('  ' + x for x in textwrap.wrap(
+			"All profiles/repo_name entries must be unique in order " + \
+			"to avoid having duplicates ignored. " + \
+			"Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
+			"/etc/make.conf if you would like to disable this warning."))
+		msg.append("\n")
+		writemsg_level(''.join('%s\n' % l for l in msg),
+			level=logging.WARNING, noiselevel=-1)
+
+	return bool(ignored_repos)
+
+def config_protect_check(trees):
+	for root, root_trees in trees.items():
+		if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
+			msg = "!!! CONFIG_PROTECT is empty"
+			if root != "/":
+				msg += " for '%s'" % root
+			msg += "\n"
+			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
+
+def profile_check(trees, myaction):
+	if myaction in ("help", "info", "sync", "version"):
+		return os.EX_OK
+	for root, root_trees in trees.items():
+		if root_trees["root_config"].settings.profiles:
+			continue
+		# generate some profile related warning messages
+		validate_ebuild_environment(trees)
+		msg = "If you have just changed your profile configuration, you " + \
+			"should revert back to the previous configuration. Due to " + \
+			"your current profile being invalid, allowed actions are " + \
+			"limited to --help, --info, --sync, and --version."
+		writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
+			level=logging.ERROR, noiselevel=-1)
+		return 1
+	return os.EX_OK
+
+def check_procfs():
+	procfs_path = '/proc'
+	if platform.system() not in ("Linux",) or \
+		os.path.ismount(procfs_path):
+		return os.EX_OK
+	msg = "It seems that %s is not mounted. You have been warned." % procfs_path
+	writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
+		level=logging.ERROR, noiselevel=-1)
+	return 1
+
+def emerge_main(args=None):
+	"""
+	@param args: command arguments (default: sys.argv[1:])
+	@type args: list
+	"""
+	if args is None:
+		args = sys.argv[1:]
+
+	portage._disable_legacy_globals()
+	portage.dep._internal_warnings = True
+	# Disable color until we're sure that it should be enabled (after
+	# EMERGE_DEFAULT_OPTS has been parsed).
+	portage.output.havecolor = 0
+	# This first pass is just for options that need to be known as early as
+	# possible, such as --config-root.  They will be parsed again later,
+	# together with EMERGE_DEFAULT_OPTS (which may vary depending on
+	# the value of --config-root).
+	myaction, myopts, myfiles = parse_opts(args, silent=True)
+	if "--debug" in myopts:
+		os.environ["PORTAGE_DEBUG"] = "1"
+	if "--config-root" in myopts:
+		os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
+	if "--root" in myopts:
+		os.environ["ROOT"] = myopts["--root"]
+	if "--accept-properties" in myopts:
+		os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"]
+
+	# Portage needs to ensure a sane umask for the files it creates.
+	os.umask(0o22)
+	settings, trees, mtimedb = load_emerge_config()
+	portdb = trees[settings["ROOT"]]["porttree"].dbapi
+	rval = profile_check(trees, myaction)
+	if rval != os.EX_OK:
+		return rval
+
+	tmpcmdline = []
+	if "--ignore-default-opts" not in myopts:
+		tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
+	tmpcmdline.extend(args)
+	myaction, myopts, myfiles = parse_opts(tmpcmdline)
+
+	if myaction not in ('help', 'info', 'version') and \
+		myopts.get('--package-moves') != 'n' and \
+		_global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
+		mtimedb.commit()
+		# Reload the whole config from scratch.
+		settings, trees, mtimedb = load_emerge_config(trees=trees)
+		portdb = trees[settings["ROOT"]]["porttree"].dbapi
+
+	xterm_titles = "notitles" not in settings.features
+	if xterm_titles:
+		xtermTitle("emerge")
+
+	if "--digest" in myopts:
+		os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
+		# Reload the whole config from scratch so that the portdbapi internal
+		# config is updated with new FEATURES.
+		settings, trees, mtimedb = load_emerge_config(trees=trees)
+		portdb = trees[settings["ROOT"]]["porttree"].dbapi
+
+	adjust_configs(myopts, trees)
+	apply_priorities(settings)
+
+	if myaction == 'version':
+		writemsg_stdout(getportageversion(
+			settings["PORTDIR"], settings["ROOT"],
+			settings.profile_path, settings["CHOST"],
+			trees[settings["ROOT"]]["vartree"].dbapi) + '\n', noiselevel=-1)
+		return 0
+	elif myaction == 'help':
+		_emerge.help.help(myopts, portage.output.havecolor)
+		return 0
+
+	spinner = stdout_spinner()
+	if "candy" in settings.features:
+		spinner.update = spinner.update_scroll
+
+	if "--quiet" not in myopts:
+		portage.deprecated_profile_check(settings=settings)
+		if portage.const._ENABLE_REPO_NAME_WARN:
+			# Bug #248603 - Disable warnings about missing
+			# repo_name entries for stable branch.
+			repo_name_check(trees)
+		repo_name_duplicate_check(trees)
+		config_protect_check(trees)
+	check_procfs()
+
+	if "getbinpkg" in settings.features:
+		myopts["--getbinpkg"] = True
+
+	if "--getbinpkgonly" in myopts:
+		myopts["--getbinpkg"] = True
+
+	if "--getbinpkgonly" in myopts:
+		myopts["--usepkgonly"] = True
+
+	if "--getbinpkg" in myopts:
+		myopts["--usepkg"] = True
+
+	if "--usepkgonly" in myopts:
+		myopts["--usepkg"] = True
+
+	if "buildpkg" in settings.features or "--buildpkgonly" in myopts:
+		myopts["--buildpkg"] = True
+
+	if "--buildpkgonly" in myopts:
+		# --buildpkgonly will not merge anything, so
+		# it cancels all binary package options.
+		for opt in ("--getbinpkg", "--getbinpkgonly",
+			"--usepkg", "--usepkgonly"):
+			myopts.pop(opt, None)
+
+	for mytrees in trees.values():
+		mydb = mytrees["porttree"].dbapi
+		# Freeze the portdbapi for performance (memoize all xmatch results).
+		mydb.freeze()
+
+		if myaction in ('search', None) and \
+			"--usepkg" in myopts:
+			# Populate the bintree with current --getbinpkg setting.
+			# This needs to happen before expand_set_arguments(), in case
+			# any sets use the bintree.
+			mytrees["bintree"].populate(
+				getbinpkgs="--getbinpkg" in myopts)
+
+	del mytrees, mydb
+
+	if "moo" in myfiles:
+		print("""
+
+  Larry loves Gentoo (""" + platform.system() + """)
+
+ _______________________
+< Have you mooed today? >
+ -----------------------
+        \   ^__^
+         \  (oo)\_______
+            (__)\       )\/\ 
+                ||----w |
+                ||     ||
+
+""")
+
+	for x in myfiles:
+		ext = os.path.splitext(x)[1]
+		if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
+			print(colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n"))
+			break
+
+	root_config = trees[settings["ROOT"]]["root_config"]
+	if myaction == "list-sets":
+		writemsg_stdout("".join("%s\n" % s for s in sorted(root_config.sets)))
+		return os.EX_OK
+
+	ensure_required_sets(trees)
+
+	# only expand sets for actions taking package arguments
+	oldargs = myfiles[:]
+	if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
+		myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
+		if retval != os.EX_OK:
+			return retval
+
+		# Need to handle empty sets specially, otherwise emerge will react 
+		# with the help message for empty argument lists
+		if oldargs and not myfiles:
+			print("emerge: no targets left after set expansion")
+			return 0
+
+	if ("--tree" in myopts) and ("--columns" in myopts):
+		print("emerge: can't specify both of \"--tree\" and \"--columns\".")
+		return 1
+
+	if '--emptytree' in myopts and '--noreplace' in myopts:
+		writemsg_level("emerge: can't specify both of " + \
+			"\"--emptytree\" and \"--noreplace\".\n",
+			level=logging.ERROR, noiselevel=-1)
+		return 1
+
+	if ("--quiet" in myopts):
+		spinner.update = spinner.update_quiet
+		portage.util.noiselimit = -1
+
+	if "--fetch-all-uri" in myopts:
+		myopts["--fetchonly"] = True
+
+	if "--skipfirst" in myopts and "--resume" not in myopts:
+		myopts["--resume"] = True
+
+	# Allow -p to remove --ask
+	if "--pretend" in myopts:
+		myopts.pop("--ask", None)
+
+	# forbid --ask when not in a terminal
+	# note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
+	if ("--ask" in myopts) and (not sys.stdin.isatty()):
+		portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
+			noiselevel=-1)
+		return 1
+
+	if settings.get("PORTAGE_DEBUG", "") == "1":
+		spinner.update = spinner.update_quiet
+		portage.util.noiselimit = 0
+		if "python-trace" in settings.features:
+			import portage.debug as portage_debug
+			portage_debug.set_trace(True)
+
+	if not ("--quiet" in myopts):
+		if '--nospinner' in myopts or \
+			settings.get('TERM') == 'dumb' or \
+			not sys.stdout.isatty():
+			spinner.update = spinner.update_basic
+
+	if "--debug" in myopts:
+		print("myaction", myaction)
+		print("myopts", myopts)
+
+	if not myaction and not myfiles and "--resume" not in myopts:
+		_emerge.help.help(myopts, portage.output.havecolor)
+		return 1
+
+	pretend = "--pretend" in myopts
+	fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
+	buildpkgonly = "--buildpkgonly" in myopts
+
+	# check if root user is the current user for the actions where emerge needs this
+	if portage.secpass < 2:
+		# We've already allowed "--version" and "--help" above.
+		if "--pretend" not in myopts and myaction not in ("search","info"):
+			need_superuser = myaction in ('clean', 'depclean', 'deselect',
+				'prune', 'unmerge') or not \
+				(fetchonly or \
+				(buildpkgonly and portage.secpass >= 1) or \
+				myaction in ("metadata", "regen", "sync"))
+			if portage.secpass < 1 or \
+				need_superuser:
+				if need_superuser:
+					access_desc = "superuser"
+				else:
+					access_desc = "portage group"
+				# Always show portage_group_warning() when only portage group
+				# access is required but the user is not in the portage group.
+				from portage.data import portage_group_warning
+				if "--ask" in myopts:
+					writemsg_stdout("This action requires %s access...\n" % \
+						(access_desc,), noiselevel=-1)
+					if portage.secpass < 1 and not need_superuser:
+						portage_group_warning()
+					if userquery("Would you like to add --pretend to options?",
+						"--ask-enter-invalid" in myopts) == "No":
+						return 1
+					myopts["--pretend"] = True
+					del myopts["--ask"]
+				else:
+					sys.stderr.write(("emerge: %s access is required\n") \
+						% access_desc)
+					if portage.secpass < 1 and not need_superuser:
+						portage_group_warning()
+					return 1
+
+	# Disable emergelog for everything except build or unmerge operations.
+	# This helps minimize parallel emerge.log entries that can confuse log
+	# parsers like genlop.
+	disable_emergelog = False
+	for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
+		if x in myopts:
+			disable_emergelog = True
+			break
+	if myaction in ("search", "info"):
+		disable_emergelog = True
+
+	_emerge.emergelog._disable = disable_emergelog
+
+	if not disable_emergelog:
+		if 'EMERGE_LOG_DIR' in settings:
+			try:
+				# At least the parent needs to exist for the lock file.
+				portage.util.ensure_dirs(settings['EMERGE_LOG_DIR'])
+			except portage.exception.PortageException as e:
+				writemsg_level("!!! Error creating directory for " + \
+					"EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
+					(settings['EMERGE_LOG_DIR'], e),
+					noiselevel=-1, level=logging.ERROR)
+			else:
+				_emerge.emergelog._emerge_log_dir = settings["EMERGE_LOG_DIR"]
+
+	if not "--pretend" in myopts:
+		emergelog(xterm_titles, "Started emerge on: "+\
+			_unicode_decode(
+				time.strftime("%b %d, %Y %H:%M:%S", time.localtime()),
+				encoding=_encodings['content'], errors='replace'))
+		myelogstr=""
+		if myopts:
+			myelogstr=" ".join(myopts)
+		if myaction:
+			myelogstr+=" "+myaction
+		if myfiles:
+			myelogstr += " " + " ".join(oldargs)
+		emergelog(xterm_titles, " *** emerge " + myelogstr)
+	del oldargs
+
+	def emergeexitsig(signum, frame):
+		signal.signal(signal.SIGINT, signal.SIG_IGN)
+		signal.signal(signal.SIGTERM, signal.SIG_IGN)
+		portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
+		sys.exit(128 + signum)
+	signal.signal(signal.SIGINT, emergeexitsig)
+	signal.signal(signal.SIGTERM, emergeexitsig)
+
+	def emergeexit():
+		"""This gets out final log message in before we quit."""
+		if "--pretend" not in myopts:
+			emergelog(xterm_titles, " *** terminating.")
+		if xterm_titles:
+			xtermTitleReset()
+	portage.atexit_register(emergeexit)
+
+	if myaction in ("config", "metadata", "regen", "sync"):
+		if "--pretend" in myopts:
+			sys.stderr.write(("emerge: The '%s' action does " + \
+				"not support '--pretend'.\n") % myaction)
+			return 1
+
+	if "sync" == myaction:
+		return action_sync(settings, trees, mtimedb, myopts, myaction)
+	elif "metadata" == myaction:
+		action_metadata(settings, portdb, myopts)
+	elif myaction=="regen":
+		validate_ebuild_environment(trees)
+		return action_regen(settings, portdb, myopts.get("--jobs"),
+			myopts.get("--load-average"))
+	# CONFIG action
+	elif "config"==myaction:
+		validate_ebuild_environment(trees)
+		action_config(settings, trees, myopts, myfiles)
+
+	# SEARCH action
+	elif "search"==myaction:
+		validate_ebuild_environment(trees)
+		action_search(trees[settings["ROOT"]]["root_config"],
+			myopts, myfiles, spinner)
+
+	elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
+		validate_ebuild_environment(trees)
+		rval = action_uninstall(settings, trees, mtimedb["ldpath"],
+			myopts, myaction, myfiles, spinner)
+		if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
+			post_emerge(myaction, myopts, myfiles, settings["ROOT"],
+				trees, mtimedb, rval)
+		return rval
+
+	elif myaction == 'info':
+
+		# Ensure atoms are valid before calling unmerge().
+		vardb = trees[settings["ROOT"]]["vartree"].dbapi
+		portdb = trees[settings["ROOT"]]["porttree"].dbapi
+		bindb = trees[settings["ROOT"]]["bintree"].dbapi
+		valid_atoms = []
+		for x in myfiles:
+			if is_valid_package_atom(x):
+				try:
+					#look at the installed files first, if there is no match
+					#look at the ebuilds, since EAPI 4 allows running pkg_info
+					#on non-installed packages
+					valid_atom = dep_expand(x, mydb=vardb, settings=settings)
+					if valid_atom.cp.split("/")[0] == "null":
+						valid_atom = dep_expand(x, mydb=portdb, settings=settings)
+					if valid_atom.cp.split("/")[0] == "null" and "--usepkg" in myopts:
+						valid_atom = dep_expand(x, mydb=bindb, settings=settings)
+					valid_atoms.append(valid_atom)
+				except portage.exception.AmbiguousPackageName as e:
+					msg = "The short ebuild name \"" + x + \
+						"\" is ambiguous.  Please specify " + \
+						"one of the following " + \
+						"fully-qualified ebuild names instead:"
+					for line in textwrap.wrap(msg, 70):
+						writemsg_level("!!! %s\n" % (line,),
+							level=logging.ERROR, noiselevel=-1)
+					for i in e.args[0]:
+						writemsg_level("    %s\n" % colorize("INFORM", i),
+							level=logging.ERROR, noiselevel=-1)
+					writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
+					return 1
+				continue
+			msg = []
+			msg.append("'%s' is not a valid package atom." % (x,))
+			msg.append("Please check ebuild(5) for full details.")
+			writemsg_level("".join("!!! %s\n" % line for line in msg),
+				level=logging.ERROR, noiselevel=-1)
+			return 1
+
+		return action_info(settings, trees, myopts, valid_atoms)
+
+	# "update", "system", or just process files:
+	else:
+		validate_ebuild_environment(trees)
+
+		for x in myfiles:
+			if x.startswith(SETPREFIX) or \
+				is_valid_package_atom(x, allow_repo=True):
+				continue
+			if x[:1] == os.sep:
+				continue
+			try:
+				os.lstat(x)
+				continue
+			except OSError:
+				pass
+			msg = []
+			msg.append("'%s' is not a valid package atom." % (x,))
+			msg.append("Please check ebuild(5) for full details.")
+			writemsg_level("".join("!!! %s\n" % line for line in msg),
+				level=logging.ERROR, noiselevel=-1)
+			return 1
+
+		if "--pretend" not in myopts:
+			display_news_notification(root_config, myopts)
+		retval = action_build(settings, trees, mtimedb,
+			myopts, myaction, myfiles, spinner)
+		post_emerge(myaction, myopts, myfiles, settings["ROOT"],
+			trees, mtimedb, retval)
+
+		return retval

diff --git a/portage_with_autodep/pym/_emerge/resolver/__init__.py b/portage_with_autodep/pym/_emerge/resolver/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/_emerge/resolver/backtracking.py b/portage_with_autodep/pym/_emerge/resolver/backtracking.py
new file mode 100644
index 0000000..dcdaee0
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/backtracking.py
@@ -0,0 +1,197 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+
+class BacktrackParameter(object):
+
+	__slots__ = (
+		"needed_unstable_keywords", "runtime_pkg_mask", "needed_use_config_changes", "needed_license_changes",
+		"rebuild_list", "reinstall_list", "needed_p_mask_changes"
+	)
+
+	def __init__(self):
+		self.needed_unstable_keywords = set()
+		self.needed_p_mask_changes = set()
+		self.runtime_pkg_mask = {}
+		self.needed_use_config_changes = {}
+		self.needed_license_changes = {}
+		self.rebuild_list = set()
+		self.reinstall_list = set()
+
+	def __deepcopy__(self, memo=None):
+		if memo is None:
+			memo = {}
+		result = BacktrackParameter()
+		memo[id(self)] = result
+
+		#Shallow copies are enough here, as we only need to ensure that nobody adds stuff
+		#to our sets and dicts. The existing content is immutable.
+		result.needed_unstable_keywords = copy.copy(self.needed_unstable_keywords)
+		result.needed_p_mask_changes = copy.copy(self.needed_p_mask_changes)
+		result.runtime_pkg_mask = copy.copy(self.runtime_pkg_mask)
+		result.needed_use_config_changes = copy.copy(self.needed_use_config_changes)
+		result.needed_license_changes = copy.copy(self.needed_license_changes)
+		result.rebuild_list = copy.copy(self.rebuild_list)
+		result.reinstall_list = copy.copy(self.reinstall_list)
+
+		return result
+
+	def __eq__(self, other):
+		return self.needed_unstable_keywords == other.needed_unstable_keywords and \
+			self.needed_p_mask_changes == other.needed_p_mask_changes and \
+			self.runtime_pkg_mask == other.runtime_pkg_mask and \
+			self.needed_use_config_changes == other.needed_use_config_changes and \
+			self.needed_license_changes == other.needed_license_changes and \
+			self.rebuild_list == other.rebuild_list and \
+			self.reinstall_list == other.reinstall_list
+
+
+class _BacktrackNode:
+
+	__slots__ = (
+		"parameter", "depth", "mask_steps", "terminal",
+	)
+
+	def __init__(self, parameter=BacktrackParameter(), depth=0, mask_steps=0, terminal=True):
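+		# Note: the mutable default parameter is shared between instances;
+		# this is safe here only because feedback paths always mutate deep
+		# copies, never a node's parameter in place.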
+		self.parameter = parameter
+		self.depth = depth
+		self.mask_steps = mask_steps
+		self.terminal = terminal
+
+	def __eq__(self, other):
+		return self.parameter == other.parameter
+
+
+class Backtracker(object):
+
+	__slots__ = (
+		"_max_depth", "_unexplored_nodes", "_current_node", "_nodes", "_root",
+	)
+
+	def __init__(self, max_depth):
+		self._max_depth = max_depth
+		self._unexplored_nodes = []
+		self._current_node = None
+		self._nodes = []
+
+		self._root = _BacktrackNode()
+		self._add(self._root)
+
+
+	def _add(self, node, explore=True):
+		"""
+		Adds a newly computed backtrack parameter. Makes sure that it doesn't already exist and
+		that we don't backtrack deeper than we are allowed by --backtrack.
+		"""
+		if node.mask_steps <= self._max_depth and node not in self._nodes:
+			if explore:
+				self._unexplored_nodes.append(node)
+			self._nodes.append(node)
+
+
+	def get(self):
+		"""
+		Returns a backtrack parameter. The backtrack graph is explored depth-first.
+		"""
+		if self._unexplored_nodes:
+			node = self._unexplored_nodes.pop()
+			self._current_node = node
+			return copy.deepcopy(node.parameter)
+		else:
+			return None
+
+
+	def __len__(self):
+		return len(self._unexplored_nodes)
+
+
+	def _feedback_slot_conflict(self, conflict_data):
+		for pkg, parent_atoms in conflict_data:
+			new_node = copy.deepcopy(self._current_node)
+			new_node.depth += 1
+			new_node.mask_steps += 1
+			new_node.terminal = False
+			new_node.parameter.runtime_pkg_mask.setdefault(
+				pkg, {})["slot conflict"] = parent_atoms
+			self._add(new_node)
+
+
+	def _feedback_missing_dep(self, dep):
+		new_node = copy.deepcopy(self._current_node)
+		new_node.depth += 1
+		new_node.mask_steps += 1
+		new_node.terminal = False
+
+		new_node.parameter.runtime_pkg_mask.setdefault(
+			dep.parent, {})["missing dependency"] = \
+				set([(dep.parent, dep.root, dep.atom)])
+
+		self._add(new_node)
+
+
+	def _feedback_config(self, changes, explore=True):
+		"""
+		Handle config changes. Config changes don't count toward the maximum backtrack depth.
+		"""
+		new_node = copy.deepcopy(self._current_node)
+		new_node.depth += 1
+		para = new_node.parameter
+
+		for change, data in changes.items():
+			if change == "needed_unstable_keywords":
+				para.needed_unstable_keywords.update(data)
+			elif change == "needed_p_mask_changes":
+				para.needed_p_mask_changes.update(data)
+			elif change == "needed_license_changes":
+				for pkg, missing_licenses in data:
+					para.needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
+			elif change == "needed_use_config_changes":
+				for pkg, (new_use, new_changes) in data:
+					para.needed_use_config_changes[pkg] = (new_use, new_changes)
+			elif change == "rebuild_list":
+				para.rebuild_list.update(data)
+			elif change == "reinstall_list":
+				para.reinstall_list.update(data)
+
+		self._add(new_node, explore=explore)
+		self._current_node = new_node
+
+
+	def feedback(self, infos):
+		"""
+		Takes information from the depgraph and computes new backtrack parameters to try.
+		"""
+		assert self._current_node is not None, "call feedback() only after get() was called"
+
+		#Not all config changes require a restart, which is why they can appear together
+		#with other conflicts.
+		if "config" in infos:
+			self._feedback_config(infos["config"], explore=(len(infos)==1))
+
+		#There is at most one of the following types of conflicts for a given restart.
+		if "slot conflict" in infos:
+			self._feedback_slot_conflict(infos["slot conflict"])
+		elif "missing dependency" in infos:
+			self._feedback_missing_dep(infos["missing dependency"])
+
+
+	def backtracked(self):
+		"""
+		If we didn't backtrack, there is only the root.
+		"""
+		return len(self._nodes) > 1
+
+
+	def get_best_run(self):
+		"""
+		Like get(), but returns the backtrack parameter that has as many config changes as possible
+		and no masks. This makes --autounmask effective, but prevents confusing error messages
+		with "masked by backtracking".
+		"""
+		best_node = self._root
+		for node in self._nodes:
+			if node.terminal and node.depth > best_node.depth:
+				best_node = node
+
+		return copy.deepcopy(best_node.parameter)
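+
+# A minimal usage sketch (assumed driver loop; run_depgraph is a
+# hypothetical helper, not part of this module):
+#
+#     backtracker = Backtracker(max_depth=10)
+#     while backtracker:                   # unexplored nodes remain
+#         params = backtracker.get()       # parameters for the next attempt
+#         infos = run_depgraph(params)     # returns conflict info on failure
+#         if infos:
+#             backtracker.feedback(infos)
+#     best = backtracker.get_best_run()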

diff --git a/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py b/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py
new file mode 100644
index 0000000..d113c5e
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py
@@ -0,0 +1,267 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from itertools import chain, product
+import logging
+
+from portage.dep import use_reduce, extract_affecting_use, check_required_use, get_required_use_flags
+from portage.exception import InvalidDependString
+from portage.output import colorize
+from portage.util import writemsg_level
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+
+class circular_dependency_handler(object):
+	
+	def __init__(self, depgraph, graph):
+		self.depgraph = depgraph
+		self.graph = graph
+		self.all_parent_atoms = depgraph._dynamic_config._parent_atoms
+
+		if "--debug" in depgraph._frozen_config.myopts:
+			# Show this debug output before doing the calculations
+			# that follow, so at least we have this debug info
+			# if we happen to hit a bug later.
+			writemsg_level("\n\ncircular dependency graph:\n\n",
+				level=logging.DEBUG, noiselevel=-1)
+			self.debug_print()
+
+		self.cycles, self.shortest_cycle = self._find_cycles()
+		#Guess if it is a large cluster of cycles. This usually requires
+		#a global USE change.
+		self.large_cycle_count = len(self.cycles) > 3
+		self.merge_list = self._prepare_reduced_merge_list()
+		#The digraph dump
+		self.circular_dep_message = self._prepare_circular_dep_message()
+		#Suggestions, in machine and human readable form
+		self.solutions, self.suggestions = self._find_suggestions()
+
+	def _find_cycles(self):
+		shortest_cycle = None
+		cycles = self.graph.get_cycles(ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
+		for cycle in cycles:
+			if not shortest_cycle or len(cycle) < len(shortest_cycle):
+				shortest_cycle = cycle
+		return cycles, shortest_cycle
+
+	def _prepare_reduced_merge_list(self):
+		"""
+		Create a merge list to be displayed by depgraph.display().
+		This merge list contains only packages involved in
+		the circular deps.
+		"""
+		display_order = []
+		tempgraph = self.graph.copy()
+		while tempgraph:
+			nodes = tempgraph.leaf_nodes()
+			if not nodes:
+				node = tempgraph.order[0]
+			else:
+				node = nodes[0]
+			display_order.append(node)
+			tempgraph.remove(node)
+		display_order.reverse()
+		return display_order
+
+	def _prepare_circular_dep_message(self):
+		"""
+		Like digraph.debug_print(), but prints only the shortest cycle.
+		"""
+		if not self.shortest_cycle:
+			return None
+
+		msg = []
+		indent = ""
+		for pos, pkg in enumerate(self.shortest_cycle):
+			parent = self.shortest_cycle[pos-1]
+			priorities = self.graph.nodes[parent][0][pkg]
+			if pos > 0:
+				msg.append(indent + "%s (%s)" % (pkg, priorities[-1],))
+			else:
+				msg.append(indent + "%s depends on" % pkg)
+			indent += " "
+
+		pkg = self.shortest_cycle[0]
+		parent = self.shortest_cycle[-1]
+		priorities = self.graph.nodes[parent][0][pkg]
+		msg.append(indent + "%s (%s)" % (pkg, priorities[-1],))
+
+		return "\n".join(msg)
+
+	def _get_use_mask_and_force(self, pkg):
+		return pkg.use.mask, pkg.use.force
+
+	def _get_autounmask_changes(self, pkg):
+		needed_use_config_change = self.depgraph._dynamic_config._needed_use_config_changes.get(pkg)
+		if needed_use_config_change is None:
+			return frozenset()
+
+		use, changes = needed_use_config_change
+		return frozenset(changes.keys())
+
+	def _find_suggestions(self):
+		if not self.shortest_cycle:
+			return None, None
+
+		suggestions = []
+		final_solutions = {}
+
+		for pos, pkg in enumerate(self.shortest_cycle):
+			parent = self.shortest_cycle[pos-1]
+			priorities = self.graph.nodes[parent][0][pkg]
+			parent_atoms = self.all_parent_atoms.get(pkg)
+
+			if priorities[-1].buildtime:
+				dep = parent.metadata["DEPEND"]
+			elif priorities[-1].runtime:
+				dep = parent.metadata["RDEPEND"]
+
+			for ppkg, atom in parent_atoms:
+				if ppkg == parent:
+					changed_parent = ppkg
+					parent_atom = atom.unevaluated_atom
+					break
+
+			try:
+				affecting_use = extract_affecting_use(dep, parent_atom,
+					eapi=parent.metadata["EAPI"])
+			except InvalidDependString:
+				if not parent.installed:
+					raise
+				affecting_use = set()
+
+			# Make sure we don't try to change a flag that is
+			#	a) in use.mask or use.force
+			#	b) changed by autounmask
+			
+			usemask, useforce = self._get_use_mask_and_force(parent)
+			autounmask_changes = self._get_autounmask_changes(parent)
+			untouchable_flags = frozenset(chain(usemask, useforce, autounmask_changes))
+
+			affecting_use.difference_update(untouchable_flags)
+
+			#If any of the flags we're going to touch are in REQUIRED_USE, add all
+			#other flags in REQUIRED_USE to affecting_use, so that we don't lose any solutions.
+			required_use_flags = get_required_use_flags(parent.metadata["REQUIRED_USE"])
+
+			if affecting_use.intersection(required_use_flags):
+				# TODO: Find out exactly which REQUIRED_USE flags are
+				# entangled with affecting_use. We have to limit the
+				# number of flags since the number of loops is
+				# exponentially related (see bug #374397).
+				total_flags = set()
+				total_flags.update(affecting_use, required_use_flags)
+				total_flags.difference_update(untouchable_flags)
+				if len(total_flags) <= 10:
+					affecting_use = total_flags
+
+			affecting_use = tuple(affecting_use)
+
+			if not affecting_use:
+				continue
+
+			#We iterate over all possible settings of these use flags and gather
+			#a set of possible changes
+			#TODO: Use the information encoded in REQUIRED_USE
+			solutions = set()
+			for use_state in product(("disabled", "enabled"),
+				repeat=len(affecting_use)):
+				current_use = set(self.depgraph._pkg_use_enabled(parent))
+				for flag, state in zip(affecting_use, use_state):
+					if state == "enabled":
+						current_use.add(flag)
+					else:
+						current_use.discard(flag)
+				try:
+					reduced_dep = use_reduce(dep,
+						uselist=current_use, flat=True)
+				except InvalidDependString:
+					if not parent.installed:
+						raise
+					reduced_dep = None
+
+				if reduced_dep is not None and \
+					parent_atom not in reduced_dep:
+					#We found an assignment that removes the atom from 'dep'.
+					#Make sure it doesn't conflict with REQUIRED_USE.
+					required_use = parent.metadata["REQUIRED_USE"]
+
+					if check_required_use(required_use, current_use, parent.iuse.is_valid_flag):
+						use = self.depgraph._pkg_use_enabled(parent)
+						solution = set()
+						for flag, state in zip(affecting_use, use_state):
+							if state == "enabled" and \
+								flag not in use:
+								solution.add((flag, True))
+							elif state == "disabled" and \
+								flag in use:
+								solution.add((flag, False))
+						solutions.add(frozenset(solution))
+
+			for solution in solutions:
+				ignore_solution = False
+				for other_solution in solutions:
+					if solution is other_solution:
+						continue
+					if solution.issuperset(other_solution):
+						ignore_solution = True
+				if ignore_solution:
+					continue
+
+				#Check if a USE change conflicts with use requirements of the parents.
+				#If a requirement is hard, ignore the suggestion.
+				#If the requirement is conditional, warn the user that other changes might be needed.
+				followup_change = False
+				parent_parent_atoms = self.depgraph._dynamic_config._parent_atoms.get(changed_parent)
+				for ppkg, atom in parent_parent_atoms:
+
+					atom = atom.unevaluated_atom
+					if not atom.use:
+						continue
+
+					for flag, state in solution:
+						if flag in atom.use.enabled or flag in atom.use.disabled:
+							ignore_solution = True
+							break
+						elif atom.use.conditional:
+							for flags in atom.use.conditional.values():
+								if flag in flags:
+									followup_change = True
+									break
+
+					if ignore_solution:
+						break
+
+				if ignore_solution:
+					continue
+
+				changes = []
+				for flag, state in solution:
+					if state:
+						changes.append(colorize("red", "+"+flag))
+					else:
+						changes.append(colorize("blue", "-"+flag))
+				msg = "- %s (Change USE: %s)\n" \
+					% (parent.cpv, " ".join(changes))
+				if followup_change:
+					msg += " (This change might require USE changes on parent packages.)"
+				suggestions.append(msg)
+				final_solutions.setdefault(pkg, set()).add(solution)
+
+		return final_solutions, suggestions
+
+	def debug_print(self):
+		"""
+		Create a copy of the digraph, prune all root nodes,
+		and call the debug_print() method.
+		"""
+		graph = self.graph.copy()
+		while True:
+			root_nodes = graph.root_nodes(
+				ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
+			if not root_nodes:
+				break
+			graph.difference_update(root_nodes)
+
+		graph.debug_print()

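The suggestion search in _find_suggestions() above is a brute-force
enumeration: itertools.product tries every enabled/disabled assignment of
the affecting flags, which is why the candidate flag set is capped at 10
(2**10 assignments, see bug #374397). A minimal standalone sketch of the
pattern, assuming a hypothetical predicate in place of the use_reduce()
test:

    from itertools import product

    affecting_use = ("ssl", "kerberos")  # hypothetical flags
    current_use = {"ssl"}                # hypothetical enabled set

    def atom_still_pulled_in(use):       # stand-in for the use_reduce() check
        return "ssl" in use

    solutions = set()
    for use_state in product(("disabled", "enabled"), repeat=len(affecting_use)):
        candidate = set(current_use)
        for flag, state in zip(affecting_use, use_state):
            if state == "enabled":
                candidate.add(flag)
            else:
                candidate.discard(flag)
        if not atom_still_pulled_in(candidate):
            # keep only the flags whose value actually changed
            solutions.add(frozenset(
                (flag, state == "enabled")
                for flag, state in zip(affecting_use, use_state)
                if (flag in current_use) != (state == "enabled")))

This yields {('ssl', False)} and {('ssl', False), ('kerberos', True)}; the
superset filter in _find_suggestions() then discards the second, larger
change.
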
diff --git a/portage_with_autodep/pym/_emerge/resolver/output.py b/portage_with_autodep/pym/_emerge/resolver/output.py
new file mode 100644
index 0000000..05e316a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/output.py
@@ -0,0 +1,888 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Resolver output display operation.
+"""
+
+__all__ = (
+	"Display",
+	)
+
+import sys
+
+from portage import os
+from portage import _unicode_decode
+from portage.dbapi.dep_expand import dep_expand
+from portage.const import PORTAGE_PACKAGE_ATOM
+from portage.dep import cpvequal, match_from_list
+from portage.exception import InvalidDependString
+from portage.output import ( blue, bold, colorize, create_color_func,
+	darkblue, darkgreen, green, nc_len, red, teal, turquoise, yellow )
+bad = create_color_func("BAD")
+from portage.util import writemsg_stdout, writemsg_level
+from portage.versions import best, catpkgsplit, cpv_getkey
+
+from _emerge.Blocker import Blocker
+from _emerge.create_world_atom import create_world_atom
+from _emerge.resolver.output_helpers import ( _DisplayConfig, _tree_display,
+	_PackageCounters, _create_use_string, _format_size, _calc_changelog, PkgInfo)
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+
+class Display(object):
+	"""Formats and outputs the depgrah supplied it for merge/re-merge, etc.
+
+	__call__()
+	@param depgraph: list
+	@param favorites: defaults to []
+	@param verbosity: integer, defaults to None
+	"""
+
+	def __init__(self):
+		self.changelogs = []
+		self.print_msg = []
+		self.blockers = []
+		self.counters = _PackageCounters()
+		self.resolver = None
+		self.resolved = None
+		self.vardb = None
+		self.portdb = None
+		self.verboseadd = ''
+		self.oldlp = None
+		self.myfetchlist = None
+		self.indent = ''
+		self.is_new = True
+		self.cur_use = None
+		self.cur_iuse = None
+		self.old_use = ''
+		self.old_iuse = ''
+		self.use_expand = None
+		self.use_expand_hidden = None
+		self.pkgsettings = None
+		self.forced_flags = None
+		self.newlp = None
+		self.conf = None
+		self.blocker_style = None
+
+
+	def _blockers(self, pkg, fetch_symbol):
+		"""Processes pkg for blockers and adds colorized strings to
+		self.print_msg and self.blockers
+
+		@param pkg: _emerge.Package instance
+		@param fetch_symbol: string
+		@rtype: bool
+		Modifies class globals: self.blocker_style, self.resolved,
+			self.print_msg
+		"""
+		if pkg.satisfied:
+			self.blocker_style = "PKG_BLOCKER_SATISFIED"
+			addl = "%s  %s  " % (colorize(self.blocker_style, "b"),
+				fetch_symbol)
+		else:
+			self.blocker_style = "PKG_BLOCKER"
+			addl = "%s  %s  " % (colorize(self.blocker_style, "B"),
+				fetch_symbol)
+		addl += self.empty_space_in_brackets()
+		self.resolved = dep_expand(
+			str(pkg.atom).lstrip("!"), mydb=self.vardb,
+			settings=self.pkgsettings
+			)
+		if self.conf.columns and self.conf.quiet:
+			addl += " " + colorize(self.blocker_style, str(self.resolved))
+		else:
+			addl = "[%s %s] %s%s" % \
+				(colorize(self.blocker_style, "blocks"),
+				addl, self.indent,
+				colorize(self.blocker_style, str(self.resolved))
+				)
+		block_parents = self.conf.blocker_parents.parent_nodes(pkg)
+		block_parents = set([pnode[2] for pnode in block_parents])
+		block_parents = ", ".join(block_parents)
+		if self.resolved != pkg[2]:
+			addl += colorize(self.blocker_style,
+				" (\"%s\" is blocking %s)") % \
+				(str(pkg.atom).lstrip("!"), block_parents)
+		else:
+			addl += colorize(self.blocker_style,
+				" (is blocking %s)") % block_parents
+		if isinstance(pkg, Blocker) and pkg.satisfied:
+			if self.conf.columns:
+				return True
+			self.print_msg.append(addl)
+		else:
+			self.blockers.append(addl)
+		return False
+
+
+	def _display_use(self, pkg, myoldbest, myinslotlist):
+		""" USE flag display
+
+		@param pkg: _emerge.Package instance
+		@param myoldbest: list of installed versions
+		@param myinslotlist: list of installed slots
+		Modifies class globals: self.forced_flags, self.cur_iuse,
+			self.old_iuse, self.old_use, self.use_expand
+		"""
+
+		self.forced_flags = set()
+		self.forced_flags.update(pkg.use.force)
+		self.forced_flags.update(pkg.use.mask)
+
+		self.cur_use = [flag for flag in self.conf.pkg_use_enabled(pkg) \
+			if flag in pkg.iuse.all]
+		self.cur_iuse = sorted(pkg.iuse.all)
+
+		if myoldbest and myinslotlist:
+			previous_cpv = myoldbest[0].cpv
+		else:
+			previous_cpv = pkg.cpv
+		if self.vardb.cpv_exists(previous_cpv):
+			previous_pkg = self.vardb.match_pkgs('=' + previous_cpv)[0]
+			self.old_iuse = sorted(previous_pkg.iuse.all)
+			self.old_use = previous_pkg.use.enabled
+			self.is_new = False
+		else:
+			self.old_iuse = []
+			self.old_use = []
+			self.is_new = True
+
+		self.old_use = [flag for flag in self.old_use if flag in self.old_iuse]
+
+		self.use_expand = pkg.use.expand
+		self.use_expand_hidden = pkg.use.expand_hidden
+		return
+
+	def include_mask_str(self):
+		return self.conf.verbosity > 1
+
+	def gen_mask_str(self, pkg):
+		"""
+		@param pkg: _emerge.Package instance
+		"""
+		hardmasked = pkg.isHardMasked()
+		mask_str = " "
+
+		if hardmasked:
+			mask_str = colorize("BAD", "#")
+		else:
+			keyword_mask = pkg.get_keyword_mask()
+
+			if keyword_mask is None:
+				pass
+			elif keyword_mask == "missing":
+				mask_str = colorize("BAD", "*")
+			else:
+				mask_str = colorize("WARN", "~")
+
+		return mask_str
+
+	def empty_space_in_brackets(self):
+		space = ""
+		if self.include_mask_str():
+			# add column for mask status
+			space += " "
+		return space
+
+	def map_to_use_expand(self, myvals, forced_flags=False,
+		remove_hidden=True):
+		"""Map use expand variables
+
+		@param myvals: list
+		@param forced_flags: bool
+		@param remove_hidden: bool
+		@rtype ret dictionary,
+			or (ret dict, forced dict) if forced_flags is True
+		"""
+		ret = {}
+		forced = {}
+		for exp in self.use_expand:
+			ret[exp] = []
+			forced[exp] = set()
+			for val in myvals[:]:
+				if val.startswith(exp.lower()+"_"):
+					if val in self.forced_flags:
+						forced[exp].add(val[len(exp)+1:])
+					ret[exp].append(val[len(exp)+1:])
+					myvals.remove(val)
+		ret["USE"] = myvals
+		forced["USE"] = [val for val in myvals \
+			if val in self.forced_flags]
+		if remove_hidden:
+			for exp in self.use_expand_hidden:
+				ret.pop(exp, None)
+		if forced_flags:
+			return ret, forced
+		return ret
+
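map_to_use_expand() above is essentially a prefix split: flags matching a
lowercased USE_EXPAND name plus "_" are grouped under that name with the
prefix stripped, and whatever remains stays under "USE". A minimal sketch
with illustrative variable names:

    def split_use_expand(flags, use_expand):
        ret = dict((exp, []) for exp in use_expand)
        for flag in list(flags):
            for exp in use_expand:
                prefix = exp.lower() + "_"
                if flag.startswith(prefix):
                    ret[exp].append(flag[len(prefix):])
                    flags.remove(flag)
                    break
        ret["USE"] = flags
        return ret

    # split_use_expand(["ssl", "video_cards_radeon", "linguas_de"],
    #     ["VIDEO_CARDS", "LINGUAS"])
    # -> {'VIDEO_CARDS': ['radeon'], 'LINGUAS': ['de'], 'USE': ['ssl']}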
+
+	def recheck_hidden(self, pkg):
+		""" Prevent USE_EXPAND_HIDDEN flags from being hidden if they
+		are the only thing that triggered reinstallation.
+
+		@param pkg: _emerge.Package instance
+		Modifies self.use_expand_hidden, self.use_expand, self.verboseadd
+		"""
+		reinst_flags_map = {}
+		reinstall_for_flags = self.conf.reinstall_nodes.get(pkg)
+		reinst_expand_map = None
+		if reinstall_for_flags:
+			reinst_flags_map = self.map_to_use_expand(
+				list(reinstall_for_flags), remove_hidden=False)
+			for k in list(reinst_flags_map):
+				if not reinst_flags_map[k]:
+					del reinst_flags_map[k]
+			if not reinst_flags_map.get("USE"):
+				reinst_expand_map = reinst_flags_map.copy()
+				reinst_expand_map.pop("USE", None)
+		if reinst_expand_map and \
+			not set(reinst_expand_map).difference(
+			self.use_expand_hidden):
+			self.use_expand_hidden = \
+				set(self.use_expand_hidden).difference(
+				reinst_expand_map)
+
+		cur_iuse_map, iuse_forced = \
+			self.map_to_use_expand(self.cur_iuse, forced_flags=True)
+		cur_use_map = self.map_to_use_expand(self.cur_use)
+		old_iuse_map = self.map_to_use_expand(self.old_iuse)
+		old_use_map = self.map_to_use_expand(self.old_use)
+
+		use_expand = sorted(self.use_expand)
+		use_expand.insert(0, "USE")
+
+		for key in use_expand:
+			if key in self.use_expand_hidden:
+				continue
+			self.verboseadd += _create_use_string(self.conf, key.upper(),
+				cur_iuse_map[key], iuse_forced[key],
+				cur_use_map[key], old_iuse_map[key],
+				old_use_map[key], self.is_new,
+				reinst_flags_map.get(key))
+		return
+
+
+	@staticmethod
+	def pkgprint(pkg_str, pkg_info):
+		"""Colorizes a string acording to pkg_info settings
+
+		@param pkg_str: string
+		@param pkg_info: dictionary
+		@rtype colorized string
+		"""
+		if pkg_info.merge:
+			if pkg_info.built:
+				if pkg_info.system:
+					return colorize("PKG_BINARY_MERGE_SYSTEM", pkg_str)
+				elif pkg_info.world:
+					return colorize("PKG_BINARY_MERGE_WORLD", pkg_str)
+				else:
+					return colorize("PKG_BINARY_MERGE", pkg_str)
+			else:
+				if pkg_info.system:
+					return colorize("PKG_MERGE_SYSTEM", pkg_str)
+				elif pkg_info.world:
+					return colorize("PKG_MERGE_WORLD", pkg_str)
+				else:
+					return colorize("PKG_MERGE", pkg_str)
+		elif pkg_info.operation == "uninstall":
+			return colorize("PKG_UNINSTALL", pkg_str)
+		else:
+			if pkg_info.system:
+				return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
+			elif pkg_info.world:
+				return colorize("PKG_NOMERGE_WORLD", pkg_str)
+			else:
+				return colorize("PKG_NOMERGE", pkg_str)
+
+
+	def verbose_size(self, pkg, repoadd_set, pkg_info):
+		"""Determines the size of the downloads required
+
+		@param pkg: _emerge.Package instance
+		@param repoadd_set: set of repos to add
+		@param pkg_info: dictionary
+		Modifies class globals: self.myfetchlist, self.counters.totalsize,
+			self.verboseadd, repoadd_set.
+		"""
+		mysize = 0
+		if pkg.type_name == "ebuild" and pkg_info.merge:
+			try:
+				myfilesdict = self.portdb.getfetchsizes(pkg.cpv,
+					useflags=pkg_info.use, myrepo=pkg.repo)
+			except InvalidDependString as e:
+				# FIXME: validate SRC_URI earlier
+				depstr, = self.portdb.aux_get(pkg.cpv,
+					["SRC_URI"], myrepo=pkg.repo)
+				show_invalid_depstring_notice(
+					pkg, depstr, str(e))
+				raise
+			if myfilesdict is None:
+				myfilesdict = "[empty/missing/bad digest]"
+			else:
+				for myfetchfile in myfilesdict:
+					if myfetchfile not in self.myfetchlist:
+						mysize += myfilesdict[myfetchfile]
+						self.myfetchlist.append(myfetchfile)
+				if pkg_info.ordered:
+					self.counters.totalsize += mysize
+			self.verboseadd += _format_size(mysize)
+
+		# overlay verbose
+		# assign index for a previous version in the same slot
+		slot_matches = self.vardb.match(pkg.slot_atom)
+		if slot_matches:
+			repo_name_prev = self.vardb.aux_get(slot_matches[0],
+				["repository"])[0]
+		else:
+			repo_name_prev = None
+
+		# now use the data to generate output
+		if pkg.installed or not slot_matches:
+			self.repoadd = self.conf.repo_display.repoStr(
+				pkg_info.repo_path_real)
+		else:
+			repo_path_prev = None
+			if repo_name_prev:
+				repo_path_prev = self.portdb.getRepositoryPath(
+					repo_name_prev)
+			if repo_path_prev == pkg_info.repo_path_real:
+				self.repoadd = self.conf.repo_display.repoStr(
+					pkg_info.repo_path_real)
+			else:
+				self.repoadd = "%s=>%s" % (
+					self.conf.repo_display.repoStr(repo_path_prev),
+					self.conf.repo_display.repoStr(pkg_info.repo_path_real))
+		if self.repoadd:
+			repoadd_set.add(self.repoadd)
+
+
+	@staticmethod
+	def convert_myoldbest(myoldbest):
+		"""converts and colorizes a version list to a string
+
+		@param myoldbest: list
+		@rtype string.
+		"""
+		# Convert myoldbest from a list to a string.
+		myoldbest_str = ""
+		if myoldbest:
+			versions = []
+			for pos, pkg in enumerate(myoldbest):
+				key = catpkgsplit(pkg.cpv)[2] + \
+					"-" + catpkgsplit(pkg.cpv)[3]
+				if key[-3:] == "-r0":
+					key = key[:-3]
+				versions.append(key)
+			myoldbest_str = blue("["+", ".join(versions)+"]")
+		return myoldbest_str
+
+
+	def set_interactive(self, pkg, ordered, addl):
+		"""Increments counters.interactive if the pkg is to
+		be merged and its metadata has interactive set True
+
+		@param pkg: _emerge.Package instance
+		@param ordered: boolean
+		@param addl: already defined string to add to
+		"""
+		if 'interactive' in pkg.metadata.properties and \
+			pkg.operation == 'merge':
+			addl = colorize("WARN", "I") + addl[1:]
+			if ordered:
+				self.counters.interactive += 1
+		return addl
+
+	def _set_non_root_columns(self, addl, pkg_info, pkg):
+		"""sets the indent level and formats the output
+
+		@param addl: already defined string to add to
+		@param pkg_info: dictionary
+		@param pkg: _emerge.Package instance
+		@rtype string
+		"""
+		if self.conf.quiet:
+			myprint = addl + " " + self.indent + \
+				self.pkgprint(pkg_info.cp, pkg_info)
+			myprint = myprint+darkblue(" "+pkg_info.ver)+" "
+			myprint = myprint+pkg_info.oldbest
+			myprint = myprint+darkgreen("to "+pkg.root)
+			self.verboseadd = None
+		else:
+			if not pkg_info.merge:
+				myprint = "[%s] %s%s" % \
+					(self.pkgprint(pkg_info.operation.ljust(13), pkg_info),
+					self.indent, self.pkgprint(pkg.cp, pkg_info))
+			else:
+				myprint = "[%s %s] %s%s" % \
+					(self.pkgprint(pkg.type_name, pkg_info), addl,
+					self.indent, self.pkgprint(pkg.cp, pkg_info))
+			if (self.newlp-nc_len(myprint)) > 0:
+				myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
+			myprint = myprint+"["+darkblue(pkg_info.ver)+"] "
+			if (self.oldlp-nc_len(myprint)) > 0:
+				myprint = myprint+" "*(self.oldlp-nc_len(myprint))
+			myprint = myprint+pkg_info.oldbest
+			myprint += darkgreen("to " + pkg.root)
+		return myprint
+
+
+	def _set_root_columns(self, addl, pkg_info, pkg):
+		"""sets the indent level and formats the output
+
+		@param addl: already defined string to add to
+		@param pkg_info: dictionary
+		@param pkg: _emerge.Package instance
+		@rtype string
+		Modifies self.verboseadd
+		"""
+		if self.conf.quiet:
+			myprint = addl + " " + self.indent + \
+				self.pkgprint(pkg_info.cp, pkg_info)
+			myprint = myprint+" "+green(pkg_info.ver)+" "
+			myprint = myprint+pkg_info.oldbest
+			self.verboseadd = None
+		else:
+			if not pkg_info.merge:
+				addl = self.empty_space_in_brackets()
+				myprint = "[%s%s] %s%s" % \
+					(self.pkgprint(pkg_info.operation.ljust(13), pkg_info),
+					addl, self.indent, self.pkgprint(pkg.cp, pkg_info))
+			else:
+				myprint = "[%s %s] %s%s" % \
+					(self.pkgprint(pkg.type_name, pkg_info), addl,
+					self.indent, self.pkgprint(pkg.cp, pkg_info))
+			if (self.newlp-nc_len(myprint)) > 0:
+				myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
+			myprint = myprint+green(" ["+pkg_info.ver+"] ")
+			if (self.oldlp-nc_len(myprint)) > 0:
+				myprint = myprint+(" "*(self.oldlp-nc_len(myprint)))
+			myprint += pkg_info.oldbest
+		return myprint
+
+
+	def _set_no_columns(self, pkg, pkg_info, addl):
+		"""prints pkg info without column indentation.
+
+		@param pkg: _emerge.Package instance
+		@param pkg_info: dictionary
+		@param addl: the current text to add for the next line to output
+		@rtype string
+		"""
+		if not pkg_info.merge:
+			addl = self.empty_space_in_brackets()
+			myprint = "[%s%s] %s%s %s" % \
+				(self.pkgprint(pkg_info.operation.ljust(13),
+				pkg_info), addl,
+				self.indent, self.pkgprint(pkg.cpv, pkg_info),
+				pkg_info.oldbest)
+		else:
+			myprint = "[%s %s] %s%s %s" % \
+				(self.pkgprint(pkg.type_name, pkg_info),
+				addl, self.indent,
+				self.pkgprint(pkg.cpv, pkg_info), pkg_info.oldbest)
+		return myprint
+
+
+	def _insert_slot(self, pkg, pkg_info, myinslotlist):
+		"""Adds slot info to the message
+
+		@returns addl: formatted slot info
+		Modifies self.counters.downgrades, self.counters.upgrades,
+			self.counters.binary
+		"""
+		addl = "   " + pkg_info.fetch_symbol
+		if not cpvequal(pkg.cpv,
+			best([pkg.cpv] + [x.cpv for x in myinslotlist])):
+			# Downgrade in slot
+			addl += turquoise("U")+blue("D")
+			if pkg_info.ordered:
+				self.counters.downgrades += 1
+				if pkg.type_name == "binary":
+					self.counters.binary += 1
+		else:
+			# Update in slot
+			addl += turquoise("U") + " "
+			if pkg_info.ordered:
+				self.counters.upgrades += 1
+				if pkg.type_name == "binary":
+					self.counters.binary += 1
+		return addl
+
+
+	def _new_slot(self, pkg, pkg_info):
+		"""New slot, mark it new.
+
+		@returns addl: formatted slot info
+		Modifies self.counters.newslot, self.counters.binary
+		"""
+		addl = " " + green("NS") + pkg_info.fetch_symbol + "  "
+		if pkg_info.ordered:
+			self.counters.newslot += 1
+			if pkg.type_name == "binary":
+				self.counters.binary += 1
+		return addl
+
+
+	def print_messages(self, show_repos):
+		"""Performs the actual output printing of the pre-formatted
+		messages
+
+		@param show_repos: bool.
+		"""
+		for msg in self.print_msg:
+			if isinstance(msg, basestring):
+				writemsg_stdout("%s\n" % (msg,), noiselevel=-1)
+				continue
+			myprint, self.verboseadd, repoadd = msg
+			if self.verboseadd:
+				myprint += " " + self.verboseadd
+			if show_repos and repoadd:
+				myprint += " " + teal("[%s]" % repoadd)
+			writemsg_stdout("%s\n" % (myprint,), noiselevel=-1)
+		return
+
+
+	def print_blockers(self):
+		"""Performs the actual output printing of the pre-formatted
+		blocker messages
+		"""
+		for pkg in self.blockers:
+			writemsg_stdout("%s\n" % (pkg,), noiselevel=-1)
+		return
+
+
+	def print_verbose(self, show_repos):
+		"""Prints the verbose output to std_out
+
+		@param show_repos: bool.
+		"""
+		writemsg_stdout('\n%s\n' % (self.counters,), noiselevel=-1)
+		if show_repos:
+			# Use _unicode_decode() to force unicode format string so
+			# that RepoDisplay.__unicode__() is called in python2.
+			writemsg_stdout(_unicode_decode("%s") % (self.conf.repo_display,),
+				noiselevel=-1)
+		return
+
+
+	def print_changelog(self):
+		"""Prints the changelog text to std_out
+		"""
+		writemsg_stdout('\n', noiselevel=-1)
+		for revision, text in self.changelogs:
+			writemsg_stdout(bold('*'+revision) + '\n' + text,
+				noiselevel=-1)
+		return
+
+
+	def get_display_list(self, mylist):
+		"""Determines the display list to process
+
+		@param mylist: list
+		@rtype list
+		Modifies self.counters.blocks, self.counters.blocks_satisfied
+		"""
+		unsatisfied_blockers = []
+		ordered_nodes = []
+		for pkg in mylist:
+			if isinstance(pkg, Blocker):
+				self.counters.blocks += 1
+				if pkg.satisfied:
+					ordered_nodes.append(pkg)
+					self.counters.blocks_satisfied += 1
+				else:
+					unsatisfied_blockers.append(pkg)
+			else:
+				ordered_nodes.append(pkg)
+		if self.conf.tree_display:
+			display_list = _tree_display(self.conf, ordered_nodes)
+		else:
+			display_list = [(pkg, 0, True) for pkg in ordered_nodes]
+		for pkg in unsatisfied_blockers:
+			display_list.append((pkg, 0, True))
+		return display_list
+
+
+	def set_pkg_info(self, pkg, ordered):
+		"""Sets various pkg_info dictionary variables
+
+		@param pkg: _emerge.Package instance
+		@param ordered: bool
+		@rtype pkg_info dictionary
+		Modifies self.counters.restrict_fetch,
+			self.counters.restrict_fetch_satisfied
+		"""
+		pkg_info = PkgInfo()
+		pkg_info.ordered = ordered
+		pkg_info.fetch_symbol = " "
+		pkg_info.operation = pkg.operation
+		pkg_info.merge = ordered and pkg_info.operation == "merge"
+		if not pkg_info.merge and pkg_info.operation == "merge":
+			pkg_info.operation = "nomerge"
+		pkg_info.built = pkg.type_name != "ebuild"
+		pkg_info.ebuild_path = None
+		pkg_info.repo_name = pkg.repo
+		if pkg.type_name == "ebuild":
+			pkg_info.ebuild_path = self.portdb.findname(
+				pkg.cpv, myrepo=pkg_info.repo_name)
+			if pkg_info.ebuild_path is None:
+				raise AssertionError(
+					"ebuild not found for '%s'" % pkg.cpv)
+			pkg_info.repo_path_real = os.path.dirname(os.path.dirname(
+				os.path.dirname(pkg_info.ebuild_path)))
+		else:
+			pkg_info.repo_path_real = \
+				self.portdb.getRepositoryPath(pkg.metadata["repository"])
+		pkg_info.use = list(self.conf.pkg_use_enabled(pkg))
+		if not pkg.built and pkg.operation == 'merge' and \
+			'fetch' in pkg.metadata.restrict:
+			pkg_info.fetch_symbol = red("F")
+			if pkg_info.ordered:
+				self.counters.restrict_fetch += 1
+			if not self.portdb.getfetchsizes(pkg.cpv,
+				useflags=pkg_info.use, myrepo=pkg.repo):
+				pkg_info.fetch_symbol = green("f")
+				if pkg_info.ordered:
+					self.counters.restrict_fetch_satisfied += 1
+		return pkg_info
+
+
+	def do_changelog(self, pkg, pkg_info):
+		"""Processes and adds the changelog text to the master text for output
+
+		@param pkg: _emerge.Package instance
+		@param pkg_info: dictionary
+		Modifies self.changelogs
+		"""
+		inst_matches = self.vardb.match(pkg.slot_atom)
+		if inst_matches:
+			ebuild_path_cl = pkg_info.ebuild_path
+			if ebuild_path_cl is None:
+				# binary package
+				ebuild_path_cl = self.portdb.findname(pkg.cpv, myrepo=pkg.repo)
+			if ebuild_path_cl is not None:
+				self.changelogs.extend(_calc_changelog(
+					ebuild_path_cl, inst_matches[0], pkg.cpv))
+		return
+
+
+	def check_system_world(self, pkg):
+		"""Checks for any occurances of the package in the system or world sets
+
+		@param pkg: _emerge.Package instance
+		@rtype system and world booleans
+		"""
+		root_config = self.conf.roots[pkg.root]
+		system_set = root_config.sets["system"]
+		world_set  = root_config.sets["selected"]
+		system = False
+		world = False
+		try:
+			system = system_set.findAtomForPackage(
+				pkg, modified_use=self.conf.pkg_use_enabled(pkg))
+			world = world_set.findAtomForPackage(
+				pkg, modified_use=self.conf.pkg_use_enabled(pkg))
+			if not (self.conf.oneshot or world) and \
+				pkg.root == self.conf.target_root and \
+				self.conf.favorites.findAtomForPackage(
+					pkg, modified_use=self.conf.pkg_use_enabled(pkg)
+					):
+				# Maybe it will be added to world now.
+				if create_world_atom(pkg, self.conf.favorites, root_config):
+					world = True
+		except InvalidDependString:
+			# This is reported elsewhere if relevant.
+			pass
+		return system, world
+
+
+	@staticmethod
+	def get_ver_str(pkg):
+		"""Obtains the version string
+		@param pkg: _emerge.Package instance
+		@rtype string
+		"""
+		ver_str = list(catpkgsplit(pkg.cpv)[2:])
+		if ver_str[1] == "r0":
+			ver_str[1] = ""
+		else:
+			ver_str[1] = "-" + ver_str[1]
+		return ver_str[0]+ver_str[1]
+
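get_ver_str() relies on catpkgsplit() reporting the revision as "r0" when
a package has no explicit revision, so the implicit -r0 is simply hidden.
Roughly equivalent:

    from portage.versions import catpkgsplit

    def ver_str(cpv):
        ver, rev = catpkgsplit(cpv)[2:]
        return ver if rev == "r0" else "%s-%s" % (ver, rev)

    # ver_str("sys-apps/portage-2.1.10")    -> "2.1.10"
    # ver_str("sys-apps/portage-2.1.10-r1") -> "2.1.10-r1"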
+
+	def _get_installed_best(self, pkg, pkg_info):
+		""" we need to use "--emptrytree" testing here rather than
+		"empty" param testing because "empty"
+		param is used for -u, where you still *do* want to see when
+		something is being upgraded.
+
+		@param pkg: _emerge.Package instance
+		@param pkg_info: dictionary
+		@rtype addl, myoldbest: list, myinslotlist: list
+		Modifies self.counters.reinst, self.counters.binary, self.counters.new
+
+		"""
+		myoldbest = []
+		myinslotlist = None
+		installed_versions = self.vardb.match_pkgs(pkg.cp)
+		if self.vardb.cpv_exists(pkg.cpv):
+			addl = "  "+yellow("R")+pkg_info.fetch_symbol+"  "
+			if pkg_info.ordered:
+				if pkg_info.merge:
+					self.counters.reinst += 1
+					if pkg.type_name == "binary":
+						self.counters.binary += 1
+				elif pkg_info.operation == "uninstall":
+					self.counters.uninst += 1
+		# filter out old-style virtual matches
+		elif installed_versions and \
+			installed_versions[0].cp == pkg.cp:
+			myinslotlist = self.vardb.match_pkgs(pkg.slot_atom)
+			# If this is the first install of a new-style virtual, we
+			# need to filter out old-style virtual matches.
+			if myinslotlist and \
+				myinslotlist[0].cp != pkg.cp:
+				myinslotlist = None
+			if myinslotlist:
+				myoldbest = myinslotlist[:]
+				addl = self._insert_slot(pkg, pkg_info, myinslotlist)
+			else:
+				myoldbest = installed_versions
+				addl = self._new_slot(pkg, pkg_info)
+			if self.conf.changelog:
+				self.do_changelog(pkg, pkg_info)
+		else:
+			addl = " " + green("N") + " " + pkg_info.fetch_symbol + "  "
+			if pkg_info.ordered:
+				self.counters.new += 1
+				if pkg.type_name == "binary":
+					self.counters.binary += 1
+		return addl, myoldbest, myinslotlist
+
+
+	def __call__(self, depgraph, mylist, favorites=None, verbosity=None):
+		"""The main operation to format and display the resolver output.
+
+		@param depgraph: dependency graph
+		@param mylist: list of packages being processed
+		@param favorites: list, defaults to []
+		@param verbosity: verbose level, defaults to None
+		Modifies self.conf, self.myfetchlist, self.portdb, self.vardb,
+			self.pkgsettings, self.verboseadd, self.oldlp, self.newlp,
+			self.print_msg
+		"""
+		if favorites is None:
+			favorites = []
+		self.conf = _DisplayConfig(depgraph, mylist, favorites, verbosity)
+		mylist = self.get_display_list(self.conf.mylist)
+		# files to fetch list - avoids counting the same file twice
+		# in size display (verbose mode)
+		self.myfetchlist = []
+		# Use this set to detect when all the "repoadd" strings are "[0]"
+		# and disable the entire repo display in this case.
+		repoadd_set = set()
+
+		for mylist_index in range(len(mylist)):
+			pkg, depth, ordered = mylist[mylist_index]
+			self.portdb = self.conf.trees[pkg.root]["porttree"].dbapi
+			self.vardb = self.conf.trees[pkg.root]["vartree"].dbapi
+			self.pkgsettings = self.conf.pkgsettings[pkg.root]
+			self.indent = " " * depth
+
+			if isinstance(pkg, Blocker):
+				if self._blockers(pkg, fetch_symbol=" "):
+					continue
+			else:
+				pkg_info = self.set_pkg_info(pkg, ordered)
+				addl, pkg_info.oldbest, myinslotlist = \
+					self._get_installed_best(pkg, pkg_info)
+				self.verboseadd = ""
+				self.repoadd = None
+				self._display_use(pkg, pkg_info.oldbest, myinslotlist)
+				self.recheck_hidden(pkg)
+				if self.conf.verbosity == 3:
+					self.verbose_size(pkg, repoadd_set, pkg_info)
+
+				pkg_info.cp = pkg.cp
+				pkg_info.ver = self.get_ver_str(pkg)
+
+				self.oldlp = self.conf.columnwidth - 30
+				self.newlp = self.oldlp - 30
+				pkg_info.oldbest = self.convert_myoldbest(pkg_info.oldbest)
+				pkg_info.system, pkg_info.world = \
+					self.check_system_world(pkg)
+				addl = self.set_interactive(pkg, pkg_info.ordered, addl)
+
+				if self.include_mask_str():
+					addl += self.gen_mask_str(pkg)
+
+				if pkg.root != "/":
+					if pkg_info.oldbest:
+						pkg_info.oldbest += " "
+					if self.conf.columns:
+						myprint = self._set_non_root_columns(
+							addl, pkg_info, pkg)
+					else:
+						if not pkg_info.merge:
+							addl = self.empty_space_in_brackets()
+							myprint = "[%s%s] " % (
+								self.pkgprint(pkg_info.operation.ljust(13),
+								pkg_info), addl,
+								)
+						else:
+							myprint = "[%s %s] " % (
+								self.pkgprint(pkg.type_name, pkg_info), addl)
+						myprint += self.indent + \
+							self.pkgprint(pkg.cpv, pkg_info) + " " + \
+							pkg_info.oldbest + darkgreen("to " + pkg.root)
+				else:
+					if self.conf.columns:
+						myprint = self._set_root_columns(
+							addl, pkg_info, pkg)
+					else:
+						myprint = self._set_no_columns(
+							pkg, pkg_info, addl)
+
+				if self.conf.columns and pkg.operation == "uninstall":
+					continue
+				self.print_msg.append((myprint, self.verboseadd, self.repoadd))
+
+				if not self.conf.tree_display \
+					and not self.conf.no_restart \
+					and pkg.root == self.conf.running_root.root \
+					and match_from_list(PORTAGE_PACKAGE_ATOM, [pkg]) \
+					and not self.conf.quiet:
+
+					if not self.vardb.cpv_exists(pkg.cpv) or \
+						'9999' in pkg.cpv or \
+						'git' in pkg.inherited or \
+						'git-2' in pkg.inherited:
+						if mylist_index < len(mylist) - 1:
+							self.print_msg.append(
+								colorize(
+									"WARN", "*** Portage will stop merging "
+									"at this point and reload itself,"
+									)
+								)
+							self.print_msg.append(
+								colorize("WARN", "    then resume the merge.")
+								)
+
+		show_repos = repoadd_set and repoadd_set != set(["0"])
+
+		# now finally print out the messages
+		self.print_messages(show_repos)
+		self.print_blockers()
+		if self.conf.verbosity == 3:
+			self.print_verbose(show_repos)
+		if self.conf.changelog:
+			self.print_changelog()
+
+		return os.EX_OK

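Display is invoked as a callable, one instance per resolver pass. A
minimal usage sketch, where depgraph and mergelist stand for objects
produced elsewhere by the resolver:

    display = Display()
    retval = display(depgraph, mergelist, favorites=favorites)
    assert retval == os.EX_OK
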
diff --git a/portage_with_autodep/pym/_emerge/resolver/output_helpers.py b/portage_with_autodep/pym/_emerge/resolver/output_helpers.py
new file mode 100644
index 0000000..b7e7376
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/output_helpers.py
@@ -0,0 +1,576 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Contains private support functions for the Display class
+in output.py
+"""
+__all__ = (
+	)
+
+import io
+import re
+import sys
+
+from portage import os
+from portage import _encodings, _unicode_encode
+from portage._sets.base import InternalPackageSet
+from portage.output import blue, colorize, create_color_func, green, red, \
+	teal, yellow
+bad = create_color_func("BAD")
+from portage.util import writemsg
+from portage.versions import catpkgsplit
+
+from _emerge.Blocker import Blocker
+from _emerge.Package import Package
+
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+
+class _RepoDisplay(object):
+	def __init__(self, roots):
+		self._shown_repos = {}
+		self._unknown_repo = False
+		repo_paths = set()
+		for root_config in roots.values():
+			portdir = root_config.settings.get("PORTDIR")
+			if portdir:
+				repo_paths.add(portdir)
+			overlays = root_config.settings.get("PORTDIR_OVERLAY")
+			if overlays:
+				repo_paths.update(overlays.split())
+		repo_paths = list(repo_paths)
+		self._repo_paths = repo_paths
+		self._repo_paths_real = [ os.path.realpath(repo_path) \
+			for repo_path in repo_paths ]
+
+		# pre-allocate index for PORTDIR so that it always has index 0.
+		for root_config in roots.values():
+			portdb = root_config.trees["porttree"].dbapi
+			portdir = portdb.porttree_root
+			if portdir:
+				self.repoStr(portdir)
+
+	def repoStr(self, repo_path_real):
+		real_index = -1
+		if repo_path_real:
+			real_index = self._repo_paths_real.index(repo_path_real)
+		if real_index == -1:
+			s = "?"
+			self._unknown_repo = True
+		else:
+			shown_repos = self._shown_repos
+			repo_paths = self._repo_paths
+			repo_path = repo_paths[real_index]
+			index = shown_repos.get(repo_path)
+			if index is None:
+				index = len(shown_repos)
+				shown_repos[repo_path] = index
+			s = str(index)
+		return s
+
+	def __str__(self):
+		output = []
+		shown_repos = self._shown_repos
+		unknown_repo = self._unknown_repo
+		if shown_repos or self._unknown_repo:
+			output.append("Portage tree and overlays:\n")
+		show_repo_paths = list(shown_repos)
+		for repo_path, repo_index in shown_repos.items():
+			show_repo_paths[repo_index] = repo_path
+		if show_repo_paths:
+			for index, repo_path in enumerate(show_repo_paths):
+				output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
+		if unknown_repo:
+			output.append(" "+teal("[?]") + \
+				" indicates that the source repository could not be determined\n")
+		return "".join(output)
+
+	if sys.hexversion < 0x3000000:
+
+		__unicode__ = __str__
+
+		def __str__(self):
+			return _unicode_encode(self.__unicode__(),
+				encoding=_encodings['content'])
+
+
+class _PackageCounters(object):
+
+	def __init__(self):
+		self.upgrades   = 0
+		self.downgrades = 0
+		self.new        = 0
+		self.newslot    = 0
+		self.reinst     = 0
+		self.uninst     = 0
+		self.blocks     = 0
+		self.blocks_satisfied         = 0
+		self.totalsize  = 0
+		self.restrict_fetch           = 0
+		self.restrict_fetch_satisfied = 0
+		self.interactive              = 0
+		self.binary                   = 0
+
+	def __str__(self):
+		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
+		myoutput = []
+		details = []
+		myoutput.append("Total: %s package" % total_installs)
+		if total_installs != 1:
+			myoutput.append("s")
+		if total_installs != 0:
+			myoutput.append(" (")
+		if self.upgrades > 0:
+			details.append("%s upgrade" % self.upgrades)
+			if self.upgrades > 1:
+				details[-1] += "s"
+		if self.downgrades > 0:
+			details.append("%s downgrade" % self.downgrades)
+			if self.downgrades > 1:
+				details[-1] += "s"
+		if self.new > 0:
+			details.append("%s new" % self.new)
+		if self.newslot > 0:
+			details.append("%s in new slot" % self.newslot)
+			if self.newslot > 1:
+				details[-1] += "s"
+		if self.reinst > 0:
+			details.append("%s reinstall" % self.reinst)
+			if self.reinst > 1:
+				details[-1] += "s"
+		if self.binary > 0:
+			details.append("%s binary" % self.binary)
+			if self.binary > 1:
+				details[-1] = details[-1][:-1] + "ies"
+		if self.uninst > 0:
+			details.append("%s uninstall" % self.uninst)
+			if self.uninst > 1:
+				details[-1] += "s"
+		if self.interactive > 0:
+			details.append("%s %s" % (self.interactive,
+				colorize("WARN", "interactive")))
+		myoutput.append(", ".join(details))
+		if total_installs != 0:
+			myoutput.append(")")
+		myoutput.append(", Size of downloads: %s" % _format_size(self.totalsize))
+		if self.restrict_fetch:
+			myoutput.append("\nFetch Restriction: %s package" % \
+				self.restrict_fetch)
+			if self.restrict_fetch > 1:
+				myoutput.append("s")
+		if self.restrict_fetch_satisfied < self.restrict_fetch:
+			myoutput.append(bad(" (%s unsatisfied)") % \
+				(self.restrict_fetch - self.restrict_fetch_satisfied))
+		if self.blocks > 0:
+			myoutput.append("\nConflict: %s block" % \
+				self.blocks)
+			if self.blocks > 1:
+				myoutput.append("s")
+			if self.blocks_satisfied < self.blocks:
+				myoutput.append(bad(" (%s unsatisfied)") % \
+					(self.blocks - self.blocks_satisfied))
+		return "".join(myoutput)
+
+
+class _DisplayConfig(object):
+
+	def __init__(self, depgraph, mylist, favorites, verbosity):
+		frozen_config = depgraph._frozen_config
+		dynamic_config = depgraph._dynamic_config
+
+		self.mylist = mylist
+		self.favorites = InternalPackageSet(favorites, allow_repo=True)
+		self.verbosity = verbosity
+
+		if self.verbosity is None:
+			self.verbosity = ("--quiet" in frozen_config.myopts and 1 or \
+				"--verbose" in frozen_config.myopts and 3 or 2)
+
+		self.oneshot = "--oneshot" in frozen_config.myopts or \
+			"--onlydeps" in frozen_config.myopts
+		self.columns = "--columns" in frozen_config.myopts
+		self.tree_display = "--tree" in frozen_config.myopts
+		self.alphabetical = "--alphabetical" in frozen_config.myopts
+		self.quiet = "--quiet" in frozen_config.myopts
+		self.all_flags = self.verbosity == 3 or self.quiet
+		self.print_use_string = self.verbosity != 1 or "--verbose" in frozen_config.myopts
+		self.changelog = "--changelog" in frozen_config.myopts
+		self.edebug = frozen_config.edebug
+		self.no_restart = frozen_config._opts_no_restart.intersection(frozen_config.myopts)
+		self.unordered_display = "--unordered-display" in frozen_config.myopts
+
+		mywidth = 130
+		if "COLUMNWIDTH" in frozen_config.settings:
+			try:
+				mywidth = int(frozen_config.settings["COLUMNWIDTH"])
+			except ValueError as e:
+				writemsg("!!! %s\n" % str(e), noiselevel=-1)
+				writemsg("!!! Unable to parse COLUMNWIDTH='%s'\n" % \
+					frozen_config.settings["COLUMNWIDTH"], noiselevel=-1)
+				del e
+		self.columnwidth = mywidth
+
+		self.repo_display = _RepoDisplay(frozen_config.roots)
+		self.trees = frozen_config.trees
+		self.pkgsettings = frozen_config.pkgsettings
+		self.target_root = frozen_config.target_root
+		self.running_root = frozen_config._running_root
+		self.roots = frozen_config.roots
+
+		self.blocker_parents = dynamic_config._blocker_parents
+		self.reinstall_nodes = dynamic_config._reinstall_nodes
+		self.digraph = dynamic_config.digraph
+		self.blocker_uninstalls = dynamic_config._blocker_uninstalls
+		self.slot_pkg_map = dynamic_config._slot_pkg_map
+		self.set_nodes = dynamic_config._set_nodes
+
+		self.pkg_use_enabled = depgraph._pkg_use_enabled
+		self.pkg = depgraph._pkg
+
+
+# formats a size given in bytes nicely
+def _format_size(mysize):
+	if isinstance(mysize, basestring):
+		return mysize
+	if 0 != mysize % 1024:
+		# Always round up to the next kB so that it doesn't show 0 kB when
+		# some small file still needs to be fetched.
+		mysize += 1024 - mysize % 1024
+	mystr=str(mysize//1024)
+	mycount=len(mystr)
+	while (mycount > 3):
+		mycount-=3
+		mystr=mystr[:mycount]+","+mystr[mycount:]
+	return mystr+" kB"
+
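_format_size() rounds partial kilobytes up, so even a one-byte pending
fetch never displays as 0 kB, and groups thousands with commas; strings
(such as the "[empty/missing/bad digest]" marker) pass through untouched:

    assert _format_size(1) == "1 kB"       # rounded up from 1 byte
    assert _format_size(1024) == "1 kB"
    assert _format_size(10485760) == "10,240 kB"
    assert _format_size("[empty/missing/bad digest]") == \
        "[empty/missing/bad digest]"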
+
+def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
+	old_iuse, old_use,
+	is_new, reinst_flags):
+
+	if not conf.print_use_string:
+		return ""
+
+	enabled = []
+	if conf.alphabetical:
+		disabled = enabled
+		removed = enabled
+	else:
+		disabled = []
+		removed = []
+	cur_iuse = set(cur_iuse)
+	enabled_flags = cur_iuse.intersection(cur_use)
+	removed_iuse = set(old_iuse).difference(cur_iuse)
+	any_iuse = cur_iuse.union(old_iuse)
+	any_iuse = list(any_iuse)
+	any_iuse.sort()
+	for flag in any_iuse:
+		flag_str = None
+		isEnabled = False
+		reinst_flag = reinst_flags and flag in reinst_flags
+		if flag in enabled_flags:
+			isEnabled = True
+			if is_new or flag in old_use and \
+				(conf.all_flags or reinst_flag):
+				flag_str = red(flag)
+			elif flag not in old_iuse:
+				flag_str = yellow(flag) + "%*"
+			elif flag not in old_use:
+				flag_str = green(flag) + "*"
+		elif flag in removed_iuse:
+			if conf.all_flags or reinst_flag:
+				flag_str = yellow("-" + flag) + "%"
+				if flag in old_use:
+					flag_str += "*"
+				flag_str = "(" + flag_str + ")"
+				removed.append(flag_str)
+			continue
+		else:
+			if is_new or flag in old_iuse and \
+				flag not in old_use and \
+				(conf.all_flags or reinst_flag):
+				flag_str = blue("-" + flag)
+			elif flag not in old_iuse:
+				flag_str = yellow("-" + flag)
+				if flag not in iuse_forced:
+					flag_str += "%"
+			elif flag in old_use:
+				flag_str = green("-" + flag) + "*"
+		if flag_str:
+			if flag in iuse_forced:
+				flag_str = "(" + flag_str + ")"
+			if isEnabled:
+				enabled.append(flag_str)
+			else:
+				disabled.append(flag_str)
+
+	if conf.alphabetical:
+		ret = " ".join(enabled)
+	else:
+		ret = " ".join(enabled + disabled + removed)
+	if ret:
+		ret = '%s="%s" ' % (name, ret)
+	return ret
+
+
+def _tree_display(conf, mylist):
+
+	# If there are any Uninstall instances, add the
+	# corresponding blockers to the digraph.
+	mygraph = conf.digraph.copy()
+
+	executed_uninstalls = set(node for node in mylist \
+		if isinstance(node, Package) and node.operation == "unmerge")
+
+	for uninstall in conf.blocker_uninstalls.leaf_nodes():
+		uninstall_parents = \
+			conf.blocker_uninstalls.parent_nodes(uninstall)
+		if not uninstall_parents:
+			continue
+
+		# Remove the corresponding "nomerge" node and substitute
+		# the Uninstall node.
+		inst_pkg = conf.pkg(uninstall.cpv, "installed",
+			uninstall.root_config, installed=True)
+
+		try:
+			mygraph.remove(inst_pkg)
+		except KeyError:
+			pass
+
+		try:
+			inst_pkg_blockers = conf.blocker_parents.child_nodes(inst_pkg)
+		except KeyError:
+			inst_pkg_blockers = []
+
+		# Break the Package -> Uninstall edges.
+		mygraph.remove(uninstall)
+
+		# Resolution of a package's blockers
+		# depends on its own uninstallation.
+		for blocker in inst_pkg_blockers:
+			mygraph.add(uninstall, blocker)
+
+		# Expand Package -> Uninstall edges into
+		# Package -> Blocker -> Uninstall edges.
+		for blocker in uninstall_parents:
+			mygraph.add(uninstall, blocker)
+			for parent in conf.blocker_parents.parent_nodes(blocker):
+				if parent != inst_pkg:
+					mygraph.add(blocker, parent)
+
+		# If the uninstall task did not need to be executed because
+		# of an upgrade, display Blocker -> Upgrade edges since the
+		# corresponding Blocker -> Uninstall edges will not be shown.
+		upgrade_node = \
+			conf.slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
+		if upgrade_node is not None and \
+			uninstall not in executed_uninstalls:
+			for blocker in uninstall_parents:
+				mygraph.add(upgrade_node, blocker)
+
+	if conf.unordered_display:
+		display_list = _unordered_tree_display(mygraph, mylist)
+	else:
+		display_list = _ordered_tree_display(conf, mygraph, mylist)
+
+	_prune_tree_display(display_list)
+
+	return display_list
+
+
+def _unordered_tree_display(mygraph, mylist):
+	display_list = []
+	seen_nodes = set()
+
+	def print_node(node, depth):
+
+		if node in seen_nodes:
+			pass
+		else:
+			seen_nodes.add(node)
+
+			if isinstance(node, (Blocker, Package)):
+				display_list.append((node, depth, True))
+			else:
+				depth = -1
+
+			for child_node in mygraph.child_nodes(node):
+				print_node(child_node, depth + 1)
+
+	for root_node in mygraph.root_nodes():
+		print_node(root_node, 0)
+
+	return display_list
+
+
+def _ordered_tree_display(conf, mygraph, mylist):
+	depth = 0
+	shown_edges = set()
+	tree_nodes = []
+	display_list = []
+
+	for x in mylist:
+		depth = len(tree_nodes)
+		while depth and x not in \
+			mygraph.child_nodes(tree_nodes[depth-1]):
+				depth -= 1
+		if depth:
+			tree_nodes = tree_nodes[:depth]
+			tree_nodes.append(x)
+			display_list.append((x, depth, True))
+			shown_edges.add((x, tree_nodes[depth-1]))
+		else:
+			traversed_nodes = set() # prevent endless cycles
+			traversed_nodes.add(x)
+			def add_parents(current_node, ordered):
+				parent_nodes = None
+				# Do not traverse to parents if this node is
+				# an argument or a direct member of a set that has
+				# been specified as an argument (system or world).
+				if current_node not in conf.set_nodes:
+					parent_nodes = mygraph.parent_nodes(current_node)
+				if parent_nodes:
+					child_nodes = set(mygraph.child_nodes(current_node))
+					selected_parent = None
+					# First, try to avoid a direct cycle.
+					for node in parent_nodes:
+						if not isinstance(node, (Blocker, Package)):
+							continue
+						if node not in traversed_nodes and \
+							node not in child_nodes:
+							edge = (current_node, node)
+							if edge in shown_edges:
+								continue
+							selected_parent = node
+							break
+					if not selected_parent:
+						# A direct cycle is unavoidable.
+						for node in parent_nodes:
+							if not isinstance(node, (Blocker, Package)):
+								continue
+							if node not in traversed_nodes:
+								edge = (current_node, node)
+								if edge in shown_edges:
+									continue
+								selected_parent = node
+								break
+					if selected_parent:
+						shown_edges.add((current_node, selected_parent))
+						traversed_nodes.add(selected_parent)
+						add_parents(selected_parent, False)
+				display_list.append((current_node,
+					len(tree_nodes), ordered))
+				tree_nodes.append(current_node)
+			tree_nodes = []
+			add_parents(x, True)
+
+	return display_list
+
+
+def _prune_tree_display(display_list):
+	last_merge_depth = 0
+	for i in range(len(display_list) - 1, -1, -1):
+		node, depth, ordered = display_list[i]
+		if not ordered and depth == 0 and i > 0 \
+			and node == display_list[i-1][0] and \
+			display_list[i-1][1] == 0:
+			# An ordered node got a consecutive duplicate
+			# when the tree was being filled in.
+			del display_list[i]
+			continue
+		if ordered and isinstance(node, Package) \
+			and node.operation in ('merge', 'uninstall'):
+			last_merge_depth = depth
+			continue
+		if depth >= last_merge_depth or \
+			i < len(display_list) - 1 and \
+			depth >= display_list[i+1][1]:
+				del display_list[i]
+
+
+def _calc_changelog(ebuildpath,current,next):
+	if ebuildpath is None or not os.path.exists(ebuildpath):
+		return []
+	current = '-'.join(catpkgsplit(current)[1:])
+	if current.endswith('-r0'):
+		current = current[:-3]
+	next = '-'.join(catpkgsplit(next)[1:])
+	if next.endswith('-r0'):
+		next = next[:-3]
+	changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
+	try:
+		changelog = io.open(_unicode_encode(changelogpath,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['repo.content'], errors='replace'
+		).read()
+	except SystemExit:
+		raise # Needed else can't exit
+	except:
+		return []
+	divisions = _find_changelog_tags(changelog)
+	#print 'XX from',current,'to',next
+	#for div,text in divisions: print 'XX',div
+	# skip entries for all revisions above the one we are about to emerge
+	for i in range(len(divisions)):
+		if divisions[i][0]==next:
+			divisions = divisions[i:]
+			break
+	# find out how many entries we are going to display
+	for i in range(len(divisions)):
+		if divisions[i][0]==current:
+			divisions = divisions[:i]
+			break
+	else:
+		# couldn't find the current revision in the list. display nothing
+		return []
+	return divisions
+
+
+def _find_changelog_tags(changelog):
+	divs = []
+	release = None
+	while 1:
+		match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
+		if match is None:
+			if release is not None:
+				divs.append((release,changelog))
+			return divs
+		if release is not None:
+			divs.append((release,changelog[:match.start()]))
+		changelog = changelog[match.end():]
+		release = match.group(1)
+		if release.endswith('.ebuild'):
+			release = release[:-7]
+		if release.endswith('-r0'):
+			release = release[:-3]
+
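_find_changelog_tags() splits a ChangeLog on its leading '*name-version'
header lines, pairing each release (any .ebuild suffix or -r0 revision
stripped) with the text up to the next header. On a hypothetical log:

    log = ("*foo-1.1 (01 Jan 2011)\n\n  Version bump.\n"
        "*foo-1.0-r0 (01 Dec 2010)\n\n  Initial import.\n")
    # _find_changelog_tags(log) ->
    #   [('foo-1.1', '\n  Version bump.\n'),
    #    ('foo-1.0', '\n  Initial import.\n')]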
+
+class PkgInfo(object):
+	"""Simple class to hold instance attributes for current
+	information about the pkg being printed.
+	"""
+	
+	__slots__ = ("ordered", "fetch_symbol", "operation", "merge",
+		"built", "cp", "ebuild_path", "repo_name", "repo_path_real", 
+		"world", "system", "use", "oldbest", "ver"
+		)
+
+
+	def __init__(self):
+		self.built = False
+		self.cp = ''
+		self.ebuild_path = ''
+		self.fetch_symbol = ''
+		self.merge = ''
+		self.oldbest = ''
+		self.operation = ''
+		self.ordered = False
+		self.repo_path_real = ''
+		self.repo_name = ''
+		self.system = False
+		self.use = ''
+		self.ver = ''
+		self.world = False

diff --git a/portage_with_autodep/pym/_emerge/resolver/slot_collision.py b/portage_with_autodep/pym/_emerge/resolver/slot_collision.py
new file mode 100644
index 0000000..0df8f20
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/slot_collision.py
@@ -0,0 +1,978 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+
+from _emerge.AtomArg import AtomArg
+from _emerge.Package import Package
+from _emerge.PackageArg import PackageArg
+from portage.dep import check_required_use
+from portage.output import colorize
+from portage._sets.base import InternalPackageSet
+from portage.util import writemsg
+from portage.versions import cpv_getversion, vercmp
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+class slot_conflict_handler(object):
+	"""This class keeps track of all slot conflicts and provides
+	an interface to get possible solutions.
+
+	How it works:
+	If two packages have been pulled into a slot, one needs to
+	go away. This class focuses on cases where this can be achieved
+	with a change in USE settings.
+
+	1) Find out what causes a given slot conflict. There are
+	three possibilities:
+
+		a) One parent needs foo-1:0 and another one needs foo-2:0;
+		there is nothing we can do about this. This is called a
+		'version based conflict'.
+
+		b) All parents of one of the conflict packages could use
+		another conflict package. This is called an 'unspecific
+		conflict'. This should be caught by the backtracking logic.
+		Ask the user to enable -uN (if not already enabled). If -uN is
+		enabled, this case is treated in the same way as c).
+
+		c) Neither a 'version based conflict' nor an 'unspecific
+		conflict'. Ignoring use deps would result in an
+		'unspecific conflict'. This is called a 'specific conflict'.
+		This is the only conflict we try to find suggestions for.
+
+	2) Computing suggestions.
+
+	Def.: "configuration": A list of packages, containing exactly one
+			package from each slot conflict.
+
+	We try to find USE changes such that all parents of conflict packages
+	can work with a package in the configuration we're looking at. This
+	is done for all possible configurations, except if the 'all-ebuild'
+	configuration has a suggestion. In this case we immediately abort the
+	search.
+	For the current configuration, all use flags that are part of violated
+	use deps are computed. This is done for every slot conflict on its own.
+
+	Def.: "solution (candidate)": An assignment of "enabled" / "disabled"
+			values for the use flags that are part of violated use deps.
+
+	Now all involved use flags for the current configuration are known. For
+	now they have an undetermined value. Fix their value in the
+	following cases:
+		* The use dep in the parent atom is unconditional.
+		* The parent package is 'installed'.
+		* The conflict package is 'installed'.
+
+	USE of 'installed' packages can't be changed. This always requires a
+	non-installed package.
+
+	During this procedure, contradictions may occur. In this case the
+	configuration has no solution.
+
+	Now generate all possible solution candidates with fixed values. Check
+	that they don't introduce new conflicts.
+
+	We have found a valid assignment for all involved use flags. Compute
+	the needed USE changes and prepare the message for the user.
+	"""
+
+	def __init__(self, depgraph):
+		self.depgraph = depgraph
+		self.myopts = depgraph._frozen_config.myopts
+		self.debug = "--debug" in self.myopts
+		if self.debug:
+			writemsg("Starting slot conflict handler\n", noiselevel=-1)
+		#slot_collision_info is a dict mapping (slot atom, root) to set
+		#of packages. The packages in the set all belong to the same
+		#slot.
+		self.slot_collision_info = depgraph._dynamic_config._slot_collision_info
+		
+		#A dict mapping packages to pairs of parent package
+		#and parent atom
+		self.all_parents = depgraph._dynamic_config._parent_atoms
+		
+		#set containing all nodes that are part of a slot conflict
+		conflict_nodes = set()
+		
+		#a list containing list of packages that form a slot conflict
+		conflict_pkgs = []
+		
+		#a list containing sets of (parent, atom) pairs that have pulled packages
+		#into the same slot
+		all_conflict_atoms_by_slotatom = []
+		
+		#fill conflict_pkgs, all_conflict_atoms_by_slotatom
+		for (atom, root), pkgs \
+			in self.slot_collision_info.items():
+			conflict_pkgs.append(list(pkgs))
+			all_conflict_atoms_by_slotatom.append(set())
+			
+			for pkg in pkgs:
+				conflict_nodes.add(pkg)
+				for ppkg, atom in self.all_parents.get(pkg):
+					all_conflict_atoms_by_slotatom[-1].add((ppkg, atom))
+
+		#Variable that holds the non-explanation part of the message.
+		self.conflict_msg = []
+		#If any conflict package was pulled in only by unspecific atoms, then
+		#the user forgot to enable --newuse and/or --update.
+		self.conflict_is_unspecific = False
+		
+		#Indicate if the conflict is caused by incompatible version requirements
+		#cat/pkg-2 pulled in, but a parent requires <cat/pkg-2
+		self.is_a_version_conflict = False
+
+		self._prepare_conflict_msg_and_check_for_specificity()
+
+		#a list of dicts that hold the needed USE values to solve all conflicts
+		self.solutions = []
+
+		#a list of dicts that hold the needed USE changes to solve all conflicts
+		self.changes = []
+
+		#configuration = a list of packages with exactly one package from every
+		#single slot conflict
+		config_gen = _configuration_generator(conflict_pkgs)
+		first_config = True
+
+		#go through all configurations and collect solutions
+		while(True):
+			config = config_gen.get_configuration()
+			if not config:
+				break
+
+			if self.debug:
+				writemsg("\nNew configuration:\n", noiselevel=-1)
+				for pkg in config:
+					writemsg("   " + str(pkg) + "\n", noiselevel=-1)
+				writemsg("\n", noiselevel=-1)
+
+			new_solutions = self._check_configuration(config, all_conflict_atoms_by_slotatom, conflict_nodes)
+
+			if new_solutions:
+				self.solutions.extend(new_solutions)
+
+				if first_config:
+					#If the "all-ebuild" config gives a solution, use it.
+					#Otherwise enumerate all other solutions.
+					if self.debug:
+						writemsg("All-ebuild configuration has a solution. Aborting search.\n", noiselevel=-1)
+					break
+			first_config = False
+
+			if len(conflict_pkgs) > 4:
+				# The number of configurations to check grows exponentially in the number of conflict_pkgs.
+				# To prevent excessive running times, only check the "all-ebuild" configuration
+				# if the number of conflict packages is too large.
+				if self.debug:
+					writemsg("\nAborting search due to excessive number of configurations.\n", noiselevel=-1)
+				break
+
+		for solution in self.solutions:
+			self._add_change(self._get_change(solution))
+
+
+	def get_conflict(self):
+		return "".join(self.conflict_msg)
+
+	def _is_subset(self, change1, change2):
+		"""
+		Checks if a set of changes 'change1' is a subset of the changes 'change2'.
+		"""
+		#All pkgs of change1 have to be in change2.
+		#For every package in change1, the changes have to be a subset of
+		#the corresponding changes in change2.
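+		#Illustrative example (hypothetical): {pkgA: {"ssl": True}} is a
+		#subset of {pkgA: {"ssl": True, "gtk": False}}, but not of
+		#{pkgA: {"ssl": False}} or of {pkgB: {"ssl": True}}.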
+		for pkg in change1:
+			if pkg not in change2:
+				return False
+
+			for flag in change1[pkg]:
+				if flag not in change2[pkg]:
+					return False
+				if change1[pkg][flag] != change2[pkg][flag]:
+					return False
+		return True
+
+	def _add_change(self, new_change):
+		"""
+		Make sure to keep only minimal changes. If "+foo" does the job, discard "+foo -bar".
+		"""
+		changes = self.changes
+		#Make sure there is no other solution that is a subset of the new solution.
+		ignore = False
+		to_be_removed = []
+		for change in changes:
+			if self._is_subset(change, new_change):
+				ignore = True
+				break
+			elif self._is_subset(new_change, change):
+				to_be_removed.append(change)
+
+		if not ignore:
+			#Discard all existing changes that are a superset of the new change.
+			for obsolete_change in to_be_removed:
+				changes.remove(obsolete_change)
+			changes.append(new_change)
+
+	def _get_change(self, solution):
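+		#Translate a solution into USE changes relative to current settings.
+		#Illustrative example (hypothetical): for a package with USE="gtk",
+		#the solution {pkg: {"ssl": "enabled", "gtk": "disabled"}} yields
+		#the change {pkg: {"ssl": True, "gtk": False}}.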
+		_pkg_use_enabled = self.depgraph._pkg_use_enabled
+		new_change = {}
+		for pkg in solution:
+			for flag, state in solution[pkg].items():
+				if state == "enabled" and flag not in _pkg_use_enabled(pkg):
+					new_change.setdefault(pkg, {})[flag] = True
+				elif state == "disabled" and flag in _pkg_use_enabled(pkg):
+					new_change.setdefault(pkg, {})[flag] = False
+		return new_change
+
+	def _prepare_conflict_msg_and_check_for_specificity(self):
+		"""
+		Print all slot conflicts in a human readable way.
+		"""
+		_pkg_use_enabled = self.depgraph._pkg_use_enabled
+		msg = self.conflict_msg
+		indent = "  "
+		msg.append("\n!!! Multiple package instances within a single " + \
+			"package slot have been pulled\n")
+		msg.append("!!! into the dependency graph, resulting" + \
+			" in a slot conflict:\n\n")
+
+		for (slot_atom, root), pkgs \
+			in self.slot_collision_info.items():
+			msg.append(str(slot_atom))
+			if root != '/':
+				msg.append(" for %s" % (root,))
+			msg.append("\n\n")
+
+			for pkg in pkgs:
+				msg.append(indent)
+				msg.append(str(pkg))
+				parent_atoms = self.all_parents.get(pkg)
+				if parent_atoms:
+					#Create a list of collision reasons and map them to sets
+					#of atoms.
+					#Possible reasons:
+					#	("version", "ge") for operator >=, >
+					#	("version", "eq") for operator =, ~
+					#	("version", "le") for operator <=, <
+					#	("use", "<some use flag>") for unmet use conditionals
+					collision_reasons = {}
+					num_all_specific_atoms = 0
+
+					for ppkg, atom in parent_atoms:
+						atom_set = InternalPackageSet(initial_atoms=(atom,))
+						atom_without_use_set = InternalPackageSet(initial_atoms=(atom.without_use,))
+
+						for other_pkg in pkgs:
+							if other_pkg == pkg:
+								continue
+
+							if not atom_without_use_set.findAtomForPackage(other_pkg, \
+								modified_use=_pkg_use_enabled(other_pkg)):
+								#The version range does not match.
+								sub_type = None
+								if atom.operator in (">=", ">"):
+									sub_type = "ge"
+								elif atom.operator in ("=", "~"):
+									sub_type = "eq"
+								elif atom.operator in ("<=", "<"):
+									sub_type = "le"
+
+								atoms = collision_reasons.get(("version", sub_type), set())
+								atoms.add((ppkg, atom, other_pkg))
+								num_all_specific_atoms += 1
+								collision_reasons[("version", sub_type)] = atoms
+							elif not atom_set.findAtomForPackage(other_pkg, \
+								modified_use=_pkg_use_enabled(other_pkg)):
+								missing_iuse = other_pkg.iuse.get_missing_iuse(
+									atom.unevaluated_atom.use.required)
+								if missing_iuse:
+									for flag in missing_iuse:
+										atoms = collision_reasons.get(("use", flag), set())
+										atoms.add((ppkg, atom, other_pkg))
+										collision_reasons[("use", flag)] = atoms
+									num_all_specific_atoms += 1
+								else:
+									#Use conditionals not met.
+									violated_atom = atom.violated_conditionals(_pkg_use_enabled(other_pkg), \
+										other_pkg.iuse.is_valid_flag)
+									for flag in violated_atom.use.enabled.union(violated_atom.use.disabled):
+										atoms = collision_reasons.get(("use", flag), set())
+										atoms.add((ppkg, atom, other_pkg))
+										collision_reasons[("use", flag)] = atoms
+									num_all_specific_atoms += 1
+
+					msg.append(" pulled in by\n")
+
+					selected_for_display = set()
+					unconditional_use_deps = set()
+
+					for (type, sub_type), parents in collision_reasons.items():
+						#From each (type, sub_type) pair select at least one atom.
+						#Try to select as few atoms as possible
+
+						if type == "version":
+							#Find the atom with version that is as far away as possible.
+							best_matches = {}
+							for ppkg, atom, other_pkg in parents:
+								if atom.cp in best_matches:
+									cmp = vercmp( \
+										cpv_getversion(atom.cpv), \
+										cpv_getversion(best_matches[atom.cp][1].cpv))
+
+									if (sub_type == "ge" and  cmp > 0) \
+										or (sub_type == "le" and cmp < 0) \
+										or (sub_type == "eq" and cmp > 0):
+										best_matches[atom.cp] = (ppkg, atom)
+								else:
+									best_matches[atom.cp] = (ppkg, atom)
+							selected_for_display.update(best_matches.values())
+						elif type == "use":
+							#Prefer atoms with unconditional use deps over conditional
+							#ones, because it's not possible to change them on the
+							#parent, which means there are fewer possible solutions.
+							use = sub_type
+							for ppkg, atom, other_pkg in parents:
+								missing_iuse = other_pkg.iuse.get_missing_iuse(
+									atom.unevaluated_atom.use.required)
+								if missing_iuse:
+									unconditional_use_deps.add((ppkg, atom))
+								else:
+									parent_use = None
+									if isinstance(ppkg, Package):
+										parent_use = _pkg_use_enabled(ppkg)
+									violated_atom = atom.unevaluated_atom.violated_conditionals(
+										_pkg_use_enabled(other_pkg),
+										other_pkg.iuse.is_valid_flag,
+										parent_use=parent_use)
+									# It's possible for autounmask to change
+									# parent_use such that the unevaluated form
+									# of the atom now matches, even though the
+									# earlier evaluated form (from before
+									# autounmask changed parent_use) does not.
+									# In this case (see bug #374423), it's
+									# expected that violated_atom.use is None.
+									# Since the atom now matches, we don't want
+									# to display it in the slot conflict
+									# message, so we simply ignore it and rely
+									# on the autounmask display to communicate
+									# the necessary USE change to the user.
+									if violated_atom.use is None:
+										continue
+									if use in violated_atom.use.enabled or \
+										use in violated_atom.use.disabled:
+										unconditional_use_deps.add((ppkg, atom))
+								# When USE flags are removed, it can be
+								# essential to see all broken reverse
+								# dependencies here, so don't omit any.
+								# If the list is long, people can simply
+								# use a pager.
+								selected_for_display.add((ppkg, atom))
+
+					def highlight_violations(atom, version, use=[]):
+						"""Colorize parts of an atom"""
+						atom_str = str(atom)
+						if version:
+							op = atom.operator
+							ver = None
+							if atom.cp != atom.cpv:
+								ver = cpv_getversion(atom.cpv)
+							slot = atom.slot
+
+							if op == "=*":
+								op = "="
+								ver += "*"
+
+							if op is not None:
+								atom_str = atom_str.replace(op, colorize("BAD", op), 1)
+
+							if ver is not None:
+								start = atom_str.rfind(ver)
+								end = start + len(ver)
+								atom_str = atom_str[:start] + \
+									colorize("BAD", ver) + \
+									atom_str[end:]
+							if slot:
+								atom_str = atom_str.replace(":" + slot, colorize("BAD", ":" + slot))
+						
+						if use and atom.use.tokens:
+							use_part_start = atom_str.find("[")
+							use_part_end = atom_str.find("]")
+							
+							new_tokens = []
+							for token in atom.use.tokens:
+								if token.lstrip("-!").rstrip("=?") in use:
+									new_tokens.append(colorize("BAD", token))
+								else:
+									new_tokens.append(token)
+
+							atom_str = atom_str[:use_part_start] \
+								+ "[%s]" % (",".join(new_tokens),) + \
+								atom_str[use_part_end+1:]
+						
+						return atom_str
+
+					# Show unconditional use deps first, since those
+					# are more problematic than the conditional kind.
+					ordered_list = list(unconditional_use_deps)
+					if len(selected_for_display) > len(unconditional_use_deps):
+						for parent_atom in selected_for_display:
+							if parent_atom not in unconditional_use_deps:
+								ordered_list.append(parent_atom)
+					for parent_atom in ordered_list:
+						parent, atom = parent_atom
+						msg.append(2*indent)
+						if isinstance(parent,
+							(PackageArg, AtomArg)):
+							# For PackageArg and AtomArg types, it's
+							# redundant to display the atom attribute.
+							msg.append(str(parent))
+						else:
+							# Display the specific atom from SetArg or
+							# Package types.
+							version_violated = False
+							use = []
+							for (type, sub_type), parents in collision_reasons.items():
+								for x in parents:
+									if parent == x[0] and atom == x[1]:
+										if type == "version":
+											version_violated = True
+										elif type == "use":
+											use.append(sub_type)
+										break
+
+							atom_str = highlight_violations(atom.unevaluated_atom, version_violated, use)
+
+							if version_violated:
+								self.is_a_version_conflict = True
+
+							msg.append("%s required by %s" % (atom_str, parent))
+						msg.append("\n")
+					
+					if not selected_for_display:
+						msg.append(2*indent)
+						msg.append("(no parents that aren't satisfied by other packages in this slot)\n")
+						self.conflict_is_unspecific = True
+					
+					omitted_parents = num_all_specific_atoms - len(selected_for_display)
+					if omitted_parents:
+						msg.append(2*indent)
+						if len(selected_for_display) > 1:
+							msg.append("(and %d more with the same problems)\n" % omitted_parents)
+						else:
+							msg.append("(and %d more with the same problem)\n" % omitted_parents)
+				else:
+					msg.append(" (no parents)\n")
+				msg.append("\n")
+		msg.append("\n")
+
+	def get_explanation(self):
+		msg = ""
+		_pkg_use_enabled = self.depgraph._pkg_use_enabled
+
+		if self.is_a_version_conflict:
+			return None
+
+		if self.conflict_is_unspecific and \
+			not ("--newuse" in self.myopts and "--update" in self.myopts):
+			msg += "!!! Enabling --newuse and --update might solve this conflict.\n"
+			msg += "!!! If not, it might help emerge to give a more specific suggestion.\n\n"
+			return msg
+
+		solutions = self.solutions
+		if not solutions:
+			return None
+
+		if len(self.slot_collision_info)==1:
+			msg += "It might be possible to solve this slot collision\n"
+		else:
+			msg += "It might be possible to solve these slot collisions\n"
+		if len(solutions)==1:
+			msg += "by applying all of the following changes:\n"
+		else:
+			msg += "by applying one of the following solutions:\n"
+
+		def print_change(change, indent=""):
+			mymsg = ""
+			for pkg in change:
+				changes = []
+				for flag, state in change[pkg].items():
+					if state:
+						changes.append(colorize("red", "+" + flag))
+					else:
+						changes.append(colorize("blue", "-" + flag))
+				mymsg += indent + "- " + pkg.cpv + " (Change USE: %s" % " ".join(changes) + ")\n"
+			mymsg += "\n"
+			return mymsg
+
+
+		if len(self.changes) == 1:
+			msg += print_change(self.changes[0], "   ")
+		else:
+			for change in self.changes:
+				msg += "  Solution: Apply all of:\n"
+				msg += print_change(change, "     ")
+
+		return msg
+
+	def _check_configuration(self, config, all_conflict_atoms_by_slotatom, conflict_nodes):
+		"""
+		Given a configuration, the required USE changes are computed and checked
+		to make sure that no new conflict is introduced. Returns a list of
+		solutions, or False if the configuration is rejected.
+		"""
+		_pkg_use_enabled = self.depgraph._pkg_use_enabled
+		#An installed package can only be part of a valid configuration if it has no
+		#pending USE changes. Otherwise the ebuild will be pulled in again.
+		for pkg in config:
+			if not pkg.installed:
+				continue
+
+			for (atom, root), pkgs \
+				in self.slot_collision_info.items():
+				if pkg not in pkgs:
+					continue
+				for other_pkg in pkgs:
+					if other_pkg == pkg:
+						continue
+					if pkg.iuse.all.symmetric_difference(other_pkg.iuse.all) \
+						or _pkg_use_enabled(pkg).symmetric_difference(_pkg_use_enabled(other_pkg)):
+						if self.debug:
+							writemsg(str(pkg) + " has pending USE changes. Rejecting configuration.\n", noiselevel=-1)
+						return False
+
+		#A list of dicts. Keeps one dict per slot conflict. [ { flag1: "enabled" }, { flag2: "disabled" } ]
+		all_involved_flags = []
+
+		#Go through all slot conflicts
+		for id, pkg in enumerate(config):
+			involved_flags = {}
+			for ppkg, atom in all_conflict_atoms_by_slotatom[id]:
+				if ppkg in conflict_nodes and ppkg not in config:
+					#The parent is part of a slot conflict itself and is
+					#not part of the current config.
+					continue
+
+				i = InternalPackageSet(initial_atoms=(atom,))
+				if i.findAtomForPackage(pkg, modified_use=_pkg_use_enabled(pkg)):
+					continue
+
+				i = InternalPackageSet(initial_atoms=(atom.without_use,))
+				if not i.findAtomForPackage(pkg, modified_use=_pkg_use_enabled(pkg)):
+					#Version range does not match.
+					if self.debug:
+						writemsg(str(pkg) + " does not satisfy all version requirements." + \
+							" Rejecting configuration.\n", noiselevel=-1)
+					return False
+
+				if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required):
+					#Missing IUSE.
+					#FIXME: This needs to support use dep defaults.
+					if self.debug:
+						writemsg(str(pkg) + " misses needed flags from IUSE." + \
+							" Rejecting configuration.\n", noiselevel=-1)
+					return False
+
+				if not isinstance(ppkg, Package) or ppkg.installed:
+					#We cannot assume that it's possible to reinstall the package. Do not
+					#check whether any of its atoms have USE conditionals.
+					violated_atom = atom.violated_conditionals(_pkg_use_enabled(pkg), \
+						pkg.iuse.is_valid_flag)
+				else:
+					violated_atom = atom.unevaluated_atom.violated_conditionals(_pkg_use_enabled(pkg), \
+						pkg.iuse.is_valid_flag, parent_use=_pkg_use_enabled(ppkg))
+					if violated_atom.use is None:
+						# It's possible for autounmask to change
+						# parent_use such that the unevaluated form
+						# of the atom now matches, even though the
+						# earlier evaluated form (from before
+						# autounmask changed parent_use) does not.
+						# In this case (see bug #374423), it's
+						# expected that violated_atom.use is None.
+						continue
+
+				if pkg.installed and (violated_atom.use.enabled or violated_atom.use.disabled):
+				#We can't change USE of an installed package (only of an ebuild, but that
+				#is already part of the conflict, isn't it?)
+					if self.debug:
+						writemsg(str(pkg) + ": installed package would need USE changes." + \
+							" Rejecting configuration.\n", noiselevel=-1)
+					return False
+
+				#Compute the required USE changes. A flag can be forced to "enabled" or "disabled",
+				#it can be in the conditional state "cond" that allows both values, or it can be in
+				#the "contradiction" state, which means that some atoms insist on different values
+				#for this flag and those kill this configuration.
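+				#Illustrative transitions (hypothetical flag "ssl"): a violated
+				#[ssl] forces "enabled"; a second violated [-ssl] then yields
+				#"contradiction"; a conditional dep like [ssl?] alone leaves "cond".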
+				for flag in violated_atom.use.required:
+					state = involved_flags.get(flag, "")
+					
+					if flag in violated_atom.use.enabled:
+						if state in ("", "cond", "enabled"):
+							state = "enabled"
+						else:
+							state = "contradiction"
+					elif flag in violated_atom.use.disabled:
+						if state in ("", "cond", "disabled"):
+							state = "disabled"
+						else:
+							state = "contradiction"
+					else:
+						if state == "":
+							state = "cond"
+
+					involved_flags[flag] = state
+
+			if pkg.installed:
+				#We don't change the installed pkg's USE. Force all involved flags
+				#to the same value as the installed package has it.
+				for flag in involved_flags:
+					if involved_flags[flag] == "enabled":
+						if flag not in _pkg_use_enabled(pkg):
+							involved_flags[flag] = "contradiction"
+					elif involved_flags[flag] == "disabled":
+						if flag in _pkg_use_enabled(pkg):
+							involved_flags[flag] = "contradiction"
+					elif involved_flags[flag] == "cond":
+						if flag in _pkg_use_enabled(pkg):
+							involved_flags[flag] = "enabled"
+						else:
+							involved_flags[flag] = "disabled"
+
+			for flag, state in involved_flags.items():
+				if state == "contradiction":
+					if self.debug:
+						writemsg("Contradicting requirements found for flag " + \
+							flag + ". Rejecting configuration.\n", noiselevel=-1)
+					return False
+
+			all_involved_flags.append(involved_flags)
+
+		if self.debug:
+			writemsg("All involved flags:\n", noiselevel=-1)
+			for id, involved_flags in enumerate(all_involved_flags):
+				writemsg("   " + str(config[id]) + "\n", noiselevel=-1)
+				for flag, state in involved_flags.items():
+					writemsg("     " + flag + ": " + state + "\n", noiselevel=-1)
+
+		solutions = []
+		sol_gen = _solution_candidate_generator(all_involved_flags)
+		while True:
+			candidate = sol_gen.get_candidate()
+			if not candidate:
+				break
+			solution = self._check_solution(config, candidate, all_conflict_atoms_by_slotatom)
+			if solution:
+				solutions.append(solution)
+		
+		if self.debug:
+			if not solutions:
+				writemsg("No viable solutions. Rejecting configuration.\n", noiselevel=-1)
+		return solutions
+		
+	
+	def _force_flag_for_package(self, required_changes, pkg, flag, state):
+		"""
+		Adds a USE change to required_changes. Sets the target state to
+		"contradiction" if a flag is forced to conflicting values.
+		"""
+		_pkg_use_enabled = self.depgraph._pkg_use_enabled
+
+		if state == "disabled":
+			changes = required_changes.get(pkg, {})
+			flag_change = changes.get(flag, "")
+			if flag_change == "enabled":
+				flag_change = "contradiction"
+			elif flag in _pkg_use_enabled(pkg):
+				flag_change = "disabled"
+			
+			changes[flag] = flag_change
+			required_changes[pkg] = changes
+		elif state == "enabled":
+			changes = required_changes.get(pkg, {})
+			flag_change = changes.get(flag, "")
+			if flag_change == "disabled":
+				flag_change = "contradiction"
+			else:
+				flag_change = "enabled"
+			
+			changes[flag] = flag_change
+			required_changes[pkg] = changes
+								
+	def _check_solution(self, config, all_involved_flags, all_conflict_atoms_by_slotatom):
+		"""
+		Given a configuration and an assignment of all involved flags, check
+		whether this assignment solves the slot conflict.
+		"""
+		_pkg_use_enabled = self.depgraph._pkg_use_enabled
+
+		if self.debug:
+			#The code is a bit verbose, because the states might not be
+			#strings, but _value_helper objects.
+			msg = "Solution candidate: "
+			msg += "["
+			first = True
+			for involved_flags in all_involved_flags:
+				if first:
+					first = False
+				else:
+					msg += ", "
+				msg += "{"
+				inner_first = True
+				for flag, state in involved_flags.items():
+					if inner_first:
+						inner_first = False
+					else:
+						msg += ", "
+					msg += flag + ": " + str(state)
+				msg += "}"
+			msg += "]\n"
+			writemsg(msg, noiselevel=-1)
+		
+		required_changes = {}
+		for id, pkg in enumerate(config):
+			if not pkg.installed:
+				#USE of installed packages can't be changed, so flags are
+				#only forced for non-installed packages.
+				for flag in all_involved_flags[id]:
+					if not pkg.iuse.is_valid_flag(flag):
+						continue
+					state = all_involved_flags[id][flag]
+					self._force_flag_for_package(required_changes, pkg, flag, state)
+
+			#Go through all (parent, atom) pairs for the current slot conflict.
+			for ppkg, atom in all_conflict_atoms_by_slotatom[id]:
+				use = atom.unevaluated_atom.use
+				if not use:
+					#No need to force something for an atom without USE conditionals.
+					#These atoms are already satisfied.
+					continue
+				for flag in all_involved_flags[id]:
+					state = all_involved_flags[id][flag]
+					
+					if flag not in use.required or not use.conditional:
+						continue
+					if flag in use.conditional.enabled:
+						#[flag?]
+						if state == "enabled":
+							#no need to change anything, the atom won't
+							#force -flag on pkg
+							pass
+						elif state == "disabled":
+							#if flag is enabled we get [flag] -> it must be disabled
+							self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+					elif flag in use.conditional.disabled:
+						#[!flag?]
+						if state == "enabled":
+							#if flag is enabled we get [-flag] -> it must be disabled
+							self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+						elif state == "disabled":
+							#no need to change anything, the atom won't
+							#force +flag on pkg
+							pass
+					elif flag in use.conditional.equal:
+						#[flag=]
+						if state == "enabled":
+							#if flag is disabled we get [-flag] -> it must be enabled
+							self._force_flag_for_package(required_changes, ppkg, flag, "enabled")
+						elif state == "disabled":
+							#if flag is enabled we get [flag] -> it must be disabled
+							self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+					elif flag in use.conditional.not_equal:
+						#[!flag=]
+						if state == "enabled":
+							#if flag is enabled we get [-flag] -> it must be disabled
+							self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+						elif state == "disabled":
+							#if flag is disabled we get [flag] -> it must be enabled
+							self._force_flag_for_package(required_changes, ppkg, flag, "enabled")
+
+		is_valid_solution = True
+		for pkg in required_changes:
+			for state in required_changes[pkg].values():
+				if state not in ("enabled", "disabled"):
+					is_valid_solution = False
+		
+		if not is_valid_solution:
+			return None
+
+		#Check if all atoms are satisfied after the changes are applied.
+		for id, pkg in enumerate(config):
+			new_use = _pkg_use_enabled(pkg)
+			if pkg in required_changes:
+				old_use = pkg.use.enabled
+				new_use = set(new_use)
+				for flag, state in required_changes[pkg].items():
+					if state == "enabled":
+						new_use.add(flag)
+					elif state == "disabled":
+						new_use.discard(flag)
+				if not new_use.symmetric_difference(old_use):
+					#avoid copying the package in findAtomForPackage if possible
+					new_use = old_use
+
+			for ppkg, atom in all_conflict_atoms_by_slotatom[id]:
+				if not hasattr(ppkg, "use"):
+					#It's a SetArg or something like that.
+					continue
+				ppkg_new_use = set(_pkg_use_enabled(ppkg))
+				if ppkg in required_changes:
+					for flag, state in required_changes[ppkg].items():
+						if state == "enabled":
+							ppkg_new_use.add(flag)
+						elif state == "disabled":
+							ppkg_new_use.discard(flag)
+
+				new_atom = atom.unevaluated_atom.evaluate_conditionals(ppkg_new_use)
+				i = InternalPackageSet(initial_atoms=(new_atom,))
+				if not i.findAtomForPackage(pkg, new_use):
+					#We managed to create a new problem with our changes.
+					is_valid_solution = False
+					if self.debug:
+						writemsg("new conflict introduced: " + str(pkg) + \
+							" does not match " + new_atom + " from " + str(ppkg) + "\n", noiselevel=-1)
+					break
+
+			if not is_valid_solution:
+				break
+
+		#Make sure the changes don't violate REQUIRED_USE
+		for pkg in required_changes:
+			required_use = pkg.metadata["REQUIRED_USE"]
+			if not required_use:
+				continue
+
+			use = set(_pkg_use_enabled(pkg))
+			for flag, state in required_changes[pkg].items():
+				if state == "enabled":
+					use.add(flag)
+				else:
+					use.discard(flag)
+
+			if not check_required_use(required_use, use, pkg.iuse.is_valid_flag):
+				is_valid_solution = False
+				break
+
+		if is_valid_solution and required_changes:
+			return required_changes
+		else:
+			return None
+
+class _configuration_generator(object):
+	def __init__(self, conflict_pkgs):
+		#reorder packages such that installed packages come last
+		self.conflict_pkgs = []
+		for pkgs in conflict_pkgs:
+			new_pkgs = []
+			for pkg in pkgs:
+				if not pkg.installed:
+					new_pkgs.append(pkg)
+			for pkg in pkgs:
+				if pkg.installed:
+					new_pkgs.append(pkg)
+			self.conflict_pkgs.append(new_pkgs)
+			
+		self.solution_ids = []
+		for pkgs in self.conflict_pkgs:
+			self.solution_ids.append(0)
+		self._is_first_solution = True
+		
+	def get_configuration(self):
+		if self._is_first_solution:
+			self._is_first_solution = False
+		else:
+			if not self._next():
+				return None
+		
+		solution = []
+		for id, pkgs in enumerate(self.conflict_pkgs):
+			solution.append(pkgs[self.solution_ids[id]])
+		return solution
+	
+	def _next(self, id=None):
+		solution_ids = self.solution_ids
+		conflict_pkgs = self.conflict_pkgs
+		
+		if id is None:
+			id = len(solution_ids)-1
+
+		if solution_ids[id] == len(conflict_pkgs[id])-1:
+			if id > 0:
+				return self._next(id=id-1)
+			else:
+				return False
+		else:
+			solution_ids[id] += 1
+			for other_id in range(id+1, len(solution_ids)):
+				solution_ids[other_id] = 0
+			return True
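+
+	#Illustrative enumeration order (hypothetical packages): for two slot
+	#conflicts [A1, A2] and [B1, B2], get_configuration() yields (A1, B1),
+	#(A1, B2), (A2, B1), (A2, B2) -- an odometer over solution_ids, with
+	#installed packages sorted last within each conflict.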
+
+class _solution_candidate_generator(object):
+	class _value_helper(object):
+		def __init__(self, value=None):
+			self.value = value
+		def __eq__(self, other):
+			if isinstance(other, basestring):
+				return self.value == other
+			else:
+				return self.value == other.value
+		def __str__(self):
+			return str(self.value)
+	
+	def __init__(self, all_involved_flags):
+		#A copy of all_involved_flags with all "cond" values
+		#replaced by a _value_helper object.
+		self.all_involved_flags = []
+		
+		#A list tracking references to all used _value_helper
+		#objects.
+		self.conditional_values = []
+		
+		for involved_flags in all_involved_flags:
+			new_involved_flags = {}
+			for flag, state in involved_flags.items():
+				if state in ("enabled", "disabled"):
+					new_involved_flags[flag] = state
+				else:
+					v = self._value_helper("disabled")
+					new_involved_flags[flag] = v
+					self.conditional_values.append(v)
+			self.all_involved_flags.append(new_involved_flags)
+			
+		self._is_first_solution = True
+
+	def get_candidate(self):
+		if self._is_first_solution:
+			self._is_first_solution = False
+		else:
+			if not self._next():
+				return None
+
+		return self.all_involved_flags
+	
+	def _next(self, id=None):
+		values = self.conditional_values
+		
+		if not values:
+			return False
+		
+		if id is None:
+			id = len(values)-1
+
+		if values[id].value == "enabled":
+			if id > 0:
+				return self._next(id=id-1)
+			else:
+				return False
+		else:
+			values[id].value = "enabled"
+			for other_id in range(id+1, len(values)):
+				values[other_id].value = "disabled"
+			return True
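+
+	#Illustrative enumeration (hypothetical): two "cond" flags yield the
+	#candidates (disabled, disabled), (disabled, enabled), (enabled, disabled)
+	#and (enabled, enabled) -- a binary counter over conditional_values.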
+		
+		

diff --git a/portage_with_autodep/pym/_emerge/search.py b/portage_with_autodep/pym/_emerge/search.py
new file mode 100644
index 0000000..35f0412
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/search.py
@@ -0,0 +1,385 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import re
+import portage
+from portage import os
+from portage.output import  bold, bold as white, darkgreen, green, red
+from portage.util import writemsg_stdout
+
+from _emerge.Package import Package
+
+class search(object):
+
+	#
+	# class constants
+	#
+	VERSION_SHORT=1
+	VERSION_RELEASE=2
+
+	#
+	# public interface
+	#
+	def __init__(self, root_config, spinner, searchdesc,
+		verbose, usepkg, usepkgonly):
+		"""Searches the available and installed packages for the supplied search key.
+		The list of available and installed packages is created at object instantiation.
+		This makes successive searches faster."""
+		self.settings = root_config.settings
+		self.vartree = root_config.trees["vartree"]
+		self.spinner = spinner
+		self.verbose = verbose
+		self.searchdesc = searchdesc
+		self.root_config = root_config
+		self.setconfig = root_config.setconfig
+		self.matches = {"pkg" : []}
+		self.mlen = 0
+
+		self._dbs = []
+
+		portdb = root_config.trees["porttree"].dbapi
+		bindb = root_config.trees["bintree"].dbapi
+		vardb = root_config.trees["vartree"].dbapi
+
+		if not usepkgonly and portdb._have_root_eclass_dir:
+			self._dbs.append(portdb)
+
+		if (usepkg or usepkgonly) and bindb.cp_all():
+			self._dbs.append(bindb)
+
+		self._dbs.append(vardb)
+		self._portdb = portdb
+
+	def _spinner_update(self):
+		if self.spinner:
+			self.spinner.update()
+
+	def _cp_all(self):
+		cp_all = set()
+		for db in self._dbs:
+			cp_all.update(db.cp_all())
+		return list(sorted(cp_all))
+
+	def _aux_get(self, *args, **kwargs):
+		for db in self._dbs:
+			try:
+				return db.aux_get(*args, **kwargs)
+			except KeyError:
+				pass
+		raise
+
+	def _findname(self, *args, **kwargs):
+		for db in self._dbs:
+			if db is not self._portdb:
+				# We don't want findname to return anything
+				# unless it's an ebuild in a portage tree.
+				# Otherwise, it's already built and we don't
+				# care about it.
+				continue
+			func = getattr(db, "findname", None)
+			if func:
+				value = func(*args, **kwargs)
+				if value:
+					return value
+		return None
+
+	def _getFetchMap(self, *args, **kwargs):
+		for db in self._dbs:
+			func = getattr(db, "getFetchMap", None)
+			if func:
+				value = func(*args, **kwargs)
+				if value:
+					return value
+		return {}
+
+	def _visible(self, db, cpv, metadata):
+		installed = db is self.vartree.dbapi
+		built = installed or db is not self._portdb
+		pkg_type = "ebuild"
+		if installed:
+			pkg_type = "installed"
+		elif built:
+			pkg_type = "binary"
+		return Package(type_name=pkg_type,
+			root_config=self.root_config,
+			cpv=cpv, built=built, installed=installed,
+			metadata=metadata).visible
+
+	def _xmatch(self, level, atom):
+		"""
+		This method does not expand old-style virtuals because it
+		is restricted to returning matches for a single ${CATEGORY}/${PN}
+		and old-style virual matches unreliable for that when querying
+		multiple package databases. If necessary, old-style virtuals
+		can be performed on atoms prior to calling this method.
+		"""
+		cp = portage.dep_getkey(atom)
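+		#Illustrative usage (hypothetical atom): _xmatch("bestmatch-visible",
+		#"dev-lang/python") returns the highest visible match across all
+		#configured databases, or None if no visible match exists.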
+		if level == "match-all":
+			matches = set()
+			for db in self._dbs:
+				if hasattr(db, "xmatch"):
+					matches.update(db.xmatch(level, atom))
+				else:
+					matches.update(db.match(atom))
+			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
+			db._cpv_sort_ascending(result)
+		elif level == "match-visible":
+			matches = set()
+			for db in self._dbs:
+				if hasattr(db, "xmatch"):
+					matches.update(db.xmatch(level, atom))
+				else:
+					db_keys = list(db._aux_cache_keys)
+					for cpv in db.match(atom):
+						metadata = zip(db_keys,
+							db.aux_get(cpv, db_keys))
+						if not self._visible(db, cpv, metadata):
+							continue
+						matches.add(cpv)
+			result = list(x for x in matches if portage.cpv_getkey(x) == cp)
+			db._cpv_sort_ascending(result)
+		elif level == "bestmatch-visible":
+			result = None
+			for db in self._dbs:
+				if hasattr(db, "xmatch"):
+					cpv = db.xmatch("bestmatch-visible", atom)
+					if not cpv or portage.cpv_getkey(cpv) != cp:
+						continue
+					if not result or cpv == portage.best([cpv, result]):
+						result = cpv
+				else:
+					db_keys = Package.metadata_keys
+					# break out of this loop with highest visible
+					# match, checked in descending order
+					for cpv in reversed(db.match(atom)):
+						if portage.cpv_getkey(cpv) != cp:
+							continue
+						metadata = zip(db_keys,
+							db.aux_get(cpv, db_keys))
+						if not self._visible(db, cpv, metadata):
+							continue
+						if not result or cpv == portage.best([cpv, result]):
+							result = cpv
+						break
+		else:
+			raise NotImplementedError(level)
+		return result
+
+	def execute(self,searchkey):
+		"""Performs the search for the supplied search key"""
+		match_category = 0
+		self.searchkey=searchkey
+		self.packagematches = []
+		if self.searchdesc:
+			self.searchdesc=1
+			self.matches = {"pkg":[], "desc":[], "set":[]}
+		else:
+			self.searchdesc=0
+			self.matches = {"pkg":[], "set":[]}
+		print("Searching...   ", end=' ')
+
+		regexsearch = False
+		if self.searchkey.startswith('%'):
+			regexsearch = True
+			self.searchkey = self.searchkey[1:]
+		if self.searchkey.startswith('@'):
+			match_category = 1
+			self.searchkey = self.searchkey[1:]
+		if regexsearch:
+			self.searchre=re.compile(self.searchkey,re.I)
+		else:
+			self.searchre=re.compile(re.escape(self.searchkey), re.I)
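+		#Illustrative search keys (hypothetical): "gcc" is escaped and matched
+		#literally against package names, "%^gcc$" is treated as a regular
+		#expression, and "@sys-devel/gcc" matches against "category/name".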
+
+		for package in self._cp_all():
+			self._spinner_update()
+
+			if match_category:
+				match_string  = package[:]
+			else:
+				match_string  = package.split("/")[-1]
+
+			masked=0
+			if self.searchre.search(match_string):
+				if not self._xmatch("match-visible", package):
+					masked=1
+				self.matches["pkg"].append([package,masked])
+			elif self.searchdesc: # DESCRIPTION searching
+				full_package = self._xmatch("bestmatch-visible", package)
+				if not full_package:
+					#no match found; we don't want to query description
+					full_package = portage.best(
+						self._xmatch("match-all", package))
+					if not full_package:
+						continue
+					else:
+						masked=1
+				try:
+					full_desc = self._aux_get(
+						full_package, ["DESCRIPTION"])[0]
+				except KeyError:
+					print("emerge: search: aux_get() failed, skipping")
+					continue
+				if self.searchre.search(full_desc):
+					self.matches["desc"].append([full_package,masked])
+
+		self.sdict = self.setconfig.getSets()
+		for setname in self.sdict:
+			self._spinner_update()
+			if match_category:
+				match_string = setname
+			else:
+				match_string = setname.split("/")[-1]
+			
+			if self.searchre.search(match_string):
+				self.matches["set"].append([setname, False])
+			elif self.searchdesc:
+				if self.searchre.search(
+					self.sdict[setname].getMetadata("DESCRIPTION")):
+					self.matches["set"].append([setname, False])
+			
+		self.mlen=0
+		for mtype in self.matches:
+			self.matches[mtype].sort()
+			self.mlen += len(self.matches[mtype])
+
+	def addCP(self, cp):
+		if not self._xmatch("match-all", cp):
+			return
+		masked = 0
+		if not self._xmatch("bestmatch-visible", cp):
+			masked = 1
+		self.matches["pkg"].append([cp, masked])
+		self.mlen += 1
+
+	def output(self):
+		"""Outputs the results of the search."""
+		msg = []
+		msg.append("\b\b  \n[ Results for search key : " + \
+			bold(self.searchkey) + " ]\n")
+		msg.append("[ Applications found : " + \
+			bold(str(self.mlen)) + " ]\n\n")
+		vardb = self.vartree.dbapi
+		for mtype in self.matches:
+			for match,masked in self.matches[mtype]:
+				full_package = None
+				if mtype == "pkg":
+					catpack = match
+					full_package = self._xmatch(
+						"bestmatch-visible", match)
+					if not full_package:
+						#no match found; we don't want to query description
+						masked=1
+						full_package = portage.best(
+							self._xmatch("match-all",match))
+				elif mtype == "desc":
+					full_package = match
+					match        = portage.cpv_getkey(match)
+				elif mtype == "set":
+					msg.append(green("*") + "  " + bold(match) + "\n")
+					if self.verbose:
+						msg.append("      " + darkgreen("Description:") + \
+							"   " + \
+							self.sdict[match].getMetadata("DESCRIPTION") \
+							+ "\n\n")
+				if full_package:
+					try:
+						desc, homepage, license = self._aux_get(
+							full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
+					except KeyError:
+						msg.append("emerge: search: aux_get() failed, skipping\n")
+						continue
+					if masked:
+						msg.append(green("*") + "  " + \
+							white(match) + " " + red("[ Masked ]") + "\n")
+					else:
+						msg.append(green("*") + "  " + bold(match) + "\n")
+					myversion = self.getVersion(full_package, search.VERSION_RELEASE)
+
+					mysum = [0,0]
+					file_size_str = None
+					mycat = match.split("/")[0]
+					mypkg = match.split("/")[1]
+					mycpv = match + "-" + myversion
+					myebuild = self._findname(mycpv)
+					if myebuild:
+						pkgdir = os.path.dirname(myebuild)
+						from portage import manifest
+						mf = manifest.Manifest(
+							pkgdir, self.settings["DISTDIR"])
+						try:
+							uri_map = self._getFetchMap(mycpv)
+						except portage.exception.InvalidDependString as e:
+							file_size_str = "Unknown (%s)" % (e,)
+							del e
+						else:
+							try:
+								mysum[0] = mf.getDistfilesSize(uri_map)
+							except KeyError as e:
+								file_size_str = "Unknown (missing " + \
+									"digest for %s)" % (e,)
+								del e
+
+					available = False
+					for db in self._dbs:
+						if db is not vardb and \
+							db.cpv_exists(mycpv):
+							available = True
+							if not myebuild and hasattr(db, "bintree"):
+								myebuild = db.bintree.getname(mycpv)
+								try:
+									mysum[0] = os.stat(myebuild).st_size
+								except OSError:
+									myebuild = None
+							break
+
+					if myebuild and file_size_str is None:
+						mystr = str(mysum[0] // 1024)
+						mycount = len(mystr)
+						while mycount > 3:
+							mycount -= 3
+							mystr = mystr[:mycount] + "," + mystr[mycount:]
+						file_size_str = mystr + " kB"
+
+					if self.verbose:
+						if available:
+							msg.append("      %s %s\n" % \
+								(darkgreen("Latest version available:"),
+								myversion))
+						msg.append("      %s\n" % \
+							self.getInstallationStatus(mycat+'/'+mypkg))
+						if myebuild:
+							msg.append("      %s %s\n" % \
+								(darkgreen("Size of files:"), file_size_str))
+						msg.append("      " + darkgreen("Homepage:") + \
+							"      " + homepage + "\n")
+						msg.append("      " + darkgreen("Description:") \
+							+ "   " + desc + "\n")
+						msg.append("      " + darkgreen("License:") + \
+							"       " + license + "\n\n")
+		writemsg_stdout(''.join(msg), noiselevel=-1)
+	#
+	# private interface
+	#
+	def getInstallationStatus(self,package):
+		installed_package = self.vartree.dep_bestmatch(package)
+		result = ""
+		version = self.getVersion(installed_package,search.VERSION_RELEASE)
+		if len(version) > 0:
+			result = darkgreen("Latest version installed:")+" "+version
+		else:
+			result = darkgreen("Latest version installed:")+" [ Not Installed ]"
+		return result
+
+	def getVersion(self,full_package,detail):
+		if len(full_package) > 1:
+			package_parts = portage.catpkgsplit(full_package)
+			if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
+				result = package_parts[2]+ "-" + package_parts[3]
+			else:
+				result = package_parts[2]
+		else:
+			result = ""
+		return result
+

diff --git a/portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.py b/portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.py
new file mode 100644
index 0000000..a230b31
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.py
@@ -0,0 +1,35 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import textwrap
+import portage
+from portage import os
+from portage.util import writemsg_level
+
+def show_invalid_depstring_notice(parent_node, depstring, error_msg):
+
+	msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
+		"\n\n%s\n\n%s\n\n" % (error_msg, parent_node)
+	p_key = parent_node.cpv
+	p_status = parent_node.operation
+	msg = []
+	if p_status == "nomerge":
+		category, pf = portage.catsplit(p_key)
+		pkg_location = os.path.join(parent_node.root_config.settings['EROOT'], portage.VDB_PATH, category, pf)
+		msg.append("Portage is unable to process the dependencies of the ")
+		msg.append("'%s' package. " % p_key)
+		msg.append("In order to correct this problem, the package ")
+		msg.append("should be uninstalled, reinstalled, or upgraded. ")
+		msg.append("As a temporary workaround, the --nodeps option can ")
+		msg.append("be used to ignore all dependencies.  For reference, ")
+		msg.append("the problematic dependencies can be found in the ")
+		msg.append("*DEPEND files located in '%s/'." % pkg_location)
+	else:
+		msg.append("This package can not be installed. ")
+		msg.append("Please notify the '%s' package maintainer " % p_key)
+		msg.append("about this problem.")
+
+	msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
+	writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
+

diff --git a/portage_with_autodep/pym/_emerge/stdout_spinner.py b/portage_with_autodep/pym/_emerge/stdout_spinner.py
new file mode 100644
index 0000000..5ad31f0
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/stdout_spinner.py
@@ -0,0 +1,83 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import platform
+import sys
+import time
+
+from portage.output import darkgreen, green
+
+class stdout_spinner(object):
+	scroll_msgs = [
+		"Gentoo Rocks ("+platform.system()+")",
+		"Thank you for using Gentoo. :)",
+		"Are you actually trying to read this?",
+		"How many times have you stared at this?",
+		"We are generating the cache right now",
+		"You are paying too much attention.",
+		"A theory is better than its explanation.",
+		"Phasers locked on target, Captain.",
+		"Thrashing is just virtual crashing.",
+		"To be is to program.",
+		"Real Users hate Real Programmers.",
+		"When all else fails, read the instructions.",
+		"Functionality breeds Contempt.",
+		"The future lies ahead.",
+		"3.1415926535897932384626433832795028841971694",
+		"Sometimes insanity is the only alternative.",
+		"Inaccuracy saves a world of explanation.",
+	]
+
+	twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
+
+	def __init__(self):
+		self.spinpos = 0
+		self.update = self.update_twirl
+		self.scroll_sequence = self.scroll_msgs[
+			int(time.time() * 100) % len(self.scroll_msgs)]
+		self.last_update = 0
+		self.min_display_latency = 0.05
+
+	def _return_early(self):
+		"""
+		Flushing output to the tty too frequently wastes CPU time. Therefore,
+		each update* method should return without doing any output when this
+		method returns True.
+		"""
+		cur_time = time.time()
+		if cur_time - self.last_update < self.min_display_latency:
+			return True
+		self.last_update = cur_time
+		return False
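+
+	#Illustrative effect: with min_display_latency = 0.05, at most about
+	#20 terminal updates per second are flushed, no matter how often the
+	#update* methods are called.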
+
+	def update_basic(self):
+		self.spinpos = (self.spinpos + 1) % 500
+		if self._return_early():
+			return
+		if (self.spinpos % 100) == 0:
+			if self.spinpos == 0:
+				sys.stdout.write(". ")
+			else:
+				sys.stdout.write(".")
+		sys.stdout.flush()
+
+	def update_scroll(self):
+		if self._return_early():
+			return
+		if self.spinpos >= len(self.scroll_sequence):
+			sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
+				len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
+		else:
+			sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
+		sys.stdout.flush()
+		self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
+
+	def update_twirl(self):
+		self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
+		if self._return_early():
+			return
+		sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
+		sys.stdout.flush()
+
+	def update_quiet(self):
+		return

diff --git a/portage_with_autodep/pym/_emerge/sync/__init__.py b/portage_with_autodep/pym/_emerge/sync/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/sync/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.py b/portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.py
new file mode 100644
index 0000000..5e6009c
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.py
@@ -0,0 +1,29 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+def getaddrinfo_validate(addrinfos):
+	"""
+	Validate structures returned from getaddrinfo(),
+	since they may be corrupt, especially when python
+	has IPv6 support disabled (bug #340899).
+	"""
+	valid_addrinfos = []
+	for addrinfo in addrinfos:
+		try:
+			if len(addrinfo) != 5:
+				continue
+			if len(addrinfo[4]) < 2:
+				continue
+			if not isinstance(addrinfo[4][0], basestring):
+				continue
+		except TypeError:
+			continue
+
+		valid_addrinfos.append(addrinfo)
+
+	return valid_addrinfos
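+
+#Illustrative usage (hypothetical call site): passing the result of
+#socket.getaddrinfo("rsync.gentoo.org", 873) keeps only well-formed
+#5-tuples and discards e.g. truncated entries or a non-string host.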

diff --git a/portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.py b/portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.py
new file mode 100644
index 0000000..9b35aed
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.py
@@ -0,0 +1,98 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import locale
+import logging
+import time
+
+from portage import os
+from portage.exception import PortageException
+from portage.localization import _
+from portage.output import EOutput
+from portage.util import grabfile, writemsg_level
+
+def have_english_locale():
+	lang, enc = locale.getdefaultlocale()
+	if lang is not None:
+		lang = lang.lower()
+		lang = lang.split('_', 1)[0]
+	return lang is None or lang in ('c', 'en')
+
+def whenago(seconds):
+	sec = int(seconds)
+	mins = 0
+	days = 0
+	hrs = 0
+	years = 0
+	out = []
+
+	if sec > 60:
+		mins = sec // 60
+		sec = sec % 60
+	if mins > 60:
+		hrs = mins // 60
+		mins = mins % 60
+	if hrs > 24:
+		days = hrs // 24
+		hrs = hrs % 24
+	if days > 365:
+		years = days // 365
+		days = days % 365
+
+	if years:
+		out.append("%dy " % years)
+	if days:
+		out.append("%dd " % days)
+	if hrs:
+		out.append("%dh " % hrs)
+	if mins:
+		out.append("%dm " % mins)
+	if sec:
+		out.append("%ds " % sec)
+
+	return "".join(out).strip()
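+
+#Illustrative example: whenago(90061) returns "1d 1h 1m 1s"; the
+#conversion assumes 365-day years and ignores leap adjustments.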
+
+def old_tree_timestamp_warn(portdir, settings):
+	unixtime = time.time()
+	default_warnsync = 30
+
+	timestamp_file = os.path.join(portdir, "metadata/timestamp.x")
+	try:
+		lastsync = grabfile(timestamp_file)
+	except PortageException:
+		return False
+
+	if not lastsync:
+		return False
+
+	lastsync = lastsync[0].split()
+	if not lastsync:
+		return False
+
+	try:
+		lastsync = int(lastsync[0])
+	except ValueError:
+		return False
+
+	var_name = 'PORTAGE_SYNC_STALE'
+	try:
+		warnsync = float(settings.get(var_name, default_warnsync))
+	except ValueError:
+		writemsg_level("!!! %s contains non-numeric value: %s\n" % \
+			(var_name, settings[var_name]),
+			level=logging.ERROR, noiselevel=-1)
+		return False
+
+	if warnsync <= 0:
+		return False
+
+	if (unixtime - 86400 * warnsync) > lastsync:
+		out = EOutput()
+		if have_english_locale():
+			out.ewarn("Last emerge --sync was %s ago." % \
+				whenago(unixtime - lastsync))
+		else:
+			out.ewarn(_("Last emerge --sync was %s.") % \
+				time.strftime('%c', time.localtime(lastsync)))
+		return True
+	return False
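+
+#Illustrative behavior: with the default PORTAGE_SYNC_STALE of 30, the
+#warning is emitted once metadata/timestamp.x is older than 30 days;
+#setting PORTAGE_SYNC_STALE=0 disables the check entirely.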

diff --git a/portage_with_autodep/pym/_emerge/unmerge.py b/portage_with_autodep/pym/_emerge/unmerge.py
new file mode 100644
index 0000000..3db3a8b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/unmerge.py
@@ -0,0 +1,578 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+import sys
+import textwrap
+import portage
+from portage import os
+from portage.dbapi._expand_new_virt import expand_new_virt
+from portage.output import bold, colorize, darkgreen, green
+from portage._sets import SETPREFIX
+from portage._sets.base import EditablePackageSet
+from portage.util import cmp_sort_key
+
+from _emerge.emergelog import emergelog
+from _emerge.Package import Package
+from _emerge.UninstallFailure import UninstallFailure
+from _emerge.userquery import userquery
+from _emerge.countdown import countdown
+
+def _unmerge_display(root_config, myopts, unmerge_action,
+	unmerge_files, clean_delay=1, ordered=0,
+	writemsg_level=portage.util.writemsg_level):
+	"""
+	Returns a tuple of (returncode, pkgmap) where returncode is
+	os.EX_OK if no errors occur, and 1 otherwise.
+	"""
+
+	quiet = "--quiet" in myopts
+	settings = root_config.settings
+	sets = root_config.sets
+	vartree = root_config.trees["vartree"]
+	candidate_catpkgs=[]
+	global_unmerge=0
+	out = portage.output.EOutput()
+	pkg_cache = {}
+	db_keys = list(vartree.dbapi._aux_cache_keys)
+
+	def _pkg(cpv):
+		pkg = pkg_cache.get(cpv)
+		if pkg is None:
+			pkg = Package(built=True, cpv=cpv, installed=True,
+				metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
+				operation="uninstall", root_config=root_config,
+				type_name="installed")
+			pkg_cache[cpv] = pkg
+		return pkg
+
+	vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
+	try:
+		# At least the parent needs to exist for the lock file.
+		portage.util.ensure_dirs(vdb_path)
+	except portage.exception.PortageException:
+		pass
+	vdb_lock = None
+	try:
+		if os.access(vdb_path, os.W_OK):
+			vartree.dbapi.lock()
+			vdb_lock = True
+
+		realsyslist = []
+		sys_virt_map = {}
+		for x in sets["system"].getAtoms():
+			for atom in expand_new_virt(vartree.dbapi, x):
+				if not atom.blocker:
+					realsyslist.append(atom)
+					if atom.cp != x.cp:
+						sys_virt_map[atom.cp] = x.cp
+
+		syslist = []
+		for x in realsyslist:
+			mycp = x.cp
+			# Since Gentoo stopped using old-style virtuals in
+			# 2011, typically it's possible to avoid getvirtuals()
+			# calls entirely. It will not be triggered here by
+			# new-style virtuals since those are expanded to
+			# non-virtual atoms above by expand_new_virt().
+			if mycp.startswith("virtual/") and \
+				mycp in settings.getvirtuals():
+				providers = []
+				for provider in settings.getvirtuals()[mycp]:
+					if vartree.dbapi.match(provider):
+						providers.append(provider)
+				if len(providers) == 1:
+					syslist.extend(providers)
+			else:
+				syslist.append(mycp)
+		syslist = frozenset(syslist)
+	
+		if not unmerge_files:
+			if unmerge_action == "unmerge":
+				print()
+				print(bold("emerge unmerge") + " can only be used with specific package names")
+				print()
+				return 1, {}
+			else:
+				global_unmerge = 1
+	
+		localtree = vartree
+		# process all arguments and add all
+		# valid db entries to candidate_catpkgs
+		if global_unmerge:
+			if not unmerge_files:
+				candidate_catpkgs.extend(vartree.dbapi.cp_all())
+		else:
+			#we've got command-line arguments
+			if not unmerge_files:
+				print("\nNo packages to unmerge have been provided.\n")
+				return 1, {}
+			for x in unmerge_files:
+				arg_parts = x.split('/')
+				if x[0] not in [".","/"] and \
+					arg_parts[-1][-7:] != ".ebuild":
+					#possible cat/pkg or dep; treat as such
+					candidate_catpkgs.append(x)
+				elif unmerge_action in ["prune","clean"]:
+					print("\n!!! Prune and clean do not accept individual" + \
+						" ebuilds as arguments;\n    skipping.\n")
+					continue
+				else:
+					# it appears that the user is specifying an installed
+					# ebuild and we're in "unmerge" mode, so it's ok.
+					if not os.path.exists(x):
+						print("\n!!! The path '"+x+"' doesn't exist.\n")
+						return 1, {}
+	
+					absx   = os.path.abspath(x)
+					sp_absx = absx.split("/")
+					if sp_absx[-1][-7:] == ".ebuild":
+						del sp_absx[-1]
+						absx = "/".join(sp_absx)
+	
+					sp_absx_len = len(sp_absx)
+	
+					vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
+	
+					sp_vdb     = vdb_path.split("/")
+					sp_vdb_len = len(sp_vdb)
+	
+					if not os.path.exists(absx+"/CONTENTS"):
+						print("!!! Not a valid db dir: "+str(absx))
+						return 1, {}
+	
+					if sp_absx_len <= sp_vdb_len:
+						# The Path is shorter... so it can't be inside the vdb.
+						print(sp_absx)
+						print(absx)
+						print("\n!!!",x,"cannot be inside "+ \
+							vdb_path+"; aborting.\n")
+						return 1, {}
+	
+					for idx in range(0,sp_vdb_len):
+						if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
+							print(sp_absx)
+							print(absx)
+							print("\n!!!", x, "is not inside "+\
+								vdb_path+"; aborting.\n")
+							return 1, {}
+	
+					print("="+"/".join(sp_absx[sp_vdb_len:]))
+					candidate_catpkgs.append(
+						"="+"/".join(sp_absx[sp_vdb_len:]))
+	
+		newline=""
+		if (not "--quiet" in myopts):
+			newline="\n"
+		if settings["ROOT"] != "/":
+			writemsg_level(darkgreen(newline+ \
+				">>> Using system located in ROOT tree %s\n" % \
+				settings["ROOT"]))
+
+		if (("--pretend" in myopts) or ("--ask" in myopts)) and \
+			not ("--quiet" in myopts):
+			writemsg_level(darkgreen(newline+\
+				">>> These are the packages that would be unmerged:\n"))
+
+		# Preservation of order is required for --depclean and --prune so
+		# that dependencies are respected. Use all_selected to eliminate
+		# duplicate packages since the same package may be selected by
+		# multiple atoms.
+		pkgmap = []
+		all_selected = set()
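+		#Illustrative shape (hypothetical): after processing, pkgmap is
+		#[{"protected": set([...]), "selected": set(["sys-apps/foo-1"]),
+		#  "omitted": set([...])}], with one dict per argument atom.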
+		for x in candidate_catpkgs:
+			# cycle through all our candidate deps and determine
+			# what will and will not get unmerged
+			try:
+				mymatch = vartree.dbapi.match(x)
+			except portage.exception.AmbiguousPackageName as errpkgs:
+				print("\n\n!!! The short ebuild name \"" + \
+					x + "\" is ambiguous.  Please specify")
+				print("!!! one of the following fully-qualified " + \
+					"ebuild names instead:\n")
+				for i in errpkgs[0]:
+					print("    " + green(i))
+				print()
+				sys.exit(1)
+	
+			if not mymatch and x[0] not in "<>=~":
+				mymatch = localtree.dep_match(x)
+			if not mymatch:
+				portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
+					(x.replace("null/", ""), unmerge_action), noiselevel=-1)
+				continue
+
+			pkgmap.append(
+				{"protected": set(), "selected": set(), "omitted": set()})
+			mykey = len(pkgmap) - 1
+			if unmerge_action=="unmerge":
+					for y in mymatch:
+						if y not in all_selected:
+							pkgmap[mykey]["selected"].add(y)
+							all_selected.add(y)
+			elif unmerge_action == "prune":
+				if len(mymatch) == 1:
+					continue
+				best_version = mymatch[0]
+				best_slot = vartree.getslot(best_version)
+				best_counter = vartree.dbapi.cpv_counter(best_version)
+				for mypkg in mymatch[1:]:
+					myslot = vartree.getslot(mypkg)
+					mycounter = vartree.dbapi.cpv_counter(mypkg)
+					if (myslot == best_slot and mycounter > best_counter) or \
+						mypkg == portage.best([mypkg, best_version]):
+						if myslot == best_slot:
+							if mycounter < best_counter:
+								# On slot collision, keep the one with the
+								# highest counter since it is the most
+								# recently installed.
+								continue
+						best_version = mypkg
+						best_slot = myslot
+						best_counter = mycounter
+				pkgmap[mykey]["protected"].add(best_version)
+				pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
+					if mypkg != best_version and mypkg not in all_selected)
+				all_selected.update(pkgmap[mykey]["selected"])
+			else:
+				# unmerge_action == "clean"
+				slotmap={}
+				for mypkg in mymatch:
+					if unmerge_action == "clean":
+						myslot = localtree.getslot(mypkg)
+					else:
+						# since we're pruning, we don't care about slots
+						# and put all the pkgs in together
+						myslot = 0
+					if myslot not in slotmap:
+						slotmap[myslot] = {}
+					slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
+
+				for mypkg in vartree.dbapi.cp_list(
+					portage.cpv_getkey(mymatch[0])):
+					myslot = vartree.getslot(mypkg)
+					if myslot not in slotmap:
+						slotmap[myslot] = {}
+					slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
+
+				for myslot in slotmap:
+					counterkeys = list(slotmap[myslot])
+					if not counterkeys:
+						continue
+					counterkeys.sort()
+					pkgmap[mykey]["protected"].add(
+						slotmap[myslot][counterkeys[-1]])
+					del counterkeys[-1]
+
+					for counter in counterkeys[:]:
+						mypkg = slotmap[myslot][counter]
+						if mypkg not in mymatch:
+							counterkeys.remove(counter)
+							pkgmap[mykey]["protected"].add(
+								slotmap[myslot][counter])
+
+					# Be pretty and get them in order of merge:
+					for ckey in counterkeys:
+						mypkg = slotmap[myslot][ckey]
+						if mypkg not in all_selected:
+							pkgmap[mykey]["selected"].add(mypkg)
+							all_selected.add(mypkg)
+					# ok, now the last-merged package
+					# is protected, and the rest are selected
+		numselected = len(all_selected)
+		if global_unmerge and not numselected:
+			portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
+			return 1, {}
+	
+		if not numselected:
+			portage.writemsg_stdout(
+				"\n>>> No packages selected for removal by " + \
+				unmerge_action + "\n")
+			return 1, {}
+	finally:
+		if vdb_lock:
+			vartree.dbapi.flush_cache()
+			vartree.dbapi.unlock()
+
+	# generate a list of package sets that are directly or indirectly listed in "selected",
+	# as there is no persistent list of "installed" sets
+	installed_sets = ["selected"]
+	stop = False
+	pos = 0
+	while not stop:
+		stop = True
+		pos = len(installed_sets)
+		for s in installed_sets[pos - 1:]:
+			if s not in sets:
+				continue
+			candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
+			if candidates:
+				stop = False
+				installed_sets += candidates
+	installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
+	del stop, pos
+
+	# We don't want to unmerge packages that are still referenced by
+	# user-editable package sets reachable from "world", since they would
+	# be re-merged on the next update of "world" or the relevant sets.
+	unknown_sets = set()
+	for cp in range(len(pkgmap)):
+		for cpv in pkgmap[cp]["selected"].copy():
+			try:
+				pkg = _pkg(cpv)
+			except KeyError:
+				# It could have been uninstalled
+				# by a concurrent process.
+				continue
+
+			if unmerge_action != "clean" and root_config.root == "/":
+				skip_pkg = False
+				if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
+					msg = ("Not unmerging package %s since there is no valid reason "
+						"for Portage to unmerge itself.") % (pkg.cpv,)
+					skip_pkg = True
+				elif vartree.dbapi._dblink(cpv).isowner(portage._python_interpreter):
+					msg = ("Not unmerging package %s since there is no valid reason "
+						"for Portage to unmerge currently used Python interpreter.") % (pkg.cpv,)
+					skip_pkg = True
+				if skip_pkg:
+					for line in textwrap.wrap(msg, 75):
+						out.eerror(line)
+					# adjust pkgmap so the display output is correct
+					pkgmap[cp]["selected"].remove(cpv)
+					all_selected.remove(cpv)
+					pkgmap[cp]["protected"].add(cpv)
+					continue
+
+			parents = []
+			for s in installed_sets:
+				# Skip sets that the user requested to unmerge, and skip the
+				# world/selected set, since the package will be removed from
+				# that set later on.
+				if s in root_config.setconfig.active or s == "selected":
+					continue
+
+				if s not in sets:
+					if s in unknown_sets:
+						continue
+					unknown_sets.add(s)
+					out = portage.output.EOutput()
+					out.eerror(("Unknown set '@%s' in %s%s") % \
+						(s, root_config.settings['EROOT'], portage.const.WORLD_SETS_FILE))
+					continue
+
+				# only check instances of EditablePackageSet as other classes are generally used for
+				# special purposes and can be ignored here (and are usually generated dynamically, so the
+				# user can't do much about them anyway)
+				if isinstance(sets[s], EditablePackageSet):
+
+					# This is derived from a snippet of code in the
+					# depgraph._iter_atoms_for_pkg() method.
+					for atom in sets[s].iterAtomsForPackage(pkg):
+						inst_matches = vartree.dbapi.match(atom)
+						inst_matches.reverse() # descending order
+						higher_slot = None
+						for inst_cpv in inst_matches:
+							try:
+								inst_pkg = _pkg(inst_cpv)
+							except KeyError:
+								# It could have been uninstalled
+								# by a concurrent process.
+								continue
+
+							if inst_pkg.cp != atom.cp:
+								continue
+							if pkg >= inst_pkg:
+								# This is descending order, and we're not
+								# interested in any versions <= pkg given.
+								break
+							if pkg.slot_atom != inst_pkg.slot_atom:
+								higher_slot = inst_pkg
+								break
+						if higher_slot is None:
+							parents.append(s)
+							break
+			if parents:
+				print(colorize("WARN", "Package %s is going to be unmerged," % cpv))
+				print(colorize("WARN", "but still listed in the following package sets:"))
+				print("    %s\n" % ", ".join(parents))
+
+	del installed_sets
+
+	numselected = len(all_selected)
+	if not numselected:
+		writemsg_level(
+			"\n>>> No packages selected for removal by " + \
+			unmerge_action + "\n")
+		return 1, {}
+
+	# Unmerge order only matters in some cases
+	if not ordered:
+		unordered = {}
+		for d in pkgmap:
+			selected = d["selected"]
+			if not selected:
+				continue
+			cp = portage.cpv_getkey(next(iter(selected)))
+			cp_dict = unordered.get(cp)
+			if cp_dict is None:
+				cp_dict = {}
+				unordered[cp] = cp_dict
+				for k in d:
+					cp_dict[k] = set()
+			for k, v in d.items():
+				cp_dict[k].update(v)
+		pkgmap = [unordered[cp] for cp in sorted(unordered)]
+
+	for x in range(len(pkgmap)):
+		selected = pkgmap[x]["selected"]
+		if not selected:
+			continue
+		for mytype, mylist in pkgmap[x].items():
+			if mytype == "selected":
+				continue
+			mylist.difference_update(all_selected)
+		cp = portage.cpv_getkey(next(iter(selected)))
+		for y in localtree.dep_match(cp):
+			if y not in pkgmap[x]["omitted"] and \
+				y not in pkgmap[x]["selected"] and \
+				y not in pkgmap[x]["protected"] and \
+				y not in all_selected:
+				pkgmap[x]["omitted"].add(y)
+		if global_unmerge and not pkgmap[x]["selected"]:
+			# Avoid cluttering the preview output with packages
+			# that aren't actually being unmerged.
+			continue
+		if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
+			virt_cp = sys_virt_map.get(cp)
+			if virt_cp is None:
+				cp_info = "'%s'" % (cp,)
+			else:
+				cp_info = "'%s' (%s)" % (cp, virt_cp)
+			writemsg_level(colorize("BAD","\n\n!!! " + \
+				"%s is part of your system profile.\n" % (cp_info,)),
+				level=logging.WARNING, noiselevel=-1)
+			writemsg_level(colorize("WARN","!!! Unmerging it may " + \
+				"be damaging to your system.\n\n"),
+				level=logging.WARNING, noiselevel=-1)
+			if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
+				countdown(int(settings["EMERGE_WARNING_DELAY"]),
+					colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
+		if not quiet:
+			writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
+		else:
+			writemsg_level(bold(cp) + ": ", noiselevel=-1)
+		for mytype in ["selected","protected","omitted"]:
+			if not quiet:
+				writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
+			if pkgmap[x][mytype]:
+				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
+				sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
+				for pn, ver, rev in sorted_pkgs:
+					if rev == "r0":
+						myversion = ver
+					else:
+						myversion = ver + "-" + rev
+					if mytype == "selected":
+						writemsg_level(
+							colorize("UNMERGE_WARN", myversion + " "),
+							noiselevel=-1)
+					else:
+						writemsg_level(
+							colorize("GOOD", myversion + " "), noiselevel=-1)
+			else:
+				writemsg_level("none ", noiselevel=-1)
+			if not quiet:
+				writemsg_level("\n", noiselevel=-1)
+		if quiet:
+			writemsg_level("\n", noiselevel=-1)
+
+	writemsg_level("\nAll selected packages: %s\n" % " ".join(all_selected), noiselevel=-1)
+
+	writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
+		" packages are slated for removal.\n")
+	writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
+		" and " + colorize("GOOD", "'omitted'") + \
+		" packages will not be removed.\n\n")
+
+	return os.EX_OK, pkgmap
+
+def unmerge(root_config, myopts, unmerge_action,
+	unmerge_files, ldpath_mtimes, autoclean=0,
+	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
+	scheduler=None, writemsg_level=portage.util.writemsg_level):
+	"""
+	Returns 1 if successful, otherwise 0.
+	"""
+
+	if clean_world:
+		clean_world = myopts.get('--deselect') != 'n'
+
+	rval, pkgmap = _unmerge_display(root_config, myopts,
+		unmerge_action, unmerge_files,
+		clean_delay=clean_delay, ordered=ordered,
+		writemsg_level=writemsg_level)
+
+	if rval != os.EX_OK:
+		return 0
+
+	enter_invalid = '--ask-enter-invalid' in myopts
+	vartree = root_config.trees["vartree"]
+	sets = root_config.sets
+	settings = root_config.settings
+	mysettings = portage.config(clone=settings)
+	xterm_titles = "notitles" not in settings.features
+
+	if "--pretend" in myopts:
+		# We're done; just return.
+		return 0
+	if "--ask" in myopts:
+		if userquery("Would you like to unmerge these packages?",
+			enter_invalid) == "No":
+			# enter pretend mode for correct formatting of results
+			myopts["--pretend"] = True
+			print()
+			print("Quitting.")
+			print()
+			return 0
+	# The real unmerging begins after a short delay...
+	if clean_delay and not autoclean:
+		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
+
+	for x in range(len(pkgmap)):
+		for y in pkgmap[x]["selected"]:
+			writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
+			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
+			mysplit = y.split("/")
+			#unmerge...
+			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
+				mysettings, unmerge_action not in ["clean","prune"],
+				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
+				scheduler=scheduler)
+
+			if retval != os.EX_OK:
+				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
+				if raise_on_error:
+					raise UninstallFailure(retval)
+				sys.exit(retval)
+			else:
+				if clean_world and hasattr(sets["selected"], "cleanPackage")\
+						and hasattr(sets["selected"], "lock"):
+					sets["selected"].lock()
+					if hasattr(sets["selected"], "load"):
+						sets["selected"].load()
+					sets["selected"].cleanPackage(vartree.dbapi, y)
+					sets["selected"].unlock()
+				emergelog(xterm_titles, " >>> unmerge success: "+y)
+
+	if clean_world and hasattr(sets["selected"], "remove")\
+			and hasattr(sets["selected"], "lock"):
+		sets["selected"].lock()
+		# load is called inside remove()
+		for s in root_config.setconfig.active:
+			sets["selected"].remove(SETPREFIX + s)
+		sets["selected"].unlock()
+
+	return 1
+

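For context, _unmerge_display() returns a pair (rval, pkgmap) where pkgmap is a list of {"protected", "selected", "omitted"} sets per requested atom, and unmerge() above drives the actual removal. A minimal sketch of a caller, assuming this file is installed as _emerge/unmerge.py as in mainline portage; root_config, myopts and mtimedb stand in for objects normally supplied by the emerge action handlers:

    from _emerge.unmerge import unmerge

    # Prune redundant installed versions of a single package.
    # ldpath_mtimes is conventionally mtimedb["ldpath"] in emerge.
    ok = unmerge(root_config, myopts, "prune", ["app-editors/vim"],
        mtimedb["ldpath"], ordered=True)
    # unmerge() returns 1 on success and 0 otherwise (see its docstring).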
diff --git a/portage_with_autodep/pym/_emerge/userquery.py b/portage_with_autodep/pym/_emerge/userquery.py
new file mode 100644
index 0000000..e7ed400
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/userquery.py
@@ -0,0 +1,55 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+
+from portage.output import bold, create_color_func
+
+def userquery(prompt, enter_invalid, responses=None, colours=None):
+	"""Displays a prompt and a set of responses, then waits for a response
+	which is checked against the responses and the first to match is
+	returned. An empty response will match the first value in responses,
+	unless enter_invalid is True. The input buffer is *not* cleared prior
+	to the prompt!
+
+	prompt: a String.
+	responses: a List of Strings.
+	colours: a List of Functions taking and returning a String, used to
+	process the responses for display. Typically these will be functions
+	like red() but could be e.g. lambda x: "DisplayString".
+	If responses is omitted, defaults to ["Yes", "No"], [green, red].
+	If only colours is omitted, defaults to [bold, ...].
+
+	Returns a member of the List responses. (If called without optional
+	arguments, returns "Yes" or "No".)
+	KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
+	printed."""
+	if responses is None:
+		responses = ["Yes", "No"]
+		colours = [
+			create_color_func("PROMPT_CHOICE_DEFAULT"),
+			create_color_func("PROMPT_CHOICE_OTHER")
+		]
+	elif colours is None:
+		colours = [bold]
+	# Cycle the colour list so it is at least as long as responses.
+	colours = (colours * len(responses))[:len(responses)]
+	print(bold(prompt), end=' ')
+	try:
+		while True:
+			prompt_str = "[" + "/".join(colours[i](responses[i])
+				for i in range(len(responses))) + "] "
+			if sys.hexversion >= 0x3000000:
+				response = input(prompt_str)
+			else:
+				response = raw_input(prompt_str)
+			if response or not enter_invalid:
+				for key in responses:
+					# An empty response will match the
+					# first value in responses.
+					if response.upper() == key[:len(response)].upper():
+						return key
+			print("Sorry, response '%s' not understood." % response, end=' ')
+	except (EOFError, KeyboardInterrupt):
+		print("Interrupted.")
+		sys.exit(1)
+

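The userquery() helper above is self-contained, so a usage sketch follows directly from its docstring (the prompt text is illustrative):

    import sys
    from _emerge.userquery import userquery

    # Defaults to responses=["Yes", "No"]; an empty input matches "Yes"
    # unless enter_invalid is True.
    if userquery("Would you like to continue?", False) == "No":
        sys.exit(1)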
diff --git a/portage_with_autodep/pym/portage/__init__.py b/portage_with_autodep/pym/portage/__init__.py
new file mode 100644
index 0000000..2a2eb99
--- /dev/null
+++ b/portage_with_autodep/pym/portage/__init__.py
@@ -0,0 +1,610 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+VERSION="HEAD"
+
+# ===========================================================================
+# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
+# ===========================================================================
+
+try:
+	import sys
+	import errno
+	if not hasattr(errno, 'ESTALE'):
+		# ESTALE may not be defined on some systems, such as interix.
+		errno.ESTALE = -1
+	import re
+	import types
+
+	# Try the commands module first, since this allows us to eliminate
+	# the subprocess module from the baseline imports under python2.
+	try:
+		from commands import getstatusoutput as subprocess_getstatusoutput
+	except ImportError:
+		from subprocess import getstatusoutput as subprocess_getstatusoutput
+
+	import platform
+
+	# Temporarily delete these imports, to ensure that only the
+	# wrapped versions are imported by portage internals.
+	import os
+	del os
+	import shutil
+	del shutil
+
+except ImportError as e:
+	sys.stderr.write("\n\n")
+	sys.stderr.write("!!! Failed to complete python imports. These are internal python\n")
+	sys.stderr.write("!!! modules, so a failure here indicates a problem with python itself,\n")
+	sys.stderr.write("!!! and portage is not able to continue processing.\n\n")
+
+	sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
+	sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
+	sys.stderr.write("    " + str(e) + "\n\n")
+	raise
+
+try:
+
+	import portage.proxy.lazyimport
+	import portage.proxy as proxy
+	proxy.lazyimport.lazyimport(globals(),
+		'portage.cache.cache_errors:CacheError',
+		'portage.checksum',
+		'portage.checksum:perform_checksum,perform_md5,prelink_capable',
+		'portage.cvstree',
+		'portage.data',
+		'portage.data:lchown,ostype,portage_gid,portage_uid,secpass,' + \
+			'uid,userland,userpriv_groups,wheelgid',
+		'portage.dbapi',
+		'portage.dbapi.bintree:bindbapi,binarytree',
+		'portage.dbapi.cpv_expand:cpv_expand',
+		'portage.dbapi.dep_expand:dep_expand',
+		'portage.dbapi.porttree:close_portdbapi_caches,FetchlistDict,' + \
+			'portagetree,portdbapi',
+		'portage.dbapi.vartree:dblink,merge,unmerge,vardbapi,vartree',
+		'portage.dbapi.virtual:fakedbapi',
+		'portage.dep',
+		'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
+			'flatten,get_operator,isjustname,isspecific,isvalidatom,' + \
+			'match_from_list,match_to_list',
+		'portage.dep.dep_check:dep_check,dep_eval,dep_wordreduce,dep_zapdeps',
+		'portage.eclass_cache',
+		'portage.exception',
+		'portage.getbinpkg',
+		'portage.locks',
+		'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
+		'portage.mail',
+		'portage.manifest:Manifest',
+		'portage.output',
+		'portage.output:bold,colorize',
+		'portage.package.ebuild.doebuild:doebuild,' + \
+			'doebuild_environment,spawn,spawnebuild',
+		'portage.package.ebuild.config:autouse,best_from_dict,' + \
+			'check_config_instance,config',
+		'portage.package.ebuild.deprecated_profile_check:' + \
+			'deprecated_profile_check',
+		'portage.package.ebuild.digestcheck:digestcheck',
+		'portage.package.ebuild.digestgen:digestgen',
+		'portage.package.ebuild.fetch:fetch',
+		'portage.package.ebuild.getmaskingreason:getmaskingreason',
+		'portage.package.ebuild.getmaskingstatus:getmaskingstatus',
+		'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+		'portage.process',
+		'portage.process:atexit_register,run_exitfuncs',
+		'portage.update:dep_transform,fixdbentries,grab_updates,' + \
+			'parse_updates,update_config_files,update_dbentries,' + \
+			'update_dbentry',
+		'portage.util',
+		'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
+			'apply_recursive_permissions,dump_traceback,getconfig,' + \
+			'grabdict,grabdict_package,grabfile,grabfile_package,' + \
+			'map_dictlist_vals,new_protect_filename,normalize_path,' + \
+			'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
+			'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
+			'writemsg_stdout,write_atomic',
+		'portage.util.digraph:digraph',
+		'portage.util.env_update:env_update',
+		'portage.util.ExtractKernelVersion:ExtractKernelVersion',
+		'portage.util.listdir:cacheddir,listdir',
+		'portage.util.movefile:movefile',
+		'portage.util.mtimedb:MtimeDB',
+		'portage.versions',
+		'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,' + \
+			'cpv_getkey@getCPFromCPV,endversion_keys,' + \
+			'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
+		'portage.xpak',
+		'time',
+	)
+
+	try:
+		from collections import OrderedDict
+	except ImportError:
+		proxy.lazyimport.lazyimport(globals(),
+			'portage.cache.mappings:OrderedDict')
+
+	import portage.const
+	from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
+		USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
+		PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
+		EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
+		MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
+		DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
+		INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
+		INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
+
+except ImportError as e:
+	sys.stderr.write("\n\n")
+	sys.stderr.write("!!! Failed to complete portage imports. These are internal portage\n")
+	sys.stderr.write("!!! modules, so a failure here indicates a problem with your portage\n")
+	sys.stderr.write("!!! installation. Please try a rescue portage located in the portage\n")
+	sys.stderr.write("!!! tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
+	sys.stderr.write("!!! There is a README.RESCUE file that details the steps required\n")
+	sys.stderr.write("!!! to perform a recovery of portage.\n")
+	sys.stderr.write("    " + str(e) + "\n\n")
+	raise
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+	long = int
+
+# Assume utf_8 fs encoding everywhere except in merge code, where the
+# user's locale is respected.
+_encodings = {
+	'content'                : 'utf_8',
+	'fs'                     : 'utf_8',
+	'merge'                  : sys.getfilesystemencoding(),
+	'repo.content'           : 'utf_8',
+	'stdio'                  : 'utf_8',
+}
+
+# This can happen if python is built with USE=build (stage 1).
+if _encodings['merge'] is None:
+	_encodings['merge'] = 'ascii'
+
+if sys.hexversion >= 0x3000000:
+	def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
+		if isinstance(s, str):
+			s = s.encode(encoding, errors)
+		return s
+
+	def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
+		if isinstance(s, bytes):
+			s = str(s, encoding=encoding, errors=errors)
+		return s
+else:
+	def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
+		if isinstance(s, unicode):
+			s = s.encode(encoding, errors)
+		return s
+
+	def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
+		if isinstance(s, bytes):
+			s = unicode(s, encoding=encoding, errors=errors)
+		return s
+
+class _unicode_func_wrapper(object):
+	"""
+	Wraps a function, converting arguments from unicode to bytes and
+	return values from bytes to unicode. Calls raise UnicodeEncodeError
+	if an argument fails to encode with the required encoding. Return
+	values that are single strings are decoded with errors='replace',
+	while return values that are lists or tuples of strings are decoded
+	with errors='strict', and elements that fail to decode are omitted
+	from the returned sequence.
+	"""
+	__slots__ = ('_func', '_encoding')
+
+	def __init__(self, func, encoding=_encodings['fs']):
+		self._func = func
+		self._encoding = encoding
+
+	def __call__(self, *args, **kwargs):
+
+		encoding = self._encoding
+		wrapped_args = [_unicode_encode(x, encoding=encoding, errors='strict')
+			for x in args]
+		if kwargs:
+			wrapped_kwargs = dict(
+				(k, _unicode_encode(v, encoding=encoding, errors='strict'))
+				for k, v in kwargs.items())
+		else:
+			wrapped_kwargs = {}
+
+		rval = self._func(*wrapped_args, **wrapped_kwargs)
+
+		# Don't use isinstance() since we don't want to convert subclasses
+		# of tuple such as posix.stat_result in python-3.2.
+		if rval.__class__ in (list, tuple):
+			decoded_rval = []
+			for x in rval:
+				try:
+					x = _unicode_decode(x, encoding=encoding, errors='strict')
+				except UnicodeDecodeError:
+					pass
+				else:
+					decoded_rval.append(x)
+
+			if isinstance(rval, tuple):
+				rval = tuple(decoded_rval)
+			else:
+				rval = decoded_rval
+		else:
+			rval = _unicode_decode(rval, encoding=encoding, errors='replace')
+
+		return rval
+
+class _unicode_module_wrapper(object):
+	"""
+	Wraps a module and wraps all functions with _unicode_func_wrapper.
+	"""
+	__slots__ = ('_mod', '_encoding', '_overrides', '_cache')
+
+	def __init__(self, mod, encoding=_encodings['fs'], overrides=None, cache=True):
+		object.__setattr__(self, '_mod', mod)
+		object.__setattr__(self, '_encoding', encoding)
+		object.__setattr__(self, '_overrides', overrides)
+		if cache:
+			cache = {}
+		else:
+			cache = None
+		object.__setattr__(self, '_cache', cache)
+
+	def __getattribute__(self, attr):
+		cache = object.__getattribute__(self, '_cache')
+		if cache is not None:
+			result = cache.get(attr)
+			if result is not None:
+				return result
+		result = getattr(object.__getattribute__(self, '_mod'), attr)
+		encoding = object.__getattribute__(self, '_encoding')
+		overrides = object.__getattribute__(self, '_overrides')
+		override = None
+		if overrides is not None:
+			override = overrides.get(id(result))
+		if override is not None:
+			result = override
+		elif isinstance(result, type):
+			pass
+		elif type(result) is types.ModuleType:
+			result = _unicode_module_wrapper(result,
+				encoding=encoding, overrides=overrides)
+		elif hasattr(result, '__call__'):
+			result = _unicode_func_wrapper(result, encoding=encoding)
+		if cache is not None:
+			cache[attr] = result
+		return result
+
+import os as _os
+_os_overrides = {
+	id(_os.fdopen)        : _os.fdopen,
+	id(_os.mkfifo)        : _os.mkfifo,
+	id(_os.popen)         : _os.popen,
+	id(_os.read)          : _os.read,
+	id(_os.system)        : _os.system,
+}
+
+if hasattr(_os, 'statvfs'):
+	_os_overrides[id(_os.statvfs)] = _os.statvfs
+
+os = _unicode_module_wrapper(_os, overrides=_os_overrides,
+	encoding=_encodings['fs'])
+_os_merge = _unicode_module_wrapper(_os,
+	encoding=_encodings['merge'], overrides=_os_overrides)
+
+import shutil as _shutil
+shutil = _unicode_module_wrapper(_shutil, encoding=_encodings['fs'])
+
+# Imports below this point rely on the above unicode wrapper definitions.
+try:
+	__import__('selinux')
+	# Importing the submodule binds it as the _selinux attribute of the
+	# portage package; since this code executes in the package's own
+	# namespace, the bare name _selinux is usable below.
+	import portage._selinux
+	selinux = _unicode_module_wrapper(_selinux,
+		encoding=_encodings['fs'])
+	_selinux_merge = _unicode_module_wrapper(_selinux,
+		encoding=_encodings['merge'])
+except (ImportError, OSError) as e:
+	if isinstance(e, OSError):
+		sys.stderr.write("!!! SELinux not loaded: %s\n" % str(e))
+	del e
+	_selinux = None
+	selinux = None
+	_selinux_merge = None
+
+# ===========================================================================
+# END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
+# ===========================================================================
+
+_python_interpreter = os.path.realpath(sys.executable)
+_bin_path = PORTAGE_BIN_PATH
+_pym_path = PORTAGE_PYM_PATH
+
+def _shell_quote(s):
+	"""
+	Quote a string in double-quotes and use backslashes to
+	escape any backslashes, double-quotes, dollar signs, or
+	backquotes in the string.
+	"""
+	for letter in "\\\"$`":
+		if letter in s:
+			s = s.replace(letter, "\\" + letter)
+	return "\"%s\"" % s
+
+bsd_chflags = None
+
+if platform.system() in ('FreeBSD',):
+
+	class bsd_chflags(object):
+
+		@classmethod
+		def chflags(cls, path, flags, opts=""):
+			cmd = 'chflags %s %o %s' % (opts, flags, _shell_quote(path))
+			status, output = subprocess_getstatusoutput(cmd)
+			if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+				return
+			# Try to generate an ENOENT error if appropriate.
+			if 'h' in opts:
+				_os_merge.lstat(path)
+			else:
+				_os_merge.stat(path)
+			# Make sure the binary exists.
+			if not portage.process.find_binary('chflags'):
+				raise portage.exception.CommandNotFound('chflags')
+			# Now we're not sure exactly why it failed or what
+			# the real errno was, so just report EPERM.
+			e = OSError(errno.EPERM, output)
+			e.errno = errno.EPERM
+			e.filename = path
+			e.message = output
+			raise e
+
+		@classmethod
+		def lchflags(cls, path, flags):
+			return cls.chflags(path, flags, opts='-h')
+
+def load_mod(name):
+	modname = ".".join(name.split(".")[:-1])
+	mod = __import__(modname)
+	components = name.split('.')
+	for comp in components[1:]:
+		mod = getattr(mod, comp)
+	return mod
+
+def getcwd():
+	"""Like os.getcwd(), but if the current directory no longer exists,
+	chdir to "/" and return that instead."""
+	try:
+		return os.getcwd()
+	except OSError: # current directory doesn't exist
+		os.chdir("/")
+		return "/"
+getcwd()
+
+def abssymlink(symlink):
+	"""Read a symlink, resolve a relative target against the link's own
+	directory, and return the normalized absolute path."""
+	mylink = os.readlink(symlink)
+	if mylink[0] != '/':
+		mydir = os.path.dirname(symlink)
+		mylink = mydir + "/" + mylink
+	return os.path.normpath(mylink)
+
+_doebuild_manifest_exempt_depend = 0
+
+_testing_eapis = frozenset(["4-python"])
+_deprecated_eapis = frozenset(["4_pre1", "3_pre2", "3_pre1"])
+
+def _eapi_is_deprecated(eapi):
+	return eapi in _deprecated_eapis
+
+def eapi_is_supported(eapi):
+	if not isinstance(eapi, basestring):
+		# Only call str() when necessary since with python2 it
+		# can trigger UnicodeEncodeError if EAPI is corrupt.
+		eapi = str(eapi)
+	eapi = eapi.strip()
+
+	if _eapi_is_deprecated(eapi):
+		return True
+
+	if eapi in _testing_eapis:
+		return True
+
+	try:
+		eapi = int(eapi)
+	except ValueError:
+		eapi = -1
+	if eapi < 0:
+		return False
+	return eapi <= portage.const.EAPI
+
+# Generally, it's best not to assume that cache entries for unsupported EAPIs
+# can be validated. However, the current package manager specification does not
+# guarantee that the EAPI can be parsed without sourcing the ebuild, so
+# it's too costly to discard existing cache entries for unsupported EAPIs.
+# Therefore, by default, assume that cache entries for unsupported EAPIs can be
+# validated. If FEATURES=parse-eapi-* is enabled, this assumption is discarded
+# since the EAPI can be determined without incurring the cost of sourcing
+# the ebuild.
+_validate_cache_for_unsupported_eapis = True
+
+_parse_eapi_ebuild_head_re = re.compile(r'^EAPI=[\'"]?([^\'"#]*)')
+_parse_eapi_ebuild_head_max_lines = 30
+
+def _parse_eapi_ebuild_head(f):
+	count = 0
+	for line in f:
+		m = _parse_eapi_ebuild_head_re.match(line)
+		if m is not None:
+			return m.group(1).strip()
+		count += 1
+		if count >= _parse_eapi_ebuild_head_max_lines:
+			break
+	return '0'
+
+def _movefile(src, dest, **kwargs):
+	"""Calls movefile and raises a PortageException if an error occurs."""
+	if movefile(src, dest, **kwargs) is None:
+		raise portage.exception.PortageException(
+			"mv '%s' '%s'" % (src, dest))
+
+auxdbkeys = (
+	'DEPEND',     'RDEPEND',        'SLOT',      'SRC_URI',
+	'RESTRICT',   'HOMEPAGE',       'LICENSE',   'DESCRIPTION',
+	'KEYWORDS',   'INHERITED',      'IUSE',      'REQUIRED_USE',
+	'PDEPEND',    'PROVIDE',        'EAPI',
+	'PROPERTIES', 'DEFINED_PHASES', 'UNUSED_05', 'UNUSED_04',
+	'UNUSED_03',  'UNUSED_02',      'UNUSED_01',
+)
+auxdbkeylen = len(auxdbkeys)
+
+def portageexit():
+	if data.secpass > 1 and os.environ.get("SANDBOX_ON") != "1":
+		close_portdbapi_caches()
+
+def create_trees(config_root=None, target_root=None, trees=None):
+	if trees is None:
+		trees = {}
+	else:
+		# clean up any existing portdbapi instances
+		for myroot in trees:
+			portdb = trees[myroot]["porttree"].dbapi
+			portdb.close_caches()
+			portdbapi.portdbapi_instances.remove(portdb)
+			del trees[myroot]["porttree"], myroot, portdb
+
+	settings = config(config_root=config_root, target_root=target_root,
+		config_incrementals=portage.const.INCREMENTALS)
+	settings.lock()
+
+	myroots = [(settings["ROOT"], settings)]
+	if settings["ROOT"] != "/":
+
+		# When ROOT != "/" we only want overrides from the calling
+		# environment to apply to the config that's associated
+		# with ROOT != "/", so pass a nearly empty dict for the env parameter.
+		clean_env = {}
+		for k in ('PATH', 'PORTAGE_GRPNAME', 'PORTAGE_USERNAME',
+			'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM',
+			'ftp_proxy', 'http_proxy', 'no_proxy'):
+			v = settings.get(k)
+			if v is not None:
+				clean_env[k] = v
+		settings = config(config_root=None, target_root="/", env=clean_env)
+		settings.lock()
+		myroots.append((settings["ROOT"], settings))
+
+	for myroot, mysettings in myroots:
+		trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
+		trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals)
+		trees[myroot].addLazySingleton(
+			"vartree", vartree, myroot, categories=mysettings.categories,
+				settings=mysettings)
+		trees[myroot].addLazySingleton("porttree",
+			portagetree, myroot, settings=mysettings)
+		trees[myroot].addLazySingleton("bintree",
+			binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
+	return trees
+
+if VERSION == 'HEAD':
+	class _LazyVersion(proxy.objectproxy.ObjectProxy):
+		def _get_target(self):
+			global VERSION
+			if VERSION is not self:
+				return VERSION
+			if os.path.isdir(os.path.join(PORTAGE_BASE_PATH, '.git')):
+				status, output = subprocess_getstatusoutput((
+					"cd %s ; git describe --tags || exit $? ; " + \
+					"if [ -n \"`git diff-index --name-only --diff-filter=M HEAD`\" ] ; " + \
+					"then echo modified ; git rev-list --format=%%ct -n 1 HEAD ; fi ; " + \
+					"exit 0") % _shell_quote(PORTAGE_BASE_PATH))
+				if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+					output_lines = output.splitlines()
+					if output_lines:
+						version_split = output_lines[0].split('-')
+						if version_split:
+							VERSION = version_split[0].lstrip('v')
+							patchlevel = False
+							if len(version_split) > 1:
+								patchlevel = True
+								VERSION = "%s_p%s" %(VERSION, version_split[1])
+							if len(output_lines) > 1 and output_lines[1] == 'modified':
+								head_timestamp = None
+								if len(output_lines) > 3:
+									try:
+										head_timestamp = long(output_lines[3])
+									except ValueError:
+										pass
+								timestamp = long(time.time())
+								if head_timestamp is not None and timestamp > head_timestamp:
+									timestamp = timestamp - head_timestamp
+								if not patchlevel:
+									VERSION = "%s_p0" % (VERSION,)
+								VERSION = "%s_p%d" % (VERSION, timestamp)
+							return VERSION
+			VERSION = 'HEAD'
+			return VERSION
+	VERSION = _LazyVersion()
+
+if "_legacy_globals_constructed" in globals():
+	# The module has been reloaded, so perform any relevant cleanup
+	# and prevent memory leaks.
+	if "db" in _legacy_globals_constructed:
+		try:
+			db
+		except NameError:
+			pass
+		else:
+			if isinstance(db, dict) and db:
+				for _x in db.values():
+					try:
+						if "porttree" in _x.lazy_items:
+							continue
+					except (AttributeError, TypeError):
+						continue
+					try:
+						_x = _x["porttree"].dbapi
+					except (AttributeError, KeyError):
+						continue
+					if not isinstance(_x, portdbapi):
+						continue
+					_x.close_caches()
+					try:
+						portdbapi.portdbapi_instances.remove(_x)
+					except ValueError:
+						pass
+				del _x
+
+class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
+
+	__slots__ = ('_name',)
+
+	def __init__(self, name):
+		proxy.objectproxy.ObjectProxy.__init__(self)
+		object.__setattr__(self, '_name', name)
+
+	def _get_target(self):
+		name = object.__getattribute__(self, '_name')
+		from portage._legacy_globals import _get_legacy_global
+		return _get_legacy_global(name)
+
+_legacy_global_var_names = ("archlist", "db", "features",
+	"groups", "mtimedb", "mtimedbfile", "pkglines",
+	"portdb", "profiledir", "root", "selinux_enabled",
+	"settings", "thirdpartymirrors")
+
+for k in _legacy_global_var_names:
+	globals()[k] = _LegacyGlobalProxy(k)
+del k
+
+_legacy_globals_constructed = set()
+
+def _disable_legacy_globals():
+	"""
+	This deletes the ObjectProxy instances that are used
+	for lazy initialization of legacy global variables.
+	The purpose of deleting them is to prevent new code
+	from referencing these deprecated variables.
+	"""
+	global _legacy_global_var_names
+	for k in _legacy_global_var_names:
+		globals().pop(k, None)

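As a quick check of the _shell_quote() helper defined above: each of the four shell metacharacters (backslash, double-quote, dollar sign, backquote) is escaped before the whole string is wrapped in double quotes:

    from portage import _shell_quote

    print(_shell_quote('a "quoted" $PATH `cmd`'))
    # prints: "a \"quoted\" \$PATH \`cmd\`"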
diff --git a/portage_with_autodep/pym/portage/_global_updates.py b/portage_with_autodep/pym/portage/_global_updates.py
new file mode 100644
index 0000000..868d1ee
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_global_updates.py
@@ -0,0 +1,250 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import stat
+
+from portage import best, os
+from portage.const import WORLD_FILE
+from portage.data import secpass
+from portage.exception import DirectoryNotFound
+from portage.localization import _
+from portage.output import bold, colorize
+from portage.update import grab_updates, parse_updates, update_config_files, update_dbentry
+from portage.util import grabfile, shlex_split, \
+	writemsg, writemsg_stdout, write_atomic
+
+def _global_updates(trees, prev_mtimes, quiet=False):
+	"""
+	Perform new global updates if they exist in 'profiles/updates/'
+	subdirectories of all active repositories (PORTDIR + PORTDIR_OVERLAY).
+	This simply returns if ROOT != "/" (when len(trees) != 1). If ROOT != "/"
+	then the user should instead use emaint --fix movebin and/or moveinst.
+
+	@param trees: A dictionary containing portage trees.
+	@type trees: dict
+	@param prev_mtimes: A dictionary containing mtimes of files located in
+		$PORTDIR/profiles/updates/.
+	@type prev_mtimes: dict
+	@rtype: bool
+	@return: True if update commands have been performed, otherwise False
+	"""
+	# only do this if we're root and not running repoman/ebuild digest
+
+	retupd = False
+	if secpass < 2 or \
+		"SANDBOX_ACTIVE" in os.environ or \
+		len(trees) != 1:
+		return retupd
+	root = "/"
+	mysettings = trees[root]["vartree"].settings
+	portdb = trees[root]["porttree"].dbapi
+	vardb = trees[root]["vartree"].dbapi
+	bindb = trees[root]["bintree"].dbapi
+	if not os.access(bindb.bintree.pkgdir, os.W_OK):
+		bindb = None
+	else:
+		# Call binarytree.populate(), since we want to make sure it's
+		# only populated with local packages here (getbinpkgs=0).
+		bindb.bintree.populate()
+
+	world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
+	world_list = grabfile(world_file)
+	world_modified = False
+	world_warnings = set()
+	updpath_map = {}
+	# Maps repo_name to list of updates. If a given repo has no updates
+	# directory, it will be omitted. If a repo has an updates directory
+	# but none need to be applied (according to timestamp logic), the
+	# value in the dict will be an empty list.
+	repo_map = {}
+	timestamps = {}
+
+	update_notice_printed = False
+	for repo_name in portdb.getRepositories():
+		repo = portdb.getRepositoryPath(repo_name)
+		updpath = os.path.join(repo, "profiles", "updates")
+		if not os.path.isdir(updpath):
+			continue
+
+		if updpath in updpath_map:
+			repo_map[repo_name] = updpath_map[updpath]
+			continue
+
+		try:
+			if mysettings.get("PORTAGE_CALLER") == "fixpackages":
+				update_data = grab_updates(updpath)
+			else:
+				update_data = grab_updates(updpath, prev_mtimes)
+		except DirectoryNotFound:
+			continue
+		myupd = []
+		updpath_map[updpath] = myupd
+		repo_map[repo_name] = myupd
+		if len(update_data) > 0:
+			for mykey, mystat, mycontent in update_data:
+				if not update_notice_printed:
+					update_notice_printed = True
+					writemsg_stdout("\n")
+					if quiet:
+						writemsg_stdout(colorize("GOOD",
+							_("Performing Global Updates\n")))
+						writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
+					else:
+						writemsg_stdout(colorize("GOOD",
+							_("Performing Global Updates:\n")))
+						writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
+						writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
+							"%s='/var/db update'  %s='/var/db move'\n"
+							"  %s='/var/db SLOT move'  %s='binary move'  "
+							"%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
+							(bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
+				valid_updates, errors = parse_updates(mycontent)
+				myupd.extend(valid_updates)
+				if not quiet:
+					writemsg_stdout(bold(mykey))
+					writemsg_stdout(len(valid_updates) * "." + "\n")
+				if len(errors) == 0:
+					# Update our internal mtime since we
+					# processed all of our directives.
+					timestamps[mykey] = mystat[stat.ST_MTIME]
+				else:
+					for msg in errors:
+						writemsg("%s\n" % msg, noiselevel=-1)
+			if myupd:
+				retupd = True
+
+	master_repo = portdb.getRepositoryName(portdb.porttree_root)
+	if master_repo in repo_map:
+		repo_map['DEFAULT'] = repo_map[master_repo]
+
+	for repo_name, myupd in repo_map.items():
+		if repo_name == 'DEFAULT':
+			continue
+		if not myupd:
+			continue
+
+		def repo_match(repository):
+			return repository == repo_name or \
+				(repo_name == master_repo and repository not in repo_map)
+
+		def _world_repo_match(atoma, atomb):
+			"""
+			Check whether to perform a world change from atoma to atomb.
+			If the best vardb match for atoma comes from the same repository
+			as the update file, allow that. Additionally, if portdb can
+			still find a match for the old atom name, warn about it.
+			"""
+			matches = vardb.match(atoma)
+			if not matches:
+				matches = vardb.match(atomb)
+			if matches and \
+				repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
+				if portdb.match(atoma):
+					world_warnings.add((atoma, atomb))
+				return True
+			else:
+				return False
+
+		for update_cmd in myupd:
+			for pos, atom in enumerate(world_list):
+				new_atom = update_dbentry(update_cmd, atom)
+				if atom != new_atom:
+					if _world_repo_match(atom, new_atom):
+						world_list[pos] = new_atom
+						world_modified = True
+
+		for update_cmd in myupd:
+			if update_cmd[0] == "move":
+				moves = vardb.move_ent(update_cmd, repo_match=repo_match)
+				if moves:
+					writemsg_stdout(moves * "@")
+				if bindb:
+					moves = bindb.move_ent(update_cmd, repo_match=repo_match)
+					if moves:
+						writemsg_stdout(moves * "%")
+			elif update_cmd[0] == "slotmove":
+				moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
+				if moves:
+					writemsg_stdout(moves * "s")
+				if bindb:
+					moves = bindb.move_slot_ent(update_cmd, repo_match=repo_match)
+					if moves:
+						writemsg_stdout(moves * "S")
+
+	if world_modified:
+		world_list.sort()
+		write_atomic(world_file,
+			"".join("%s\n" % (x,) for x in world_list))
+		if world_warnings:
+			# XXX: print warning that we've updated world entries
+			# and the old name still matches something (from an overlay)?
+			pass
+
+	if retupd:
+
+		def _config_repo_match(repo_name, atoma, atomb):
+			"""
+			Check whether to perform a config-file change from atoma to
+			atomb. If the best vardb match for atoma comes from the same
+			repository as the update file, allow that.
+			"""
+			matches = vardb.match(atoma)
+			if not matches:
+				matches = vardb.match(atomb)
+				if not matches:
+					return False
+			repository = vardb.aux_get(best(matches), ['repository'])[0]
+			return repository == repo_name or \
+				(repo_name == master_repo and repository not in repo_map)
+
+		update_config_files(root,
+			shlex_split(mysettings.get("CONFIG_PROTECT", "")),
+			shlex_split(mysettings.get("CONFIG_PROTECT_MASK", "")),
+			repo_map, match_callback=_config_repo_match)
+
+		# The above global updates proceed quickly, so they
+		# are considered a single mtimedb transaction.
+		if timestamps:
+			# We do not update the mtime in the mtimedb
+			# until after _all_ of the above updates have
+			# been processed because the mtimedb will
+			# automatically commit when killed by ctrl C.
+			for mykey, mtime in timestamps.items():
+				prev_mtimes[mykey] = mtime
+
+		do_upgrade_packagesmessage = False
+		# We need to do the brute force updates for these now.
+		if mysettings.get("PORTAGE_CALLER") == "fixpackages" or \
+			"fixpackages" in mysettings.features:
+			def onUpdate(maxval, curval):
+				if curval > 0:
+					writemsg_stdout("#")
+			if quiet:
+				onUpdate = None
+			vardb.update_ents(repo_map, onUpdate=onUpdate)
+			if bindb:
+				def onUpdate(maxval, curval):
+					if curval > 0:
+						writemsg_stdout("*")
+				if quiet:
+					onUpdate = None
+				bindb.update_ents(repo_map, onUpdate=onUpdate)
+		else:
+			do_upgrade_packagesmessage = True
+
+		# Update progress above is indicated by characters written to
+		# stdout, so print a couple of newlines here to separate that
+		# progress output from what follows.
+		print()
+		print()
+
+		if do_upgrade_packagesmessage and bindb and \
+			bindb.cpv_all():
+			writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
+			writemsg_stdout(bold(_("Note: This can take a very long time.")))
+			writemsg_stdout("\n")
+
+	return retupd

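The update_cmd lists consumed by move_ent() and move_slot_ent() above originate in profiles/updates files, which parse_updates() turns into lists whose first element is "move" or "slotmove". For reference, the on-disk format looks like this (package names and slots are illustrative):

    move net-im/oldname net-im/newname
    slotmove =dev-lang/ocaml-3.12* 0 3.12

This is also why the progress legend printed above distinguishes "@" and "%" for moves from "s" and "S" for slot moves.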
diff --git a/portage_with_autodep/pym/portage/_legacy_globals.py b/portage_with_autodep/pym/portage/_legacy_globals.py
new file mode 100644
index 0000000..615591a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_legacy_globals.py
@@ -0,0 +1,81 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.const import CACHE_PATH, PROFILE_PATH
+
+def _get_legacy_global(name):
+	constructed = portage._legacy_globals_constructed
+	if name in constructed:
+		return getattr(portage, name)
+
+	if name == 'portdb':
+		portage.portdb = portage.db[portage.root]["porttree"].dbapi
+		constructed.add(name)
+		return getattr(portage, name)
+
+	elif name in ('mtimedb', 'mtimedbfile'):
+		portage.mtimedbfile = os.path.join(portage.settings['EROOT'],
+			CACHE_PATH, "mtimedb")
+		constructed.add('mtimedbfile')
+		portage.mtimedb = portage.MtimeDB(portage.mtimedbfile)
+		constructed.add('mtimedb')
+		return getattr(portage, name)
+
+	# Portage needs to ensure a sane umask for the files it creates.
+	os.umask(0o22)
+
+	kwargs = {}
+	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
+		kwargs[k] = os.environ.get(envvar)
+
+	portage._initializing_globals = True
+	portage.db = portage.create_trees(**kwargs)
+	constructed.add('db')
+	del portage._initializing_globals
+
+	settings = portage.db["/"]["vartree"].settings
+
+	for root in portage.db:
+		if root != "/":
+			settings = portage.db[root]["vartree"].settings
+			break
+
+	portage.output._init(config_root=settings['PORTAGE_CONFIGROOT'])
+
+	portage.settings = settings
+	constructed.add('settings')
+
+	portage.root = root
+	constructed.add('root')
+
+	# COMPATIBILITY
+	# These attributes should not be used within
+	# Portage under any circumstances.
+
+	portage.archlist = settings.archlist()
+	constructed.add('archlist')
+
+	portage.features = settings.features
+	constructed.add('features')
+
+	portage.groups = settings["ACCEPT_KEYWORDS"].split()
+	constructed.add('groups')
+
+	portage.pkglines = settings.packages
+	constructed.add('pkglines')
+
+	portage.selinux_enabled = settings.selinux_enabled()
+	constructed.add('selinux_enabled')
+
+	portage.thirdpartymirrors = settings.thirdpartymirrors()
+	constructed.add('thirdpartymirrors')
+
+	profiledir = os.path.join(settings["PORTAGE_CONFIGROOT"], PROFILE_PATH)
+	if not os.path.isdir(profiledir):
+		profiledir = None
+	portage.profiledir = profiledir
+	constructed.add('profiledir')
+
+	return getattr(portage, name)

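The proxies installed by portage/__init__.py resolve through _get_legacy_global() on first attribute access, so legacy consumers keep working unchanged. For example:

    import portage

    # The first access triggers _get_legacy_global('settings'), which
    # constructs portage.db via create_trees() as shown above.
    print(portage.settings["PORTAGE_CONFIGROOT"])
    print(portage.root)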
diff --git a/portage_with_autodep/pym/portage/_selinux.py b/portage_with_autodep/pym/portage/_selinux.py
new file mode 100644
index 0000000..9470978
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_selinux.py
@@ -0,0 +1,129 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Don't use the unicode-wrapped os and shutil modules here since
+# the whole _selinux module itself will be wrapped.
+import os
+import shutil
+
+import portage
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.localization import _
+portage.proxy.lazyimport.lazyimport(globals(),
+	'selinux')
+
+def copyfile(src, dest):
+	src = _unicode_encode(src, encoding=_encodings['fs'], errors='strict')
+	dest = _unicode_encode(dest, encoding=_encodings['fs'], errors='strict')
+	(rc, ctx) = selinux.lgetfilecon(src)
+	if rc < 0:
+		src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+		raise OSError(_("copyfile: Failed getting context of \"%s\".") % src)
+
+	setfscreate(ctx)
+	try:
+		shutil.copyfile(src, dest)
+	finally:
+		setfscreate()
+
+def getcontext():
+	(rc, ctx) = selinux.getcon()
+	if rc < 0:
+		raise OSError(_("getcontext: Failed getting current process context."))
+
+	return ctx
+
+def is_selinux_enabled():
+	return selinux.is_selinux_enabled()
+
+def mkdir(target, refdir):
+	target = _unicode_encode(target, encoding=_encodings['fs'], errors='strict')
+	refdir = _unicode_encode(refdir, encoding=_encodings['fs'], errors='strict')
+	(rc, ctx) = selinux.getfilecon(refdir)
+	if rc < 0:
+		refdir = _unicode_decode(refdir, encoding=_encodings['fs'],
+			errors='replace')
+		raise OSError(
+			_("mkdir: Failed getting context of reference directory \"%s\".") \
+			% refdir)
+
+	setfscreate(ctx)
+	try:
+		os.mkdir(target)
+	finally:
+		setfscreate()
+
+def rename(src, dest):
+	src = _unicode_encode(src, encoding=_encodings['fs'], errors='strict')
+	dest = _unicode_encode(dest, encoding=_encodings['fs'], errors='strict')
+	(rc, ctx) = selinux.lgetfilecon(src)
+	if rc < 0:
+		src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+		raise OSError(_("rename: Failed getting context of \"%s\".") % src)
+
+	setfscreate(ctx)
+	try:
+		os.rename(src,dest)
+	finally:
+		setfscreate()
+
+def settype(newtype):
+	ret = getcontext().split(":")
+	ret[2] = newtype
+	return ":".join(ret)
+
+def setexec(ctx="\n"):
+	ctx = _unicode_encode(ctx, encoding=_encodings['content'], errors='strict')
+	if selinux.setexeccon(ctx) < 0:
+		ctx = _unicode_decode(ctx, encoding=_encodings['content'],
+			errors='replace')
+		if selinux.security_getenforce() == 1:
+			raise OSError(_("Failed setting exec() context \"%s\".") % ctx)
+		else:
+			portage.writemsg("!!! " + \
+				_("Failed setting exec() context \"%s\".") % ctx, \
+				noiselevel=-1)
+
+def setfscreate(ctx="\n"):
+	ctx = _unicode_encode(ctx,
+		encoding=_encodings['content'], errors='strict')
+	if selinux.setfscreatecon(ctx) < 0:
+		ctx = _unicode_decode(ctx,
+			encoding=_encodings['content'], errors='replace')
+		raise OSError(
+			_("setfscreate: Failed setting fs create context \"%s\".") % ctx)
+
+def spawn_wrapper(spawn_func, selinux_type):
+
+	selinux_type = _unicode_encode(selinux_type,
+		encoding=_encodings['content'], errors='strict')
+
+	def wrapper_func(*args, **kwargs):
+		con = settype(selinux_type)
+		setexec(con)
+		try:
+			return spawn_func(*args, **kwargs)
+		finally:
+			setexec()
+
+	return wrapper_func
+
+def symlink(target, link, reflnk):
+	target = _unicode_encode(target, encoding=_encodings['fs'], errors='strict')
+	link = _unicode_encode(link, encoding=_encodings['fs'], errors='strict')
+	reflnk = _unicode_encode(reflnk, encoding=_encodings['fs'], errors='strict')
+	(rc, ctx) = selinux.lgetfilecon(reflnk)
+	if rc < 0:
+		reflnk = _unicode_decode(reflnk, encoding=_encodings['fs'],
+			errors='replace')
+		raise OSError(
+			_("symlink: Failed getting context of reference symlink \"%s\".") \
+			% reflnk)
+
+	setfscreate(ctx)
+	try:
+		os.symlink(target, link)
+	finally:
+		setfscreate()

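spawn_wrapper() above is how portage arranges for helpers to run in a specific SELinux domain: it swaps the exec() context around the wrapped spawn function and resets it afterwards. A minimal sketch, assuming an SELinux-enabled host; the domain type and the spawn stand-in are illustrative:

    import portage._selinux as _selinux

    def my_spawn(mycommand, **kwargs):
        # Stand-in for portage.process.spawn.
        print("would spawn:", mycommand)

    wrapped = _selinux.spawn_wrapper(my_spawn, "portage_sandbox_t")
    # Runs my_spawn() with setexec(settype("portage_sandbox_t")) in
    # effect, then restores the default exec context via setexec().
    wrapped(["/bin/true"])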
diff --git a/portage_with_autodep/pym/portage/_sets/__init__.py b/portage_with_autodep/pym/portage/_sets/__init__.py
new file mode 100644
index 0000000..1b3484e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/__init__.py
@@ -0,0 +1,245 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+__all__ = ["SETPREFIX", "get_boolean", "SetConfigError",
+	"SetConfig", "load_default_config"]
+
+try:
+	from configparser import SafeConfigParser, NoOptionError
+except ImportError:
+	from ConfigParser import SafeConfigParser, NoOptionError
+from portage import os
+from portage import load_mod
+from portage.const import USER_CONFIG_PATH, GLOBAL_CONFIG_PATH
+from portage.const import _ENABLE_SET_CONFIG
+from portage.exception import PackageSetNotFound
+from portage.localization import _
+
+SETPREFIX = "@"
+
+def get_boolean(options, name, default):
+	if name not in options:
+		return default
+	elif options[name].lower() in ("1", "yes", "on", "true"):
+		return True
+	elif options[name].lower() in ("0", "no", "off", "false"):
+		return False
+	else:
+		raise SetConfigError(_("invalid value '%(value)s' for option '%(option)s'") % {"value": options[name], "option": name})
+
+class SetConfigError(Exception):
+	pass
+
+class SetConfig(object):
+	def __init__(self, paths, settings, trees):
+		self._parser = SafeConfigParser(
+			defaults={
+				"EPREFIX" : settings["EPREFIX"],
+				"EROOT" : settings["EROOT"],
+				"PORTAGE_CONFIGROOT" : settings["PORTAGE_CONFIGROOT"],
+				"ROOT" : settings["ROOT"],
+			})
+
+		if _ENABLE_SET_CONFIG:
+			self._parser.read(paths)
+		else:
+			self._create_default_config()
+
+		self.errors = []
+		self.psets = {}
+		self.trees = trees
+		self.settings = settings
+		self._parsed = False
+		self.active = []
+
+	def _create_default_config(self):
+		"""
+		Create a default hardcoded set configuration for a portage version
+		that does not support set configuration files. This is only used
+		in the current branch of portage if _ENABLE_SET_CONFIG is False.
+		Even if it's not used in this branch, keep it here in order to
+		minimize the diff between branches.
+
+			[world]
+			class = portage.sets.base.DummyPackageSet
+			packages = @selected @system
+
+			[selected]
+			class = portage.sets.files.WorldSelectedSet
+
+			[system]
+			class = portage.sets.profiles.PackagesSystemSet
+
+		"""
+		parser = self._parser
+
+		parser.add_section("world")
+		parser.set("world", "class", "portage.sets.base.DummyPackageSet")
+		parser.set("world", "packages", "@selected @system")
+
+		parser.add_section("selected")
+		parser.set("selected", "class", "portage.sets.files.WorldSelectedSet")
+
+		parser.add_section("system")
+		parser.set("system", "class", "portage.sets.profiles.PackagesSystemSet")
+
+	def update(self, setname, options):
+		parser = self._parser
+		self.errors = []
+		if not setname in self.psets:
+			options["name"] = setname
+			options["world-candidate"] = "False"
+
+			# For the unlikely case that there is already a section
+			# with the requested setname.
+			import random
+			while setname in parser.sections():
+				setname = "%08d" % random.randint(0, 10**10)
+
+			parser.add_section(setname)
+			for k, v in options.items():
+				parser.set(setname, k, v)
+		else:
+			section = self.psets[setname].creator
+			if parser.has_option(section, "multiset") and \
+				parser.getboolean(section, "multiset"):
+				self.errors.append(_("Invalid request to reconfigure set '%(set)s' generated "
+					"by multiset section '%(section)s'") % {"set": setname, "section": section})
+				return
+			for k, v in options.items():
+				parser.set(section, k, v)
+		self._parse(update=True)
+
+	def _parse(self, update=False):
+		if self._parsed and not update:
+			return
+		parser = self._parser
+		for sname in parser.sections():
+			# find classname for current section, default to file based sets
+			if not parser.has_option(sname, "class"):
+				classname = "portage._sets.files.StaticFileSet"
+			else:
+				classname = parser.get(sname, "class")
+
+			if classname.startswith('portage.sets.'):
+				# The module has been made private, but we still support
+				# the previous namespace for sets.conf entries.
+				classname = classname.replace('sets', '_sets', 1)
+
+			# try to import the specified class
+			try:
+				setclass = load_mod(classname)
+			except (ImportError, AttributeError):
+				try:
+					setclass = load_mod("portage._sets." + classname)
+				except (ImportError, AttributeError):
+					self.errors.append(_("Could not import '%(class)s' for section "
+						"'%(section)s'") % {"class": classname, "section": sname})
+					continue
+			# prepare option dict for the current section
+			optdict = {}
+			for oname in parser.options(sname):
+				optdict[oname] = parser.get(sname, oname)
+			
+			# create single or multiple instances of the given class depending on configuration
+			if parser.has_option(sname, "multiset") and \
+				parser.getboolean(sname, "multiset"):
+				if hasattr(setclass, "multiBuilder"):
+					newsets = {}
+					try:
+						newsets = setclass.multiBuilder(optdict, self.settings, self.trees)
+					except SetConfigError as e:
+						self.errors.append(_("Configuration error in section '%s': %s") % (sname, str(e)))
+						continue
+					for x in newsets:
+						if x in self.psets and not update:
+							self.errors.append(_("Redefinition of set '%s' (sections: '%s', '%s')") % (x, self.psets[x].creator, sname))
+						newsets[x].creator = sname
+						if parser.has_option(sname, "world-candidate") and \
+							parser.getboolean(sname, "world-candidate"):
+							newsets[x].world_candidate = True
+					self.psets.update(newsets)
+				else:
+					self.errors.append(_("Section '%(section)s' is configured as multiset, but '%(class)s' "
+						"doesn't support that configuration") % {"section": sname, "class": classname})
+					continue
+			else:
+				try:
+					setname = parser.get(sname, "name")
+				except NoOptionError:
+					setname = sname
+				if setname in self.psets and not update:
+					self.errors.append(_("Redefinition of set '%s' (sections: '%s', '%s')") % (setname, self.psets[setname].creator, sname))
+				if hasattr(setclass, "singleBuilder"):
+					try:
+						self.psets[setname] = setclass.singleBuilder(optdict, self.settings, self.trees)
+						self.psets[setname].creator = sname
+						if parser.has_option(sname, "world-candidate") and \
+							parser.getboolean(sname, "world-candidate"):
+							self.psets[setname].world_candidate = True
+					except SetConfigError as e:
+						self.errors.append(_("Configuration error in section '%s': %s") % (sname, str(e)))
+						continue
+				else:
+					self.errors.append(_("'%(class)s' does not support individual set creation, section '%(section)s' "
+						"must be configured as multiset") % {"class": classname, "section": sname})
+					continue
+		self._parsed = True
+	
+	def getSets(self):
+		self._parse()
+		return self.psets.copy()
+
+	def getSetAtoms(self, setname, ignorelist=None):
+		"""
+		This raises PackageSetNotFound if the given setname does not exist.
+		"""
+		self._parse()
+		try:
+			myset = self.psets[setname]
+		except KeyError:
+			raise PackageSetNotFound(setname)
+		myatoms = myset.getAtoms()
+		parser = self._parser
+
+		if ignorelist is None:
+			ignorelist = set()
+
+		ignorelist.add(setname)
+		for n in myset.getNonAtoms():
+			if n.startswith(SETPREFIX):
+				s = n[len(SETPREFIX):]
+				if s in self.psets:
+					if s not in ignorelist:
+						myatoms.update(self.getSetAtoms(s,
+							ignorelist=ignorelist))
+				else:
+					raise PackageSetNotFound(s)
+
+		return myatoms
+
+def load_default_config(settings, trees):
+
+	if not _ENABLE_SET_CONFIG:
+		return SetConfig(None, settings, trees)
+
+	global_config_path = GLOBAL_CONFIG_PATH
+	if settings['EPREFIX']:
+		global_config_path = os.path.join(settings['EPREFIX'],
+			GLOBAL_CONFIG_PATH.lstrip(os.sep))
+	def _getfiles():
+		for path, dirs, files in os.walk(os.path.join(global_config_path, "sets")):
+			for f in files:
+				if not f.startswith(b'.'):
+					yield os.path.join(path, f)
+
+		dbapi = trees["porttree"].dbapi
+		for repo in dbapi.getRepositories():
+			path = dbapi.getRepositoryPath(repo)
+			yield os.path.join(path, "sets.conf")
+
+		yield os.path.join(settings["PORTAGE_CONFIGROOT"],
+			USER_CONFIG_PATH, "sets.conf")
+
+	return SetConfig(_getfiles(), settings, trees)

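For illustration, a minimal standalone sketch (not part of this commit) of
how _parse() above resolves the "class" option of each sets.conf section,
including the file-based default and the legacy portage.sets namespace
rewrite; the section names here are hypothetical:

from configparser import ConfigParser  # Python 3 for brevity

SAMPLE = """
[custom]
class = portage.sets.files.StaticFileSet
filename = /etc/portage/sets/custom

[plain-file-set]
filename = /etc/portage/sets/other
"""

parser = ConfigParser()
parser.read_string(SAMPLE)
for sname in parser.sections():
	# no "class" option: default to file based sets, as _parse() does
	if not parser.has_option(sname, "class"):
		classname = "portage._sets.files.StaticFileSet"
	else:
		classname = parser.get(sname, "class")
	# the old public namespace is still accepted and rewritten
	if classname.startswith("portage.sets."):
		classname = classname.replace("sets", "_sets", 1)
	print("%s -> %s" % (sname, classname))
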
diff --git a/portage_with_autodep/pym/portage/_sets/base.py b/portage_with_autodep/pym/portage/_sets/base.py
new file mode 100644
index 0000000..c8d3ae4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/base.py
@@ -0,0 +1,264 @@
+# Copyright 2007-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.dep import Atom, ExtendedAtomDict, best_match_to_list, match_from_list
+from portage.exception import InvalidAtom
+from portage.versions import cpv_getkey
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+OPERATIONS = ["merge", "unmerge"]
+
+class PackageSet(object):
+	# Set this to operations that are supported by your subclass. While 
+	# technically there is no difference between "merge" and "unmerge" regarding
+	# package sets, the latter doesn't make sense for some sets like "system"
+	# or "security" and therefore isn't supported by them.
+	_operations = ["merge"]
+	description = "generic package set"
+	
+	def __init__(self, allow_wildcard=False, allow_repo=False):
+		self._atoms = set()
+		self._atommap = ExtendedAtomDict(set)
+		self._loaded = False
+		self._loading = False
+		self.errors = []
+		self._nonatoms = set()
+		self.world_candidate = False
+		self._allow_wildcard = allow_wildcard
+		self._allow_repo = allow_repo
+
+	def __contains__(self, atom):
+		self._load()
+		return atom in self._atoms or atom in self._nonatoms
+	
+	def __iter__(self):
+		self._load()
+		for x in self._atoms:
+			yield x
+		for x in self._nonatoms:
+			yield x
+
+	def __bool__(self):
+		self._load()
+		return bool(self._atoms or self._nonatoms)
+
+	if sys.hexversion < 0x3000000:
+		__nonzero__ = __bool__
+
+	def supportsOperation(self, op):
+		if not op in OPERATIONS:
+			raise ValueError(op)
+		return op in self._operations
+
+	def _load(self):
+		if not (self._loaded or self._loading):
+			self._loading = True
+			self.load()
+			self._loaded = True
+			self._loading = False
+
+	def getAtoms(self):
+		self._load()
+		return self._atoms.copy()
+
+	def getNonAtoms(self):
+		self._load()
+		return self._nonatoms.copy()
+
+	def _setAtoms(self, atoms):
+		self._atoms.clear()
+		self._nonatoms.clear()
+		for a in atoms:
+			if not isinstance(a, Atom):
+				if isinstance(a, basestring):
+					a = a.strip()
+				if not a:
+					continue
+				try:
+					a = Atom(a, allow_wildcard=True, allow_repo=True)
+				except InvalidAtom:
+					self._nonatoms.add(a)
+					continue
+			if not self._allow_wildcard and a.extended_syntax:
+				raise InvalidAtom("extended atom syntax not allowed here")
+			if not self._allow_repo and a.repo:
+				raise InvalidAtom("repository specification not allowed here")
+			self._atoms.add(a)
+
+		self._updateAtomMap()
+
+	def load(self):
+		# This method must be overwritten by subclasses
+		# Editable sets should use the value of self._mtime to determine if they
+		# need to reload themselves
+		raise NotImplementedError()
+
+	def containsCPV(self, cpv):
+		self._load()
+		for a in self._atoms:
+			if match_from_list(a, [cpv]):
+				return True
+		return False
+	
+	def getMetadata(self, key):
+		if hasattr(self, key.lower()):
+			return getattr(self, key.lower())
+		else:
+			return ""
+	
+	def _updateAtomMap(self, atoms=None):
+		"""Update self._atommap for specific atoms or all atoms."""
+		if not atoms:
+			self._atommap.clear()
+			atoms = self._atoms
+		for a in atoms:
+			self._atommap.setdefault(a.cp, set()).add(a)
+	
+	# Not sure if this one should really be in PackageSet
+	def findAtomForPackage(self, pkg, modified_use=None):
+		"""Return the best match for a given package from the arguments, or
+		None if there are no matches.  This matches virtual arguments against
+		the PROVIDE metadata.  This can raise an InvalidDependString exception
+		if an error occurs while parsing PROVIDE."""
+
+		if modified_use is not None and modified_use is not pkg.use.enabled:
+			pkg = pkg.copy()
+			pkg.metadata["USE"] = " ".join(modified_use)
+
+		# Atoms matched via PROVIDE must be temporarily transformed since
+		# match_from_list() only works correctly when atom.cp == pkg.cp.
+		rev_transform = {}
+		for atom in self.iterAtomsForPackage(pkg):
+			if atom.cp == pkg.cp:
+				rev_transform[atom] = atom
+			else:
+				rev_transform[Atom(atom.replace(atom.cp, pkg.cp, 1), allow_wildcard=True, allow_repo=True)] = atom
+		best_match = best_match_to_list(pkg, iter(rev_transform))
+		if best_match:
+			return rev_transform[best_match]
+		return None
+
+	def iterAtomsForPackage(self, pkg):
+		"""
+		Find all matching atoms for a given package. This matches virtual
+		arguments against the PROVIDE metadata.  This will raise an
+		InvalidDependString exception if PROVIDE is invalid.
+		"""
+		cpv_slot_list = [pkg]
+		cp = cpv_getkey(pkg.cpv)
+		self._load() # make sure the atoms are loaded
+
+		atoms = self._atommap.get(cp)
+		if atoms:
+			for atom in atoms:
+				if match_from_list(atom, cpv_slot_list):
+					yield atom
+		provides = pkg.metadata['PROVIDE']
+		if not provides:
+			return
+		provides = provides.split()
+		for provide in provides:
+			try:
+				provided_cp = Atom(provide).cp
+			except InvalidAtom:
+				continue
+			atoms = self._atommap.get(provided_cp)
+			if atoms:
+				for atom in atoms:
+					if match_from_list(atom.replace(provided_cp, cp),
+						cpv_slot_list):
+						yield atom
+
+class EditablePackageSet(PackageSet):
+
+	def __init__(self, allow_wildcard=False, allow_repo=False):
+		super(EditablePackageSet, self).__init__(allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+		
+	def update(self, atoms):
+		self._load()
+		modified = False
+		normal_atoms = []
+		for a in atoms:
+			if not isinstance(a, Atom):
+				try:
+					a = Atom(a, allow_wildcard=True, allow_repo=True)
+				except InvalidAtom:
+					modified = True
+					self._nonatoms.add(a)
+					continue
+			if not self._allow_wildcard and a.extended_syntax:
+				raise InvalidAtom("extended atom syntax not allowed here")
+			if not self._allow_repo and a.repo:
+				raise InvalidAtom("repository specification not allowed here")
+			normal_atoms.append(a)
+
+		if normal_atoms:
+			modified = True
+			self._atoms.update(normal_atoms)
+			self._updateAtomMap(atoms=normal_atoms)
+		if modified:
+			self.write()
+	
+	def add(self, atom):
+		self.update([atom])
+
+	def replace(self, atoms):
+		self._setAtoms(atoms)
+		self.write()
+
+	def remove(self, atom):
+		self._load()
+		self._atoms.discard(atom)
+		self._nonatoms.discard(atom)
+		self._updateAtomMap()
+		self.write()
+
+	def removePackageAtoms(self, cp):
+		self._load()
+		for a in list(self._atoms):
+			if a.cp == cp:
+				self.remove(a)
+		self.write()
+
+	def write(self):
+		# This method must be overwritten in subclasses that should be editable
+		raise NotImplementedError()
+
+class InternalPackageSet(EditablePackageSet):
+	def __init__(self, initial_atoms=None, allow_wildcard=False, allow_repo=True):
+		"""
+		Repo atoms are allowed more often than not, so it makes sense for this
+		class to allow them by default. The Atom constructor and isvalidatom()
+		functions default to allow_repo=False, which is sufficient to ensure
+		that repo atoms are prohibited when necessary.
+		"""
+		super(InternalPackageSet, self).__init__(allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+		if initial_atoms != None:
+			self.update(initial_atoms)
+
+	def clear(self):
+		self._atoms.clear()
+		self._updateAtomMap()
+	
+	def load(self):
+		pass
+
+	def write(self):
+		pass
+
+class DummyPackageSet(PackageSet):
+	def __init__(self, atoms=None):
+		super(DummyPackageSet, self).__init__()
+		if atoms:
+			self._setAtoms(atoms)
+	
+	def load(self):
+		pass
+	
+	def singleBuilder(cls, options, settings, trees):
+		atoms = options.get("packages", "").split()
+		return DummyPackageSet(atoms=atoms)
+	singleBuilder = classmethod(singleBuilder)

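As a usage sketch (hypothetical class, assuming a portage install on the
Python path): subclasses of PackageSet only need to implement load() and
feed their entries through _setAtoms(); lazy loading, iteration and the
cp -> atoms map all come from the base class above:

from portage._sets.base import PackageSet

class PinnedSet(PackageSet):
	"""Example set backed by a fixed list of atom strings."""
	_operations = ["merge", "unmerge"]
	description = "example set with a hard-coded atom list"

	def __init__(self, entries):
		super(PinnedSet, self).__init__()
		self._entries = entries

	def load(self):
		# _setAtoms() sorts entries into atoms and non-atoms and
		# refreshes the map used by iterAtomsForPackage()
		self._setAtoms(self._entries)

s = PinnedSet([">=sys-apps/portage-2.1", "app-editors/vim"])
print(sorted(str(a) for a in s.getAtoms()))
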
diff --git a/portage_with_autodep/pym/portage/_sets/dbapi.py b/portage_with_autodep/pym/portage/_sets/dbapi.py
new file mode 100644
index 0000000..0f238f0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/dbapi.py
@@ -0,0 +1,383 @@
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+
+from portage import os
+from portage.versions import catpkgsplit, catsplit, pkgcmp, best
+from portage.dep import Atom
+from portage.localization import _
+from portage._sets.base import PackageSet
+from portage._sets import SetConfigError, get_boolean
+import portage
+
+__all__ = ["CategorySet", "DowngradeSet",
+	"EverythingSet", "OwnerSet", "VariableSet"]
+
+class EverythingSet(PackageSet):
+	_operations = ["merge"]
+	description = "Package set which contains SLOT " + \
+		"atoms to match all installed packages"
+	_filter = None
+
+	def __init__(self, vdbapi, **kwargs):
+		super(EverythingSet, self).__init__()
+		self._db = vdbapi
+
+	def load(self):
+		myatoms = []
+		db_keys = ["SLOT"]
+		aux_get = self._db.aux_get
+		cp_list = self._db.cp_list
+
+		for cp in self._db.cp_all():
+			for cpv in cp_list(cp):
+				# NOTE: Create SLOT atoms even when there is only one
+				# SLOT installed, in order to avoid the possibility
+				# of unwanted upgrades as reported in bug #338959.
+				slot, = aux_get(cpv, db_keys)
+				atom = Atom("%s:%s" % (cp, slot))
+				if self._filter:
+					if self._filter(atom):
+						myatoms.append(atom)
+				else:
+					myatoms.append(atom)
+
+		self._setAtoms(myatoms)
+	
+	def singleBuilder(self, options, settings, trees):
+		return EverythingSet(trees["vartree"].dbapi)
+	singleBuilder = classmethod(singleBuilder)
+
+class OwnerSet(PackageSet):
+
+	_operations = ["merge", "unmerge"]
+
+	description = "Package set which contains all packages " + \
+		"that own one or more files."
+
+	def __init__(self, vardb=None, exclude_files=None, files=None):
+		super(OwnerSet, self).__init__()
+		self._db = vardb
+		self._exclude_files = exclude_files
+		self._files = files
+
+	def mapPathsToAtoms(self, paths, exclude_paths=None):
+		"""
+		All paths must have $EROOT stripped from the left side.
+		"""
+		rValue = set()
+		vardb = self._db
+		aux_get = vardb.aux_get
+		aux_keys = ["SLOT"]
+		if exclude_paths is None:
+			for link, p in vardb._owners.iter_owners(paths):
+				cat, pn = catpkgsplit(link.mycpv)[:2]
+				slot, = aux_get(link.mycpv, aux_keys)
+				rValue.add("%s/%s:%s" % (cat, pn, slot))
+		else:
+			all_paths = set()
+			all_paths.update(paths)
+			all_paths.update(exclude_paths)
+			exclude_atoms = set()
+			for link, p in vardb._owners.iter_owners(all_paths):
+				cat, pn = catpkgsplit(link.mycpv)[:2]
+				slot, = aux_get(link.mycpv, aux_keys)
+				atom = "%s/%s:%s" % (cat, pn, slot)
+				rValue.add(atom)
+				if p in exclude_paths:
+					exclude_atoms.add(atom)
+			rValue.difference_update(exclude_atoms)
+
+		return rValue
+
+	def load(self):
+		self._setAtoms(self.mapPathsToAtoms(self._files,
+			exclude_paths=self._exclude_files))
+
+	def singleBuilder(cls, options, settings, trees):
+		if not "files" in options:
+			raise SetConfigError(_("no files given"))
+
+		exclude_files = options.get("exclude-files")
+		if exclude_files is not None:
+			exclude_files = frozenset(portage.util.shlex_split(exclude_files))
+		return cls(vardb=trees["vartree"].dbapi, exclude_files=exclude_files,
+			files=frozenset(portage.util.shlex_split(options["files"])))
+
+	singleBuilder = classmethod(singleBuilder)
+
+class VariableSet(EverythingSet):
+
+	_operations = ["merge", "unmerge"]
+
+	description = "Package set which contains all packages " + \
+		"that match specified values of a specified variable."
+
+	def __init__(self, vardb, metadatadb=None, variable=None, includes=None, excludes=None):
+		super(VariableSet, self).__init__(vardb)
+		self._metadatadb = metadatadb
+		self._variable = variable
+		self._includes = includes
+		self._excludes = excludes
+
+	def _filter(self, atom):
+		ebuild = best(self._metadatadb.match(atom))
+		if not ebuild:
+			return False
+		values, = self._metadatadb.aux_get(ebuild, [self._variable])
+		values = values.split()
+		if self._includes and not self._includes.intersection(values):
+			return False
+		if self._excludes and self._excludes.intersection(values):
+			return False
+		return True
+
+	def singleBuilder(cls, options, settings, trees):
+
+		variable = options.get("variable")
+		if variable is None:
+			raise SetConfigError(_("missing required attribute: 'variable'"))
+
+		includes = options.get("includes", "")
+		excludes = options.get("excludes", "")
+
+		if not (includes or excludes):
+			raise SetConfigError(_("no includes or excludes given"))
+		
+		metadatadb = options.get("metadata-source", "vartree")
+		if not metadatadb in trees:
+			raise SetConfigError(_("invalid value '%s' for option metadata-source") % metadatadb)
+
+		return cls(trees["vartree"].dbapi,
+			metadatadb=trees[metadatadb].dbapi,
+			excludes=frozenset(excludes.split()),
+			includes=frozenset(includes.split()),
+			variable=variable)
+
+	singleBuilder = classmethod(singleBuilder)
+
+class DowngradeSet(PackageSet):
+
+	_operations = ["merge", "unmerge"]
+
+	description = "Package set which contains all packages " + \
+		"for which the highest visible ebuild version is lower than " + \
+		"the currently installed version."
+
+	def __init__(self, portdb=None, vardb=None):
+		super(DowngradeSet, self).__init__()
+		self._portdb = portdb
+		self._vardb = vardb
+
+	def load(self):
+		atoms = []
+		xmatch = self._portdb.xmatch
+		xmatch_level = "bestmatch-visible"
+		cp_list = self._vardb.cp_list
+		aux_get = self._vardb.aux_get
+		aux_keys = ["SLOT"]
+		for cp in self._vardb.cp_all():
+			for cpv in cp_list(cp):
+				slot, = aux_get(cpv, aux_keys)
+				slot_atom = "%s:%s" % (cp, slot)
+				ebuild = xmatch(xmatch_level, slot_atom)
+				if not ebuild:
+					continue
+				ebuild_split = catpkgsplit(ebuild)[1:]
+				installed_split = catpkgsplit(cpv)[1:]
+				if pkgcmp(installed_split, ebuild_split) > 0:
+					atoms.append(slot_atom)
+
+		self._setAtoms(atoms)
+
+	def singleBuilder(cls, options, settings, trees):
+		return cls(portdb=trees["porttree"].dbapi,
+			vardb=trees["vartree"].dbapi)
+
+	singleBuilder = classmethod(singleBuilder)
+
+class UnavailableSet(EverythingSet):
+
+	_operations = ["unmerge"]
+
+	description = "Package set which contains all installed " + \
+		"packages for which there are no visible ebuilds " + \
+		"corresponding to the same $CATEGORY/$PN:$SLOT."
+
+	def __init__(self, vardb, metadatadb=None):
+		super(UnavailableSet, self).__init__(vardb)
+		self._metadatadb = metadatadb
+
+	def _filter(self, atom):
+		return not self._metadatadb.match(atom)
+
+	def singleBuilder(cls, options, settings, trees):
+
+		metadatadb = options.get("metadata-source", "porttree")
+		if not metadatadb in trees:
+			raise SetConfigError(_("invalid value '%s' for option "
+				"metadata-source") % (metadatadb,))
+
+		return cls(trees["vartree"].dbapi,
+			metadatadb=trees[metadatadb].dbapi)
+
+	singleBuilder = classmethod(singleBuilder)
+
+class UnavailableBinaries(EverythingSet):
+
+	_operations = ('merge', 'unmerge',)
+
+	description = "Package set which contains all installed " + \
+		"packages for which corresponding binary packages " + \
+		"are not available."
+
+	def __init__(self, vardb, metadatadb=None):
+		super(UnavailableBinaries, self).__init__(vardb)
+		self._metadatadb = metadatadb
+
+	def _filter(self, atom):
+		inst_pkg = self._db.match(atom)
+		if not inst_pkg:
+			return False
+		inst_cpv = inst_pkg[0]
+		return not self._metadatadb.cpv_exists(inst_cpv)
+
+	def singleBuilder(cls, options, settings, trees):
+
+		metadatadb = options.get("metadata-source", "bintree")
+		if not metadatadb in trees:
+			raise SetConfigError(_("invalid value '%s' for option "
+				"metadata-source") % (metadatadb,))
+
+		return cls(trees["vartree"].dbapi,
+			metadatadb=trees[metadatadb].dbapi)
+
+	singleBuilder = classmethod(singleBuilder)
+
+class CategorySet(PackageSet):
+	_operations = ["merge", "unmerge"]
+	
+	def __init__(self, category, dbapi, only_visible=True):
+		super(CategorySet, self).__init__()
+		self._db = dbapi
+		self._category = category
+		self._check = only_visible
+		if only_visible:
+			s="visible"
+		else:
+			s="all"
+		self.description = "Package set containing %s packages of category %s" % (s, self._category)
+			
+	def load(self):
+		myatoms = []
+		for cp in self._db.cp_all():
+			if catsplit(cp)[0] == self._category:
+				if (not self._check) or len(self._db.match(cp)) > 0:
+					myatoms.append(cp)
+		self._setAtoms(myatoms)
+	
+	def _builderGetRepository(cls, options, repositories):
+		repository = options.get("repository", "porttree")
+		if not repository in repositories:
+			raise SetConfigError(_("invalid repository class '%s'") % repository)
+		return repository
+	_builderGetRepository = classmethod(_builderGetRepository)
+
+	def _builderGetVisible(cls, options):
+		return get_boolean(options, "only_visible", True)
+	_builderGetVisible = classmethod(_builderGetVisible)
+		
+	def singleBuilder(cls, options, settings, trees):
+		if not "category" in options:
+			raise SetConfigError(_("no category given"))
+
+		category = options["category"]
+		if not category in settings.categories:
+			raise SetConfigError(_("invalid category name '%s'") % category)
+
+		repository = cls._builderGetRepository(options, trees.keys())
+		visible = cls._builderGetVisible(options)
+		
+		return CategorySet(category, dbapi=trees[repository].dbapi, only_visible=visible)
+	singleBuilder = classmethod(singleBuilder)
+
+	def multiBuilder(cls, options, settings, trees):
+		rValue = {}
+	
+		if "categories" in options:
+			categories = options["categories"].split()
+			invalid = set(categories).difference(settings.categories)
+			if invalid:
+				raise SetConfigError(_("invalid categories: %s") % ", ".join(list(invalid)))
+		else:
+			categories = settings.categories
+	
+		repository = cls._builderGetRepository(options, trees.keys())
+		visible = cls._builderGetVisible(options)
+		name_pattern = options.get("name_pattern", "$category/*")
+	
+		if not "$category" in name_pattern and not "${category}" in name_pattern:
+			raise SetConfigError(_("name_pattern doesn't include $category placeholder"))
+	
+		for cat in categories:
+			myset = CategorySet(cat, trees[repository].dbapi, only_visible=visible)
+			myname = name_pattern.replace("$category", cat)
+			myname = myname.replace("${category}", cat)
+			rValue[myname] = myset
+		return rValue
+	multiBuilder = classmethod(multiBuilder)
+
+class AgeSet(EverythingSet):
+	_operations = ["merge", "unmerge"]
+
+	def __init__(self, vardb, mode="older", age=7):
+		super(AgeSet, self).__init__(vardb)
+		self._mode = mode
+		self._age = age
+
+	def _filter(self, atom):
+	
+		cpv = self._db.match(atom)[0]
+		path = self._db.getpath(cpv, filename="COUNTER")
+		age = (time.time() - os.stat(path).st_mtime) / (3600 * 24)
+		if ((self._mode == "older" and age <= self._age) \
+			or (self._mode == "newer" and age >= self._age)):
+			return False
+		else:
+			return True
+	
+	def singleBuilder(cls, options, settings, trees):
+		mode = options.get("mode", "older")
+		if str(mode).lower() not in ["newer", "older"]:
+			raise SetConfigError(_("invalid 'mode' value %s (use either 'newer' or 'older')") % mode)
+		try:
+			age = int(options.get("age", "7"))
+		except ValueError as e:
+			raise SetConfigError(_("value of option 'age' is not an integer"))
+		return AgeSet(vardb=trees["vartree"].dbapi, mode=mode, age=age)
+
+	singleBuilder = classmethod(singleBuilder)
+
+class RebuiltBinaries(EverythingSet):
+	_operations = ('merge',)
+	_aux_keys = ('BUILD_TIME',)
+
+	def __init__(self, vardb, bindb=None):
+		super(RebuiltBinaries, self).__init__(vardb, bindb=bindb)
+		self._bindb = bindb
+
+	def _filter(self, atom):
+		cpv = self._db.match(atom)[0]
+		inst_build_time, = self._db.aux_get(cpv, self._aux_keys)
+		try:
+			bin_build_time, = self._bindb.aux_get(cpv, self._aux_keys)
+		except KeyError:
+			return False
+		return bool(bin_build_time and (inst_build_time != bin_build_time))
+
+	def singleBuilder(cls, options, settings, trees):
+		return RebuiltBinaries(trees["vartree"].dbapi,
+			bindb=trees["bintree"].dbapi)
+
+	singleBuilder = classmethod(singleBuilder)

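These dbapi-backed sets are normally instantiated through sets.conf; a
hypothetical fragment (section names invented, option names taken from
the singleBuilder()/multiBuilder() methods above):

[downgrade]
class = portage.sets.dbapi.DowngradeSet

[installed-older-than-90-days]
class = portage.sets.dbapi.AgeSet
mode = older
age = 90

[category-sets]
class = portage.sets.dbapi.CategorySet
multiset = true
categories = dev-lang sys-apps
name_pattern = $category/*
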
diff --git a/portage_with_autodep/pym/portage/_sets/files.py b/portage_with_autodep/pym/portage/_sets/files.py
new file mode 100644
index 0000000..f19ecf6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/files.py
@@ -0,0 +1,341 @@
+# Copyright 2007-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import re
+from itertools import chain
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.util import grabfile, write_atomic, ensure_dirs, normalize_path
+from portage.const import USER_CONFIG_PATH, WORLD_FILE, WORLD_SETS_FILE
+from portage.const import _ENABLE_SET_CONFIG
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage import portage_gid
+from portage._sets.base import PackageSet, EditablePackageSet
+from portage._sets import SetConfigError, SETPREFIX, get_boolean
+from portage.env.loaders import ItemFileLoader, KeyListFileLoader
+from portage.env.validators import ValidAtomValidator
+from portage import cpv_getkey
+
+__all__ = ["StaticFileSet", "ConfigFileSet", "WorldSelectedSet"]
+
+class StaticFileSet(EditablePackageSet):
+	_operations = ["merge", "unmerge"]
+	_repopath_match = re.compile(r'.*\$\{repository:(?P<reponame>.+)\}.*')
+	_repopath_sub = re.compile(r'\$\{repository:(?P<reponame>.+)\}')
+		
+	def __init__(self, filename, greedy=False, dbapi=None):
+		super(StaticFileSet, self).__init__(allow_repo=True)
+		self._filename = filename
+		self._mtime = None
+		self.description = "Package set loaded from file %s" % self._filename
+		self.loader = ItemFileLoader(self._filename, self._validate)
+		if greedy and not dbapi:
+			self.errors.append(_("%s configured as greedy set, but no dbapi instance passed in constructor") % self._filename)
+			greedy = False
+		self.greedy = greedy
+		self.dbapi = dbapi
+
+		metadata = grabfile(self._filename + ".metadata")
+		key = None
+		value = []
+		for line in metadata:
+			line = line.strip()
+			if len(line) == 0 and key != None:
+				setattr(self, key, " ".join(value))
+				key = None
+			elif line[-1] == ":" and key == None:
+				key = line[:-1].lower()
+				value = []
+			elif key != None:
+				value.append(line)
+			else:
+				pass
+		else:
+			if key != None:
+				setattr(self, key, " ".join(value))
+
+	def _validate(self, atom):
+		return bool(atom[:1] == SETPREFIX or ValidAtomValidator(atom, allow_repo=True))
+
+	def write(self):
+		write_atomic(self._filename, "".join("%s\n" % (atom,) \
+			for atom in sorted(chain(self._atoms, self._nonatoms))))
+
+	def load(self):
+		try:
+			mtime = os.stat(self._filename).st_mtime
+		except (OSError, IOError):
+			mtime = None
+		if (not self._loaded or self._mtime != mtime):
+			try:
+				data, errors = self.loader.load()
+				for fname in errors:
+					for e in errors[fname]:
+						self.errors.append(fname+": "+e)
+			except EnvironmentError as e:
+				if e.errno != errno.ENOENT:
+					raise
+				del e
+				data = {}
+			if self.greedy:
+				atoms = []
+				for a in data:
+					matches = self.dbapi.match(a)
+					for cpv in matches:
+						atoms.append("%s:%s" % (cpv_getkey(cpv),
+							self.dbapi.aux_get(cpv, ["SLOT"])[0]))
+					# In addition to any installed slots, also try to pull
+					# in the latest new slot that may be available.
+					atoms.append(a)
+			else:
+				atoms = iter(data)
+			self._setAtoms(atoms)
+			self._mtime = mtime
+		
+	def singleBuilder(self, options, settings, trees):
+		if not "filename" in options:
+			raise SetConfigError(_("no filename specified"))
+		greedy = get_boolean(options, "greedy", False)
+		filename = options["filename"]
+		# look for repository path variables
+		match = self._repopath_match.match(filename)
+		if match:
+			try:
+				filename = self._repopath_sub.sub(trees["porttree"].dbapi.treemap[match.groupdict()["reponame"]], filename)
+			except KeyError:
+				raise SetConfigError(_("Could not find repository '%s'") % match.groupdict()["reponame"])
+		return StaticFileSet(filename, greedy=greedy, dbapi=trees["vartree"].dbapi)
+	singleBuilder = classmethod(singleBuilder)
+	
+	def multiBuilder(self, options, settings, trees):
+		rValue = {}
+		directory = options.get("directory",
+			os.path.join(settings["PORTAGE_CONFIGROOT"],
+			USER_CONFIG_PATH, "sets"))
+		name_pattern = options.get("name_pattern", "${name}")
+		if not "$name" in name_pattern and not "${name}" in name_pattern:
+			raise SetConfigError(_("name_pattern doesn't include ${name} placeholder"))
+		greedy = get_boolean(options, "greedy", False)
+		# look for repository path variables
+		match = self._repopath_match.match(directory)
+		if match:
+			try:
+				directory = self._repopath_sub.sub(trees["porttree"].dbapi.treemap[match.groupdict()["reponame"]], directory)
+			except KeyError:
+				raise SetConfigError(_("Could not find repository '%s'") % match.groupdict()["reponame"])
+
+		try:
+			directory = _unicode_decode(directory,
+				encoding=_encodings['fs'], errors='strict')
+			# Now verify that we can also encode it.
+			_unicode_encode(directory,
+				encoding=_encodings['fs'], errors='strict')
+		except UnicodeError:
+			directory = _unicode_decode(directory,
+				encoding=_encodings['fs'], errors='replace')
+			raise SetConfigError(
+				_("Directory path contains invalid character(s) for encoding '%s': '%s'") \
+				% (_encodings['fs'], directory))
+
+		if os.path.isdir(directory):
+			directory = normalize_path(directory)
+
+			for parent, dirs, files in os.walk(directory):
+				try:
+					parent = _unicode_decode(parent,
+						encoding=_encodings['fs'], errors='strict')
+				except UnicodeDecodeError:
+					continue
+				for d in dirs[:]:
+					if d[:1] == '.':
+						dirs.remove(d)
+				for filename in files:
+					try:
+						filename = _unicode_decode(filename,
+							encoding=_encodings['fs'], errors='strict')
+					except UnicodeDecodeError:
+						continue
+					if filename[:1] == '.':
+						continue
+					if filename.endswith(".metadata"):
+						continue
+					filename = os.path.join(parent,
+						filename)[1 + len(directory):]
+					myname = name_pattern.replace("$name", filename)
+					myname = myname.replace("${name}", filename)
+					rValue[myname] = StaticFileSet(
+						os.path.join(directory, filename),
+						greedy=greedy, dbapi=trees["vartree"].dbapi)
+		return rValue
+	multiBuilder = classmethod(multiBuilder)
+	
+class ConfigFileSet(PackageSet):
+	def __init__(self, filename):
+		super(ConfigFileSet, self).__init__()
+		self._filename = filename
+		self.description = "Package set generated from %s" % self._filename
+		self.loader = KeyListFileLoader(self._filename, ValidAtomValidator)
+
+	def load(self):
+		data, errors = self.loader.load()
+		self._setAtoms(iter(data))
+	
+	def singleBuilder(self, options, settings, trees):
+		if not "filename" in options:
+			raise SetConfigError(_("no filename specified"))
+		return ConfigFileSet(options["filename"])
+	singleBuilder = classmethod(singleBuilder)
+	
+	def multiBuilder(self, options, settings, trees):
+		rValue = {}
+		directory = options.get("directory",
+			os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH))
+		name_pattern = options.get("name_pattern", "sets/package_$suffix")
+		if not "$suffix" in name_pattern and not "${suffix}" in name_pattern:
+			raise SetConfigError(_("name_pattern doesn't include $suffix placeholder"))
+		for suffix in ["keywords", "use", "mask", "unmask"]:
+			myname = name_pattern.replace("$suffix", suffix)
+			myname = myname.replace("${suffix}", suffix)
+			rValue[myname] = ConfigFileSet(os.path.join(directory, "package."+suffix))
+		return rValue
+	multiBuilder = classmethod(multiBuilder)
+
+class WorldSelectedSet(EditablePackageSet):
+	description = "Set of packages that were directly installed by the user"
+	
+	def __init__(self, eroot):
+		super(WorldSelectedSet, self).__init__(allow_repo=True)
+		# most attributes exist twice, since atoms and non-atoms are
+		# stored in separate files
+		self._lock = None
+		self._filename = os.path.join(eroot, WORLD_FILE)
+		self.loader = ItemFileLoader(self._filename, self._validate)
+		self._mtime = None
+		
+		self._filename2 = os.path.join(eroot, WORLD_SETS_FILE)
+		self.loader2 = ItemFileLoader(self._filename2, self._validate2)
+		self._mtime2 = None
+		
+	def _validate(self, atom):
+		return ValidAtomValidator(atom, allow_repo=True)
+
+	def _validate2(self, setname):
+		return setname.startswith(SETPREFIX)
+
+	def write(self):
+		write_atomic(self._filename,
+			"".join(sorted("%s\n" % x for x in self._atoms)))
+
+		if _ENABLE_SET_CONFIG:
+			write_atomic(self._filename2,
+				"".join(sorted("%s\n" % x for x in self._nonatoms)))
+
+	def load(self):
+		atoms = []
+		nonatoms = []
+		atoms_changed = False
+		# load atoms and non-atoms from different files so the worldfile is 
+		# backwards-compatible with older versions and other PMs, even though 
+		# it's supposed to be private state data :/
+		try:
+			mtime = os.stat(self._filename).st_mtime
+		except (OSError, IOError):
+			mtime = None
+		if (not self._loaded or self._mtime != mtime):
+			try:
+				data, errors = self.loader.load()
+				for fname in errors:
+					for e in errors[fname]:
+						self.errors.append(fname+": "+e)
+			except EnvironmentError as e:
+				if e.errno != errno.ENOENT:
+					raise
+				del e
+				data = {}
+			atoms = list(data)
+			self._mtime = mtime
+			atoms_changed = True
+		else:
+			atoms.extend(self._atoms)
+
+		if _ENABLE_SET_CONFIG:
+			changed2, nonatoms = self._load2()
+			atoms_changed |= changed2
+
+		if atoms_changed:
+			self._setAtoms(atoms+nonatoms)
+
+	def _load2(self):
+		changed = False
+		try:
+			mtime = os.stat(self._filename2).st_mtime
+		except (OSError, IOError):
+			mtime = None
+		if (not self._loaded or self._mtime2 != mtime):
+			try:
+				data, errors = self.loader2.load()
+				for fname in errors:
+					for e in errors[fname]:
+						self.errors.append(fname+": "+e)
+			except EnvironmentError as e:
+				if e.errno != errno.ENOENT:
+					raise
+				del e
+				data = {}
+			nonatoms = list(data)
+			self._mtime2 = mtime
+			changed = True
+		else:
+			nonatoms = list(self._nonatoms)
+
+		return changed, nonatoms
+
+	def _ensure_dirs(self):
+		ensure_dirs(os.path.dirname(self._filename), gid=portage_gid, mode=0o2750, mask=0o2)
+
+	def lock(self):
+		self._ensure_dirs()
+		self._lock = lockfile(self._filename, wantnewlockfile=1)
+
+	def unlock(self):
+		unlockfile(self._lock)
+		self._lock = None
+
+	def cleanPackage(self, vardb, cpv):
+		'''
+		Before calling this function you should call lock and load.
+		After calling this function you should call unlock.
+		'''
+		if not self._lock:
+			raise AssertionError('cleanPackage needs the set to be locked')
+
+		worldlist = list(self._atoms)
+		mykey = cpv_getkey(cpv)
+		newworldlist = []
+		for x in worldlist:
+			if x.cp == mykey:
+				matches = vardb.match(x, use_cache=0)
+				if not matches:
+					#zap our world entry
+					pass
+				elif len(matches) == 1 and matches[0] == cpv:
+					#zap our world entry
+					pass
+				else:
+					#others are around; keep it.
+					newworldlist.append(x)
+			else:
+				#this doesn't match the package we're unmerging; keep it.
+				newworldlist.append(x)
+
+		newworldlist.extend(self._nonatoms)
+		self.replace(newworldlist)
+
+	def singleBuilder(self, options, settings, trees):
+		return WorldSelectedSet(settings["EROOT"])
+	singleBuilder = classmethod(singleBuilder)

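The on-disk format consumed by StaticFileSet is one atom (or @set
reference) per line, with an optional ".metadata" side file whose "key:"
headers become attributes of the set instance; a hypothetical example:

/etc/portage/sets/custom:

	>=sys-apps/portage-2.1
	app-editors/vim

/etc/portage/sets/custom.metadata:

	description:
	My handpicked packages.
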
diff --git a/portage_with_autodep/pym/portage/_sets/libs.py b/portage_with_autodep/pym/portage/_sets/libs.py
new file mode 100644
index 0000000..6c5babc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/libs.py
@@ -0,0 +1,98 @@
+# Copyright 2007-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from portage.localization import _
+from portage._sets.base import PackageSet
+from portage._sets import get_boolean, SetConfigError
+from portage.versions import cpv_getkey
+import portage
+
+class LibraryConsumerSet(PackageSet):
+	_operations = ["merge", "unmerge"]
+
+	def __init__(self, vardbapi, debug=False):
+		super(LibraryConsumerSet, self).__init__()
+		self.dbapi = vardbapi
+		self.debug = debug
+
+	def mapPathsToAtoms(self, paths):
+		rValue = set()
+		for p in paths:
+			for cpv in self.dbapi._linkmap.getOwners(p):
+				try:
+					slot, = self.dbapi.aux_get(cpv, ["SLOT"])
+				except KeyError:
+					# This is expected for preserved libraries
+					# of packages that have been uninstalled
+					# without replacement.
+					pass
+				else:
+					rValue.add("%s:%s" % (cpv_getkey(cpv), slot))
+		return rValue
+
+class LibraryFileConsumerSet(LibraryConsumerSet):
+
+	"""
+	Note: This does not detect libtool archive (*.la) files that consume the
+	specified files (revdep-rebuild is able to detect them).
+	"""
+
+	description = "Package set which contains all packages " + \
+		"that consume the specified library file(s)."
+
+	def __init__(self, vardbapi, files, **kargs):
+		super(LibraryFileConsumerSet, self).__init__(vardbapi, **kargs)
+		self.files = files
+
+	def load(self):
+		consumers = set()
+		for lib in self.files:
+			consumers.update(self.dbapi._linkmap.findConsumers(lib))
+
+		if not consumers:
+			return
+		self._setAtoms(self.mapPathsToAtoms(consumers))
+
+	def singleBuilder(cls, options, settings, trees):
+		files = tuple(portage.util.shlex_split(options.get("files", "")))
+		if not files:
+			raise SetConfigError(_("no files given"))
+		debug = get_boolean(options, "debug", False)
+		return LibraryFileConsumerSet(trees["vartree"].dbapi,
+			files, debug=debug)
+	singleBuilder = classmethod(singleBuilder)
+
+class PreservedLibraryConsumerSet(LibraryConsumerSet):
+	def load(self):
+		reg = self.dbapi._plib_registry
+		if reg is None:
+			# preserve-libs is entirely disabled
+			return
+		consumers = set()
+		if reg:
+			plib_dict = reg.getPreservedLibs()
+			for libs in plib_dict.values():
+				for lib in libs:
+					if self.debug:
+						print(lib)
+						for x in sorted(self.dbapi._linkmap.findConsumers(lib)):
+							print("    ", x)
+						print("-"*40)
+					consumers.update(self.dbapi._linkmap.findConsumers(lib))
+			# Don't rebuild packages just because they contain preserved
+			# libs that happen to be consumers of other preserved libs.
+			for libs in plib_dict.values():
+				consumers.difference_update(libs)
+		else:
+			return
+		if not consumers:
+			return
+		self._setAtoms(self.mapPathsToAtoms(consumers))
+
+	def singleBuilder(cls, options, settings, trees):
+		debug = get_boolean(options, "debug", False)
+		return PreservedLibraryConsumerSet(trees["vartree"].dbapi,
+			debug=debug)
+	singleBuilder = classmethod(singleBuilder)

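A hypothetical sets.conf fragment wiring up the consumer sets above
(option names from the singleBuilder() methods; the library path is an
example only):

[libpng-consumers]
class = portage.sets.libs.LibraryFileConsumerSet
files = /usr/lib/libpng.so.3

[preserved-rebuild]
class = portage.sets.libs.PreservedLibraryConsumerSet
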
diff --git a/portage_with_autodep/pym/portage/_sets/profiles.py b/portage_with_autodep/pym/portage/_sets/profiles.py
new file mode 100644
index 0000000..39a2968
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/profiles.py
@@ -0,0 +1,53 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+
+from portage import os
+from portage.util import grabfile_package, stack_lists
+from portage._sets.base import PackageSet
+from portage._sets import get_boolean
+from portage.util import writemsg_level
+
+__all__ = ["PackagesSystemSet"]
+
+class PackagesSystemSet(PackageSet):
+	_operations = ["merge"]
+
+	def __init__(self, profile_paths, debug=False):
+		super(PackagesSystemSet, self).__init__()
+		self._profile_paths = profile_paths
+		self._debug = debug
+		if profile_paths:
+			description = self._profile_paths[-1]
+			if description == "/etc/portage/profile" and \
+				len(self._profile_paths) > 1:
+				description = self._profile_paths[-2]
+		else:
+			description = None
+		self.description = "System packages for profile %s" % description
+
+	def load(self):
+		debug = self._debug
+		if debug:
+			writemsg_level("\nPackagesSystemSet: profile paths: %s\n" % \
+				(self._profile_paths,), level=logging.DEBUG, noiselevel=-1)
+
+		mylist = [grabfile_package(os.path.join(x, "packages"), verify_eapi=True) for x in self._profile_paths]
+
+		if debug:
+			writemsg_level("\nPackagesSystemSet: raw packages: %s\n" % \
+				(mylist,), level=logging.DEBUG, noiselevel=-1)
+
+		mylist = stack_lists(mylist, incremental=1)
+
+		if debug:
+			writemsg_level("\nPackagesSystemSet: stacked packages: %s\n" % \
+				(mylist,), level=logging.DEBUG, noiselevel=-1)
+
+		self._setAtoms([x[1:] for x in mylist if x[0] == "*"])
+
+	def singleBuilder(self, options, settings, trees):
+		debug = get_boolean(options, "debug", False)
+		return PackagesSystemSet(settings.profiles, debug=debug)
+	singleBuilder = classmethod(singleBuilder)

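A standalone sketch (plain Python, not portage code) of the final filter
in load() above: after stacking, only the "*"-prefixed entries of the
profiles' "packages" files end up in the system set:

stacked = ["*sys-apps/baselayout", "*sys-apps/portage", "app-misc/screen"]
system_atoms = [x[1:] for x in stacked if x[0] == "*"]
print(system_atoms)  # ['sys-apps/baselayout', 'sys-apps/portage']
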
diff --git a/portage_with_autodep/pym/portage/_sets/security.py b/portage_with_autodep/pym/portage/_sets/security.py
new file mode 100644
index 0000000..2d8fcf6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/security.py
@@ -0,0 +1,86 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage.glsa as glsa
+from portage._sets.base import PackageSet
+from portage.versions import catpkgsplit, pkgcmp
+from portage._sets import get_boolean
+
+__all__ = ["SecuritySet", "NewGlsaSet", "NewAffectedSet", "AffectedSet"]
+
+class SecuritySet(PackageSet):
+	_operations = ["merge"]
+	_skip_applied = False
+	
+	description = "package set that includes all packages possibly affected by a GLSA"
+		
+	def __init__(self, settings, vardbapi, portdbapi, least_change=True):
+		super(SecuritySet, self).__init__()
+		self._settings = settings
+		self._vardbapi = vardbapi
+		self._portdbapi = portdbapi
+		self._least_change = least_change
+
+	def getGlsaList(self, skip_applied):
+		glsaindexlist = glsa.get_glsa_list(self._settings)
+		if skip_applied:
+			applied_list = glsa.get_applied_glsas(self._settings)
+			glsaindexlist = set(glsaindexlist).difference(applied_list)
+			glsaindexlist = list(glsaindexlist)
+		glsaindexlist.sort()
+		return glsaindexlist
+		
+	def load(self):
+		glsaindexlist = self.getGlsaList(self._skip_applied)
+		atomlist = []
+		for glsaid in glsaindexlist:
+			myglsa = glsa.Glsa(glsaid, self._settings, self._vardbapi, self._portdbapi)
+			#print glsaid, myglsa.isVulnerable(), myglsa.isApplied(), myglsa.getMergeList()
+			if self.useGlsa(myglsa):
+				atomlist += ["="+x for x in myglsa.getMergeList(least_change=self._least_change)]
+		self._setAtoms(self._reduce(atomlist))
+	
+	def _reduce(self, atomlist):
+		mydict = {}
+		for atom in atomlist[:]:
+			cpv = self._portdbapi.xmatch("match-all", atom)[0]
+			slot = self._portdbapi.aux_get(cpv, ["SLOT"])[0]
+			cps = "/".join(catpkgsplit(cpv)[0:2]) + ":" + slot
+			if not cps in mydict:
+				mydict[cps] = (atom, cpv)
+			else:
+				other_cpv = mydict[cps][1]
+				if pkgcmp(catpkgsplit(cpv)[1:], catpkgsplit(other_cpv)[1:]) > 0:
+					atomlist.remove(mydict[cps][0])
+					mydict[cps] = (atom, cpv)
+		return atomlist
+	
+	def useGlsa(self, myglsa):
+		return True
+
+	def updateAppliedList(self):
+		glsaindexlist = self.getGlsaList(True)
+		applied_list = glsa.get_applied_glsas(self._settings)
+		for glsaid in glsaindexlist:
+			myglsa = glsa.Glsa(glsaid, self._settings, self._vardbapi, self._portdbapi)
+			if not myglsa.isVulnerable() and not myglsa.nr in applied_list:
+				myglsa.inject()
+	
+	def singleBuilder(cls, options, settings, trees):
+		least_change = not get_boolean(options, "use_emerge_resolver", False)
+		return cls(settings, trees["vartree"].dbapi, trees["porttree"].dbapi, least_change=least_change)
+	singleBuilder = classmethod(singleBuilder)
+	
+class NewGlsaSet(SecuritySet):
+	_skip_applied = True
+	description = "Package set that includes all packages possibly affected by an unapplied GLSA"
+
+class AffectedSet(SecuritySet):
+	description = "Package set that includes all packages affected by an unapplied GLSA"
+
+	def useGlsa(self, myglsa):
+		return myglsa.isVulnerable()
+
+class NewAffectedSet(AffectedSet):
+	_skip_applied = True
+	description = "Package set that includes all packages affected by an unapplied GLSA"

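A hypothetical sets.conf fragment for the GLSA-based sets above; note
that singleBuilder() inverts the use_emerge_resolver option into the
least_change constructor argument:

[security]
class = portage.sets.security.SecuritySet
use_emerge_resolver = false

[new-affected]
class = portage.sets.security.NewAffectedSet
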
diff --git a/portage_with_autodep/pym/portage/_sets/shell.py b/portage_with_autodep/pym/portage/_sets/shell.py
new file mode 100644
index 0000000..2c95845
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_sets/shell.py
@@ -0,0 +1,44 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+
+from portage import os
+from portage import _unicode_decode
+from portage._sets.base import PackageSet
+from portage._sets import SetConfigError
+
+__all__ = ["CommandOutputSet"]
+
+class CommandOutputSet(PackageSet):
+	"""This class creates a PackageSet from the output of a shell command.
+	   The shell command should produce one atom per line, that is:
+
+	   >>> atom1
+	       atom2
+	       ...
+	       atomN
+
+	   Args:
+	     name: A string that identifies the set.
+	     command: A string or sequence identifying the command to run
+	     (see the subprocess.Popen documentation for the format)
+	"""
+	_operations = ["merge", "unmerge"]
+
+	def __init__(self, command):
+		super(CommandOutputSet, self).__init__()
+		self._command = command
+		self.description = "Package set generated from output of '%s'" % self._command
+	
+	def load(self):
+		pipe = subprocess.Popen(self._command, stdout=subprocess.PIPE, shell=True)
+		stdout, stderr = pipe.communicate()
+		if pipe.wait() == os.EX_OK:
+			self._setAtoms(_unicode_decode(stdout).splitlines())
+
+	def singleBuilder(self, options, settings, trees):
+		if not "command" in options:
+			raise SetConfigError("no command specified")
+		return CommandOutputSet(options["command"])
+	singleBuilder = classmethod(singleBuilder)

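A direct-usage sketch (normally this class is built from a sets.conf
"command" option; assumes a portage install on the Python path):

from portage._sets.shell import CommandOutputSet

s = CommandOutputSet("echo app-editors/vim; echo @world")
# getAtoms() triggers load(), which runs the command through a shell and
# splits its stdout into atoms and non-atoms (here the @world reference)
print(s.getAtoms(), s.getNonAtoms())
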
diff --git a/portage_with_autodep/pym/portage/cache/__init__.py b/portage_with_autodep/pym/portage/cache/__init__.py
new file mode 100644
index 0000000..e7fe599
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/__init__.py
@@ -0,0 +1,4 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+

diff --git a/portage_with_autodep/pym/portage/cache/anydbm.py b/portage_with_autodep/pym/portage/cache/anydbm.py
new file mode 100644
index 0000000..1d56b14
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/anydbm.py
@@ -0,0 +1,113 @@
+# Copyright 2005-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+from __future__ import absolute_import
+
+try:
+	import anydbm as anydbm_module
+except ImportError:
+	# python 3.x
+	import dbm as anydbm_module
+
+try:
+	import dbm.gnu as gdbm
+except ImportError:
+	try:
+		import gdbm
+	except ImportError:
+		gdbm = None
+
+try:
+	from dbm import whichdb
+except ImportError:
+	from whichdb import whichdb
+
+try:
+	import cPickle as pickle
+except ImportError:
+	import pickle
+from portage import _unicode_encode
+from portage import os
+import sys
+from portage.cache import fs_template
+from portage.cache import cache_errors
+
+
+class database(fs_template.FsBased):
+
+	autocommits = True
+	cleanse_keys = True
+	serialize_eclasses = False
+
+	def __init__(self, *args, **config):
+		super(database,self).__init__(*args, **config)
+
+		default_db = config.get("dbtype","anydbm")
+		if not default_db.startswith("."):
+			default_db = '.' + default_db
+
+		self._db_path = os.path.join(self.location, fs_template.gen_label(self.location, self.label)+default_db)
+		self.__db = None
+		mode = "w"
+		if whichdb(self._db_path) in ("dbm.gnu", "gdbm"):
+			# Allow multiple concurrent writers (see bug #53607).
+			mode += "u"
+		try:
+			# dbm.open() will not work with bytes in python-3.1:
+			#   TypeError: can't concat bytes to str
+			self.__db = anydbm_module.open(self._db_path,
+				mode, self._perms)
+		except anydbm_module.error:
+			# XXX handle this at some point
+			try:
+				self._ensure_dirs()
+				self._ensure_dirs(self._db_path)
+			except (OSError, IOError) as e:
+				raise cache_errors.InitializationError(self.__class__, e)
+
+			# try again if failed
+			try:
+				if self.__db == None:
+					# dbm.open() will not work with bytes in python-3.1:
+					#   TypeError: can't concat bytes to str
+					if gdbm is None:
+						self.__db = anydbm_module.open(self._db_path,
+							"c", self._perms)
+					else:
+						# Prefer gdbm type if available, since it allows
+						# multiple concurrent writers (see bug #53607).
+						self.__db = gdbm.open(self._db_path,
+							"cu", self._perms)
+			except anydbm_module.error as e:
+				raise cache_errors.InitializationError(self.__class__, e)
+		self._ensure_access(self._db_path)
+
+	def iteritems(self):
+		# dbm doesn't implement items()
+		for k in self.__db.keys():
+			yield (k, self[k])
+
+	def _getitem(self, cpv):
+		# we override getitem because it's just a cpickling of the data handed in.
+		return pickle.loads(self.__db[_unicode_encode(cpv)])
+
+	def _setitem(self, cpv, values):
+		self.__db[_unicode_encode(cpv)] = pickle.dumps(values,pickle.HIGHEST_PROTOCOL)
+
+	def _delitem(self, cpv):
+		del self.__db[cpv]
+
+	def __iter__(self):
+		return iter(list(self.__db.keys()))
+
+	def __contains__(self, cpv):
+		return cpv in self.__db
+
+	def __del__(self):
+		if "__db" in self.__dict__ and self.__db != None:
+			self.__db.sync()
+			self.__db.close()
+
+	if sys.hexversion >= 0x3000000:
+		items = iteritems

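A standalone sketch (stdlib only, not portage code) of the storage scheme
used above: each cpv key maps to a pickled dict of metadata values:

import dbm, pickle

db = dbm.open("/tmp/metadata-demo", "c")
db[b"app-misc/demo-1.0"] = pickle.dumps(
	{"SLOT": "0", "EAPI": "4"}, pickle.HIGHEST_PROTOCOL)
print(pickle.loads(db[b"app-misc/demo-1.0"])["SLOT"])
db.close()
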
diff --git a/portage_with_autodep/pym/portage/cache/cache_errors.py b/portage_with_autodep/pym/portage/cache/cache_errors.py
new file mode 100644
index 0000000..3c1f239
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/cache_errors.py
@@ -0,0 +1,62 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+class CacheError(Exception):	pass
+
+class InitializationError(CacheError):
+	def __init__(self, class_name, error):
+		self.error, self.class_name = error, class_name
+	def __str__(self):
+		return "Creation of instance %s failed due to %s" % \
+			(self.class_name, str(self.error))
+
+
+class CacheCorruption(CacheError):
+	def __init__(self, key, ex):
+		self.key, self.ex = key, ex
+	def __str__(self):
+		return "%s is corrupt: %s" % (self.key, str(self.ex))
+
+
+class GeneralCacheCorruption(CacheError):
+	def __init__(self,ex):	self.ex = ex
+	def __str__(self):	return "corruption detected: %s" % str(self.ex)
+
+
+class InvalidRestriction(CacheError):
+	def __init__(self, key, restriction, exception=None):
+		if exception == None:	exception = ''
+		self.key, self.restriction, self.ex = key, restriction, exception
+	def __str__(self):
+		return "%s:%s is not valid: %s" % \
+			(self.key, self.restriction, str(self.ex))
+
+
+class ReadOnlyRestriction(CacheError):
+	def __init__(self, info=''):
+		self.info = info
+	def __str__(self):
+		return "cache is non-modifiable"+str(self.info)
+
+class StatCollision(CacheError):
+	"""
+	If the content of a cache entry changes and neither the file mtime nor
+	size changes, it will prevent rsync from detecting changes. Cache backends
+	may raise this exception from _setitem() if they detect this type of stat
+	collision. See bug #139134.
+	"""
+	def __init__(self, key, filename, mtime, size):
+		self.key = key
+		self.filename = filename
+		self.mtime = mtime
+		self.size = size
+
+	def __str__(self):
+		return "%s has stat collision with size %s and mtime %s" % \
+			(self.key, self.size, self.mtime)
+
+	def __repr__(self):
+		return "portage.cache.cache_errors.StatCollision(%s)" % \
+			(', '.join((repr(self.key), repr(self.filename),
+			repr(self.mtime), repr(self.size))),)

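Callers typically catch the common base class; a small sketch (assuming a
portage install on the Python path):

from portage.cache import cache_errors

try:
	raise cache_errors.CacheCorruption("app-misc/demo-1.0",
		ValueError("line without ="))
except cache_errors.CacheError as e:
	print(e)  # app-misc/demo-1.0 is corrupt: line without =
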
diff --git a/portage_with_autodep/pym/portage/cache/ebuild_xattr.py b/portage_with_autodep/pym/portage/cache/ebuild_xattr.py
new file mode 100644
index 0000000..6b388fa
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/ebuild_xattr.py
@@ -0,0 +1,171 @@
+# Copyright: 2009-2011 Gentoo Foundation
+# Author(s): Petteri Räty (betelgeuse@gentoo.org)
+# License: GPL2
+
+__all__ = ['database']
+
+import errno
+
+import portage
+from portage.cache import fs_template
+from portage.versions import catsplit
+from portage import cpv_getkey
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+portage.proxy.lazyimport.lazyimport(globals(),
+	'xattr')
+
+class NoValueException(Exception):
+	pass
+
+class database(fs_template.FsBased):
+
+	autocommits = True
+
+	def __init__(self, *args, **config):
+		super(database,self).__init__(*args, **config)
+		self.portdir = self.label
+		self.ns = xattr.NS_USER + '.gentoo.cache'
+		self.keys = set(self._known_keys)
+		self.keys.add('_mtime_')
+		self.keys.add('_eclasses_')
+		# xattrs have an upper length
+		self.max_len = self.__get_max()
+
+	def __get_max(self):
+		path = os.path.join(self.portdir,'profiles/repo_name')
+		try:
+			return int(self.__get(path,'value_max_len'))
+		except NoValueException as e:
+			max = self.__calc_max(path)
+			self.__set(path,'value_max_len',str(max))
+			return max
+
+	def __calc_max(self,path):
+		""" Find out max attribute length supported by the file system """
+
+		hundred = ''
+		for i in range(100):
+			hundred+='a'
+
+		s=hundred
+
+		# Could use finally but needs python 2.5 then
+		try:
+			while True:
+				self.__set(path,'test_max',s)
+				s+=hundred
+		except IOError as e:
+			# ext-based filesystems report the wrong errno
+			# http://bugzilla.kernel.org/show_bug.cgi?id=12793
+			if e.errno in (errno.E2BIG, errno.ENOSPC):
+				result = len(s)-100
+			else:
+				raise
+
+		try:
+			self.__remove(path,'test_max')
+		except IOError as e:
+			if e.errno != errno.ENODATA:
+				raise
+
+		return result
+
+	def __get_path(self,cpv):
+		cat,pn = catsplit(cpv_getkey(cpv))
+		return os.path.join(self.portdir,cat,pn,os.path.basename(cpv) + ".ebuild")
+
+	def __has_cache(self,path):
+		try:
+			self.__get(path,'_mtime_')
+		except NoValueException as e:
+			return False
+
+		return True
+
+	def __get(self,path,key,default=None):
+		try:
+			return xattr.get(path,key,namespace=self.ns)
+		except IOError as e:
+			if not default is None and errno.ENODATA == e.errno:
+				return default
+			else:
+				raise NoValueException()
+
+	def __remove(self,path,key):
+		xattr.remove(path,key,namespace=self.ns)
+
+	def __set(self,path,key,value):
+		xattr.set(path,key,value,namespace=self.ns)
+
+	def _getitem(self, cpv):
+		values = {}
+		path = self.__get_path(cpv)
+		all = {}
+		for tuple in xattr.get_all(path,namespace=self.ns):
+			key,value = tuple
+			all[key] = value
+
+		if not '_mtime_' in all:
+			raise KeyError(cpv)
+
+		# We default to '' like other caches
+		for key in self.keys:
+			attr_value = all.get(key,'1:')
+			parts,sep,value = attr_value.partition(':')
+			parts = int(parts)
+			if parts > 1:
+				for i in range(1,parts):
+					value += all.get(key+str(i))
+			values[key] = value
+
+		return values
+
+	def _setitem(self, cpv, values):
+		path = self.__get_path(cpv)
+		max = self.max_len
+		for key,value in values.items():
+			# mtime comes in as a long, so convert values to strings
+			s = str(value)
+			# We need to split long values
+			value_len = len(s)
+			parts = 0
+			if value_len > max:
+				# Find out how many parts we need
+				parts = value_len // max
+				if value_len % max > 0:
+					parts += 1
+
+				# Only the first entry carries the number of parts
+				self.__set(path,key,'%s:%s'%(parts,s[0:max]))
+
+				# Write out the rest
+				for i in range(1,parts):
+					start = i * max
+					val = s[start:start+max]
+					self.__set(path,key+str(i),val)
+			else:
+				self.__set(path,key,"%s:%s"%(1,s))
+
+	def _delitem(self, cpv):
+		pass # Will be gone with the ebuild
+
+	def __contains__(self, cpv):
+		return os.path.exists(self.__get_path(cpv))
+
+	def __iter__(self):
+
+		for root, dirs, files in os.walk(self.portdir):
+			for file in files:
+				try:
+					file = _unicode_decode(file,
+						encoding=_encodings['fs'], errors='strict')
+				except UnicodeDecodeError:
+					continue
+				if file[-7:] == '.ebuild':
+					cat = os.path.basename(os.path.dirname(root))
+					pn_pv = file[:-7]
+					path = os.path.join(root,file)
+					if self.__has_cache(path):
+						yield "%s/%s/%s" % (cat,os.path.basename(root),file[:-7])

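A standalone sketch (plain Python, not portage code) of the chunking
scheme used by _setitem()/_getitem() above: the first attribute stores
"<parts>:<first chunk>" and any remaining chunks live under numbered
sibling keys:

def split_value(s, max_len):
	parts = len(s) // max_len
	if len(s) % max_len:
		parts += 1
	parts = max(parts, 1)
	chunks = {"KEY": "%s:%s" % (parts, s[:max_len])}
	for i in range(1, parts):
		chunks["KEY" + str(i)] = s[i * max_len:(i + 1) * max_len]
	return chunks

def join_value(chunks):
	parts, _, value = chunks["KEY"].partition(":")
	for i in range(1, int(parts)):
		value += chunks["KEY" + str(i)]
	return value

chunks = split_value("a" * 250, 100)
assert join_value(chunks) == "a" * 250
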
diff --git a/portage_with_autodep/pym/portage/cache/flat_hash.py b/portage_with_autodep/pym/portage/cache/flat_hash.py
new file mode 100644
index 0000000..b6bc074
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/flat_hash.py
@@ -0,0 +1,155 @@
+# Copyright: 2005-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+from portage.cache import fs_template
+from portage.cache import cache_errors
+import errno
+import io
+import stat
+import sys
+import os as _os
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+# Coerce to unicode, in order to prevent TypeError when writing
+# raw bytes to TextIOWrapper with python2.
+_setitem_fmt = _unicode_decode("%s=%s\n")
+
+class database(fs_template.FsBased):
+
+	autocommits = True
+
+	def __init__(self, *args, **config):
+		super(database,self).__init__(*args, **config)
+		self.location = os.path.join(self.location, 
+			self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+		write_keys = set(self._known_keys)
+		write_keys.add("_eclasses_")
+		write_keys.add("_mtime_")
+		self._write_keys = sorted(write_keys)
+		if not self.readonly and not os.path.exists(self.location):
+			self._ensure_dirs()
+
+	def _getitem(self, cpv):
+		# Don't use os.path.join, for better performance.
+		fp = self.location + _os.sep + cpv
+		try:
+			myf = io.open(_unicode_encode(fp,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['repo.content'],
+				errors='replace')
+			try:
+				lines = myf.read().split("\n")
+				if not lines[-1]:
+					lines.pop()
+				d = self._parse_data(lines, cpv)
+				if '_mtime_' not in d:
+					# Backward compatibility with old cache
+					# that uses mtime mangling.
+					d['_mtime_'] = _os.fstat(myf.fileno())[stat.ST_MTIME]
+				return d
+			finally:
+				myf.close()
+		except (IOError, OSError) as e:
+			if e.errno != errno.ENOENT:
+				raise cache_errors.CacheCorruption(cpv, e)
+			raise KeyError(cpv, e)
+
+	def _parse_data(self, data, cpv):
+		try:
+			return dict( x.split("=", 1) for x in data )
+		except ValueError as e:
+			# If a line is missing an "=", the split length is 1 instead of 2.
+			raise cache_errors.CacheCorruption(cpv, e)
+
+	def _setitem(self, cpv, values):
+#		import pdb;pdb.set_trace()
+		s = cpv.rfind("/")
+		fp = os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+		try:
+			myf = io.open(_unicode_encode(fp,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='w', encoding=_encodings['repo.content'],
+				errors='backslashreplace')
+		except (IOError, OSError) as e:
+			if errno.ENOENT == e.errno:
+				try:
+					self._ensure_dirs(cpv)
+					myf = io.open(_unicode_encode(fp,
+						encoding=_encodings['fs'], errors='strict'),
+						mode='w', encoding=_encodings['repo.content'],
+						errors='backslashreplace')
+				except (OSError, IOError) as e:
+					raise cache_errors.CacheCorruption(cpv, e)
+			else:
+				raise cache_errors.CacheCorruption(cpv, e)
+
+		try:
+			for k in self._write_keys:
+				v = values.get(k)
+				if not v:
+					continue
+				myf.write(_setitem_fmt % (k, v))
+		finally:
+			myf.close()
+		self._ensure_access(fp)
+
+		#update written.  now we move it.
+
+		new_fp = os.path.join(self.location,cpv)
+		try:
+			os.rename(fp, new_fp)
+		except (OSError, IOError) as e:
+			os.remove(fp)
+			raise cache_errors.CacheCorruption(cpv, e)
+
+	def _delitem(self, cpv):
+#		import pdb;pdb.set_trace()
+		try:
+			os.remove(os.path.join(self.location,cpv))
+		except OSError as e:
+			if errno.ENOENT == e.errno:
+				raise KeyError(cpv)
+			else:
+				raise cache_errors.CacheCorruption(cpv, e)
+
+	def __contains__(self, cpv):
+		return os.path.exists(os.path.join(self.location, cpv))
+
+	def __iter__(self):
+		"""generator for walking the dir struct"""
+		dirs = [(0, self.location)]
+		len_base = len(self.location)
+		while dirs:
+			depth, dir_path = dirs.pop()
+			try:
+				dir_list = os.listdir(dir_path)
+			except OSError as e:
+				if e.errno != errno.ENOENT:
+					raise
+				del e
+				continue
+			for l in dir_list:
+				if l.endswith(".cpickle"):
+					continue
+				p = os.path.join(dir_path, l)
+				try:
+					st = os.lstat(p)
+				except OSError:
+					# Cache entry disappeared.
+					continue
+				if stat.S_ISDIR(st.st_mode):
+					# Only recurse 1 deep, in order to avoid iteration over
+					# entries from another nested cache instance. This can
+					# happen if the user nests an overlay inside
+					# /usr/portage/local as in bug #302764.
+					if depth < 1:
+						dirs.append((depth+1, p))
+					continue
+				yield p[len_base+1:]

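Each flat_hash entry is a plain file of KEY=value lines, written to a temporary .update.<pid>.* file and renamed into place. A self-contained round trip of just the serialization (a tempfile path instead of the real location/<category>/<package> layout):

    import os
    import tempfile

    entry = {'EAPI': '4', 'SLOT': '0', '_mtime_': '1313947200'}

    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as f:
        for k in sorted(entry):            # _write_keys is sorted, too
            f.write('%s=%s\n' % (k, entry[k]))

    with open(path) as f:
        lines = f.read().split('\n')
    if not lines[-1]:                      # drop the trailing empty element
        lines.pop()
    parsed = dict(x.split('=', 1) for x in lines)
    os.unlink(path)
    assert parsed == entry
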
diff --git a/portage_with_autodep/pym/portage/cache/flat_list.py b/portage_with_autodep/pym/portage/cache/flat_list.py
new file mode 100644
index 0000000..7288307
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/flat_list.py
@@ -0,0 +1,134 @@
+# Copyright 2005-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.cache import fs_template
+from portage.cache import cache_errors
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import errno
+import io
+import stat
+import sys
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+# Coerce to unicode, in order to prevent TypeError when writing
+# raw bytes to TextIOWrapper with python2.
+_setitem_fmt = _unicode_decode("%s\n")
+
+# store the current key order *here*.
+class database(fs_template.FsBased):
+
+	autocommits = True
+
+	# do not screw with this ordering. _eclasses_ needs to be last
+	auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+		'RESTRICT',  'HOMEPAGE',  'LICENSE', 'DESCRIPTION',
+		'KEYWORDS',  'IUSE', 'REQUIRED_USE',
+		'PDEPEND',   'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')
+
+	def __init__(self, *args, **config):
+		super(database,self).__init__(*args, **config)
+		self.location = os.path.join(self.location, 
+			self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+		if len(self._known_keys) > len(self.auxdbkey_order) + 2:
+			raise Exception("fewer ordered keys than auxdbkeys")
+		if not os.path.exists(self.location):
+			self._ensure_dirs()
+
+
+	def _getitem(self, cpv):
+		d = {}
+		try:
+			myf = io.open(_unicode_encode(os.path.join(self.location, cpv),
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['repo.content'],
+				errors='replace')
+			for k,v in zip(self.auxdbkey_order, myf):
+				d[k] = v.rstrip("\n")
+		except (OSError, IOError) as e:
+			if errno.ENOENT == e.errno:
+				raise KeyError(cpv)
+			raise cache_errors.CacheCorruption(cpv, e)
+
+		try:
+			d["_mtime_"] = os.fstat(myf.fileno())[stat.ST_MTIME]
+		except OSError as e:	
+			myf.close()
+			raise cache_errors.CacheCorruption(cpv, e)
+		myf.close()
+		return d
+
+
+	def _setitem(self, cpv, values):
+		s = cpv.rfind("/")
+		fp=os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+		try:
+			myf = io.open(_unicode_encode(fp,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='w', encoding=_encodings['repo.content'],
+				errors='backslashreplace')
+		except (OSError, IOError) as e:
+			if errno.ENOENT == e.errno:
+				try:
+					self._ensure_dirs(cpv)
+					myf = io.open(_unicode_encode(fp,
+						encoding=_encodings['fs'], errors='strict'),
+						mode='w', encoding=_encodings['repo.content'],
+						errors='backslashreplace')
+				except (OSError, IOError) as e:
+					raise cache_errors.CacheCorruption(cpv, e)
+			else:
+				raise cache_errors.CacheCorruption(cpv, e)
+		
+
+		for x in self.auxdbkey_order:
+			myf.write(_setitem_fmt % (values.get(x, ""),))
+
+		myf.close()
+		self._ensure_access(fp, mtime=values["_mtime_"])
+		#update written.  now we move it.
+		new_fp = os.path.join(self.location,cpv)
+		try:
+			os.rename(fp, new_fp)
+		except (OSError, IOError) as e:
+			os.remove(fp)
+			raise cache_errors.CacheCorruption(cpv, e)
+
+
+	def _delitem(self, cpv):
+		try:
+			os.remove(os.path.join(self.location,cpv))
+		except OSError as e:
+			if errno.ENOENT == e.errno:
+				raise KeyError(cpv)
+			else:
+				raise cache_errors.CacheCorruption(cpv, e)
+
+
+	def __contains__(self, cpv):
+		return os.path.exists(os.path.join(self.location, cpv))
+
+
+	def __iter__(self):
+		"""generator for walking the dir struct"""
+		dirs = [self.location]
+		len_base = len(self.location)
+		while len(dirs):
+			for l in os.listdir(dirs[0]):
+				if l.endswith(".cpickle"):
+					continue
+				p = os.path.join(dirs[0],l)
+				st = os.lstat(p)
+				if stat.S_ISDIR(st.st_mode):
+					dirs.append(p)
+					continue
+				yield p[len_base+1:]
+			dirs.pop(0)
+
+
+	def commit(self):	pass

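flat_list stores one value per line in the fixed auxdbkey_order, so an unset key is just an empty line and the reader re-attaches the names with zip(). A shortened illustration of that pairing (three keys instead of the full order):

    order = ('DEPEND', 'RDEPEND', 'SLOT')
    values = {'SLOT': '0', 'DEPEND': 'dev-libs/foo'}

    # write side: one line per ordered key, '' for anything unset
    lines = ['%s\n' % (values.get(k, ''),) for k in order]

    # read side: zip the fixed order back onto the lines
    parsed = dict((k, v.rstrip('\n')) for k, v in zip(order, lines))
    assert parsed == {'DEPEND': 'dev-libs/foo', 'RDEPEND': '', 'SLOT': '0'}
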
diff --git a/portage_with_autodep/pym/portage/cache/fs_template.py b/portage_with_autodep/pym/portage/cache/fs_template.py
new file mode 100644
index 0000000..a82e862
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/fs_template.py
@@ -0,0 +1,90 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+import sys
+from portage.cache import template
+from portage import os
+
+from portage.proxy.lazyimport import lazyimport
+lazyimport(globals(),
+	'portage.data:portage_gid',
+	'portage.exception:PortageException',
+	'portage.util:apply_permissions',
+)
+del lazyimport
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+class FsBased(template.database):
+	"""template wrapping fs needed options, and providing _ensure_access as a way to 
+	attempt to ensure files have the specified owners/perms"""
+
+	def __init__(self, *args, **config):
+		"""throws InitializationError if needed args aren't specified
+		gid and perms aren't listed as defaults due to an oddity of python's currying mechanism
+		gid=portage_gid
+		perms=0665"""
+
+		for x, y in (("gid", -1), ("perms", -1)):
+			if x in config:
+				setattr(self, "_"+x, config[x])
+				del config[x]
+			else:
+				setattr(self, "_"+x, y)
+		super(FsBased, self).__init__(*args, **config)
+
+		if self.label.startswith(os.path.sep):
+			# normpath.
+			self.label = os.path.sep + os.path.normpath(self.label).lstrip(os.path.sep)
+
+
+	def _ensure_access(self, path, mtime=-1):
+		"""returns true or false if it's able to ensure that path is properly chmod'd and chowned.
+		if mtime is specified, attempts to ensure that's correct also"""
+		try:
+			apply_permissions(path, gid=self._gid, mode=self._perms)
+			if mtime != -1:
+				mtime=long(mtime)
+				os.utime(path, (mtime, mtime))
+		except (PortageException, EnvironmentError):
+			return False
+		return True
+
+	def _ensure_dirs(self, path=None):
+		"""with path!=None, ensure beyond self.location.  otherwise, ensure self.location"""
+		if path:
+			path = os.path.dirname(path)
+			base = self.location
+		else:
+			path = self.location
+			base='/'
+
+		for dir in path.lstrip(os.path.sep).rstrip(os.path.sep).split(os.path.sep):
+			base = os.path.join(base,dir)
+			if not os.path.exists(base):
+				if self._perms != -1:
+					um = os.umask(0)
+				try:
+					perms = self._perms
+					if perms == -1:
+						perms = 0
+					perms |= 0o755
+					os.mkdir(base, perms)
+					if self._gid != -1:
+						os.chown(base, -1, self._gid)
+				finally:
+					if self._perms != -1:
+						os.umask(um)
+
+	
+def gen_label(base, label):
+	"""if supplied label is a path, generate a unique label based upon label, and supplied base path"""
+	if label.find(os.path.sep) == -1:
+		return label
+	label = label.strip("\"").strip("'")
+	label = os.path.join(*(label.rstrip(os.path.sep).split(os.path.sep)))
+	tail = os.path.split(label)[1]
+	return "%s-%X" % (tail, abs(label.__hash__()))
+

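gen_label() collapses a path-like label into a single filesystem-safe name: the tail keeps it readable, and the hash of the whole path keeps two labels with the same tail distinct. A standalone copy of the same logic for illustration (note the base argument of the original is unused, and str hashes vary between interpreter runs on python3, so the suffix is only stable within a process):

    import os

    def demo_gen_label(label):
        if label.find(os.path.sep) == -1:
            return label
        label = label.strip("\"").strip("'")
        label = os.path.join(*(label.rstrip(os.path.sep).split(os.path.sep)))
        tail = os.path.split(label)[1]
        return "%s-%X" % (tail, abs(hash(label)))

    print(demo_gen_label('metadata'))      # plain labels pass through
    print(demo_gen_label('/usr/portage'))  # e.g. portage-1A2B3C4D
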
diff --git a/portage_with_autodep/pym/portage/cache/mappings.py b/portage_with_autodep/pym/portage/cache/mappings.py
new file mode 100644
index 0000000..60a918e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/mappings.py
@@ -0,0 +1,485 @@
+# Copyright: 2005-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+__all__ = ["Mapping", "MutableMapping", "UserDict", "ProtectedDict",
+	"LazyLoad", "slot_dict_class"]
+
+import sys
+import weakref
+
+class Mapping(object):
+	"""
+	In python-3.0, the UserDict.DictMixin class has been replaced by
+	Mapping and MutableMapping from the collections module, but 2to3
+	doesn't currently account for this change:
+
+	    http://bugs.python.org/issue2876
+
+	As a workaround for the above issue, use this class as a substitute
+	for UserDict.DictMixin so that code converted via 2to3 will run.
+	"""
+
+	__slots__ = ()
+
+	def __iter__(self):
+		return iter(self.keys())
+
+	def keys(self):
+		return list(self.__iter__())
+
+	def __contains__(self, key):
+		try:
+			value = self[key]
+		except KeyError:
+			return False
+		return True
+
+	def iteritems(self):
+		for k in self:
+			yield (k, self[k])
+
+	def iterkeys(self):
+		return self.__iter__()
+
+	def itervalues(self):
+		for _, v in self.items():
+			yield v
+
+	def values(self):
+		return [v for _, v in self.iteritems()]
+
+	def items(self):
+		return list(self.iteritems())
+
+	def get(self, key, default=None):
+		try:
+			return self[key]
+		except KeyError:
+			return default
+
+	def __repr__(self):
+		return repr(dict(self.items()))
+
+	def __len__(self):
+		return len(list(self))
+
+	if sys.hexversion >= 0x3000000:
+		items = iteritems
+		keys = __iter__
+		values = itervalues
+
+class MutableMapping(Mapping):
+	"""
+	A mutable version of the Mapping class.
+	"""
+
+	__slots__ = ()
+
+	def clear(self):
+		for key in list(self):
+			del self[key]
+
+	def setdefault(self, key, default=None):
+		try:
+			return self[key]
+		except KeyError:
+			self[key] = default
+		return default
+
+	def pop(self, key, *args):
+		if len(args) > 1:
+			raise TypeError("pop expected at most 2 arguments, got " + \
+				repr(1 + len(args)))
+		try:
+			value = self[key]
+		except KeyError:
+			if args:
+				return args[0]
+			raise
+		del self[key]
+		return value
+
+	def popitem(self):
+		try:
+			k, v = next(iter(self.items()))
+		except StopIteration:
+			raise KeyError('container is empty')
+		del self[k]
+		return (k, v)
+
+	def update(self, *args, **kwargs):
+		if len(args) > 1:
+			raise TypeError(
+				"expected at most 1 positional argument, got " + \
+				repr(len(args)))
+		other = None
+		if args:
+			other = args[0]
+		if other is None:
+			pass
+		elif hasattr(other, 'iteritems'):
+			# Use getattr to avoid interference from 2to3.
+			for k, v in getattr(other, 'iteritems')():
+				self[k] = v
+		elif hasattr(other, 'items'):
+			# Use getattr to avoid interference from 2to3.
+			for k, v in getattr(other, 'items')():
+				self[k] = v
+		elif hasattr(other, 'keys'):
+			for k in other.keys():
+				self[k] = other[k]
+		else:
+			for k, v in other:
+				self[k] = v
+		if kwargs:
+			self.update(kwargs)
+
+class UserDict(MutableMapping):
+	"""
+	Use this class as a substitute for UserDict.UserDict so that
+	code converted via 2to3 will run:
+
+	     http://bugs.python.org/issue2876
+	"""
+
+	__slots__ = ('data',)
+
+	def __init__(self, *args, **kwargs):
+
+		self.data = {}
+
+		if len(args) > 1:
+			raise TypeError(
+				"expected at most 1 positional argument, got " + \
+				repr(len(args)))
+
+		if args:
+			self.update(args[0])
+
+		if kwargs:
+			self.update(kwargs)
+
+	def __repr__(self):
+		return repr(self.data)
+
+	def __contains__(self, key):
+		return key in self.data
+
+	def __iter__(self):
+		return iter(self.data)
+
+	def __len__(self):
+		return len(self.data)
+
+	def __getitem__(self, key):
+		return self.data[key]
+
+	def __setitem__(self, key, item):
+		self.data[key] = item
+
+	def __delitem__(self, key):
+		del self.data[key]
+
+	def clear(self):
+		self.data.clear()
+
+	if sys.hexversion >= 0x3000000:
+		keys = __iter__
+
+class OrderedDict(UserDict):
+
+	__slots__ = ('_order',)
+
+	def __init__(self, *args, **kwargs):
+		self._order = []
+		UserDict.__init__(self, *args, **kwargs)
+
+	def __iter__(self):
+		return iter(self._order)
+
+	def __setitem__(self, key, item):
+		if key in self:
+			self._order.remove(key)
+		UserDict.__setitem__(self, key, item)
+		self._order.append(key)
+
+	def __delitem__(self, key):
+		UserDict.__delitem__(self, key)
+		self._order.remove(key)
+
+	def clear(self):
+		UserDict.clear(self)
+		del self._order[:]
+
+	if sys.hexversion >= 0x3000000:
+		keys = __iter__
+
+class ProtectedDict(MutableMapping):
+	"""
+	given an initial dict, this wraps that dict, storing changes in a secondary dict and
+	protecting the underlying dict from modification
+	"""
+	__slots__=("orig","new","blacklist")
+
+	def __init__(self, orig):
+		self.orig = orig
+		self.new = {}
+		self.blacklist = {}
+
+
+	def __setitem__(self, key, val):
+		self.new[key] = val
+		if key in self.blacklist:
+			del self.blacklist[key]
+
+
+	def __getitem__(self, key):
+		if key in self.new:
+			return self.new[key]
+		if key in self.blacklist:
+			raise KeyError(key)
+		return self.orig[key]
+
+
+	def __delitem__(self, key):
+		if key in self.new:
+			del self.new[key]
+		elif key in self.orig:
+			if key not in self.blacklist:
+				self.blacklist[key] = True
+				return
+		raise KeyError(key)
+			
+
+	def __iter__(self):
+		for k in self.new:
+			yield k
+		for k in self.orig:
+			if k not in self.blacklist and k not in self.new:
+				yield k
+
+	def __contains__(self, key):
+		return key in self.new or (key not in self.blacklist and key in self.orig)
+
+	if sys.hexversion >= 0x3000000:
+		keys = __iter__
+
+class LazyLoad(Mapping):
+	"""
+	Lazy loading of values for a dict
+	"""
+	__slots__=("pull", "d")
+
+	def __init__(self, pull_items_func, initial_items=[]):
+		self.d = {}
+		for k, v in initial_items:
+			self.d[k] = v
+		self.pull = pull_items_func
+
+	def __getitem__(self, key):
+		if key in self.d:
+			return self.d[key]
+		elif self.pull != None:
+			self.d.update(self.pull())
+			self.pull = None
+		return self.d[key]
+
+	def __iter__(self):
+		if self.pull is not None:
+			self.d.update(self.pull())
+			self.pull = None
+		return iter(self.d)
+
+	def __contains__(self, key):
+		if key in self.d:
+			return True
+		elif self.pull != None:
+			self.d.update(self.pull())
+			self.pull = None
+		return key in self.d
+
+	if sys.hexversion >= 0x3000000:
+		keys = __iter__
+
+_slot_dict_classes = weakref.WeakValueDictionary()
+
+def slot_dict_class(keys, prefix="_val_"):
+	"""
+	Generates mapping classes that behave similar to a dict but store values
+	as object attributes that are allocated via __slots__. Instances of these
+	objects have a smaller memory footprint than a normal dict object.
+
+	@param keys: Fixed set of allowed keys
+	@type keys: Iterable
+	@param prefix: a prefix to use when mapping
+		attribute names from keys
+	@type prefix: String
+	@rtype: SlotDict
+	@returns: A class that constructs SlotDict instances
+		having the specified keys.
+	"""
+	if isinstance(keys, frozenset):
+		keys_set = keys
+	else:
+		keys_set = frozenset(keys)
+	v = _slot_dict_classes.get((keys_set, prefix))
+	if v is None:
+
+		class SlotDict(object):
+
+			allowed_keys = keys_set
+			_prefix = prefix
+			__slots__ = ("__weakref__",) + \
+				tuple(prefix + k for k in allowed_keys)
+
+			def __init__(self, *args, **kwargs):
+
+				if len(args) > 1:
+					raise TypeError(
+						"expected at most 1 positional argument, got " + \
+						repr(len(args)))
+
+				if args:
+					self.update(args[0])
+
+				if kwargs:
+					self.update(kwargs)
+
+			def __iter__(self):
+				for k, v in self.iteritems():
+					yield k
+
+			def __len__(self):
+				l = 0
+				for i in self.iteritems():
+					l += 1
+				return l
+
+			def keys(self):
+				return list(self)
+
+			def iteritems(self):
+				prefix = self._prefix
+				for k in self.allowed_keys:
+					try:
+						yield (k, getattr(self, prefix + k))
+					except AttributeError:
+						pass
+
+			def items(self):
+				return list(self.iteritems())
+
+			def itervalues(self):
+				for k, v in self.iteritems():
+					yield v
+
+			def values(self):
+				return list(self.itervalues())
+
+			def __delitem__(self, k):
+				try:
+					delattr(self, self._prefix + k)
+				except AttributeError:
+					raise KeyError(k)
+
+			def __setitem__(self, k, v):
+				setattr(self, self._prefix + k, v)
+
+			def setdefault(self, key, default=None):
+				try:
+					return self[key]
+				except KeyError:
+					self[key] = default
+				return default
+
+			def update(self, *args, **kwargs):
+				if len(args) > 1:
+					raise TypeError(
+						"expected at most 1 positional argument, got " + \
+						repr(len(args)))
+				other = None
+				if args:
+					other = args[0]
+				if other is None:
+					pass
+				elif hasattr(other, 'iteritems'):
+					# Use getattr to avoid interference from 2to3.
+					for k, v in getattr(other, 'iteritems')():
+						self[k] = v
+				elif hasattr(other, 'items'):
+					# Use getattr to avoid interference from 2to3.
+					for k, v in getattr(other, 'items')():
+						self[k] = v
+				elif hasattr(other, 'keys'):
+					for k in other.keys():
+						self[k] = other[k]
+				else:
+					for k, v in other:
+						self[k] = v
+				if kwargs:
+					self.update(kwargs)
+
+			def __getitem__(self, k):
+				try:
+					return getattr(self, self._prefix + k)
+				except AttributeError:
+					raise KeyError(k)
+
+			def get(self, key, default=None):
+				try:
+					return self[key]
+				except KeyError:
+					return default
+
+			def __contains__(self, k):
+				return hasattr(self, self._prefix + k)
+
+			def pop(self, key, *args):
+				if len(args) > 1:
+					raise TypeError(
+						"pop expected at most 2 arguments, got " + \
+						repr(1 + len(args)))
+				try:
+					value = self[key]
+				except KeyError:
+					if args:
+						return args[0]
+					raise
+				del self[key]
+				return value
+
+			def popitem(self):
+				try:
+					k, v = self.iteritems().next()
+				except StopIteration:
+					raise KeyError('container is empty')
+				del self[k]
+				return (k, v)
+
+			def copy(self):
+				c = self.__class__()
+				c.update(self)
+				return c
+
+			def clear(self):
+				for k in self.allowed_keys:
+					try:
+						delattr(self, self._prefix + k)
+					except AttributeError:
+						pass
+
+			def __str__(self):
+				return str(dict(self.iteritems()))
+
+			def __repr__(self):
+				return repr(dict(self.iteritems()))
+
+			if sys.hexversion >= 0x3000000:
+				items = iteritems
+				keys = __iter__
+				values = itervalues
+
+		v = SlotDict
+		_slot_dict_classes[v.allowed_keys] = v
+	return v

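Typical use of slot_dict_class(), assuming the patched pym tree is on sys.path: fix the key set once, construct many instances, and pay for attribute slots instead of a per-instance dict. The key names below are illustrative:

    from portage.cache.mappings import slot_dict_class

    PkgData = slot_dict_class(('EAPI', 'SLOT', 'KEYWORDS'))

    d = PkgData(SLOT='0')            # kwargs are routed through update()
    d['EAPI'] = '4'
    assert d.get('KEYWORDS', '~amd64') == '~amd64'   # unset key -> default
    assert sorted(d.items()) == [('EAPI', '4'), ('SLOT', '0')]
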
diff --git a/portage_with_autodep/pym/portage/cache/metadata.py b/portage_with_autodep/pym/portage/cache/metadata.py
new file mode 100644
index 0000000..4c735d7
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/metadata.py
@@ -0,0 +1,154 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+import errno
+import re
+import stat
+import sys
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.cache import cache_errors, flat_hash
+import portage.eclass_cache
+from portage.cache.template import reconstruct_eclasses
+from portage.cache.mappings import ProtectedDict
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+	long = int
+
+# this is the old cache format, flat_list.  its fixed line count is maintained here.
+magic_line_count = 22
+
+# store the current key order *here*.
+class database(flat_hash.database):
+	complete_eclass_entries = False
+	auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+		'RESTRICT',  'HOMEPAGE',  'LICENSE', 'DESCRIPTION',
+		'KEYWORDS',  'INHERITED', 'IUSE', 'REQUIRED_USE',
+		'PDEPEND',   'PROVIDE', 'EAPI', 'PROPERTIES', 'DEFINED_PHASES')
+
+	autocommits = True
+	serialize_eclasses = False
+
+	_hashed_re = re.compile('^(\\w+)=([^\n]*)')
+
+	def __init__(self, location, *args, **config):
+		loc = location
+		super(database, self).__init__(location, *args, **config)
+		self.location = os.path.join(loc, "metadata","cache")
+		self.ec = None
+		self.raise_stat_collision = False
+
+	def _parse_data(self, data, cpv):
+		_hashed_re_match = self._hashed_re.match
+		d = {}
+
+		for line in data:
+			hashed = False
+			hashed_match = _hashed_re_match(line)
+			if hashed_match is None:
+				d.clear()
+				try:
+					for i, key in enumerate(self.auxdbkey_order):
+						d[key] = data[i]
+				except IndexError:
+					pass
+				break
+			else:
+				d[hashed_match.group(1)] = hashed_match.group(2)
+
+		if "_eclasses_" not in d:
+			if "INHERITED" in d:
+				if self.ec is None:
+					self.ec = portage.eclass_cache.cache(self.location[:-15]) # strip trailing "/metadata/cache"
+				try:
+					d["_eclasses_"] = self.ec.get_eclass_data(
+						d["INHERITED"].split())
+				except KeyError as e:
+					# INHERITED contains a non-existent eclass.
+					raise cache_errors.CacheCorruption(cpv, e)
+				del d["INHERITED"]
+			else:
+				d["_eclasses_"] = {}
+		elif isinstance(d["_eclasses_"], basestring):
+			# We skip this if flat_hash.database._parse_data() was called above
+			# because it calls reconstruct_eclasses() internally.
+			d["_eclasses_"] = reconstruct_eclasses(None, d["_eclasses_"])
+
+		return d
+
+	def _setitem(self, cpv, values):
+		if "_eclasses_" in values:
+			values = ProtectedDict(values)
+			values["INHERITED"] = ' '.join(sorted(values["_eclasses_"]))
+
+		new_content = []
+		for k in self.auxdbkey_order:
+			new_content.append(values.get(k, ''))
+			new_content.append('\n')
+		for i in range(magic_line_count - len(self.auxdbkey_order)):
+			new_content.append('\n')
+		new_content = ''.join(new_content)
+		new_content = _unicode_encode(new_content,
+			_encodings['repo.content'], errors='backslashreplace')
+
+		new_fp = os.path.join(self.location, cpv)
+		try:
+			f = open(_unicode_encode(new_fp,
+				encoding=_encodings['fs'], errors='strict'), 'rb')
+		except EnvironmentError:
+			pass
+		else:
+			try:
+				try:
+					existing_st = os.fstat(f.fileno())
+					existing_content = f.read()
+				finally:
+					f.close()
+			except EnvironmentError:
+				pass
+			else:
+				existing_mtime = existing_st[stat.ST_MTIME]
+				if values['_mtime_'] == existing_mtime and \
+					existing_content == new_content:
+					return
+
+				if self.raise_stat_collision and \
+					values['_mtime_'] == existing_mtime and \
+					len(new_content) == existing_st.st_size:
+					raise cache_errors.StatCollision(cpv, new_fp,
+						existing_mtime, existing_st.st_size)
+
+		s = cpv.rfind("/")
+		fp = os.path.join(self.location,cpv[:s],
+			".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+		try:
+			myf = open(_unicode_encode(fp,
+				encoding=_encodings['fs'], errors='strict'), 'wb')
+		except EnvironmentError as e:
+			if errno.ENOENT == e.errno:
+				try:
+					self._ensure_dirs(cpv)
+					myf = open(_unicode_encode(fp,
+						encoding=_encodings['fs'], errors='strict'), 'wb')
+				except EnvironmentError as e:
+					raise cache_errors.CacheCorruption(cpv, e)
+			else:
+				raise cache_errors.CacheCorruption(cpv, e)
+
+		try:
+			myf.write(new_content)
+		finally:
+			myf.close()
+		self._ensure_access(fp, mtime=values["_mtime_"])
+
+		try:
+			os.rename(fp, new_fp)
+		except EnvironmentError as e:
+			try:
+				os.unlink(fp)
+			except EnvironmentError:
+				pass
+			raise cache_errors.CacheCorruption(cpv, e)

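The metadata/cache files written above have a fixed shape: one value per line in auxdbkey_order, padded with blank lines up to magic_line_count so consumers of the old flat_list format can still index by line number. A shrunken illustration of the padding arithmetic (3 keys and a count of 5 instead of the real 17 and 22):

    order = ('DEPEND', 'RDEPEND', 'SLOT')   # stands in for auxdbkey_order
    magic_line_count = 5                    # stands in for the real 22
    values = {'SLOT': '0'}

    content = []
    for k in order:
        content.append(values.get(k, ''))
        content.append('\n')
    for i in range(magic_line_count - len(order)):
        content.append('\n')
    content = ''.join(content)

    assert content.count('\n') == magic_line_count
    assert content.split('\n')[2] == '0'    # SLOT always sits on line 3
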
diff --git a/portage_with_autodep/pym/portage/cache/metadata_overlay.py b/portage_with_autodep/pym/portage/cache/metadata_overlay.py
new file mode 100644
index 0000000..cfa0051
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/metadata_overlay.py
@@ -0,0 +1,105 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.cache import template
+from portage.cache.cache_errors import CacheCorruption
+from portage.cache.flat_hash import database as db_rw
+from portage.cache.metadata import database as db_ro
+
+class database(template.database):
+
+	serialize_eclasses = False
+
+	def __init__(self, location, label, auxdbkeys, db_rw=db_rw, db_ro=db_ro,
+		*args, **config):
+		super_config = config.copy()
+		super_config.pop("gid", None)
+		super_config.pop("perms", None)
+		super(database, self).__init__(location, label, auxdbkeys,
+			*args, **super_config)
+		self.db_rw = db_rw(location, label, auxdbkeys, **config)
+		self.commit = self.db_rw.commit
+		self.autocommits = self.db_rw.autocommits
+		if isinstance(db_ro, type):
+			ro_config = config.copy()
+			ro_config["readonly"] = True
+			self.db_ro = db_ro(label, "metadata/cache", auxdbkeys, **ro_config)
+		else:
+			self.db_ro = db_ro
+
+	def __getitem__(self, cpv):
+		"""funnel whiteout validation through here, since value needs to be fetched"""
+		try:
+			value = self.db_rw[cpv]
+		except KeyError:
+			return self.db_ro[cpv] # raises a KeyError when necessary
+		except CacheCorruption:
+			del self.db_rw[cpv]
+			return self.db_ro[cpv] # raises a KeyError when necessary
+		if self._is_whiteout(value):
+			if self._is_whiteout_valid(cpv, value):
+				raise KeyError(cpv)
+			else:
+				del self.db_rw[cpv]
+				return self.db_ro[cpv] # raises a KeyError when necessary
+		else:
+			return value
+
+	def _setitem(self, name, values):
+		try:
+			value_ro = self.db_ro.get(name)
+		except CacheCorruption:
+			value_ro = None
+		if value_ro is not None and \
+			self._are_values_identical(value_ro, values):
+			# we have matching values in the underlying db_ro
+			# so it is unnecessary to store data in db_rw
+			try:
+				del self.db_rw[name] # delete unwanted whiteout when necessary
+			except KeyError:
+				pass
+			return
+		self.db_rw[name] = values
+
+	def _delitem(self, cpv):
+		value = self[cpv] # validates whiteout and/or raises a KeyError when necessary
+		if cpv in self.db_ro:
+			self.db_rw[cpv] = self._create_whiteout(value)
+		else:
+			del self.db_rw[cpv]
+
+	def __contains__(self, cpv):
+		try:
+			self[cpv] # validates whiteout when necessary
+		except KeyError:
+			return False
+		return True
+
+	def __iter__(self):
+		s = set()
+		for cpv in self.db_rw:
+			if cpv in self: # validates whiteout when necessary
+				yield cpv
+			# set includes whiteouts so they won't be yielded later
+			s.add(cpv)
+		for cpv in self.db_ro:
+			if cpv not in s:
+				yield cpv
+
+	def _is_whiteout(self, value):
+		return value["EAPI"] == "whiteout"
+
+	def _create_whiteout(self, value):
+		return {"EAPI":"whiteout","_eclasses_":value["_eclasses_"],"_mtime_":value["_mtime_"]}
+
+	def _is_whiteout_valid(self, name, value_rw):
+		try:
+			value_ro = self.db_ro[name]
+			return self._are_values_identical(value_rw,value_ro)
+		except KeyError:
+			return False
+
+	def _are_values_identical(self, value1, value2):
+		if value1['_mtime_'] != value2['_mtime_']:
+			return False
+		return value1["_eclasses_"] == value2["_eclasses_"]

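Deletions are recorded in the writable layer as "whiteout" entries: the EAPI field holds the literal string "whiteout", plus the _eclasses_/_mtime_ of the masked read-only entry so staleness can be detected later. A pure-dict illustration of the marker logic (the mtime value is made up):

    def is_whiteout(value):
        return value['EAPI'] == 'whiteout'

    ro_entry = {'EAPI': '4', '_eclasses_': {}, '_mtime_': 1313947200}
    whiteout = {'EAPI': 'whiteout',
        '_eclasses_': ro_entry['_eclasses_'],
        '_mtime_': ro_entry['_mtime_']}

    assert is_whiteout(whiteout) and not is_whiteout(ro_entry)
    # _is_whiteout_valid() later re-compares _mtime_/_eclasses_ against
    # the current read-only entry; any mismatch means the whiteout is
    # stale and gets dropped.
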
diff --git a/portage_with_autodep/pym/portage/cache/sql_template.py b/portage_with_autodep/pym/portage/cache/sql_template.py
new file mode 100644
index 0000000..d023b1b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/sql_template.py
@@ -0,0 +1,301 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+import sys
+from portage.cache import template, cache_errors
+from portage.cache.template import reconstruct_eclasses
+
+class SQLDatabase(template.database):
+	"""template class for RDBM based caches
+	
+	This class is designed such that derivatives don't have to change much code, mostly constant strings.
+	_BaseError must be an exception class that all Exceptions thrown from the derived RDBMS are derived
+	from.
+
+	SCHEMA_INSERT_CPV_INTO_PACKAGE should be modified dependent on the RDBMS, as should SCHEMA_PACKAGE_CREATE --
+	basically you need to deal with creation of a unique pkgid.  If the dbapi2 rdbms class has a method of 
+	recovering that id, then modify _insert_cpv to remove the extra select.
+
+	Creation of a derived class involves supplying _initdb_con, and table_exists.
+	Additionally, the default schemas may have to be modified.
+	"""
+	
+	SCHEMA_PACKAGE_NAME	= "package_cache"
+	SCHEMA_PACKAGE_CREATE 	= "CREATE TABLE %s (\
+		pkgid INTEGER PRIMARY KEY, label VARCHAR(255), cpv VARCHAR(255), UNIQUE(label, cpv))" % SCHEMA_PACKAGE_NAME
+	SCHEMA_PACKAGE_DROP	= "DROP TABLE %s" % SCHEMA_PACKAGE_NAME
+
+	SCHEMA_VALUES_NAME	= "values_cache"
+	SCHEMA_VALUES_CREATE	= "CREATE TABLE %s ( pkgid integer references %s (pkgid) on delete cascade, \
+		key varchar(255), value text, UNIQUE(pkgid, key))" % (SCHEMA_VALUES_NAME, SCHEMA_PACKAGE_NAME)
+	SCHEMA_VALUES_DROP	= "DROP TABLE %s" % SCHEMA_VALUES_NAME
+	SCHEMA_INSERT_CPV_INTO_PACKAGE	= "INSERT INTO %s (label, cpv) VALUES(%%s, %%s)" % SCHEMA_PACKAGE_NAME
+
+	_BaseError = ()
+	_dbClass = None
+
+	autocommits = False
+#	cleanse_keys = True
+
+	# boolean indicating if the derived RDBMS class supports replace syntax
+	_supports_replace = False
+
+	def __init__(self, location, label, auxdbkeys, *args, **config):
+		"""initialize the instance.
+		derived classes shouldn't need to override this"""
+
+		super(SQLDatabase, self).__init__(location, label, auxdbkeys, *args, **config)
+
+		config.setdefault("host","127.0.0.1")
+		config.setdefault("autocommit", self.autocommits)
+		self._initdb_con(config)
+
+		self.label = self._sfilter(self.label)
+
+
+	def _dbconnect(self, config):
+		"""should be overridden if the derived class needs special parameters for initializing
+		the db connection, or cursor"""
+		self.db = self._dbClass(**config)
+		self.con = self.db.cursor()
+
+
+	def _initdb_con(self,config):
+		"""ensure needed tables are in place.
+		If the derived class needs a different set of table creation commands, overload the appropriate
+		SCHEMA_ attributes.  If it needs additional setup beyond that, override this method"""
+
+		self._dbconnect(config)
+		if not self._table_exists(self.SCHEMA_PACKAGE_NAME):
+			if self.readonly:
+				raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+					self.SCHEMA_PACKAGE_NAME)
+			try:
+				self.con.execute(self.SCHEMA_PACKAGE_CREATE)
+			except  self._BaseError as e:
+				raise cache_errors.InitializationError(self.__class__, e)
+
+		if not self._table_exists(self.SCHEMA_VALUES_NAME):
+			if self.readonly:
+				raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+					self.SCHEMA_VALUES_NAME)
+			try:
+				self.con.execute(self.SCHEMA_VALUES_CREATE)
+			except	self._BaseError as e:
+				raise cache_errors.InitializationError(self.__class__, e)
+
+
+	def _table_exists(self, tbl):
+		"""return true if a table exists
+		derived classes must override this"""
+		raise NotImplementedError
+
+
+	def _sfilter(self, s):
+		"""meta escaping, returns quoted string for use in sql statements"""
+		return "\"%s\"" % s.replace("\\","\\\\").replace("\"","\\\"")
+
+
+	def _getitem(self, cpv):
+		try:
+			self.con.execute("SELECT key, value FROM %s NATURAL JOIN %s "
+			"WHERE label=%s AND cpv=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+			self.label, self._sfilter(cpv)))
+		except self._BaseError as e:
+			raise cache_errors.CacheCorruption(self, cpv, e)
+
+		rows = self.con.fetchall()
+
+		if len(rows) == 0:
+			raise KeyError(cpv)
+
+		vals = dict([(k,"") for k in self._known_keys])
+		vals.update(dict(rows))
+		return vals
+
+
+	def _delitem(self, cpv):
+		"""delete a cpv cache entry
+		derived RDBMS classes *must* either support cascaded deletes, or
+		override this method"""
+		try:
+			try:	
+				self.con.execute("DELETE FROM %s WHERE label=%s AND cpv=%s" % \
+				(self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+				if self.autocommits:
+					self.commit()
+			except self._BaseError as e:
+				raise cache_errors.CacheCorruption(self, cpv, e)
+			if self.con.rowcount <= 0:
+				raise KeyError(cpv)
+		except SystemExit:
+			raise
+		except Exception:
+			if not self.autocommits:
+				self.db.rollback()
+				# yes, this can roll back a lot more than just the delete.  deal.
+			raise
+
+	def __del__(self):
+		# just to be safe.
+		if "db" in self.__dict__ and self.db != None:
+			self.commit()
+			self.db.close()
+
+	def _setitem(self, cpv, values):
+
+		try:
+			# insert.
+			try:
+				pkgid = self._insert_cpv(cpv)
+			except self._BaseError as e:
+				raise cache_errors.CacheCorruption(cpv, e)
+
+			# __getitem__ fills out missing values, 
+			# so we store only what's handed to us and is a known key
+			db_values = []
+			for key in self._known_keys:
+				if key in values and values[key]:
+					db_values.append({"key":key, "value":values[key]})
+
+			if len(db_values) > 0:
+				try:
+					self.con.executemany("INSERT INTO %s (pkgid, key, value) VALUES(\"%s\", %%(key)s, %%(value)s)" % \
+					(self.SCHEMA_VALUES_NAME, str(pkgid)), db_values)
+				except self._BaseError as e:
+					raise cache_errors.CacheCorruption(cpv, e)
+			if self.autocommits:
+				self.commit()
+
+		except SystemExit:
+			raise
+		except Exception:
+			if not self.autocommits:
+				try:
+					self.db.rollback()
+				except self._BaseError:
+					pass
+			raise
+
+
+	def _insert_cpv(self, cpv):
+		"""uses SCHEMA_INSERT_CPV_INTO_PACKAGE, which must be overloaded if the table definition
+		doesn't support auto-increment columns for pkgid.
+		returns the cpv's new pkgid
+		note this doesn't commit the transaction.  The caller is expected to."""
+		
+		cpv = self._sfilter(cpv)
+		if self._supports_replace:
+			query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1)
+		else:
+			# just delete it.
+			try:
+				del self[cpv]
+			except (cache_errors.CacheCorruption, KeyError):
+				pass
+			query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE
+		try:
+			self.con.execute(query_str % (self.label, cpv))
+		except self._BaseError:
+			self.db.rollback()
+			raise
+		self.con.execute("SELECT pkgid FROM %s WHERE label=%s AND cpv=%s" % \
+			(self.SCHEMA_PACKAGE_NAME, self.label, cpv))
+			
+		if self.con.rowcount != 1:
+			raise cache_errors.CacheCorruption(cpv, "Tried to insert the cpv, but found "
+				"%i matches upon the following select!" % self.con.rowcount)
+		return self.con.fetchone()[0]
+
+
+	def __contains__(self, cpv):
+		if not self.autocommits:
+			try:
+				self.commit()
+			except self._BaseError as e:
+				raise cache_errors.GeneralCacheCorruption(e)
+
+		try:
+			self.con.execute("SELECT cpv FROM %s WHERE label=%s AND cpv=%s" % \
+				(self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+		except self._BaseError as e:
+			raise cache_errors.GeneralCacheCorruption(e)
+		return self.con.rowcount > 0
+
+
+	def __iter__(self):
+		if not self.autocommits:
+			try:
+				self.commit()
+			except self._BaseError as e:
+				raise cache_errors.GeneralCacheCorruption(e)
+
+		try:
+			self.con.execute("SELECT cpv FROM %s WHERE label=%s" % 
+				(self.SCHEMA_PACKAGE_NAME, self.label))
+		except self._BaseError as e:
+			raise cache_errors.GeneralCacheCorruption(e)
+#		return [ row[0] for row in self.con.fetchall() ]
+		for x in self.con.fetchall():
+			yield x[0]
+
+	def iteritems(self):
+		try:
+			self.con.execute("SELECT cpv, key, value FROM %s NATURAL JOIN %s "
+			"WHERE label=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+			self.label))
+		except self._BaseError as e:
+			raise cache_errors.CacheCorruption(self, self.label, e)
+		
+		oldcpv = None
+		l = []
+		for x, y, v in self.con.fetchall():
+			if oldcpv != x:
+				if oldcpv != None:
+					d = dict(l)
+					if "_eclasses_" in d:
+						d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
+					else:
+						d["_eclasses_"] = {}
+					yield oldcpv, d
+				del l[:]
+				oldcpv = x
+			l.append((y,v))
+		if oldcpv != None:
+			d = dict(l)
+			if "_eclasses_" in d:
+				d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
+			else:
+				d["_eclasses_"] = {}
+			yield oldcpv, d
+
+	def commit(self):
+		self.db.commit()
+
+	def get_matches(self,match_dict):
+		query_list = []
+		for k,v in match_dict.items():
+			if k not in self._known_keys:
+				raise cache_errors.InvalidRestriction(k, v, "key isn't known to this cache instance")
+			v = v.replace("%","\\%")
+			v = v.replace(".*","%")
+			query_list.append("(key=%s AND value LIKE %s)" % (self._sfilter(k), self._sfilter(v)))
+
+		if len(query_list):
+			query = " AND "+" AND ".join(query_list)
+		else:
+			query = ''
+
+		print("query = SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % (self.label, query))
+		try:
+			self.con.execute("SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % \
+				(self.label, query))
+		except self._BaseError as e:
+			raise cache_errors.GeneralCacheCorruption(e)
+
+		return [ row[0] for row in self.con.fetchall() ]
+
+	if sys.hexversion >= 0x3000000:
+		items = iteritems
+		keys = __iter__

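Per the class docstring, a concrete backend mostly supplies connection plumbing. An untested sketch of what a minimal sqlite-backed derivative could look like, keeping the SCHEMA_* defaults (note the sqlite module that follows in this patch takes a different, fs_template-based approach instead):

    import sqlite3
    from portage.cache.sql_template import SQLDatabase

    class database(SQLDatabase):
        _BaseError = sqlite3.Error

        def _dbconnect(self, config):
            # overridden because sqlite3.connect() does not accept the
            # template's default "host" key; _dbClass is unused here
            self.db = sqlite3.connect(config['database'])
            self.con = self.db.cursor()

        def _table_exists(self, tbl):
            self.con.execute(
                "SELECT name FROM sqlite_master "
                "WHERE type='table' AND name=?", (tbl,))
            return len(self.con.fetchall()) == 1
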
diff --git a/portage_with_autodep/pym/portage/cache/sqlite.py b/portage_with_autodep/pym/portage/cache/sqlite.py
new file mode 100644
index 0000000..fcc62ff
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/sqlite.py
@@ -0,0 +1,245 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.cache import fs_template
+from portage.cache import cache_errors
+from portage import os
+from portage import _unicode_decode
+from portage.util import writemsg
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+class database(fs_template.FsBased):
+
+	autocommits = False
+	synchronous = False
+	# cache_bytes is used together with page_size (set at sqlite build time)
+	# to calculate the number of pages requested, according to the following
+	# equation: cache_bytes = page_bytes * page_count
+	cache_bytes = 1024 * 1024 * 10
+	_db_table = None
+
+	def __init__(self, *args, **config):
+		super(database, self).__init__(*args, **config)
+		self._import_sqlite()
+		self._allowed_keys = ["_mtime_", "_eclasses_"]
+		self._allowed_keys.extend(self._known_keys)
+		self._allowed_keys.sort()
+		self.location = os.path.join(self.location, 
+			self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+		if not self.readonly and not os.path.exists(self.location):
+			self._ensure_dirs()
+
+		config.setdefault("autocommit", self.autocommits)
+		config.setdefault("cache_bytes", self.cache_bytes)
+		config.setdefault("synchronous", self.synchronous)
+		# Timeout for throwing a "database is locked" exception (pysqlite
+		# default is 5.0 seconds).
+		config.setdefault("timeout", 15)
+		self._db_init_connection(config)
+		self._db_init_structures()
+
+	def _import_sqlite(self):
+		# sqlite3 is optional with >=python-2.5
+		try:
+			import sqlite3 as db_module
+		except ImportError:
+			try:
+				from pysqlite2 import dbapi2 as db_module
+			except ImportError as e:
+				raise cache_errors.InitializationError(self.__class__, e)
+
+		self._db_module = db_module
+		self._db_error = db_module.Error
+
+	def _db_escape_string(self, s):
+		"""meta escaping, returns quoted string for use in sql statements"""
+		if not isinstance(s, basestring):
+			# Avoid potential UnicodeEncodeError in python-2.x by
+			# only calling str() when it's absolutely necessary.
+			s = str(s)
+		# This is equivalent to the _quote function from pysqlite 1.1.
+		return "'%s'" % s.replace("'", "''")
+
+	def _db_init_connection(self, config):
+		self._dbpath = self.location + ".sqlite"
+		#if os.path.exists(self._dbpath):
+		#	os.unlink(self._dbpath)
+		connection_kwargs = {}
+		connection_kwargs["timeout"] = config["timeout"]
+		try:
+			if not self.readonly:
+				self._ensure_dirs()
+			self._db_connection = self._db_module.connect(
+				database=_unicode_decode(self._dbpath), **connection_kwargs)
+			self._db_cursor = self._db_connection.cursor()
+			self._db_cursor.execute("PRAGMA encoding = %s" % self._db_escape_string("UTF-8"))
+			if not self.readonly and not self._ensure_access(self._dbpath):
+				raise cache_errors.InitializationError(self.__class__, "can't ensure perms on %s" % self._dbpath)
+			self._db_init_cache_size(config["cache_bytes"])
+			self._db_init_synchronous(config["synchronous"])
+		except self._db_error as e:
+			raise cache_errors.InitializationError(self.__class__, e)
+
+	def _db_init_structures(self):
+		self._db_table = {}
+		self._db_table["packages"] = {}
+		mytable = "portage_packages"
+		self._db_table["packages"]["table_name"] = mytable
+		self._db_table["packages"]["package_id"] = "internal_db_package_id"
+		self._db_table["packages"]["package_key"] = "portage_package_key"
+		self._db_table["packages"]["internal_columns"] = \
+			[self._db_table["packages"]["package_id"],
+			self._db_table["packages"]["package_key"]]
+		create_statement = []
+		create_statement.append("CREATE TABLE")
+		create_statement.append(mytable)
+		create_statement.append("(")
+		table_parameters = []
+		table_parameters.append("%s INTEGER PRIMARY KEY AUTOINCREMENT" % self._db_table["packages"]["package_id"])
+		table_parameters.append("%s TEXT" % self._db_table["packages"]["package_key"])
+		for k in self._allowed_keys:
+			table_parameters.append("%s TEXT" % k)
+		table_parameters.append("UNIQUE(%s)" % self._db_table["packages"]["package_key"])
+		create_statement.append(",".join(table_parameters))
+		create_statement.append(")")
+		
+		self._db_table["packages"]["create"] = " ".join(create_statement)
+		self._db_table["packages"]["columns"] = \
+			self._db_table["packages"]["internal_columns"] + \
+			self._allowed_keys
+
+		cursor = self._db_cursor
+		for k, v in self._db_table.items():
+			if self._db_table_exists(v["table_name"]):
+				create_statement = self._db_table_get_create(v["table_name"])
+				if create_statement != v["create"]:
+					writemsg(_("sqlite: dropping old table: %s\n") % v["table_name"])
+					cursor.execute("DROP TABLE %s" % v["table_name"])
+					cursor.execute(v["create"])
+			else:
+				cursor.execute(v["create"])
+
+	def _db_table_exists(self, table_name):
+		"""return true/false dependent on the table existing"""
+		cursor = self._db_cursor
+		cursor.execute("SELECT name FROM sqlite_master WHERE type=\"table\" AND name=%s" % \
+			self._db_escape_string(table_name))
+		return len(cursor.fetchall()) == 1
+
+	def _db_table_get_create(self, table_name):
+		"""return the CREATE statement used for the given table"""
+		cursor = self._db_cursor
+		cursor.execute("SELECT sql FROM sqlite_master WHERE name=%s" % \
+			self._db_escape_string(table_name))
+		return cursor.fetchall()[0][0]
+
+	def _db_init_cache_size(self, cache_bytes):
+		cursor = self._db_cursor
+		cursor.execute("PRAGMA page_size")
+		page_size=int(cursor.fetchone()[0])
+		# number of pages, sqlite default is 2000
+		cache_size = cache_bytes // page_size
+		cursor.execute("PRAGMA cache_size = %d" % cache_size)
+		cursor.execute("PRAGMA cache_size")
+		actual_cache_size = int(cursor.fetchone()[0])
+		del cursor
+		if actual_cache_size != cache_size:
+			raise cache_errors.InitializationError(self.__class__, "actual cache_size = %d does not match requested size of %d" % (actual_cache_size, cache_size))
+
+	def _db_init_synchronous(self, synchronous):
+		cursor = self._db_cursor
+		cursor.execute("PRAGMA synchronous = %d" % synchronous)
+		cursor.execute("PRAGMA synchronous")
+		actual_synchronous=int(cursor.fetchone()[0])
+		del cursor
+		if actual_synchronous!=synchronous:
+			raise cache_errors.InitializationError(self.__class__, "actual synchronous = %d does not match requested value of %d" % (actual_synchronous, synchronous))
+
+	def _getitem(self, cpv):
+		cursor = self._db_cursor
+		cursor.execute("select * from %s where %s=%s" % \
+			(self._db_table["packages"]["table_name"],
+			self._db_table["packages"]["package_key"],
+			self._db_escape_string(cpv)))
+		result = cursor.fetchall()
+		if len(result) == 1:
+			pass
+		elif len(result) == 0:
+			raise KeyError(cpv)
+		else:
+			raise cache_errors.CacheCorruption(cpv, "key is not unique")
+		d = {}
+		internal_columns = self._db_table["packages"]["internal_columns"]
+		column_index = -1
+		for k in self._db_table["packages"]["columns"]:
+			column_index +=1
+			if k not in internal_columns:
+				d[k] = result[0][column_index]
+
+		return d
+
+	def _setitem(self, cpv, values):
+		update_statement = []
+		update_statement.append("REPLACE INTO %s" % self._db_table["packages"]["table_name"])
+		update_statement.append("(")
+		update_statement.append(','.join([self._db_table["packages"]["package_key"]] + self._allowed_keys))
+		update_statement.append(")")
+		update_statement.append("VALUES")
+		update_statement.append("(")
+		values_parameters = []
+		values_parameters.append(self._db_escape_string(cpv))
+		for k in self._allowed_keys:
+			values_parameters.append(self._db_escape_string(values.get(k, '')))
+		update_statement.append(",".join(values_parameters))
+		update_statement.append(")")
+		cursor = self._db_cursor
+		try:
+			s = " ".join(update_statement)
+			cursor.execute(s)
+		except self._db_error as e:
+			writemsg("%s: %s\n" % (cpv, str(e)))
+			raise
+
+	def commit(self):
+		self._db_connection.commit()
+
+	def _delitem(self, cpv):
+		cursor = self._db_cursor
+		cursor.execute("DELETE FROM %s WHERE %s=%s" % \
+			(self._db_table["packages"]["table_name"],
+			self._db_table["packages"]["package_key"],
+			self._db_escape_string(cpv)))
+
+	def __contains__(self, cpv):
+		cursor = self._db_cursor
+		cursor.execute(" ".join(
+			["SELECT %s FROM %s" %
+			(self._db_table["packages"]["package_id"],
+			self._db_table["packages"]["table_name"]),
+			"WHERE %s=%s" % (
+			self._db_table["packages"]["package_key"],
+			self._db_escape_string(cpv))]))
+		result = cursor.fetchall()
+		if len(result) == 0:
+			return False
+		elif len(result) == 1:
+			return True
+		else:
+			raise cache_errors.CacheCorruption(cpv, "key is not unique")
+
+	def __iter__(self):
+		"""generator for walking the dir struct"""
+		cursor = self._db_cursor
+		cursor.execute("SELECT %s FROM %s" % \
+			(self._db_table["packages"]["package_key"],
+			self._db_table["packages"]["table_name"]))
+		result = cursor.fetchall()
+		key_list = [x[0] for x in result]
+		del result
+		while key_list:
+			yield key_list.pop()

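The PRAGMA sizing done in _db_init_cache_size() reduces to the equation from the class comment; with SQLite's common default page size, the 10 MiB cache_bytes default comes out as:

    page_size = 1024                  # PRAGMA page_size, set at build time
    cache_bytes = 1024 * 1024 * 10    # the class default above
    print(cache_bytes // page_size)   # 10240 pages -> PRAGMA cache_size
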
diff --git a/portage_with_autodep/pym/portage/cache/template.py b/portage_with_autodep/pym/portage/cache/template.py
new file mode 100644
index 0000000..f84d8f4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/template.py
@@ -0,0 +1,236 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+from portage.cache import cache_errors
+from portage.cache.cache_errors import InvalidRestriction
+from portage.cache.mappings import ProtectedDict
+import sys
+import warnings
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+	long = int
+
+class database(object):
+	# this is for metadata/cache transfer.
+	# basically flags whether the cache needs to be updated when transferring cache to cache.
+	# leave this.
+
+	complete_eclass_entries = True
+	autocommits = False
+	cleanse_keys = False
+	serialize_eclasses = True
+
+	def __init__(self, location, label, auxdbkeys, readonly=False):
+		""" initialize the derived class; specifically, store label/keys"""
+		self._known_keys = auxdbkeys
+		self.location = location
+		self.label = label
+		self.readonly = readonly
+		self.sync_rate = 0
+		self.updates = 0
+	
+	def __getitem__(self, cpv):
+		"""get the values stored for a cpv
+		This shouldn't be overridden in derived classes since it handles the _eclasses_ conversion.
+		that said, if a derived class handles that itself, it can override this."""
+		if self.updates > self.sync_rate:
+			self.commit()
+			self.updates = 0
+		d=self._getitem(cpv)
+		if self.serialize_eclasses and "_eclasses_" in d:
+			d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"])
+		elif "_eclasses_" not in d:
+			d["_eclasses_"] = {}
+		mtime = d.get('_mtime_')
+		if mtime is None:
+			raise cache_errors.CacheCorruption(cpv,
+				'_mtime_ field is missing')
+		try:
+			mtime = long(mtime)
+		except ValueError:
+			raise cache_errors.CacheCorruption(cpv,
+				'_mtime_ conversion to long failed: %s' % (mtime,))
+		d['_mtime_'] = mtime
+		return d
+
+	def _getitem(self, cpv):
+		"""get cpv's values.
+		override this in derived classes"""
+		raise NotImplementedError
+
+	def __setitem__(self, cpv, values):
+		"""set a cpv to values
+		This shouldn't be overridden in derived classes since it handles the readonly checks"""
+		if self.readonly:
+			raise cache_errors.ReadOnlyRestriction()
+		if self.cleanse_keys:
+			d=ProtectedDict(values)
+			for k, v in list(d.items()):
+				if not v:
+					del d[k]
+			if self.serialize_eclasses and "_eclasses_" in values:
+				d["_eclasses_"] = serialize_eclasses(d["_eclasses_"])
+		elif self.serialize_eclasses and "_eclasses_" in values:
+			d = ProtectedDict(values)
+			d["_eclasses_"] = serialize_eclasses(d["_eclasses_"])
+		else:
+			d = values
+		self._setitem(cpv, d)
+		if not self.autocommits:
+			self.updates += 1
+			if self.updates > self.sync_rate:
+				self.commit()
+				self.updates = 0
+
+	def _setitem(self, name, values):
+		"""__setitem__ calls this after readonly checks.  override it in derived classes
+		note the _eclasses_ key *must* be handled"""
+		raise NotImplementedError
+
+	def __delitem__(self, cpv):
+		"""delete a key from the cache.
+		This shouldn't be overridden in derived classes since it handles the readonly checks"""
+		if self.readonly:
+			raise cache_errors.ReadOnlyRestriction()
+		if not self.autocommits:
+			self.updates += 1
+		self._delitem(cpv)
+		if self.updates > self.sync_rate:
+			self.commit()
+			self.updates = 0
+
+	def _delitem(self,cpv):
+		"""__delitem__ calls this after readonly checks.  override it in derived classes"""
+		raise NotImplementedError
+
+	def has_key(self, cpv):
+		return cpv in self
+
+	def keys(self):
+		return list(self)
+
+	def iterkeys(self):
+		return iter(self)
+
+	def iteritems(self):
+		for x in self:
+			yield (x, self[x])
+
+	def items(self):
+		return list(self.iteritems())
+
+	def sync(self, rate=0):
+		self.sync_rate = rate
+		if(rate == 0):
+		if rate == 0:
+
+	def commit(self):
+		if not self.autocommits:
+			raise NotImplementedError
+
+	def __contains__(self, cpv):
+		"""This method should always be overridden.  It is provided only for
+		backward compatibility with modules that override has_key instead.  It
+		will automatically raise a NotImplementedError if has_key has not been
+		overridden."""
+		if self.has_key is database.has_key:
+			# prevent a possible recursive loop
+			raise NotImplementedError
+		warnings.warn("portage.cache.template.database.has_key() is "
+			"deprecated, override __contains__ instead",
+			DeprecationWarning)
+		return self.has_key(cpv)
+
+	def __iter__(self):
+		"""This method should always be overridden.  It is provided only for
+		backward compatibility with modules that override iterkeys instead.  It
+		will automatically raise a NotImplementedError if iterkeys has not been
+		overridden."""
+		if self.iterkeys is database.iterkeys:
+			# prevent a possible recursive loop
+			raise NotImplementedError(self)
+		return iter(self.keys())
+
+	def get(self, k, x=None):
+		try:
+			return self[k]
+		except KeyError:
+			return x
+
+	def get_matches(self, match_dict):
+		"""generic function for walking the entire cache db, matching restrictions to
+		filter what cpv's are returned.  Derived classes should override this if they
+		can implement a faster method than pulling each cpv's values and checking them.
+		
+		For example, RDBMS derived classes should push the matching logic down to the
+		actual RDBM."""
+
+		import re
+		restricts = {}
+		for key,match in match_dict.items():
+			# XXX this sucks.
+			try:
+				if isinstance(match, basestring):
+					restricts[key] = re.compile(match).match
+				else:
+					restricts[key] = re.compile(match[0],match[1]).match
+			except re.error as e:
+				raise InvalidRestriction(key, match, e)
+			if key not in self._known_keys:
+				raise InvalidRestriction(key, match, "Key isn't valid")
+
+		for cpv in self:
+			cont = True
+			vals = self[cpv]
+			for key, match in restricts.items():
+				if not match(vals[key]):
+					cont = False
+					break
+			if cont:
+				yield cpv
+
+	if sys.hexversion >= 0x3000000:
+		keys = __iter__
+		items = iteritems
+
+def serialize_eclasses(eclass_dict):
+	"""takes a dict, returns a string representing said dict"""
+	"""The "new format", which causes older versions of <portage-2.1.2 to
+	traceback with a ValueError due to failed long() conversion.  This format
+	isn't currently written, but the capability to read it is already built
+	in.
+	return "\t".join(["%s\t%s" % (k, str(v)) \
+		for k, v in eclass_dict.iteritems()])
+	"""
+	if not eclass_dict:
+		return ""
+	return "\t".join(k + "\t%s\t%s" % eclass_dict[k] \
+		for k in sorted(eclass_dict))
+
+def reconstruct_eclasses(cpv, eclass_string):
+	"""returns a dict when handed a string generated by serialize_eclasses"""
+	eclasses = eclass_string.rstrip().lstrip().split("\t")
+	if eclasses == [""]:
+		# occasionally this occurs in the fs backends.  they suck.
+		return {}
+	
+	if len(eclasses) % 2 != 0 and len(eclasses) % 3 != 0:
+		raise cache_errors.CacheCorruption(cpv, "_eclasses_ was of invalid len %i" % len(eclasses))
+	d={}
+	try:
+		if eclasses[1].isdigit():
+			for x in range(0, len(eclasses), 2):
+				d[eclasses[x]] = ("", long(eclasses[x + 1]))
+		else:
+			# The old format contains paths that will be discarded.
+			for x in range(0, len(eclasses), 3):
+				d[eclasses[x]] = (eclasses[x + 1], long(eclasses[x + 2]))
+	except IndexError:
+		raise cache_errors.CacheCorruption(cpv,
+			"_eclasses_ was of invalid len %i" % len(eclasses))
+	except ValueError:
+		raise cache_errors.CacheCorruption(cpv, "_eclasses_ mtime conversion to long failed")
+	del eclasses
+	return d

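The serialized _eclasses_ value is a single tab-joined string of name/path/mtime triples (the two-field variant described in the serialize_eclasses docstring is read but never written). A round trip through the two helpers, assuming the patched pym tree is importable; the paths and mtimes are made up:

    from portage.cache.template import (serialize_eclasses,
        reconstruct_eclasses)

    ec = {'eutils': ('/usr/portage/eclass', 1313947200),
        'toolchain-funcs': ('/usr/portage/eclass', 1313947201)}

    s = serialize_eclasses(ec)
    # 'eutils\t/usr/portage/eclass\t1313947200\ttoolchain-funcs\t...'
    assert reconstruct_eclasses('cat/pkg-1.0', s) == ec
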
diff --git a/portage_with_autodep/pym/portage/cache/util.py b/portage_with_autodep/pym/portage/cache/util.py
new file mode 100644
index 0000000..b824689
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/util.py
@@ -0,0 +1,170 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+from __future__ import print_function
+
+__all__ = ["mirror_cache", "non_quiet_mirroring", "quiet_mirroring"]
+
+from itertools import chain
+from portage.cache import cache_errors
+from portage.localization import _
+
+def mirror_cache(valid_nodes_iterable, src_cache, trg_cache, eclass_cache=None, verbose_instance=None):
+
+	from portage import eapi_is_supported, \
+		_validate_cache_for_unsupported_eapis
+	if not src_cache.complete_eclass_entries and not eclass_cache:
+		raise Exception("eclass_cache required for caches of class %s!" % src_cache.__class__)
+
+	if verbose_instance is None:
+		noise = quiet_mirroring()
+	else:
+		noise = verbose_instance
+
+	dead_nodes = set(trg_cache)
+	count=0
+
+	if not trg_cache.autocommits:
+		trg_cache.sync(100)
+
+	for x in valid_nodes_iterable:
+#		print "processing x=",x
+		count+=1
+		dead_nodes.discard(x)
+		try:
+			entry = src_cache[x]
+		except KeyError as e:
+			noise.missing_entry(x)
+			del e
+			continue
+		except cache_errors.CacheError as ce:
+			noise.exception(x, ce)
+			del ce
+			continue
+
+		eapi = entry.get('EAPI')
+		if not eapi:
+			eapi = '0'
+		eapi = eapi.lstrip('-')
+		eapi_supported = eapi_is_supported(eapi)
+		if not eapi_supported:
+			if not _validate_cache_for_unsupported_eapis:
+				noise.misc(x, _("unable to validate cache for EAPI='%s'") % eapi)
+				continue
+
+		write_it = True
+		trg = None
+		try:
+			trg = trg_cache[x]
+		except (KeyError, cache_errors.CacheError):
+			pass
+		else:
+			if trg['_mtime_'] == entry['_mtime_'] and \
+				eclass_cache.is_eclass_data_valid(trg['_eclasses_']) and \
+				set(trg['_eclasses_']) == set(entry['_eclasses_']):
+				write_it = False
+
+		for d in (entry, trg):
+			if d is not None and d.get('EAPI') in ('', '0'):
+				del d['EAPI']
+
+		if trg and not write_it:
+			# We don't want to skip the write unless we're really sure that
+			# the existing cache is identical, so don't trust _mtime_ and
+			# _eclasses_ alone.
+			for k in set(chain(entry, trg)).difference(
+				("_mtime_", "_eclasses_")):
+				if trg.get(k, "") != entry.get(k, ""):
+					write_it = True
+					break
+
+		if write_it:
+			try:
+				inherited = entry.get("INHERITED", "")
+				eclasses = entry.get("_eclasses_")
+			except cache_errors.CacheError as ce:
+				noise.exception(x, ce)
+				del ce
+				continue
+
+			if eclasses is not None:
+				if not eclass_cache.is_eclass_data_valid(entry["_eclasses_"]):
+					noise.eclass_stale(x)
+					continue
+				inherited = eclasses
+			else:
+				inherited = inherited.split()
+
+			if inherited:
+				if src_cache.complete_eclass_entries and eclasses is None:
+					noise.corruption(x, "missing _eclasses_ field")
+					continue
+
+				# Even if _eclasses_ already exists, replace it with data from
+				# eclass_cache, in order to insert local eclass paths.
+				try:
+					eclasses = eclass_cache.get_eclass_data(inherited)
+				except KeyError:
+					# INHERITED contains a non-existent eclass.
+					noise.eclass_stale(x)
+					continue
+
+				if eclasses is None:
+					noise.eclass_stale(x)
+					continue
+				entry["_eclasses_"] = eclasses
+
+			if not eapi_supported:
+				for k in set(entry).difference(("_mtime_", "_eclasses_")):
+					entry[k] = ""
+				entry["EAPI"] = "-" + eapi
+
+			# By the time we get here the eclasses have been validated and the
+			# entry has been updated/translated where necessary (mainly for
+			# metadata/cache).
+			try:
+				trg_cache[x] = entry
+			except cache_errors.CacheError as ce:
+				noise.exception(x, ce)
+				del ce
+				continue
+		if count >= noise.call_update_min:
+			noise.update(x)
+			count = 0
+
+	if not trg_cache.autocommits:
+		trg_cache.commit()
+
+	# By this time the trg_cache is up to date, and dead_nodes holds the
+	# cpvs that no longer exist in the source.  Walk the target db and
+	# remove them.
+	for key in dead_nodes:
+		try:
+			del trg_cache[key]
+		except KeyError:
+			pass
+		except cache_errors.CacheError as ce:
+			noise.exception(key, ce)
+			del ce
+	noise.finish()
+
+
+class quiet_mirroring(object):
+	# call_update_min is used by mirror_cache to determine how often to call in.
+	# quiet defaults to 2**24 - 1, i.e. update() is only called once every
+	# 16 million entries or so :)
+	call_update_min = 0xffffff
+	def update(self,key,*arg):		pass
+	def exception(self,key,*arg):	pass
+	def eclass_stale(self,*arg):	pass
+	def missing_entry(self, key):	pass
+	def misc(self,key,*arg):		pass
+	def corruption(self, key, s):	pass
+	def finish(self, *arg):			pass
+	
+class non_quiet_mirroring(quiet_mirroring):
+	call_update_min=1
+	def update(self,key,*arg):	print("processed",key)
+	def exception(self, key, *arg):	print("exec",key,arg)
+	def missing_entry(self, key):	print("key %s is missing" % key)
+	def corruption(self,key,*arg):	print("corrupt %s:" % key,arg)
+	def eclass_stale(self,key,*arg):print("stale %s:"%key,arg)
+
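+# Typical invocation (sketch; 'src' and 'trg' stand for two concrete
+# template.database instances, and 'eclasses' for an eclass cache object
+# providing is_eclass_data_valid() and get_eclass_data()):
+#
+#   mirror_cache(iter(src), src, trg, eclass_cache=eclasses,
+#       verbose_instance=non_quiet_mirroring())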

diff --git a/portage_with_autodep/pym/portage/cache/volatile.py b/portage_with_autodep/pym/portage/cache/volatile.py
new file mode 100644
index 0000000..0bf6bab
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cache/volatile.py
@@ -0,0 +1,25 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+from portage.cache import template
+
+class database(template.database):
+
+	autocommits = True
+	serialize_eclasses = False
+
+	def __init__(self, *args, **config):
+		config.pop("gid", None)
+		config.pop("perms", None)
+		super(database, self).__init__(*args, **config)
+		self._data = {}
+		self.__iter__ = self._data.__iter__
+		self._delitem = self._data.__delitem__
+		self.__contains__ = self._data.__contains__
+
+	def _setitem(self, name, values):
+		self._data[name] = copy.deepcopy(values)
+
+	def _getitem(self, cpv):
+		return copy.deepcopy(self._data[cpv])

diff --git a/portage_with_autodep/pym/portage/checksum.py b/portage_with_autodep/pym/portage/checksum.py
new file mode 100644
index 0000000..9e7e455
--- /dev/null
+++ b/portage_with_autodep/pym/portage/checksum.py
@@ -0,0 +1,291 @@
+# checksum.py -- core Portage functionality
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.const import PRELINK_BINARY,HASHING_BLOCKSIZE
+from portage.localization import _
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+import errno
+import stat
+import tempfile
+
+#dict of all available hash functions
+hashfunc_map = {}
+hashorigin_map = {}
+
+def _generate_hash_function(hashtype, hashobject, origin="unknown"):
+	def pyhash(filename):
+		"""
+		Run a checksum against a file.
+	
+		@param filename: File to run the checksum against
+		@type filename: String
+		@return: The hash and size of the data
+		"""
+		try:
+			f = open(_unicode_encode(filename,
+				encoding=_encodings['fs'], errors='strict'), 'rb')
+		except IOError as e:
+			func_call = "open('%s')" % filename
+			if e.errno == errno.EPERM:
+				raise portage.exception.OperationNotPermitted(func_call)
+			elif e.errno == errno.EACCES:
+				raise portage.exception.PermissionDenied(func_call)
+			elif e.errno == errno.ENOENT:
+				raise portage.exception.FileNotFound(filename)
+			else:
+				raise
+		blocksize = HASHING_BLOCKSIZE
+		data = f.read(blocksize)
+		size = 0
+		checksum = hashobject()
+		while data:
+			checksum.update(data)
+			size = size + len(data)
+			data = f.read(blocksize)
+		f.close()
+
+		return (checksum.hexdigest(), size)
+	hashfunc_map[hashtype] = pyhash
+	hashorigin_map[hashtype] = origin
+	return pyhash
+
+# Define hash functions, try to use the best module available. Later definitions
+# override earlier ones
+
+# Use the internal modules as last fallback
+try:
+	from hashlib import md5 as _new_md5
+except ImportError:
+	from md5 import new as _new_md5
+
+md5hash = _generate_hash_function("MD5", _new_md5, origin="internal")
+
+try:
+	from hashlib import sha1 as _new_sha1
+except ImportError:
+	from sha import new as _new_sha1
+
+sha1hash = _generate_hash_function("SHA1", _new_sha1, origin="internal")
+
+# Use pycrypto when available, prefer it over the internal fallbacks
+try:
+	from Crypto.Hash import SHA256, RIPEMD
+	sha256hash = _generate_hash_function("SHA256", SHA256.new, origin="pycrypto")
+	rmd160hash = _generate_hash_function("RMD160", RIPEMD.new, origin="pycrypto")
+except ImportError:
+	pass
+
+# Use hashlib from python-2.5 if available and prefer it over pycrypto and internal fallbacks.
+# Need special handling for RMD160 as it may not always be provided by hashlib.
+try:
+	import hashlib
+	
+	md5hash = _generate_hash_function("MD5", hashlib.md5, origin="hashlib")
+	sha1hash = _generate_hash_function("SHA1", hashlib.sha1, origin="hashlib")
+	sha256hash = _generate_hash_function("SHA256", hashlib.sha256, origin="hashlib")
+	try:
+		hashlib.new('ripemd160')
+	except ValueError:
+		pass
+	else:
+		def rmd160():
+			return hashlib.new('ripemd160')
+		rmd160hash = _generate_hash_function("RMD160", rmd160, origin="hashlib")
+except ImportError:
+	pass
+
+
+# Use python-fchksum if available, prefer it over all other MD5 implementations
+try:
+	import fchksum
+	
+	def md5hash(filename):
+		return fchksum.fmd5t(filename)
+	hashfunc_map["MD5"] = md5hash
+	hashorigin_map["MD5"] = "python-fchksum"
+
+except ImportError:
+	pass
+
+# There is only one implementation for size
+def getsize(filename):
+	size = os.stat(filename).st_size
+	return (size, size)
+hashfunc_map["size"] = getsize
+
+# end actual hash functions
+
+prelink_capable = False
+if os.path.exists(PRELINK_BINARY):
+	results = portage.subprocess_getstatusoutput(
+		"%s --version > /dev/null 2>&1" % (PRELINK_BINARY,))
+	if (results[0] >> 8) == 0:
+		prelink_capable = True
+	del results
+
+def perform_md5(x, calc_prelink=0):
+	return perform_checksum(x, "MD5", calc_prelink)[0]
+
+def _perform_md5_merge(x, **kwargs):
+	return perform_md5(_unicode_encode(x,
+		encoding=_encodings['merge'], errors='strict'), **kwargs)
+
+def perform_all(x, calc_prelink=0):
+	mydict = {}
+	for k in hashfunc_map:
+		mydict[k] = perform_checksum(x, k, calc_prelink)[0]
+	return mydict
+
+def get_valid_checksum_keys():
+	return list(hashfunc_map)
+
+def get_hash_origin(hashtype):
+	if hashtype not in hashfunc_map:
+		raise KeyError(hashtype)
+	return hashorigin_map.get(hashtype, "unknown")
+
+def verify_all(filename, mydict, calc_prelink=0, strict=0):
+	"""
+	Verify all checksums against a file.
+
+	@param filename: File to run the checksums against
+	@type filename: String
+	@param calc_prelink: Whether or not to reverse prelink before running the checksum
+	@type calc_prelink: Integer
+	@param strict: Enable/Disable strict checking (which stops exactly at a checksum failure and throws an exception)
+	@type strict: Integer
+	@rtype: Tuple
+	@return: Result of the checks and possible message:
+		1) If size fails, False, and a tuple containing a message, the given size, and the actual size
+		2) If there is an os error, False, and a tuple containing the system error followed by two None values
+		3) If a checksum fails, False and a tuple containing a message, the given hash, and the actual hash
+		4) If all checks succeed, return True and a fake reason
+	"""
+	# Dict relates to single file only.
+	# returns: (passed,reason)
+	file_is_ok = True
+	reason     = "Reason unknown"
+	try:
+		mysize = os.stat(filename)[stat.ST_SIZE]
+		if mydict["size"] != mysize:
+			return False,(_("Filesize does not match recorded size"), mysize, mydict["size"])
+	except OSError as e:
+		if e.errno == errno.ENOENT:
+			raise portage.exception.FileNotFound(filename)
+		return False, (str(e), None, None)
+
+	verifiable_hash_types = set(mydict).intersection(hashfunc_map)
+	verifiable_hash_types.discard("size")
+	if not verifiable_hash_types:
+		expected = set(hashfunc_map)
+		expected.discard("size")
+		expected = list(expected)
+		expected.sort()
+		expected = " ".join(expected)
+		got = set(mydict)
+		got.discard("size")
+		got = list(got)
+		got.sort()
+		got = " ".join(got)
+		return False, (_("Insufficient data for checksum verification"), got, expected)
+
+	for x in sorted(mydict):
+		if   x == "size":
+			continue
+		elif x in hashfunc_map:
+			myhash = perform_checksum(filename, x, calc_prelink=calc_prelink)[0]
+			if mydict[x] != myhash:
+				if strict:
+					raise portage.exception.DigestException(
+						("Failed to verify '%(file)s' on " + \
+						"checksum type '%(type)s'") % \
+						{"file" : filename, "type" : x})
+				else:
+					file_is_ok = False
+					reason     = (("Failed on %s verification" % x), myhash,mydict[x])
+					break
+	return file_is_ok,reason
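+
+# Sketch of the calling convention (the digest and size below are
+# placeholders, not real data):
+#
+#   ok, reason = verify_all("/tmp/foo.tar.gz",
+#       {"size": 4096, "MD5": "d41d8cd98f00b204e9800998ecf8427e"})
+#   if not ok:
+#       print("verification failed:", reason)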
+
+def perform_checksum(filename, hashname="MD5", calc_prelink=0):
+	"""
+	Run a specific checksum against a file. The filename can
+	be either unicode or an encoded byte string. If filename
+	is unicode then a UnicodeDecodeError will be raised if
+	necessary.
+
+	@param filename: File to run the checksum against
+	@type filename: String
+	@param hashname: The type of hash function to run
+	@type hashname: String
+	@param calc_prelink: Whether or not to reverse prelink before running the checksum
+	@type calc_prelink: Integer
+	@rtype: Tuple
+	@return: The hash and size of the data
+	"""
+	global prelink_capable
+	# Make sure filename is encoded with the correct encoding before
+	# it is passed to spawn (for prelink) and/or the hash function.
+	filename = _unicode_encode(filename,
+		encoding=_encodings['fs'], errors='strict')
+	myfilename = filename
+	prelink_tmpfile = None
+	try:
+		if calc_prelink and prelink_capable:
+			# Create non-prelinked temporary file to checksum.
+			# Files rejected by prelink are summed in place.
+			try:
+				tmpfile_fd, prelink_tmpfile = tempfile.mkstemp()
+				try:
+					retval = portage.process.spawn([PRELINK_BINARY,
+						"--verify", filename], fd_pipes={1:tmpfile_fd})
+				finally:
+					os.close(tmpfile_fd)
+				if retval == os.EX_OK:
+					myfilename = prelink_tmpfile
+			except portage.exception.CommandNotFound:
+				# This happens during uninstallation of prelink.
+				prelink_capable = False
+		try:
+			if hashname not in hashfunc_map:
+				raise portage.exception.DigestException(hashname + \
+					" hash function not available (needs dev-python/pycrypto)")
+			myhash, mysize = hashfunc_map[hashname](myfilename)
+		except (OSError, IOError) as e:
+			if e.errno == errno.ENOENT:
+				raise portage.exception.FileNotFound(myfilename)
+			raise
+		return myhash, mysize
+	finally:
+		if prelink_tmpfile:
+			try:
+				os.unlink(prelink_tmpfile)
+			except OSError as e:
+				if e.errno != errno.ENOENT:
+					raise
+				del e
+
+def perform_multiple_checksums(filename, hashes=["MD5"], calc_prelink=0):
+	"""
+	Run a group of checksums against a file.
+
+	@param filename: File to run the checksums against
+	@type filename: String
+	@param hashes: A list of checksum functions to run against the file
+	@type hashes: List
+	@param calc_prelink: Whether or not to reverse prelink before running the checksum
+	@type calc_prelink: Integer
+	@rtype: Tuple
+	@return: A dictionary in the form:
+		return_value[hash_name] = hash_result
+		for each given checksum (the size is discarded here; call
+		perform_checksum() directly if you need it)
+	"""
+	rVal = {}
+	for x in hashes:
+		if x not in hashfunc_map:
+			raise portage.exception.DigestException(x+" hash function not available (needs dev-python/pycrypto or >=dev-lang/python-2.5)")
+		rVal[x] = perform_checksum(filename, x, calc_prelink)[0]
+	return rVal
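+
+# Example use (illustrative; any readable file path works):
+#
+#   perform_multiple_checksums("/etc/hostname", hashes=["MD5", "SHA1"])
+#   => {"MD5": "<hex digest>", "SHA1": "<hex digest>"}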

diff --git a/portage_with_autodep/pym/portage/const.py b/portage_with_autodep/pym/portage/const.py
new file mode 100644
index 0000000..f34398d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/const.py
@@ -0,0 +1,143 @@
+# portage: Constants
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+
+# ===========================================================================
+# START OF CONSTANTS -- START OF CONSTANTS -- START OF CONSTANTS -- START OF
+# ===========================================================================
+
+# There are two types of variables here which can easily be confused,
+# resulting in arbitrary bugs, mainly exposed with an offset
+# installation (Prefix).  The two types relate to the usage of
+# config_root or target_root.
+# The first, config_root (PORTAGE_CONFIGROOT), can be a path somewhere,
+# from which all derived paths need to be relative (e.g.
+# USER_CONFIG_PATH) without EPREFIX prepended in Prefix.  This means
+# config_root can for instance be set to "$HOME/my/config".  Obviously,
+# in such case it is not appropriate to prepend EPREFIX to derived
+# constants.  The default value of config_root is EPREFIX (in non-Prefix
+# the empty string) -- overriding the value loses the EPREFIX as one
+# would expect.
+# Second there is target_root (ROOT) which is used to install somewhere
+# completely else, in Prefix of limited use.  Because this is an offset
+# always given, the EPREFIX should always be applied in it, hence the
+# code always prefixes them with EROOT.
+# The variables in this file are grouped by config_root, target_root.
+
+# variables used with config_root (these need to be relative)
+MAKE_CONF_FILE           = "etc/make.conf"
+USER_CONFIG_PATH         = "etc/portage"
+MODULES_FILE_PATH        = USER_CONFIG_PATH + "/modules"
+CUSTOM_PROFILE_PATH      = USER_CONFIG_PATH + "/profile"
+USER_VIRTUALS_FILE       = USER_CONFIG_PATH + "/virtuals"
+EBUILD_SH_ENV_FILE       = USER_CONFIG_PATH + "/bashrc"
+EBUILD_SH_ENV_DIR        = USER_CONFIG_PATH + "/env"
+CUSTOM_MIRRORS_FILE      = USER_CONFIG_PATH + "/mirrors"
+COLOR_MAP_FILE           = USER_CONFIG_PATH + "/color.map"
+PROFILE_PATH             = "etc/make.profile"
+MAKE_DEFAULTS_FILE       = PROFILE_PATH + "/make.defaults"  # FIXME: not used
+DEPRECATED_PROFILE_FILE  = PROFILE_PATH + "/deprecated"
+
+# variables used with targetroot (these need to be absolute, but not
+# have a leading '/' since they are used directly with os.path.join on EROOT)
+VDB_PATH                 = "var/db/pkg"
+CACHE_PATH               = "var/cache/edb"
+PRIVATE_PATH             = "var/lib/portage"
+WORLD_FILE               = PRIVATE_PATH + "/world"
+WORLD_SETS_FILE          = PRIVATE_PATH + "/world_sets"
+CONFIG_MEMORY_FILE       = PRIVATE_PATH + "/config"
+NEWS_LIB_PATH            = "var/lib/gentoo"
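+
+# Example (illustrative): callers join the constants above with a root
+# that already contains EPREFIX, e.g. os.path.join(eroot, VDB_PATH) to
+# reach the installed-package database.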
+
+# these variables get EPREFIX prepended automagically when they are
+# translated into their lowercase variants
+DEPCACHE_PATH            = "/var/cache/edb/dep"
+GLOBAL_CONFIG_PATH       = "/usr/share/portage/config"
+
+# these variables are not used with target_root or config_root
+PORTAGE_BASE_PATH        = os.path.join(os.sep, os.sep.join(__file__.split(os.sep)[:-3]))
+PORTAGE_BIN_PATH         = PORTAGE_BASE_PATH + "/bin"
+PORTAGE_PYM_PATH         = PORTAGE_BASE_PATH + "/pym"
+LOCALE_DATA_PATH         = PORTAGE_BASE_PATH + "/locale"  # FIXME: not used
+EBUILD_SH_BINARY         = PORTAGE_BIN_PATH + "/ebuild.sh"
+MISC_SH_BINARY           = PORTAGE_BIN_PATH + "/misc-functions.sh"
+SANDBOX_BINARY           = "/usr/bin/sandbox"
+FAKEROOT_BINARY          = "/usr/bin/fakeroot"
+BASH_BINARY              = "/bin/bash"
+MOVE_BINARY              = "/bin/mv"
+PRELINK_BINARY           = "/usr/sbin/prelink"
+AUTODEP_LIBRARY          = "/usr/lib/file_hook.so"
+
+
+INVALID_ENV_FILE         = "/etc/spork/is/not/valid/profile.env"
+REPO_NAME_FILE           = "repo_name"
+REPO_NAME_LOC            = "profiles" + "/" + REPO_NAME_FILE
+
+PORTAGE_PACKAGE_ATOM     = "sys-apps/portage"
+LIBC_PACKAGE_ATOM        = "virtual/libc"
+OS_HEADERS_PACKAGE_ATOM  = "virtual/os-headers"
+
+INCREMENTALS             = ("USE", "USE_EXPAND", "USE_EXPAND_HIDDEN",
+                           "FEATURES", "ACCEPT_KEYWORDS",
+                           "CONFIG_PROTECT_MASK", "CONFIG_PROTECT",
+                           "PRELINK_PATH", "PRELINK_PATH_MASK",
+                           "PROFILE_ONLY_VARIABLES")
+EBUILD_PHASES            = ("pretend", "setup", "unpack", "prepare", "configure",
+                           "compile", "test", "install",
+                           "package", "preinst", "postinst","prerm", "postrm",
+                           "nofetch", "config", "info", "other")
+SUPPORTED_FEATURES       = frozenset([
+                           "allow-missing-manifests",
+                           "assume-digests", "binpkg-logs", "buildpkg", "buildsyspkg", "candy",
+                           "ccache", "chflags", "collision-protect", "compress-build-logs", 
+                           "depcheck", "depcheckstrict",
+                           "digest", "distcc", "distcc-pump", "distlocks", "ebuild-locks", "fakeroot",
+                           "fail-clean", "fixpackages", "force-mirror", "getbinpkg",
+                           "installsources", "keeptemp", "keepwork", "fixlafiles", "lmirror",
+                           "metadata-transfer", "mirror", "multilib-strict", "news",
+                           "noauto", "noclean", "nodoc", "noinfo", "noman",
+                           "nostrip", "notitles", "parallel-fetch", "parallel-install",
+                           "parse-eapi-ebuild-head",
+                           "prelink-checksums", "preserve-libs",
+                           "protect-owned", "python-trace", "sandbox",
+                           "selinux", "sesandbox", "sfperms",
+                           "sign", "skiprocheck", "split-elog", "split-log", "splitdebug",
+                           "strict", "stricter", "suidctl", "test", "test-fail-continue",
+                           "unknown-features-filter", "unknown-features-warn",
+                           "unmerge-logs", "unmerge-orphans", "userfetch", "userpriv",
+                           "usersandbox", "usersync", "webrsync-gpg"])
+
+EAPI                     = 4
+
+HASHING_BLOCKSIZE        = 32768
+MANIFEST1_HASH_FUNCTIONS = ("MD5", "SHA256", "RMD160")
+MANIFEST2_HASH_FUNCTIONS = ("SHA1", "SHA256", "RMD160")
+
+MANIFEST1_REQUIRED_HASH  = "MD5"
+MANIFEST2_REQUIRED_HASH  = "SHA1"
+
+MANIFEST2_IDENTIFIERS    = ("AUX", "MISC", "DIST", "EBUILD")
+# ===========================================================================
+# END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT
+# ===========================================================================
+
+# Private constants for use in conditional code in order to minimize the diff
+# between branches.
+_ENABLE_DYN_LINK_MAP    = True
+_ENABLE_PRESERVE_LIBS   = True
+_ENABLE_REPO_NAME_WARN  = True
+_ENABLE_SET_CONFIG      = True
+_SANDBOX_COMPAT_LEVEL   = "22"
+
+
+# The definitions above will differ between branches, so it's useful to have
+# common lines of diff context here in order to avoid merge conflicts.
+
+if not _ENABLE_PRESERVE_LIBS:
+	SUPPORTED_FEATURES = set(SUPPORTED_FEATURES)
+	SUPPORTED_FEATURES.remove("preserve-libs")
+	SUPPORTED_FEATURES = frozenset(SUPPORTED_FEATURES)
+
+if not _ENABLE_SET_CONFIG:
+	WORLD_SETS_FILE = '/dev/null'

diff --git a/portage_with_autodep/pym/portage/cvstree.py b/portage_with_autodep/pym/portage/cvstree.py
new file mode 100644
index 0000000..9ba22f3
--- /dev/null
+++ b/portage_with_autodep/pym/portage/cvstree.py
@@ -0,0 +1,293 @@
+# cvstree.py -- cvs tree utilities
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import io
+import re
+import stat
+import sys
+import time
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+# [D]/Name/Version/Date/Flags/Tags
+
+def pathdata(entries, path):
+	"""(entries,path)
+	Returns the data(dict) for a specific file/dir at the path specified."""
+	mysplit=path.split("/")
+	myentries=entries
+	mytarget=mysplit[-1]
+	mysplit=mysplit[:-1]
+	for mys in mysplit:
+		if mys in myentries["dirs"]:
+			myentries=myentries["dirs"][mys]
+		else:
+			return None
+	if mytarget in myentries["dirs"]:
+		return myentries["dirs"][mytarget]
+	elif mytarget in myentries["files"]:
+		return myentries["files"][mytarget]
+	else:
+		return None
+
+def fileat(entries, path):
+	return pathdata(entries,path)
+
+def isadded(entries, path):
+	"""(entries,path)
+	Returns true if the path exists and is added to the cvs tree."""
+	mytarget=pathdata(entries, path)
+	if mytarget:
+		if "cvs" in mytarget["status"]:
+			return 1
+
+	basedir=os.path.dirname(path)
+	filename=os.path.basename(path)
+
+	try:
+		myfile = io.open(
+			_unicode_encode(os.path.join(basedir, 'CVS', 'Entries'),
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['content'], errors='strict')
+	except IOError:
+		return 0
+	mylines=myfile.readlines()
+	myfile.close()
+
+	rep = re.compile("^/" + re.escape(filename) + "/")
+	for x in mylines:
+		if rep.search(x):
+			return 1
+
+	return 0
+
+def findnew(entries,recursive=0,basedir=""):
+	"""(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all elements that have been added but
+	have not yet been committed. Returns a list of paths, optionally prepended
+	with a basedir."""
+	if basedir and basedir[-1]!="/":
+		basedir=basedir+"/"
+	mylist=[]
+	for myfile in entries["files"]:
+		if "cvs" in entries["files"][myfile]["status"]:
+			if "0" == entries["files"][myfile]["revision"]:
+				mylist.append(basedir+myfile)
+	if recursive:
+		for mydir in entries["dirs"]:
+			mylist+=findnew(entries["dirs"][mydir],recursive,basedir+mydir)
+	return mylist
+
+def findoption(entries, pattern, recursive=0, basedir=""):
+	"""(entries, pattern, recursive=0, basedir="")
+	Iterate over paths of cvs entries for which the pattern.search() method
+	finds a match. Returns a list of paths, optionally prepended with a
+	basedir."""
+	if not basedir.endswith("/"):
+		basedir += "/"
+	for myfile, mydata in entries["files"].items():
+		if "cvs" in mydata["status"]:
+			if pattern.search(mydata["flags"]):
+				yield basedir+myfile
+	if recursive:
+		for mydir, mydata in entries["dirs"].items():
+			for x in findoption(mydata, pattern,
+				recursive, basedir+mydir):
+				yield x
+
+def findchanged(entries,recursive=0,basedir=""):
+	"""(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all elements that exist in the cvs tree
+	and differ from the committed version. Returns a list of paths, optionally
+	prepended with a basedir."""
+	if basedir and basedir[-1]!="/":
+		basedir=basedir+"/"
+	mylist=[]
+	for myfile in entries["files"]:
+		if "cvs" in entries["files"][myfile]["status"]:
+			if "current" not in entries["files"][myfile]["status"]:
+				if "exists" in entries["files"][myfile]["status"]:
+					if entries["files"][myfile]["revision"]!="0":
+						mylist.append(basedir+myfile)
+	if recursive:
+		for mydir in entries["dirs"]:
+			mylist+=findchanged(entries["dirs"][mydir],recursive,basedir+mydir)
+	return mylist
+	
+def findmissing(entries,recursive=0,basedir=""):
+	"""(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all elements that are listed in the cvs
+	tree but do not exist on the filesystem. Returns a list of paths,
+	optionally prepended with a basedir."""
+	if basedir and basedir[-1]!="/":
+		basedir=basedir+"/"
+	mylist=[]
+	for myfile in entries["files"]:
+		if "cvs" in entries["files"][myfile]["status"]:
+			if "exists" not in entries["files"][myfile]["status"]:
+				if "removed" not in entries["files"][myfile]["status"]:
+					mylist.append(basedir+myfile)
+	if recursive:
+		for mydir in entries["dirs"]:
+			mylist+=findmissing(entries["dirs"][mydir],recursive,basedir+mydir)
+	return mylist
+
+def findunadded(entries,recursive=0,basedir=""):
+	"""(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all elements that are in valid cvs
+	directories but are not part of the cvs tree. Returns a list of paths,
+	optionally prepended with a basedir."""
+	if basedir and basedir[-1]!="/":
+		basedir=basedir+"/"
+	mylist=[]
+
+	#ignore what cvs ignores.
+	for myfile in entries["files"]:
+		if "cvs" not in entries["files"][myfile]["status"]:
+			mylist.append(basedir+myfile)
+	if recursive:
+		for mydir in entries["dirs"]:
+			mylist+=findunadded(entries["dirs"][mydir],recursive,basedir+mydir)
+	return mylist
+
+def findremoved(entries,recursive=0,basedir=""):
+	"""(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all elements that are flagged for cvs
+	deletion. Returns a list of paths, optionally prepended with a basedir."""
+	if basedir and basedir[-1]!="/":
+		basedir=basedir+"/"
+	mylist=[]
+	for myfile in entries["files"]:
+		if "removed" in entries["files"][myfile]["status"]:
+			mylist.append(basedir+myfile)
+	if recursive:
+		for mydir in entries["dirs"]:
+			mylist+=findremoved(entries["dirs"][mydir],recursive,basedir+mydir)
+	return mylist
+
+def findall(entries, recursive=0, basedir=""):
+	"""(entries,recursive=0,basedir="")
+	Recurses the entries tree to find all new, changed, missing, unadded,
+	and removed entities. Returns a 5 element list of lists as returned from
+	each find*()."""
+
+	if basedir and basedir[-1]!="/":
+		basedir=basedir+"/"
+	mynew     = findnew(entries,recursive,basedir)
+	mychanged = findchanged(entries,recursive,basedir)
+	mymissing = findmissing(entries,recursive,basedir)
+	myunadded = findunadded(entries,recursive,basedir)
+	myremoved = findremoved(entries,recursive,basedir)
+	return [mynew, mychanged, mymissing, myunadded, myremoved]
+
+ignore_list = re.compile("(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$")
+def apply_cvsignore_filter(list):
+	x=0
+	while x < len(list):
+		if ignore_list.match(list[x].split("/")[-1]):
+			list.pop(x)
+		else:
+			x+=1
+	return list
+	
+def getentries(mydir,recursive=0):
+	"""(basedir,recursive=0)
+	Scans the given directory and returns a datadict of all the entries in
+	the directory separated as a dirs dict and a files dict."""
+	myfn=mydir+"/CVS/Entries"
+	# entries=[dirs, files]
+	entries={"dirs":{},"files":{}}
+	if not os.path.exists(mydir):
+		return entries
+	try:
+		myfile = io.open(_unicode_encode(myfn,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['content'], errors='strict')
+		mylines=myfile.readlines()
+		myfile.close()
+	except SystemExit:
+		raise
+	except:
+		mylines=[]
+	for line in mylines:
+		if line and line[-1]=="\n":
+			line=line[:-1]
+		if not line:
+			continue
+		if line=="D": # End of entries file
+			break
+		mysplit=line.split("/")
+		if len(mysplit)!=6:
+			print("Confused:",mysplit)
+			continue
+		if mysplit[0]=="D":
+			entries["dirs"][mysplit[1]]={"dirs":{},"files":{},"status":[]}
+			entries["dirs"][mysplit[1]]["status"]=["cvs"]
+			if os.path.isdir(mydir+"/"+mysplit[1]):
+				entries["dirs"][mysplit[1]]["status"]+=["exists"]
+				entries["dirs"][mysplit[1]]["flags"]=mysplit[2:]
+				if recursive:
+					rentries=getentries(mydir+"/"+mysplit[1],recursive)
+					entries["dirs"][mysplit[1]]["dirs"]=rentries["dirs"]
+					entries["dirs"][mysplit[1]]["files"]=rentries["files"]
+		else:
+			# [D]/Name/revision/Date/Flags/Tags
+			entries["files"][mysplit[1]]={}
+			entries["files"][mysplit[1]]["revision"]=mysplit[2]
+			entries["files"][mysplit[1]]["date"]=mysplit[3]
+			entries["files"][mysplit[1]]["flags"]=mysplit[4]
+			entries["files"][mysplit[1]]["tags"]=mysplit[5]
+			entries["files"][mysplit[1]]["status"]=["cvs"]
+			if entries["files"][mysplit[1]]["revision"][0]=="-":
+				entries["files"][mysplit[1]]["status"]+=["removed"]
+
+	for file in apply_cvsignore_filter(os.listdir(mydir)):
+		if file=="CVS":
+			continue
+		if os.path.isdir(mydir+"/"+file):
+			if file not in entries["dirs"]:
+				entries["dirs"][file]={"dirs":{},"files":{}}
+				# It's normal for a directory to be unlisted in Entries
+				# when checked out without -P (see bug #257660).
+				rentries=getentries(mydir+"/"+file,recursive)
+				entries["dirs"][file]["dirs"]=rentries["dirs"]
+				entries["dirs"][file]["files"]=rentries["files"]
+			if "status" in entries["dirs"][file]:
+				if "exists" not in entries["dirs"][file]["status"]:
+					entries["dirs"][file]["status"]+=["exists"]
+			else:
+				entries["dirs"][file]["status"]=["exists"]
+		elif os.path.isfile(mydir+"/"+file):
+			if file not in entries["files"]:
+				entries["files"][file]={"revision":"","date":"","flags":"","tags":""}
+			if "status" in entries["files"][file]:
+				if "exists" not in entries["files"][file]["status"]:
+					entries["files"][file]["status"]+=["exists"]
+			else:
+				entries["files"][file]["status"]=["exists"]
+			try:
+				mystat=os.stat(mydir+"/"+file)
+				mytime = time.asctime(time.gmtime(mystat[stat.ST_MTIME]))
+				if "status" not in entries["files"][file]:
+					entries["files"][file]["status"]=[]
+				if mytime==entries["files"][file]["date"]:
+					entries["files"][file]["status"]+=["current"]
+			except SystemExit:
+				raise
+			except Exception as e:
+				print("failed to stat",file)
+				print(e)
+				return
+				
+		else:
+			print()
+			print("File of unknown type:",mydir+"/"+file)
+			print()
+	return entries
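+
+# Example (sketch; the checkout path is hypothetical):
+#
+#   entries = getentries("/var/cvsroot/gentoo-x86/app-misc", recursive=1)
+#   new, changed, missing, unadded, removed = findall(entries, recursive=1)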

diff --git a/portage_with_autodep/pym/portage/data.py b/portage_with_autodep/pym/portage/data.py
new file mode 100644
index 0000000..c38fa17
--- /dev/null
+++ b/portage_with_autodep/pym/portage/data.py
@@ -0,0 +1,122 @@
+# data.py -- Calculated/Discovered Data Values
+# Copyright 1998-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os, pwd, grp, platform
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.output:colorize',
+	'portage.util:writemsg',
+)
+from portage.localization import _
+
+ostype=platform.system()
+userland = None
+if ostype == "DragonFly" or ostype.endswith("BSD"):
+	userland = "BSD"
+else:
+	userland = "GNU"
+
+lchown = getattr(os, "lchown", None)
+
+if not lchown:
+	if ostype == "Darwin":
+		def lchown(*pos_args, **key_args):
+			pass
+	else:
+		def lchown(*pargs, **kwargs):
+			writemsg(colorize("BAD", "!!!") + _(
+				" It seems that os.lchown does not"
+				" exist.  Please rebuild python.\n"), noiselevel=-1)
+		lchown()
+
+lchown = portage._unicode_func_wrapper(lchown)
+
+def portage_group_warning():
+	warn_prefix = colorize("BAD", "*** WARNING ***  ")
+	mylines = [
+		"For security reasons, only system administrators should be",
+		"allowed in the portage group.  Untrusted users or processes",
+		"can potentially exploit the portage group for attacks such as",
+		"local privilege escalation."
+	]
+	for x in mylines:
+		writemsg(warn_prefix, noiselevel=-1)
+		writemsg(x, noiselevel=-1)
+		writemsg("\n", noiselevel=-1)
+	writemsg("\n", noiselevel=-1)
+
+# Portage has 3 security levels that depend on the uid and gid of the main
+# process and are assigned according to the following table:
+#
+# Privileges  secpass  uid    gid
+# normal      0        any    any
+# group       1        any    portage_gid
+# super       2        0      any
+#
+# If the "wheel" group does not exist then wheelgid falls back to 0.
+# If the "portage" group does not exist then portage_uid falls back to wheelgid.
+
+secpass=0
+
+uid=os.getuid()
+wheelgid=0
+
+if uid==0:
+	secpass=2
+try:
+	wheelgid=grp.getgrnam("wheel")[2]
+except KeyError:
+	pass
+
+# Allow the overriding of the user used for 'userpriv' and 'userfetch'
+_portage_uname = os.environ.get('PORTAGE_USERNAME', 'portage')
+_portage_grpname = os.environ.get('PORTAGE_GRPNAME', 'portage')
+
+#Discover the uid and gid of the portage user/group
+try:
+	portage_uid = pwd.getpwnam(_portage_uname)[2]
+	portage_gid = grp.getgrnam(_portage_grpname)[2]
+	if secpass < 1 and portage_gid in os.getgroups():
+		secpass=1
+except KeyError:
+	portage_uid=0
+	portage_gid=0
+	userpriv_groups = [portage_gid]
+	writemsg(colorize("BAD",
+		_("portage: 'portage' user or group missing.")) + "\n", noiselevel=-1)
+	writemsg(_(
+		"         For the defaults, line 1 goes into passwd, "
+		"and 2 into group.\n"), noiselevel=-1)
+	writemsg(colorize("GOOD",
+		"         portage:x:250:250:portage:/var/tmp/portage:/bin/false") \
+		+ "\n", noiselevel=-1)
+	writemsg(colorize("GOOD", "         portage::250:portage") + "\n",
+		noiselevel=-1)
+	portage_group_warning()
+else:
+	userpriv_groups = [portage_gid]
+	if secpass >= 2:
+		class _LazyUserprivGroups(portage.proxy.objectproxy.ObjectProxy):
+			def _get_target(self):
+				global userpriv_groups
+				if userpriv_groups is not self:
+					return userpriv_groups
+				userpriv_groups = _userpriv_groups
+				# Get a list of group IDs for the portage user. Do not use
+				# grp.getgrall() since it is known to trigger spurious
+				# SIGPIPE problems with nss_ldap.
+				mystatus, myoutput = \
+					portage.subprocess_getstatusoutput("id -G %s" % _portage_uname)
+				if mystatus == os.EX_OK:
+					for x in myoutput.split():
+						try:
+							userpriv_groups.append(int(x))
+						except ValueError:
+							pass
+					userpriv_groups[:] = sorted(set(userpriv_groups))
+				return userpriv_groups
+
+		_userpriv_groups = userpriv_groups
+		userpriv_groups = _LazyUserprivGroups()

diff --git a/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py
new file mode 100644
index 0000000..34ed031
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py
@@ -0,0 +1,282 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import shutil
+import signal
+import tempfile
+import traceback
+
+import errno
+import fcntl
+import portage
+from portage import os, _unicode_decode
+from portage.const import PORTAGE_PACKAGE_ATOM
+from portage.dep import match_from_list
+import portage.elog.messages
+from portage.elog import _preload_elog_modules
+from portage.util import ensure_dirs
+from _emerge.PollConstants import PollConstants
+from _emerge.SpawnProcess import SpawnProcess
+
+class MergeProcess(SpawnProcess):
+	"""
+	Merge packages in a subprocess, so the Scheduler can run in the main
+	thread while files are moved or copied asynchronously.
+	"""
+
+	__slots__ = ('mycat', 'mypkg', 'settings', 'treetype',
+		'vartree', 'scheduler', 'blockers', 'pkgloc', 'infloc', 'myebuild',
+		'mydbapi', 'prev_mtimes', 'unmerge', '_elog_reader_fd', '_elog_reg_id',
+		'_buf', '_elog_keys', '_locked_vdb')
+
+	def _start(self):
+		# Portage should always call setcpv prior to this
+		# point, but here we have a fallback as a convenience
+		# for external API consumers. It's important that
+		# this metadata access happens in the parent process,
+		# since closing of file descriptors in the subprocess
+		# can prevent access to open database connections such
+		# as that used by the sqlite metadata cache module.
+		cpv = "%s/%s" % (self.mycat, self.mypkg)
+		settings = self.settings
+		if cpv != settings.mycpv or \
+			"EAPI" not in settings.configdict["pkg"]:
+			settings.reload()
+			settings.reset()
+			settings.setcpv(cpv, mydb=self.mydbapi)
+
+		if not self.unmerge:
+			self._handle_self_reinstall()
+		super(MergeProcess, self)._start()
+
+	def _lock_vdb(self):
+		"""
+		Lock the vdb if FEATURES=parallel-install is NOT enabled,
+		otherwise do nothing. This is implemented with
+		vardbapi.lock(), which supports reentrance by the
+		subprocess that we spawn.
+		"""
+		if "parallel-install" not in self.settings.features:
+			self.vartree.dbapi.lock()
+			self._locked_vdb = True
+
+	def _unlock_vdb(self):
+		"""
+		Unlock the vdb if we hold a lock, otherwise do nothing.
+		"""
+		if self._locked_vdb:
+			self.vartree.dbapi.unlock()
+			self._locked_vdb = False
+
+	def _handle_self_reinstall(self):
+		"""
+		If portage is reinstalling itself, create temporary
+		copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
+		to avoid relying on the new versions which may be
+		incompatible. Register an atexit hook to clean up the
+		temporary directories. Pre-load elog modules here since
+		we won't be able to later if they get unmerged (happens
+		when namespace changes).
+		"""
+
+		settings = self.settings
+		cpv = settings.mycpv
+		reinstall_self = False
+		if self.settings["ROOT"] == "/" and \
+			match_from_list(PORTAGE_PACKAGE_ATOM, [cpv]):
+			inherited = frozenset(self.settings.get('INHERITED', '').split())
+			if not self.vartree.dbapi.cpv_exists(cpv) or \
+				'9999' in cpv or \
+				'git' in inherited or \
+				'git-2' in inherited:
+				reinstall_self = True
+
+		if reinstall_self:
+			# Load lazily referenced portage submodules into memory,
+			# so imports won't fail during portage upgrade/downgrade.
+			_preload_elog_modules(self.settings)
+			portage.proxy.lazyimport._preload_portage_submodules()
+
+			# Make the temp directory inside $PORTAGE_TMPDIR/portage, since
+			# it's common for /tmp and /var/tmp to be mounted with the
+			# "noexec" option (see bug #346899).
+			build_prefix = os.path.join(settings["PORTAGE_TMPDIR"], "portage")
+			ensure_dirs(build_prefix)
+			base_path_tmp = tempfile.mkdtemp(
+				"", "._portage_reinstall_.", build_prefix)
+			portage.process.atexit_register(shutil.rmtree, base_path_tmp)
+			dir_perms = 0o755
+			for subdir in "bin", "pym":
+				var_name = "PORTAGE_%s_PATH" % subdir.upper()
+				var_orig = settings[var_name]
+				var_new = os.path.join(base_path_tmp, subdir)
+				settings[var_name] = var_new
+				settings.backup_changes(var_name)
+				shutil.copytree(var_orig, var_new, symlinks=True)
+				os.chmod(var_new, dir_perms)
+			portage._bin_path = settings['PORTAGE_BIN_PATH']
+			portage._pym_path = settings['PORTAGE_PYM_PATH']
+			os.chmod(base_path_tmp, dir_perms)
+
+	def _elog_output_handler(self, fd, event):
+		output = None
+		if event & PollConstants.POLLIN:
+			try:
+				output = os.read(fd, self._bufsize)
+			except OSError as e:
+				if e.errno not in (errno.EAGAIN, errno.EINTR):
+					raise
+		if output:
+			lines = _unicode_decode(output).split('\n')
+			if len(lines) == 1:
+				self._buf += lines[0]
+			else:
+				lines[0] = self._buf + lines[0]
+				self._buf = lines.pop()
+				out = io.StringIO()
+				for line in lines:
+					funcname, phase, key, msg = line.split(' ', 3)
+					self._elog_keys.add(key)
+					reporter = getattr(portage.elog.messages, funcname)
+					reporter(msg, phase=phase, key=key, out=out)
+
+	def _spawn(self, args, fd_pipes, **kwargs):
+		"""
+		Fork a subprocess, apply local settings, and call
+		dblink.merge().
+		"""
+
+		elog_reader_fd, elog_writer_fd = os.pipe()
+		fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
+			fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+		blockers = None
+		if self.blockers is not None:
+			# Query blockers in the main process, since closing
+			# of file descriptors in the subprocess can prevent
+			# access to open database connections such as that
+			# used by the sqlite metadata cache module.
+			blockers = self.blockers()
+		mylink = portage.dblink(self.mycat, self.mypkg, settings=self.settings,
+			treetype=self.treetype, vartree=self.vartree,
+			blockers=blockers, scheduler=self.scheduler,
+			pipe=elog_writer_fd)
+		fd_pipes[elog_writer_fd] = elog_writer_fd
+		self._elog_reg_id = self.scheduler.register(elog_reader_fd,
+			self._registered_events, self._elog_output_handler)
+
+		# If a concurrent emerge process tries to install a package
+		# in the same SLOT as this one at the same time, there is an
+		# extremely unlikely chance that the COUNTER values will not be
+		# ordered correctly unless we lock the vdb here.
+		# FEATURES=parallel-install skips this lock in order to
+		# improve performance, and the risk is practically negligible.
+		self._lock_vdb()
+		counter = None
+		if not self.unmerge:
+			counter = self.vartree.dbapi.counter_tick()
+
+		pid = os.fork()
+		if pid != 0:
+			os.close(elog_writer_fd)
+			self._elog_reader_fd = elog_reader_fd
+			self._buf = ""
+			self._elog_keys = set()
+
+			# invalidate relevant vardbapi caches
+			if self.vartree.dbapi._categories is not None:
+				self.vartree.dbapi._categories = None
+			self.vartree.dbapi._pkgs_changed = True
+			self.vartree.dbapi._clear_pkg_cache(mylink)
+
+			portage.process.spawned_pids.append(pid)
+			return [pid]
+
+		os.close(elog_reader_fd)
+		portage.process._setup_pipes(fd_pipes)
+
+		# Use default signal handlers since the ones inherited
+		# from the parent process are irrelevant here.
+		signal.signal(signal.SIGINT, signal.SIG_DFL)
+		signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+		portage.output.havecolor = self.settings.get('NOCOLOR') \
+			not in ('yes', 'true')
+
+		# In this subprocess we want mylink._display_merge() to use
+		# stdout/stderr directly since they are pipes. This behavior
+		# is triggered when mylink._scheduler is None.
+		mylink._scheduler = None
+
+		# Avoid wasteful updates of the vdb cache.
+		self.vartree.dbapi._flush_cache_enabled = False
+
+		# In this subprocess we don't want PORTAGE_BACKGROUND to
+		# suppress stdout/stderr output since they are pipes. We
+		# also don't want to open PORTAGE_LOG_FILE, since it will
+		# already be opened by the parent process, so we set the
+		# "subprocess" value for use in conditional logging code
+		# involving PORTAGE_LOG_FILE.
+		if not self.unmerge:
+			# unmerge phases have separate logs
+			if self.settings.get("PORTAGE_BACKGROUND") == "1":
+				self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
+			else:
+				self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
+			self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
+		self.settings["PORTAGE_BACKGROUND"] = "subprocess"
+		self.settings.backup_changes("PORTAGE_BACKGROUND")
+
+		rval = 1
+		try:
+			if self.unmerge:
+				if not mylink.exists():
+					rval = os.EX_OK
+				elif mylink.unmerge(
+					ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
+					mylink.lockdb()
+					try:
+						mylink.delete()
+					finally:
+						mylink.unlockdb()
+					rval = os.EX_OK
+			else:
+				rval = mylink.merge(self.pkgloc, self.infloc,
+					myebuild=self.myebuild, mydbapi=self.mydbapi,
+					prev_mtimes=self.prev_mtimes, counter=counter)
+		except SystemExit:
+			raise
+		except:
+			traceback.print_exc()
+		finally:
+			# Call os._exit() from finally block, in order to suppress any
+			# finally blocks from earlier in the call stack. See bug #345289.
+			os._exit(rval)
+
+	def _unregister(self):
+		"""
+		Unregister from the scheduler and close open files.
+		"""
+
+		if not self.unmerge:
+			# Populate the vardbapi cache for the new package
+			# while its inodes are still hot.
+			try:
+				self.vartree.dbapi.aux_get(self.settings.mycpv, ["EAPI"])
+			except KeyError:
+				pass
+
+		self._unlock_vdb()
+		if self._elog_reg_id is not None:
+			self.scheduler.unregister(self._elog_reg_id)
+			self._elog_reg_id = None
+		if self._elog_reader_fd:
+			os.close(self._elog_reader_fd)
+			self._elog_reader_fd = None
+		if self._elog_keys is not None:
+			for key in self._elog_keys:
+				portage.elog.elog_process(key, self.settings,
+					phasefilter=("prerm", "postrm"))
+			self._elog_keys = None
+
+		super(MergeProcess, self)._unregister()
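+
+# Rough usage sketch (every constructor argument below is a placeholder;
+# in practice the Scheduler assembles these from a live emerge session):
+#
+#   proc = MergeProcess(mycat="app-misc", mypkg="foo-1.0",
+#       settings=settings, treetype="vartree", vartree=vartree,
+#       scheduler=scheduler, pkgloc=image_dir, infloc=info_dir,
+#       myebuild=ebuild_path, mydbapi=mydbapi, prev_mtimes=ldpath_mtimes,
+#       unmerge=False)
+#   proc.start()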

diff --git a/portage_with_autodep/pym/portage/dbapi/__init__.py b/portage_with_autodep/pym/portage/dbapi/__init__.py
new file mode 100644
index 0000000..e386faa
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/__init__.py
@@ -0,0 +1,302 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["dbapi"]
+
+import re
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.dbapi.dep_expand:dep_expand@_dep_expand',
+	'portage.dep:match_from_list',
+	'portage.output:colorize',
+	'portage.util:cmp_sort_key,writemsg',
+	'portage.versions:catsplit,catpkgsplit,vercmp',
+)
+
+from portage import os
+from portage import auxdbkeys
+from portage.localization import _
+
+class dbapi(object):
+	_category_re = re.compile(r'^\w[-.+\w]*$')
+	_categories = None
+	_use_mutable = False
+	_known_keys = frozenset(x for x in auxdbkeys
+		if not x.startswith("UNUSED_0"))
+	def __init__(self):
+		pass
+
+	@property
+	def categories(self):
+		"""
+		Use self.cp_all() to generate a category list. Mutable instances
+		can delete the self._categories attribute in cases when the cached
+		categories become invalid and need to be regenerated.
+		"""
+		if self._categories is not None:
+			return self._categories
+		self._categories = tuple(sorted(set(catsplit(x)[0] \
+			for x in self.cp_all())))
+		return self._categories
+
+	def close_caches(self):
+		pass
+
+	def cp_list(self, cp, use_cache=1):
+		raise NotImplementedError(self)
+
+	def _cpv_sort_ascending(self, cpv_list):
+		"""
+		Use this to sort self.cp_list() results in ascending
+		order. It sorts in place and returns None.
+		"""
+		if len(cpv_list) > 1:
+			# If the cpv includes explicit -r0, it has to be preserved
+			# for consistency in findname and aux_get calls, so use a
+			# dict to map strings back to their original values.
+			ver_map = {}
+			for cpv in cpv_list:
+				ver_map[cpv] = '-'.join(catpkgsplit(cpv)[2:])
+			def cmp_cpv(cpv1, cpv2):
+				return vercmp(ver_map[cpv1], ver_map[cpv2])
+			cpv_list.sort(key=cmp_sort_key(cmp_cpv))
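+			# e.g. (illustrative): ["x/y-1.0-r1", "x/y-1.0"] sorts to
+			# ["x/y-1.0", "x/y-1.0-r1"], with the original cpv strings
+			# (including any explicit -r0) preserved.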
+
+	def cpv_all(self):
+		"""Return all CPVs in the db
+		Args:
+			None
+		Returns:
+			A list of Strings, 1 per CPV
+
+		This function relies on a subclass implementing cp_all; this is why the hasattr check is there
+		"""
+
+		if not hasattr(self, "cp_all"):
+			raise NotImplementedError
+		cpv_list = []
+		for cp in self.cp_all():
+			cpv_list.extend(self.cp_list(cp))
+		return cpv_list
+
+	def cp_all(self):
+		""" Implement this in a child class
+		Args:
+			None
+		Returns:
+			A list of strings 1 per CP in the datastore
+		"""
+		raise NotImplementedError
+
+	def aux_get(self, mycpv, mylist, myrepo=None):
+		"""Return the metadata keys in mylist for mycpv
+		Args:
+			mycpv - "sys-apps/foo-1.0"
+			mylist - ["SLOT","DEPEND","HOMEPAGE"]
+			myrepo - The repository name.
+		Returns: 
+			a list of results, in order of keys in mylist, such as:
+			["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found
+		"""
+		raise NotImplementedError
+	
+	def aux_update(self, cpv, metadata_updates):
+		"""
+		Args:
+			cpv - "sys-apps/foo-1.0"
+			metadata_updates = { key : newvalue }
+		Returns:
+			None
+		"""
+		raise NotImplementedError
+
+	def match(self, origdep, use_cache=1):
+		"""Given a dependency, try to find packages that match
+		Args:
+			origdep - Depend atom
+			use_cache - Boolean indicating if we should use the cache or not
+			NOTE: Do we ever not want the cache?
+		Returns:
+			a list of packages that match origdep
+		"""
+		mydep = _dep_expand(origdep, mydb=self, settings=self.settings)
+		return list(self._iter_match(mydep,
+			self.cp_list(mydep.cp, use_cache=use_cache)))
+
+	def _iter_match(self, atom, cpv_iter):
+		cpv_iter = iter(match_from_list(atom, cpv_iter))
+		if atom.slot:
+			cpv_iter = self._iter_match_slot(atom, cpv_iter)
+		if atom.unevaluated_atom.use:
+			cpv_iter = self._iter_match_use(atom, cpv_iter)
+		if atom.repo:
+			cpv_iter = self._iter_match_repo(atom, cpv_iter)
+		return cpv_iter
+
+	def _iter_match_repo(self, atom, cpv_iter):
+		for cpv in cpv_iter:
+			try:
+				if self.aux_get(cpv, ["repository"], myrepo=atom.repo)[0] == atom.repo:
+					yield cpv
+			except KeyError:
+				continue
+
+	def _iter_match_slot(self, atom, cpv_iter):
+		for cpv in cpv_iter:
+			try:
+				if self.aux_get(cpv, ["SLOT"], myrepo=atom.repo)[0] == atom.slot:
+					yield cpv
+			except KeyError:
+				continue
+
+	def _iter_match_use(self, atom, cpv_iter):
+		"""
+		1) Check for required IUSE intersection (need implicit IUSE here).
+		2) Check enabled/disabled flag states.
+		"""
+
+		iuse_implicit_match = self.settings._iuse_implicit_match
+		for cpv in cpv_iter:
+			try:
+				iuse, slot, use = self.aux_get(cpv, ["IUSE", "SLOT", "USE"], myrepo=atom.repo)
+			except KeyError:
+				continue
+			iuse = frozenset(x.lstrip('+-') for x in iuse.split())
+			missing_iuse = False
+			for x in atom.unevaluated_atom.use.required:
+				if x not in iuse and not iuse_implicit_match(x):
+					missing_iuse = True
+					break
+			if missing_iuse:
+				continue
+			if not atom.use:
+				pass
+			elif not self._use_mutable:
+				# Use IUSE to validate USE settings for built packages,
+				# in case the package manager that built this package
+				# failed to do that for some reason (or in case of
+				# data corruption).
+				use = frozenset(x for x in use.split() if x in iuse or \
+					iuse_implicit_match(x))
+				missing_enabled = atom.use.missing_enabled.difference(iuse)
+				missing_disabled = atom.use.missing_disabled.difference(iuse)
+
+				if atom.use.enabled:
+					if atom.use.enabled.intersection(missing_disabled):
+						continue
+					need_enabled = atom.use.enabled.difference(use)
+					if need_enabled:
+						need_enabled = need_enabled.difference(missing_enabled)
+						if need_enabled:
+							continue
+
+				if atom.use.disabled:
+					if atom.use.disabled.intersection(missing_enabled):
+						continue
+					need_disabled = atom.use.disabled.intersection(use)
+					if need_disabled:
+						need_disabled = need_disabled.difference(missing_disabled)
+						if need_disabled:
+							continue
+			else:
+				# Check masked and forced flags for repoman.
+				mysettings = getattr(self, 'settings', None)
+				if mysettings is not None and not mysettings.local_config:
+
+					pkg = "%s:%s" % (cpv, slot)
+					usemask = mysettings._getUseMask(pkg)
+					if usemask.intersection(atom.use.enabled):
+						continue
+
+					useforce = mysettings._getUseForce(pkg).difference(usemask)
+					if useforce.intersection(atom.use.disabled):
+						continue
+
+			yield cpv
+
+	def invalidentry(self, mypath):
+		if '/-MERGING-' in mypath:
+			if os.path.exists(mypath):
+				writemsg(colorize("BAD", _("INCOMPLETE MERGE:"))+" %s\n" % mypath,
+					noiselevel=-1)
+		else:
+			writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
+
+	def update_ents(self, updates, onProgress=None, onUpdate=None):
+		"""
+		Update metadata of all packages for package moves.
+		@param updates: A list of move commands, or dict of {repo_name: list}
+		@type updates: list or dict
+		@param onProgress: A progress callback function
+		@type onProgress: a callable that takes 2 integer arguments: maxval and curval
+		@param onUpdate: A progress callback function called only
+			for packages that are modified by updates.
+		@type onUpdate: a callable that takes 2 integer arguments:
+			maxval and curval
+		"""
+		cpv_all = self.cpv_all()
+		cpv_all.sort()
+		maxval = len(cpv_all)
+		aux_get = self.aux_get
+		aux_update = self.aux_update
+		meta_keys = ["DEPEND", "RDEPEND", "PDEPEND", "PROVIDE", 'repository']
+		repo_dict = None
+		if isinstance(updates, dict):
+			repo_dict = updates
+		from portage.update import update_dbentries
+		if onUpdate:
+			onUpdate(maxval, 0)
+		if onProgress:
+			onProgress(maxval, 0)
+		for i, cpv in enumerate(cpv_all):
+			metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+			repo = metadata.pop('repository')
+			if repo_dict is None:
+				updates_list = updates
+			else:
+				try:
+					updates_list = repo_dict[repo]
+				except KeyError:
+					try:
+						updates_list = repo_dict['DEFAULT']
+					except KeyError:
+						continue
+
+			if not updates_list:
+				continue
+
+			metadata_updates = update_dbentries(updates_list, metadata)
+			if metadata_updates:
+				aux_update(cpv, metadata_updates)
+				if onUpdate:
+					onUpdate(maxval, i+1)
+			if onProgress:
+				onProgress(maxval, i+1)
+
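+	# Example call (sketch; update commands normally come from parsed
+	# profiles/updates files, and the atoms here are invented):
+	#
+	#   db.update_ents({"DEFAULT": [["move", Atom("app-misc/old"),
+	#       Atom("app-misc/new")]]})
+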
+	def move_slot_ent(self, mylist, repo_match=None):
+		"""This function takes a sequence:
+		Args:
+			mylist: a sequence of (command, package, originalslot, newslot)
+			repo_match: callable that takes single repo_name argument
+				and returns True if the update should be applied
+		Returns:
+			The number of slotmoves this function did
+		"""
+		pkg = mylist[1]
+		origslot = mylist[2]
+		newslot = mylist[3]
+		origmatches = self.match(pkg)
+		moves = 0
+		if not origmatches:
+			return moves
+		for mycpv in origmatches:
+			slot = self.aux_get(mycpv, ["SLOT"])[0]
+			if slot != origslot:
+				continue
+			if repo_match is not None \
+				and not repo_match(self.aux_get(mycpv, ['repository'])[0]):
+				continue
+			moves += 1
+			mydata = {"SLOT": newslot+"\n"}
+			self.aux_update(mycpv, mydata)
+		return moves

diff --git a/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py
new file mode 100644
index 0000000..6d6a27d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py
@@ -0,0 +1,72 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.dep import Atom, _get_useflag_re
+
+def expand_new_virt(vardb, atom):
+	"""
+	Iterate over the recursively expanded RDEPEND atoms of
+	a new-style virtual. If atom is not a new-style virtual
+	or it does not match an installed package then it is
+	yielded without any expansion.
+	"""
+	if not isinstance(atom, Atom):
+		atom = Atom(atom)
+
+	if not atom.cp.startswith("virtual/"):
+		yield atom
+		return
+
+	traversed = set()
+	stack = [atom]
+
+	while stack:
+		atom = stack.pop()
+		if atom.blocker or \
+			not atom.cp.startswith("virtual/"):
+			yield atom
+			continue
+
+		matches = vardb.match(atom)
+		if not (matches and matches[-1].startswith("virtual/")):
+			yield atom
+			continue
+
+		virt_cpv = matches[-1]
+		if virt_cpv in traversed:
+			continue
+
+		traversed.add(virt_cpv)
+		eapi, iuse, rdepend, use = vardb.aux_get(virt_cpv,
+			["EAPI", "IUSE", "RDEPEND", "USE"])
+		if not portage.eapi_is_supported(eapi):
+			yield atom
+			continue
+
+		# Validate IUSE and USE, for early detection of vardb corruption.
+		useflag_re = _get_useflag_re(eapi)
+		valid_iuse = []
+		for x in iuse.split():
+			if x[:1] in ("+", "-"):
+				x = x[1:]
+			if useflag_re.match(x) is not None:
+				valid_iuse.append(x)
+		valid_iuse = frozenset(valid_iuse)
+
+		iuse_implicit_match = vardb.settings._iuse_implicit_match
+		valid_use = []
+		for x in use.split():
+			if x in valid_iuse or iuse_implicit_match(x):
+				valid_use.append(x)
+		valid_use = frozenset(valid_use)
+
+		success, atoms = portage.dep_check(rdepend,
+			None, vardb.settings, myuse=valid_use,
+			myroot=vardb.root, trees={vardb.root:{"porttree":vardb.vartree,
+			"vartree":vardb.vartree}})
+
+		if success:
+			stack.extend(atoms)
+		else:
+			yield atom

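As a usage note (an illustrative sketch, not part of the patch): given
a vartree dbapi, draining the generator flattens a new-style virtual
into the concrete atoms that satisfy it; "virtual/jre" is only an
example atom:

    import portage
    from portage.dbapi._expand_new_virt import expand_new_virt

    vardb = portage.db[portage.root]["vartree"].dbapi
    for atom in expand_new_virt(vardb, "virtual/jre"):
        print(atom)
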
diff --git a/portage_with_autodep/pym/portage/dbapi/bintree.py b/portage_with_autodep/pym/portage/dbapi/bintree.py
new file mode 100644
index 0000000..62fc623
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/bintree.py
@@ -0,0 +1,1366 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["bindbapi", "binarytree"]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.checksum:hashfunc_map,perform_multiple_checksums,verify_all',
+	'portage.dbapi.dep_expand:dep_expand',
+	'portage.dep:dep_getkey,isjustname,match_from_list',
+	'portage.output:EOutput,colorize',
+	'portage.locks:lockfile,unlockfile',
+	'portage.package.ebuild.doebuild:_vdb_use_conditional_atoms',
+	'portage.package.ebuild.fetch:_check_distfile',
+	'portage.update:update_dbentries',
+	'portage.util:atomic_ofstream,ensure_dirs,normalize_path,' + \
+		'writemsg,writemsg_stdout',
+	'portage.util.listdir:listdir',
+	'portage.versions:best,catpkgsplit,catsplit',
+)
+
+from portage.cache.mappings import slot_dict_class
+from portage.const import CACHE_PATH
+from portage.dbapi.virtual import fakedbapi
+from portage.dep import Atom, use_reduce, paren_enclose
+from portage.exception import AlarmSignal, InvalidPackageName, \
+	PermissionDenied, PortageException
+from portage.localization import _
+from portage import _movefile
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+import codecs
+import errno
+import io
+import re
+import stat
+import subprocess
+import sys
+import tempfile
+import textwrap
+from itertools import chain
+try:
+	from urllib.parse import urlparse
+	from urllib.request import urlopen as urllib_request_urlopen
+except ImportError:
+	from urlparse import urlparse
+	from urllib import urlopen as urllib_request_urlopen
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+	long = int
+
+class bindbapi(fakedbapi):
+	_known_keys = frozenset(list(fakedbapi._known_keys) + \
+		["CHOST", "repository", "USE"])
+	def __init__(self, mybintree=None, **kwargs):
+		fakedbapi.__init__(self, **kwargs)
+		self.bintree = mybintree
+		self.move_ent = mybintree.move_ent
+		self.cpvdict={}
+		self.cpdict={}
+		# Selectively cache metadata in order to optimize dep matching.
+		self._aux_cache_keys = set(
+			["BUILD_TIME", "CHOST", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
+			"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE",
+			"RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES",
+			"REQUIRED_USE"])
+		self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
+		self._aux_cache = {}
+
+	def match(self, *pargs, **kwargs):
+		if self.bintree and not self.bintree.populated:
+			self.bintree.populate()
+		return fakedbapi.match(self, *pargs, **kwargs)
+
+	def cpv_exists(self, cpv, myrepo=None):
+		if self.bintree and not self.bintree.populated:
+			self.bintree.populate()
+		return fakedbapi.cpv_exists(self, cpv)
+
+	def cpv_inject(self, cpv, **kwargs):
+		self._aux_cache.pop(cpv, None)
+		fakedbapi.cpv_inject(self, cpv, **kwargs)
+
+	def cpv_remove(self, cpv):
+		self._aux_cache.pop(cpv, None)
+		fakedbapi.cpv_remove(self, cpv)
+
+	def aux_get(self, mycpv, wants, myrepo=None):
+		if self.bintree and not self.bintree.populated:
+			self.bintree.populate()
+		cache_me = False
+		if not self._known_keys.intersection(
+			wants).difference(self._aux_cache_keys):
+			aux_cache = self._aux_cache.get(mycpv)
+			if aux_cache is not None:
+				return [aux_cache.get(x, "") for x in wants]
+			cache_me = True
+		mysplit = mycpv.split("/")
+		mylist = []
+		tbz2name = mysplit[1]+".tbz2"
+		if not self.bintree._remotepkgs or \
+			not self.bintree.isremote(mycpv):
+			tbz2_path = self.bintree.getname(mycpv)
+			if not os.path.exists(tbz2_path):
+				raise KeyError(mycpv)
+			metadata_bytes = portage.xpak.tbz2(tbz2_path).get_data()
+			def getitem(k):
+				v = metadata_bytes.get(_unicode_encode(k,
+					encoding=_encodings['repo.content'],
+					errors='backslashreplace'))
+				if v is not None:
+					v = _unicode_decode(v,
+						encoding=_encodings['repo.content'], errors='replace')
+				return v
+		else:
+			getitem = self.bintree._remotepkgs[mycpv].get
+		mydata = {}
+		mykeys = wants
+		if cache_me:
+			mykeys = self._aux_cache_keys.union(wants)
+		for x in mykeys:
+			myval = getitem(x)
+			# myval is None if the key doesn't exist
+			# or the tbz2 is corrupt.
+			if myval:
+				mydata[x] = " ".join(myval.split())
+
+		if not mydata.setdefault('EAPI', _unicode_decode('0')):
+			mydata['EAPI'] = _unicode_decode('0')
+
+		if cache_me:
+			aux_cache = self._aux_cache_slot_dict()
+			for x in self._aux_cache_keys:
+				aux_cache[x] = mydata.get(x, _unicode_decode(''))
+			self._aux_cache[mycpv] = aux_cache
+		return [mydata.get(x, _unicode_decode('')) for x in wants]
+
+	def aux_update(self, cpv, values):
+		if not self.bintree.populated:
+			self.bintree.populate()
+		tbz2path = self.bintree.getname(cpv)
+		if not os.path.exists(tbz2path):
+			raise KeyError(cpv)
+		mytbz2 = portage.xpak.tbz2(tbz2path)
+		mydata = mytbz2.get_data()
+
+		for k, v in values.items():
+			k = _unicode_encode(k,
+				encoding=_encodings['repo.content'], errors='backslashreplace')
+			v = _unicode_encode(v,
+				encoding=_encodings['repo.content'], errors='backslashreplace')
+			mydata[k] = v
+
+		for k, v in list(mydata.items()):
+			if not v:
+				del mydata[k]
+		mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+		# inject will clear stale caches via cpv_inject.
+		self.bintree.inject(cpv)
+
+	def cp_list(self, *pargs, **kwargs):
+		if not self.bintree.populated:
+			self.bintree.populate()
+		return fakedbapi.cp_list(self, *pargs, **kwargs)
+
+	def cp_all(self):
+		if not self.bintree.populated:
+			self.bintree.populate()
+		return fakedbapi.cp_all(self)
+
+	def cpv_all(self):
+		if not self.bintree.populated:
+			self.bintree.populate()
+		return fakedbapi.cpv_all(self)
+
+def _pkgindex_cpv_map_latest_build(pkgindex):
+	"""
+	Given a PackageIndex instance, create a dict of cpv -> metadata map.
+	If multiple packages have identical CPV values, prefer the package
+	with latest BUILD_TIME value.
+	@param pkgindex: A PackageIndex instance.
+	@type pkgindex: PackageIndex
+	@rtype: dict
+	@returns: a dict mapping each cpv to the metadata of its latest build.
+	"""
+	cpv_map = {}
+
+	for d in pkgindex.packages:
+		cpv = d["CPV"]
+
+		btime = d.get('BUILD_TIME', '')
+		try:
+			btime = int(btime)
+		except ValueError:
+			btime = None
+
+		other_d = cpv_map.get(cpv)
+		if other_d is not None:
+			other_btime = other_d.get('BUILD_TIME', '')
+			try:
+				other_btime = int(other_btime)
+			except ValueError:
+				other_btime = None
+			if other_btime and (not btime or other_btime > btime):
+				continue
+
+		cpv_map[cpv] = d
+
+	return cpv_map
+
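+# Illustrative example (hypothetical data): if the index lists two
+# entries with CPV "app-misc/foo-1.0" and BUILD_TIME values "100" and
+# "200", the map above keeps only the entry whose BUILD_TIME is "200";
+# an entry with an unparseable BUILD_TIME yields to a parseable one.
+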
+class binarytree(object):
+	"this tree scans for a list of all packages available in PKGDIR"
+	def __init__(self, root, pkgdir, virtual=None, settings=None):
+		if True:
+			self.root = root
+			#self.pkgdir=settings["PKGDIR"]
+			self.pkgdir = normalize_path(pkgdir)
+			self.dbapi = bindbapi(self, settings=settings)
+			self.update_ents = self.dbapi.update_ents
+			self.move_slot_ent = self.dbapi.move_slot_ent
+			self.populated = 0
+			self.tree = {}
+			self._remote_has_index = False
+			self._remotepkgs = None # remote metadata indexed by cpv
+			self.invalids = []
+			self.settings = settings
+			self._pkg_paths = {}
+			self._pkgindex_uri = {}
+			self._populating = False
+			self._all_directory = os.path.isdir(
+				os.path.join(self.pkgdir, "All"))
+			self._pkgindex_version = 0
+			self._pkgindex_hashes = ["MD5","SHA1"]
+			self._pkgindex_file = os.path.join(self.pkgdir, "Packages")
+			self._pkgindex_keys = self.dbapi._aux_cache_keys.copy()
+			self._pkgindex_keys.update(["CPV", "MTIME", "SIZE"])
+			self._pkgindex_aux_keys = \
+				["BUILD_TIME", "CHOST", "DEPEND", "DESCRIPTION", "EAPI",
+				"IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES",
+				"PROVIDE", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES",
+				"REQUIRED_USE", "BASE_URI"]
+			self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
+			self._pkgindex_use_evaluated_keys = \
+				("LICENSE", "RDEPEND", "DEPEND",
+				"PDEPEND", "PROPERTIES", "PROVIDE")
+			self._pkgindex_header_keys = set([
+				"ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
+				"ACCEPT_PROPERTIES", "CBUILD",
+				"CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
+				"GENTOO_MIRRORS", "INSTALL_MASK", "SYNC", "USE"])
+			self._pkgindex_default_pkg_data = {
+				"BUILD_TIME"         : "",
+				"DEPEND"  : "",
+				"EAPI"    : "0",
+				"IUSE"    : "",
+				"KEYWORDS": "",
+				"LICENSE" : "",
+				"PATH"    : "",
+				"PDEPEND" : "",
+				"PROPERTIES" : "",
+				"PROVIDE" : "",
+				"RDEPEND" : "",
+				"RESTRICT": "",
+				"SLOT"    : "0",
+				"USE"     : "",
+				"DEFINED_PHASES" : "",
+				"REQUIRED_USE" : ""
+			}
+			self._pkgindex_inherited_keys = ["CHOST", "repository"]
+
+			# Populate the header with appropriate defaults.
+			self._pkgindex_default_header_data = {
+				"CHOST"        : self.settings.get("CHOST", ""),
+				"repository"   : "",
+			}
+
+			# It is especially important to populate keys like
+			# "repository" that save space when entries can
+			# inherit them from the header. If an existing
+			# pkgindex header already defines these keys, then
+			# they will appropriately override our defaults.
+			main_repo = self.settings.repositories.mainRepo()
+			if main_repo is not None and not main_repo.missing_repo_name:
+				self._pkgindex_default_header_data["repository"] = \
+					main_repo.name
+
+			self._pkgindex_translated_keys = (
+				("DESCRIPTION"   ,   "DESC"),
+				("repository"    ,   "REPO"),
+			)
+
+			self._pkgindex_allowed_pkg_keys = set(chain(
+				self._pkgindex_keys,
+				self._pkgindex_aux_keys,
+				self._pkgindex_hashes,
+				self._pkgindex_default_pkg_data,
+				self._pkgindex_inherited_keys,
+				chain(*self._pkgindex_translated_keys)
+			))
+
+	def move_ent(self, mylist, repo_match=None):
+		if not self.populated:
+			self.populate()
+		origcp = mylist[1]
+		newcp = mylist[2]
+		# sanity check
+		for atom in (origcp, newcp):
+			if not isjustname(atom):
+				raise InvalidPackageName(str(atom))
+		mynewcat = catsplit(newcp)[0]
+		origmatches=self.dbapi.cp_list(origcp)
+		moves = 0
+		if not origmatches:
+			return moves
+		for mycpv in origmatches:
+			mycpv_cp = portage.cpv_getkey(mycpv)
+			if mycpv_cp != origcp:
+				# Ignore PROVIDE virtual match.
+				continue
+			if repo_match is not None \
+				and not repo_match(self.dbapi.aux_get(mycpv,
+					['repository'])[0]):
+				continue
+			mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
+			myoldpkg = catsplit(mycpv)[1]
+			mynewpkg = catsplit(mynewcpv)[1]
+
+			if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
+				writemsg(_("!!! Cannot update binary: Destination exists.\n"),
+					noiselevel=-1)
+				writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
+				continue
+
+			tbz2path = self.getname(mycpv)
+			if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+				writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
+					noiselevel=-1)
+				continue
+
+			moves += 1
+			mytbz2 = portage.xpak.tbz2(tbz2path)
+			mydata = mytbz2.get_data()
+			updated_items = update_dbentries([mylist], mydata)
+			mydata.update(updated_items)
+			mydata[b'PF'] = \
+				_unicode_encode(mynewpkg + "\n",
+				encoding=_encodings['repo.content'])
+			mydata[b'CATEGORY'] = \
+				_unicode_encode(mynewcat + "\n",
+				encoding=_encodings['repo.content'])
+			if mynewpkg != myoldpkg:
+				ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
+					encoding=_encodings['repo.content']), None)
+				if ebuild_data is not None:
+					mydata[_unicode_encode(mynewpkg + '.ebuild',
+						encoding=_encodings['repo.content'])] = ebuild_data
+
+			mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+
+			self.dbapi.cpv_remove(mycpv)
+			del self._pkg_paths[mycpv]
+			new_path = self.getname(mynewcpv)
+			self._pkg_paths[mynewcpv] = os.path.join(
+				*new_path.split(os.path.sep)[-2:])
+			if new_path != tbz2path:
+				self._ensure_dir(os.path.dirname(new_path))
+				_movefile(tbz2path, new_path, mysettings=self.settings)
+				self._remove_symlink(mycpv)
+				if new_path.split(os.path.sep)[-2] == "All":
+					self._create_symlink(mynewcpv)
+			self.inject(mynewcpv)
+
+		return moves
+
+	def _remove_symlink(self, cpv):
+		"""Remove a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink and also remove
+		the ${PKGDIR}/${CATEGORY} directory if empty.  The file will not be
+		removed if os.path.islink() returns False."""
+		mycat, mypkg = catsplit(cpv)
+		mylink = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
+		if os.path.islink(mylink):
+			"""Only remove it if it's really a link so that this method never
+			removes a real package that was placed here to avoid a collision."""
+			os.unlink(mylink)
+		try:
+			os.rmdir(os.path.join(self.pkgdir, mycat))
+		except OSError as e:
+			if e.errno not in (errno.ENOENT,
+				errno.ENOTEMPTY, errno.EEXIST):
+				raise
+			del e
+
+	def _create_symlink(self, cpv):
+		"""Create a ${PKGDIR}/${CATEGORY}/${PF}.tbz2 symlink (and
+		${PKGDIR}/${CATEGORY} directory, if necessary).  Any file that may
+		exist in the location of the symlink will first be removed."""
+		mycat, mypkg = catsplit(cpv)
+		full_path = os.path.join(self.pkgdir, mycat, mypkg + ".tbz2")
+		self._ensure_dir(os.path.dirname(full_path))
+		try:
+			os.unlink(full_path)
+		except OSError as e:
+			if e.errno != errno.ENOENT:
+				raise
+			del e
+		os.symlink(os.path.join("..", "All", mypkg + ".tbz2"), full_path)
+
+	def prevent_collision(self, cpv):
+		"""Make sure that the file location ${PKGDIR}/All/${PF}.tbz2 is safe to
+		use for a given cpv.  If a collision will occur with an existing
+		package from another category, the existing package will be bumped to
+		${PKGDIR}/${CATEGORY}/${PF}.tbz2 so that both can coexist."""
+		if not self._all_directory:
+			return
+
+		# Copy group permissions for new directories that
+		# may have been created.
+		for path in ("All", catsplit(cpv)[0]):
+			path = os.path.join(self.pkgdir, path)
+			self._ensure_dir(path)
+			if not os.access(path, os.W_OK):
+				raise PermissionDenied("access('%s', W_OK)" % path)
+
+		full_path = self.getname(cpv)
+		if "All" == full_path.split(os.path.sep)[-2]:
+			return
+		"""Move a colliding package if it exists.  Code below this point only
+		executes in rare cases."""
+		mycat, mypkg = catsplit(cpv)
+		myfile = mypkg + ".tbz2"
+		mypath = os.path.join("All", myfile)
+		dest_path = os.path.join(self.pkgdir, mypath)
+
+		try:
+			st = os.lstat(dest_path)
+		except OSError:
+			st = None
+		else:
+			if stat.S_ISLNK(st.st_mode):
+				st = None
+				try:
+					os.unlink(dest_path)
+				except OSError:
+					if os.path.exists(dest_path):
+						raise
+
+		if st is not None:
+			# For invalid packages, other_cat could be None.
+			other_cat = portage.xpak.tbz2(dest_path).getfile(b"CATEGORY")
+			if other_cat:
+				other_cat = _unicode_decode(other_cat,
+					encoding=_encodings['repo.content'], errors='replace')
+				other_cat = other_cat.strip()
+				other_cpv = other_cat + "/" + mypkg
+				self._move_from_all(other_cpv)
+				self.inject(other_cpv)
+		self._move_to_all(cpv)
+
+	def _ensure_dir(self, path):
+		"""
+		Create the specified directory. Also, copy gid and group mode
+		bits from self.pkgdir if possible.
+		@param path: Absolute path of the directory to be created.
+		@type path: String
+		"""
+		try:
+			pkgdir_st = os.stat(self.pkgdir)
+		except OSError:
+			ensure_dirs(path)
+			return
+		pkgdir_gid = pkgdir_st.st_gid
+		pkgdir_grp_mode = 0o2070 & pkgdir_st.st_mode
+		try:
+			ensure_dirs(path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0)
+		except PortageException:
+			if not os.path.isdir(path):
+				raise
+
+	def _move_to_all(self, cpv):
+		"""If the file exists, move it.  Whether or not it exists, update state
+		for future getname() calls."""
+		mycat, mypkg = catsplit(cpv)
+		myfile = mypkg + ".tbz2"
+		self._pkg_paths[cpv] = os.path.join("All", myfile)
+		src_path = os.path.join(self.pkgdir, mycat, myfile)
+		try:
+			mystat = os.lstat(src_path)
+		except OSError as e:
+			mystat = None
+		if mystat and stat.S_ISREG(mystat.st_mode):
+			self._ensure_dir(os.path.join(self.pkgdir, "All"))
+			dest_path = os.path.join(self.pkgdir, "All", myfile)
+			_movefile(src_path, dest_path, mysettings=self.settings)
+			self._create_symlink(cpv)
+			self.inject(cpv)
+
+	def _move_from_all(self, cpv):
+		"""Move a package from ${PKGDIR}/All/${PF}.tbz2 to
+		${PKGDIR}/${CATEGORY}/${PF}.tbz2 and update state for future getname() calls."""
+		self._remove_symlink(cpv)
+		mycat, mypkg = catsplit(cpv)
+		myfile = mypkg + ".tbz2"
+		mypath = os.path.join(mycat, myfile)
+		dest_path = os.path.join(self.pkgdir, mypath)
+		self._ensure_dir(os.path.dirname(dest_path))
+		src_path = os.path.join(self.pkgdir, "All", myfile)
+		_movefile(src_path, dest_path, mysettings=self.settings)
+		self._pkg_paths[cpv] = mypath
+
+	def populate(self, getbinpkgs=0):
+		"populates the binarytree"
+
+		if self._populating:
+			return
+
+		pkgindex_lock = None
+		try:
+			if os.access(self.pkgdir, os.W_OK):
+				pkgindex_lock = lockfile(self._pkgindex_file,
+					wantnewlockfile=1)
+			self._populating = True
+			self._populate(getbinpkgs)
+		finally:
+			if pkgindex_lock:
+				unlockfile(pkgindex_lock)
+			self._populating = False
+
+	def _populate(self, getbinpkgs=0):
+		if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
+			return 0
+
+		# Clear all caches in case populate is called multiple times
+		# as may be the case when _global_updates calls populate()
+		# prior to performing package moves since it only wants to
+		# operate on local packages (getbinpkgs=0).
+		self._remotepkgs = None
+		self.dbapi._clear_cache()
+		self.dbapi._aux_cache.clear()
+		if True:
+			pkg_paths = {}
+			self._pkg_paths = pkg_paths
+			dirs = listdir(self.pkgdir, dirsonly=True, EmptyOnError=True)
+			if "All" in dirs:
+				dirs.remove("All")
+			dirs.sort()
+			dirs.insert(0, "All")
+			pkgindex = self._load_pkgindex()
+			pf_index = None
+			if not self._pkgindex_version_supported(pkgindex):
+				pkgindex = self._new_pkgindex()
+			header = pkgindex.header
+			metadata = {}
+			for d in pkgindex.packages:
+				metadata[d["CPV"]] = d
+			update_pkgindex = False
+			for mydir in dirs:
+				for myfile in listdir(os.path.join(self.pkgdir, mydir)):
+					if not myfile.endswith(".tbz2"):
+						continue
+					mypath = os.path.join(mydir, myfile)
+					full_path = os.path.join(self.pkgdir, mypath)
+					s = os.lstat(full_path)
+					if stat.S_ISLNK(s.st_mode):
+						continue
+
+					# Validate data from the package index and try to avoid
+					# reading the xpak if possible.
+					if mydir != "All":
+						possibilities = None
+						d = metadata.get(mydir+"/"+myfile[:-5])
+						if d:
+							possibilities = [d]
+					else:
+						if pf_index is None:
+							pf_index = {}
+							for mycpv in metadata:
+								mycat, mypf = catsplit(mycpv)
+								pf_index.setdefault(
+									mypf, []).append(metadata[mycpv])
+						possibilities = pf_index.get(myfile[:-5])
+					if possibilities:
+						match = None
+						for d in possibilities:
+							try:
+								if long(d["MTIME"]) != s[stat.ST_MTIME]:
+									continue
+							except (KeyError, ValueError):
+								continue
+							try:
+								if long(d["SIZE"]) != long(s.st_size):
+									continue
+							except (KeyError, ValueError):
+								continue
+							if not self._pkgindex_keys.difference(d):
+								match = d
+								break
+						if match:
+							mycpv = match["CPV"]
+							if mycpv in pkg_paths:
+								# discard duplicates (All/ is preferred)
+								continue
+							pkg_paths[mycpv] = mypath
+							# update the path if the package has been moved
+							oldpath = d.get("PATH")
+							if oldpath and oldpath != mypath:
+								update_pkgindex = True
+							if mypath != mycpv + ".tbz2":
+								d["PATH"] = mypath
+								if not oldpath:
+									update_pkgindex = True
+							else:
+								d.pop("PATH", None)
+								if oldpath:
+									update_pkgindex = True
+							self.dbapi.cpv_inject(mycpv)
+							if not self.dbapi._aux_cache_keys.difference(d):
+								aux_cache = self.dbapi._aux_cache_slot_dict()
+								for k in self.dbapi._aux_cache_keys:
+									aux_cache[k] = d[k]
+								self.dbapi._aux_cache[mycpv] = aux_cache
+							continue
+					if not os.access(full_path, os.R_OK):
+						writemsg(_("!!! Permission denied to read " \
+							"binary package: '%s'\n") % full_path,
+							noiselevel=-1)
+						self.invalids.append(myfile[:-5])
+						continue
+					metadata_bytes = portage.xpak.tbz2(full_path).get_data()
+					mycat = _unicode_decode(metadata_bytes.get(b"CATEGORY", ""),
+						encoding=_encodings['repo.content'], errors='replace')
+					mypf = _unicode_decode(metadata_bytes.get(b"PF", ""),
+						encoding=_encodings['repo.content'], errors='replace')
+					slot = _unicode_decode(metadata_bytes.get(b"SLOT", ""),
+						encoding=_encodings['repo.content'], errors='replace')
+					mypkg = myfile[:-5]
+					if not mycat or not mypf or not slot:
+						#old-style or corrupt package
+						writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
+							noiselevel=-1)
+						missing_keys = []
+						if not mycat:
+							missing_keys.append("CATEGORY")
+						if not mypf:
+							missing_keys.append("PF")
+						if not slot:
+							missing_keys.append("SLOT")
+						msg = []
+						if missing_keys:
+							missing_keys.sort()
+							msg.append(_("Missing metadata key(s): %s.") % \
+								", ".join(missing_keys))
+						msg.append(_(" This binary package is not " \
+							"recoverable and should be deleted."))
+						for line in textwrap.wrap("".join(msg), 72):
+							writemsg("!!! %s\n" % line, noiselevel=-1)
+						self.invalids.append(mypkg)
+						continue
+					mycat = mycat.strip()
+					slot = slot.strip()
+					if mycat != mydir and mydir != "All":
+						continue
+					if mypkg != mypf.strip():
+						continue
+					mycpv = mycat + "/" + mypkg
+					if mycpv in pkg_paths:
+						# All is first, so it's preferred.
+						continue
+					if not self.dbapi._category_re.match(mycat):
+						writemsg(_("!!! Binary package has an " \
+							"unrecognized category: '%s'\n") % full_path,
+							noiselevel=-1)
+						writemsg(_("!!! '%s' has a category that is not" \
+							" listed in %setc/portage/categories\n") % \
+							(mycpv, self.settings["PORTAGE_CONFIGROOT"]),
+							noiselevel=-1)
+						continue
+					pkg_paths[mycpv] = mypath
+					self.dbapi.cpv_inject(mycpv)
+					update_pkgindex = True
+					d = metadata.get(mycpv, {})
+					if d:
+						try:
+							if long(d["MTIME"]) != s[stat.ST_MTIME]:
+								d.clear()
+						except (KeyError, ValueError):
+							d.clear()
+					if d:
+						try:
+							if long(d["SIZE"]) != long(s.st_size):
+								d.clear()
+						except (KeyError, ValueError):
+							d.clear()
+
+					d["CPV"] = mycpv
+					d["SLOT"] = slot
+					d["MTIME"] = str(s[stat.ST_MTIME])
+					d["SIZE"] = str(s.st_size)
+
+					d.update(zip(self._pkgindex_aux_keys,
+						self.dbapi.aux_get(mycpv, self._pkgindex_aux_keys)))
+					try:
+						self._eval_use_flags(mycpv, d)
+					except portage.exception.InvalidDependString:
+						writemsg(_("!!! Invalid binary package: '%s'\n") % \
+							self.getname(mycpv), noiselevel=-1)
+						self.dbapi.cpv_remove(mycpv)
+						del pkg_paths[mycpv]
+
+					# record location if it's non-default
+					if mypath != mycpv + ".tbz2":
+						d["PATH"] = mypath
+					else:
+						d.pop("PATH", None)
+					metadata[mycpv] = d
+					if not self.dbapi._aux_cache_keys.difference(d):
+						aux_cache = self.dbapi._aux_cache_slot_dict()
+						for k in self.dbapi._aux_cache_keys:
+							aux_cache[k] = d[k]
+						self.dbapi._aux_cache[mycpv] = aux_cache
+
+			for cpv in list(metadata):
+				if cpv not in pkg_paths:
+					del metadata[cpv]
+
+			# Do not bother to write the Packages index if $PKGDIR/All/ exists
+			# since it will provide no benefit due to the need to read CATEGORY
+			# from xpak.
+			if update_pkgindex and os.access(self.pkgdir, os.W_OK):
+				del pkgindex.packages[:]
+				pkgindex.packages.extend(iter(metadata.values()))
+				self._update_pkgindex_header(pkgindex.header)
+				f = atomic_ofstream(self._pkgindex_file)
+				pkgindex.write(f)
+				f.close()
+
+		if getbinpkgs and not self.settings["PORTAGE_BINHOST"]:
+			writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
+				noiselevel=-1)
+
+		if not getbinpkgs or 'PORTAGE_BINHOST' not in self.settings:
+			self.populated=1
+			return
+		self._remotepkgs = {}
+		for base_url in self.settings["PORTAGE_BINHOST"].split():
+			parsed_url = urlparse(base_url)
+			host = parsed_url.netloc
+			port = parsed_url.port
+			user = None
+			passwd = None
+			user_passwd = ""
+			if "@" in host:
+				user, host = host.split("@", 1)
+				user_passwd = user + "@"
+				if ":" in user:
+					user, passwd = user.split(":", 1)
+			port_args = []
+			if port is not None:
+				port_str = ":%s" % (port,)
+				if host.endswith(port_str):
+					host = host[:-len(port_str)]
+			pkgindex_file = os.path.join(self.settings["EROOT"], CACHE_PATH, "binhost",
+				host, parsed_url.path.lstrip("/"), "Packages")
+			pkgindex = self._new_pkgindex()
+			try:
+				f = io.open(_unicode_encode(pkgindex_file,
+					encoding=_encodings['fs'], errors='strict'),
+					mode='r', encoding=_encodings['repo.content'],
+					errors='replace')
+				try:
+					pkgindex.read(f)
+				finally:
+					f.close()
+			except EnvironmentError as e:
+				if e.errno != errno.ENOENT:
+					raise
+			local_timestamp = pkgindex.header.get("TIMESTAMP", None)
+			rmt_idx = self._new_pkgindex()
+			proc = None
+			tmp_filename = None
+			try:
+				# urlparse.urljoin() only works correctly with recognized
+				# protocols and requires the base url to have a trailing
+				# slash, so join manually...
+				url = base_url.rstrip("/") + "/Packages"
+				try:
+					f = urllib_request_urlopen(url)
+				except IOError:
+					path = parsed_url.path.rstrip("/") + "/Packages"
+
+					if parsed_url.scheme == 'sftp':
+						# The sftp command complains about 'Illegal seek' if
+						# we try to make it write to /dev/stdout, so use a
+						# temp file instead.
+						fd, tmp_filename = tempfile.mkstemp()
+						os.close(fd)
+						if port is not None:
+							port_args = ['-P', "%s" % (port,)]
+						proc = subprocess.Popen(['sftp'] + port_args + \
+							[user_passwd + host + ":" + path, tmp_filename])
+						if proc.wait() != os.EX_OK:
+							raise
+						f = open(tmp_filename, 'rb')
+					elif parsed_url.scheme == 'ssh':
+						if port is not None:
+							port_args = ['-p', "%s" % (port,)]
+						proc = subprocess.Popen(['ssh'] + port_args + \
+							[user_passwd + host, '--', 'cat', path],
+							stdout=subprocess.PIPE)
+						f = proc.stdout
+					else:
+						setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
+						fcmd = self.settings.get(setting)
+						if not fcmd:
+							raise
+						fd, tmp_filename = tempfile.mkstemp()
+						tmp_dirname, tmp_basename = os.path.split(tmp_filename)
+						os.close(fd)
+						success = portage.getbinpkg.file_get(url,
+						     tmp_dirname, fcmd=fcmd, filename=tmp_basename)
+						if not success:
+							raise EnvironmentError("%s failed" % (setting,))
+						f = open(tmp_filename, 'rb')
+
+				f_dec = codecs.iterdecode(f,
+					_encodings['repo.content'], errors='replace')
+				try:
+					rmt_idx.readHeader(f_dec)
+					remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
+					if not remote_timestamp:
+						# no timestamp in the header, something's wrong
+						pkgindex = None
+						writemsg(_("\n\n!!! Binhost package index " \
+						"has no TIMESTAMP field.\n"), noiselevel=-1)
+					else:
+						if not self._pkgindex_version_supported(rmt_idx):
+							writemsg(_("\n\n!!! Binhost package index version" \
+							" is not supported: '%s'\n") % \
+							rmt_idx.header.get("VERSION"), noiselevel=-1)
+							pkgindex = None
+						elif local_timestamp != remote_timestamp:
+							rmt_idx.readBody(f_dec)
+							pkgindex = rmt_idx
+				finally:
+					# Timeout after 5 seconds, in case close() blocks
+					# indefinitely (see bug #350139).
+					try:
+						try:
+							AlarmSignal.register(5)
+							f.close()
+						finally:
+							AlarmSignal.unregister()
+					except AlarmSignal:
+						writemsg("\n\n!!! %s\n" % \
+							_("Timed out while closing connection to binhost"),
+							noiselevel=-1)
+			except EnvironmentError as e:
+				writemsg(_("\n\n!!! Error fetching binhost package" \
+					" info from '%s'\n") % base_url)
+				writemsg("!!! %s\n\n" % str(e))
+				del e
+				pkgindex = None
+			if proc is not None:
+				if proc.poll() is None:
+					proc.kill()
+					proc.wait()
+				proc = None
+			if tmp_filename is not None:
+				try:
+					os.unlink(tmp_filename)
+				except OSError:
+					pass
+			if pkgindex is rmt_idx:
+				pkgindex.modified = False # don't update the header
+				try:
+					ensure_dirs(os.path.dirname(pkgindex_file))
+					f = atomic_ofstream(pkgindex_file)
+					pkgindex.write(f)
+					f.close()
+				except (IOError, PortageException):
+					if os.access(os.path.dirname(pkgindex_file), os.W_OK):
+						raise
+					# The current user doesn't have permission to cache the
+					# file, but that's alright.
+			if pkgindex:
+				# Organize remote package list as a cpv -> metadata map.
+				remotepkgs = _pkgindex_cpv_map_latest_build(pkgindex)
+				remote_base_uri = pkgindex.header.get("URI", base_url)
+				for cpv, remote_metadata in remotepkgs.items():
+					remote_metadata["BASE_URI"] = remote_base_uri
+					self._pkgindex_uri[cpv] = url
+				self._remotepkgs.update(remotepkgs)
+				self._remote_has_index = True
+				for cpv in remotepkgs:
+					self.dbapi.cpv_inject(cpv)
+				# Remote package instances override local packages
+				# unless they are identical.
+				hash_names = ["SIZE"] + self._pkgindex_hashes
+				for cpv, local_metadata in metadata.items():
+					remote_metadata = self._remotepkgs.get(cpv)
+					if remote_metadata is None:
+						continue
+					# Use digests to compare identity.
+					identical = True
+					for hash_name in hash_names:
+						local_value = local_metadata.get(hash_name)
+						if local_value is None:
+							continue
+						remote_value = remote_metadata.get(hash_name)
+						if remote_value is None:
+							continue
+						if local_value != remote_value:
+							identical = False
+							break
+					if identical:
+						del self._remotepkgs[cpv]
+					else:
+						# Override the local package in the aux_get cache.
+						self.dbapi._aux_cache[cpv] = remote_metadata
+				continue
+			try:
+				chunk_size = long(self.settings["PORTAGE_BINHOST_CHUNKSIZE"])
+				if chunk_size < 8:
+					chunk_size = 8
+			except (ValueError, KeyError):
+				chunk_size = 3000
+			writemsg_stdout("\n")
+			writemsg_stdout(
+				colorize("GOOD", _("Fetching bininfo from ")) + \
+				re.sub(r'//(.+):.+@(.+)/', r'//\1:*password*@\2/', base_url) + "\n")
+			remotepkgs = portage.getbinpkg.dir_get_metadata(
+				base_url, chunk_size=chunk_size)
+
+			for mypkg, remote_metadata in remotepkgs.items():
+				mycat = remote_metadata.get("CATEGORY")
+				if mycat is None:
+					#old-style or corrupt package
+					writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
+						noiselevel=-1)
+					continue
+				mycat = mycat.strip()
+				fullpkg = mycat+"/"+mypkg[:-5]
+
+				if fullpkg in metadata:
+					# When using this old protocol, comparison with the remote
+					# package isn't supported, so the local package is always
+					# preferred even if getbinpkgsonly is enabled.
+					continue
+
+				if not self.dbapi._category_re.match(mycat):
+					writemsg(_("!!! Remote binary package has an " \
+						"unrecognized category: '%s'\n") % fullpkg,
+						noiselevel=-1)
+					writemsg(_("!!! '%s' has a category that is not" \
+						" listed in %setc/portage/categories\n") % \
+						(fullpkg, self.settings["PORTAGE_CONFIGROOT"]),
+						noiselevel=-1)
+					continue
+				mykey = portage.cpv_getkey(fullpkg)
+				try:
+					# invalid tbz2's can hurt things.
+					self.dbapi.cpv_inject(fullpkg)
+					for k, v in remote_metadata.items():
+						remote_metadata[k] = v.strip()
+					remote_metadata["BASE_URI"] = base_url
+
+					# Eliminate metadata values with names that digestCheck
+					# uses, since they are not valid when using the old
+					# protocol. Typically this is needed for SIZE metadata
+					# which corresponds to the size of the unpacked files
+					# rather than the binpkg file size, triggering digest
+					# verification failures as reported in bug #303211.
+					remote_metadata.pop('SIZE', None)
+					for k in portage.checksum.hashfunc_map:
+						remote_metadata.pop(k, None)
+
+					self._remotepkgs[fullpkg] = remote_metadata
+				except SystemExit as e:
+					raise
+				except:
+					writemsg(_("!!! Failed to inject remote binary package: %s\n") % fullpkg,
+						noiselevel=-1)
+					continue
+		self.populated=1
+
+	def inject(self, cpv, filename=None):
+		"""Add a freshly built package to the database.  This updates
+		$PKGDIR/Packages with the new package metadata (including MD5).
+		@param cpv: The cpv of the new package to inject
+		@type cpv: string
+		@param filename: File path of the package to inject, or None if it's
+			already in the location returned by getname()
+		@type filename: string
+		@rtype: None
+		"""
+		mycat, mypkg = catsplit(cpv)
+		if not self.populated:
+			self.populate()
+		if filename is None:
+			full_path = self.getname(cpv)
+		else:
+			full_path = filename
+		try:
+			s = os.stat(full_path)
+		except OSError as e:
+			if e.errno != errno.ENOENT:
+				raise
+			del e
+			writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
+				noiselevel=-1)
+			return
+		mytbz2 = portage.xpak.tbz2(full_path)
+		slot = mytbz2.getfile("SLOT")
+		if slot is None:
+			writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
+				noiselevel=-1)
+			return
+		slot = slot.strip()
+		self.dbapi.cpv_inject(cpv)
+
+		# Reread the Packages index (in case it's been changed by another
+		# process) and then update it, all while holding a lock.
+		pkgindex_lock = None
+		created_symlink = False
+		try:
+			pkgindex_lock = lockfile(self._pkgindex_file,
+				wantnewlockfile=1)
+			if filename is not None:
+				new_filename = self.getname(cpv)
+				try:
+					samefile = os.path.samefile(filename, new_filename)
+				except OSError:
+					samefile = False
+				if not samefile:
+					self._ensure_dir(os.path.dirname(new_filename))
+					_movefile(filename, new_filename, mysettings=self.settings)
+			if self._all_directory and \
+				self.getname(cpv).split(os.path.sep)[-2] == "All":
+				self._create_symlink(cpv)
+				created_symlink = True
+			pkgindex = self._load_pkgindex()
+
+			if not self._pkgindex_version_supported(pkgindex):
+				pkgindex = self._new_pkgindex()
+
+			# Discard remote metadata to ensure that _pkgindex_entry
+			# gets the local metadata. This also updates state for future
+			# isremote calls.
+			if self._remotepkgs is not None:
+				self._remotepkgs.pop(cpv, None)
+
+			# Discard cached metadata to ensure that _pkgindex_entry
+			# doesn't return stale metadata.
+			self.dbapi._aux_cache.pop(cpv, None)
+
+			try:
+				d = self._pkgindex_entry(cpv)
+			except portage.exception.InvalidDependString:
+				writemsg(_("!!! Invalid binary package: '%s'\n") % \
+					self.getname(cpv), noiselevel=-1)
+				self.dbapi.cpv_remove(cpv)
+				del self._pkg_paths[cpv]
+				return
+
+			# If found, remove package(s) with duplicate path.
+			path = d.get("PATH", "")
+			for i in range(len(pkgindex.packages) - 1, -1, -1):
+				d2 = pkgindex.packages[i]
+				if path and path == d2.get("PATH"):
+					# Handle path collisions in $PKGDIR/All
+					# when CPV is not identical.
+					del pkgindex.packages[i]
+				elif cpv == d2.get("CPV"):
+					if path == d2.get("PATH", ""):
+						del pkgindex.packages[i]
+					elif created_symlink and not d2.get("PATH", ""):
+						# Delete entry for the package that was just
+						# overwritten by a symlink to this package.
+						del pkgindex.packages[i]
+
+			pkgindex.packages.append(d)
+
+			self._update_pkgindex_header(pkgindex.header)
+			f = atomic_ofstream(os.path.join(self.pkgdir, "Packages"))
+			pkgindex.write(f)
+			f.close()
+		finally:
+			if pkgindex_lock:
+				unlockfile(pkgindex_lock)
+
+	def _pkgindex_entry(self, cpv):
+		"""
+		Performs checksums and evaluates USE flag conditionals.
+		Raises InvalidDependString if necessary.
+		@rtype: dict
+	@returns: a dict containing the entry for the given cpv.
+		"""
+
+		pkg_path = self.getname(cpv)
+
+		d = dict(zip(self._pkgindex_aux_keys,
+			self.dbapi.aux_get(cpv, self._pkgindex_aux_keys)))
+
+		d.update(perform_multiple_checksums(
+			pkg_path, hashes=self._pkgindex_hashes))
+
+		d["CPV"] = cpv
+		st = os.stat(pkg_path)
+		d["MTIME"] = str(st[stat.ST_MTIME])
+		d["SIZE"] = str(st.st_size)
+
+		rel_path = self._pkg_paths[cpv]
+		# record location if it's non-default
+		if rel_path != cpv + ".tbz2":
+			d["PATH"] = rel_path
+
+		self._eval_use_flags(cpv, d)
+		return d
+
+	def _new_pkgindex(self):
+		return portage.getbinpkg.PackageIndex(
+			allowed_pkg_keys=self._pkgindex_allowed_pkg_keys,
+			default_header_data=self._pkgindex_default_header_data,
+			default_pkg_data=self._pkgindex_default_pkg_data,
+			inherited_keys=self._pkgindex_inherited_keys,
+			translated_keys=self._pkgindex_translated_keys)
+
+	def _update_pkgindex_header(self, header):
+		portdir = normalize_path(os.path.realpath(self.settings["PORTDIR"]))
+		profiles_base = os.path.join(portdir, "profiles") + os.path.sep
+		if self.settings.profile_path:
+			profile_path = normalize_path(
+				os.path.realpath(self.settings.profile_path))
+			if profile_path.startswith(profiles_base):
+				profile_path = profile_path[len(profiles_base):]
+			header["PROFILE"] = profile_path
+		header["VERSION"] = str(self._pkgindex_version)
+		base_uri = self.settings.get("PORTAGE_BINHOST_HEADER_URI")
+		if base_uri:
+			header["URI"] = base_uri
+		else:
+			header.pop("URI", None)
+		for k in self._pkgindex_header_keys:
+			v = self.settings.get(k, None)
+			if v:
+				header[k] = v
+			else:
+				header.pop(k, None)
+
+	def _pkgindex_version_supported(self, pkgindex):
+		version = pkgindex.header.get("VERSION")
+		if version:
+			try:
+				if int(version) <= self._pkgindex_version:
+					return True
+			except ValueError:
+				pass
+		return False
+
+	def _eval_use_flags(self, cpv, metadata):
+		use = frozenset(metadata["USE"].split())
+		raw_use = use
+		iuse = set(f.lstrip("-+") for f in metadata["IUSE"].split())
+		use = [f for f in use if f in iuse]
+		use.sort()
+		metadata["USE"] = " ".join(use)
+		for k in self._pkgindex_use_evaluated_keys:
+			if k.endswith('DEPEND'):
+				token_class = Atom
+			else:
+				token_class = None
+
+			try:
+				deps = metadata[k]
+				deps = use_reduce(deps, uselist=raw_use, token_class=token_class)
+				deps = paren_enclose(deps)
+			except portage.exception.InvalidDependString as e:
+				writemsg("%s: %s\n" % (k, str(e)),
+					noiselevel=-1)
+				raise
+			metadata[k] = deps
+
+	def exists_specific(self, cpv):
+		if not self.populated:
+			self.populate()
+		return self.dbapi.match(
+			dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
+
+	def dep_bestmatch(self, mydep):
+		"compatibility method -- all matches, not just visible ones"
+		if not self.populated:
+			self.populate()
+		writemsg("\n\n", 1)
+		writemsg("mydep: %s\n" % mydep, 1)
+		mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
+		writemsg("mydep: %s\n" % mydep, 1)
+		mykey = dep_getkey(mydep)
+		writemsg("mykey: %s\n" % mykey, 1)
+		mymatch = best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
+		writemsg("mymatch: %s\n" % mymatch, 1)
+		if mymatch is None:
+			return ""
+		return mymatch
+
+	def getname(self, pkgname):
+		"""Returns a file location for this package.  The default location is
+		${PKGDIR}/All/${PF}.tbz2, but will be ${PKGDIR}/${CATEGORY}/${PF}.tbz2
+		in the rare event of a collision.  The prevent_collision() method can
+		be called to ensure that ${PKGDIR}/All/${PF}.tbz2 is available for a
+		specific cpv."""
+		if not self.populated:
+			self.populate()
+		mycpv = pkgname
+		mypath = self._pkg_paths.get(mycpv, None)
+		if mypath:
+			return os.path.join(self.pkgdir, mypath)
+		mycat, mypkg = catsplit(mycpv)
+		if self._all_directory:
+			mypath = os.path.join("All", mypkg + ".tbz2")
+			if mypath in self._pkg_paths.values():
+				mypath = os.path.join(mycat, mypkg + ".tbz2")
+		else:
+			mypath = os.path.join(mycat, mypkg + ".tbz2")
+		self._pkg_paths[mycpv] = mypath # cache for future lookups
+		return os.path.join(self.pkgdir, mypath)
+
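+	# Illustrative example (hypothetical paths): with PKGDIR set to
+	# /usr/portage/packages and an All/ directory present,
+	# getname("app-misc/foo-1.0") normally returns
+	# /usr/portage/packages/All/foo-1.0.tbz2, falling back to
+	# /usr/portage/packages/app-misc/foo-1.0.tbz2 on a name collision.
+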
+	def isremote(self, pkgname):
+		"""Returns true if the package is kept remotely and it has not been
+		downloaded (or it is only partially downloaded)."""
+		if self._remotepkgs is None or pkgname not in self._remotepkgs:
+			return False
+		# Presence in self._remotepkgs implies that it's remote. When a
+		# package is downloaded, state is updated by self.inject().
+		return True
+
+	def get_pkgindex_uri(self, pkgname):
+		"""Returns the URI to the Packages file for a given package."""
+		return self._pkgindex_uri.get(pkgname)
+
+	def gettbz2(self, pkgname):
+		"""Fetches the package from a remote site, if necessary.  Attempts to
+		resume if the file appears to be partially downloaded."""
+		tbz2_path = self.getname(pkgname)
+		tbz2name = os.path.basename(tbz2_path)
+		resume = False
+		if os.path.exists(tbz2_path):
+			if (tbz2name not in self.invalids):
+				return
+			else:
+				resume = True
+				writemsg(_("Resuming download of this tbz2, but it is possible that it is corrupt.\n"),
+					noiselevel=-1)
+		
+		mydest = os.path.dirname(self.getname(pkgname))
+		self._ensure_dir(mydest)
+		# urljoin doesn't work correctly with unrecognized protocols like sftp
+		if self._remote_has_index:
+			rel_url = self._remotepkgs[pkgname].get("PATH")
+			if not rel_url:
+				rel_url = pkgname+".tbz2"
+			remote_base_uri = self._remotepkgs[pkgname]["BASE_URI"]
+			url = remote_base_uri.rstrip("/") + "/" + rel_url.lstrip("/")
+		else:
+			url = self.settings["PORTAGE_BINHOST"].rstrip("/") + "/" + tbz2name
+		protocol = urlparse(url)[0]
+		fcmd_prefix = "FETCHCOMMAND"
+		if resume:
+			fcmd_prefix = "RESUMECOMMAND"
+		fcmd = self.settings.get(fcmd_prefix + "_" + protocol.upper())
+		if not fcmd:
+			fcmd = self.settings.get(fcmd_prefix)
+		success = portage.getbinpkg.file_get(url, mydest, fcmd=fcmd)
+		if not success:
+			try:
+				os.unlink(self.getname(pkgname))
+			except OSError:
+				pass
+			raise portage.exception.FileNotFound(mydest)
+		self.inject(pkgname)
+
+	def _load_pkgindex(self):
+		pkgindex = self._new_pkgindex()
+		try:
+			f = io.open(_unicode_encode(self._pkgindex_file,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['repo.content'],
+				errors='replace')
+		except EnvironmentError:
+			pass
+		else:
+			try:
+				pkgindex.read(f)
+			finally:
+				f.close()
+		return pkgindex
+
+	def digestCheck(self, pkg):
+		"""
+		Verify digests for the given package and raise DigestException
+		if verification fails.
+		@rtype: bool
+		@returns: True if digests could be located, False otherwise.
+		"""
+		cpv = pkg
+		if not isinstance(cpv, basestring):
+			cpv = pkg.cpv
+			pkg = None
+
+		pkg_path = self.getname(cpv)
+		metadata = None
+		if self._remotepkgs is None or cpv not in self._remotepkgs:
+			for d in self._load_pkgindex().packages:
+				if d["CPV"] == cpv:
+					metadata = d
+					break
+		else:
+			metadata = self._remotepkgs[cpv]
+		if metadata is None:
+			return False
+
+		digests = {}
+		for k in hashfunc_map:
+			v = metadata.get(k)
+			if not v:
+				continue
+			digests[k] = v
+
+		if "SIZE" in metadata:
+			try:
+				digests["size"] = int(metadata["SIZE"])
+			except ValueError:
+				writemsg(_("!!! Malformed SIZE attribute in remote " \
+				"metadata for '%s'\n") % cpv)
+
+		if not digests:
+			return False
+
+		eout = EOutput()
+		eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
+		ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
+		if not ok:
+			ok, reason = verify_all(pkg_path, digests)
+			if not ok:
+				raise portage.exception.DigestException(
+					(pkg_path,) + tuple(reason))
+
+		return True
+
+	def getslot(self, mycatpkg):
+		"Get a slot for a catpkg; assume it exists."
+		myslot = ""
+		try:
+			myslot = self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
+		except SystemExit:
+			raise
+		except Exception:
+			pass
+		return myslot

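A short sketch of how the binarytree above is typically consumed
(illustrative only, assuming a configured Portage environment;
"app-misc/foo-1.0" is a hypothetical package):

    import portage

    bintree = portage.db[portage.root]["bintree"]
    bintree.populate(getbinpkgs=1)      # scan PKGDIR and any binhosts

    cpv = "app-misc/foo-1.0"
    if bintree.dbapi.cpv_exists(cpv):
        if bintree.isremote(cpv):
            bintree.gettbz2(cpv)        # fetch; inject() updates state
        print(bintree.getname(cpv))     # path under ${PKGDIR}
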
diff --git a/portage_with_autodep/pym/portage/dbapi/cpv_expand.py b/portage_with_autodep/pym/portage/dbapi/cpv_expand.py
new file mode 100644
index 0000000..947194c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/cpv_expand.py
@@ -0,0 +1,106 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["cpv_expand"]
+
+import portage
+from portage.exception import AmbiguousPackageName
+from portage.localization import _
+from portage.util import writemsg
+from portage.versions import _pkgsplit
+
+def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
+	"""Given a string (packagename or virtual) expand it into a valid
+	cat/package string. Virtuals use the mydb to determine which provided
+	virtual is a valid choice and defaults to the first element when there
+	are no installed/available candidates."""
+	myslash=mycpv.split("/")
+	mysplit = _pkgsplit(myslash[-1])
+	if settings is None:
+		try:
+			settings = mydb.settings
+		except AttributeError:
+			settings = portage.settings
+	if len(myslash)>2:
+		# this is an illegal case.
+		mysplit=[]
+		mykey=mycpv
+	elif len(myslash)==2:
+		if mysplit:
+			mykey=myslash[0]+"/"+mysplit[0]
+		else:
+			mykey=mycpv
+
+		# Since Gentoo stopped using old-style virtuals in
+		# 2011, typically it's possible to avoid getvirtuals()
+		# calls entirely. Therefore, only call getvirtuals()
+		# if the atom category is "virtual" and cp_list()
+		# returns nothing.
+		if mykey.startswith("virtual/") and \
+			hasattr(mydb, "cp_list") and \
+			not mydb.cp_list(mykey, use_cache=use_cache):
+				if hasattr(mydb, "vartree"):
+					settings._populate_treeVirtuals_if_needed(mydb.vartree)
+				virts = settings.getvirtuals().get(mykey)
+				if virts:
+					mykey_orig = mykey
+					for vkey in virts:
+						# The virtuals file can contain a versioned atom, so
+						# it may be necessary to remove the operator and
+						# version from the atom before it is passed into
+						# dbapi.cp_list().
+						if mydb.cp_list(vkey.cp):
+							mykey = str(vkey)
+							break
+					if mykey == mykey_orig:
+						mykey = str(virts[0])
+			#we only perform virtual expansion if we are passed a dbapi
+	else:
+		#specific cpv, no category, ie. "foo-1.0"
+		if mysplit:
+			myp=mysplit[0]
+		else:
+			# "foo" ?
+			myp=mycpv
+		mykey=None
+		matches=[]
+		if mydb and hasattr(mydb, "categories"):
+			for x in mydb.categories:
+				if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
+					matches.append(x+"/"+myp)
+		if len(matches) > 1:
+			virtual_name_collision = False
+			if len(matches) == 2:
+				for x in matches:
+					if not x.startswith("virtual/"):
+						# Assume that the non-virtual is desired.  This helps
+						# avoid the ValueError for invalid deps that come from
+						# installed packages (during reverse blocker detection,
+						# for example).
+						mykey = x
+					else:
+						virtual_name_collision = True
+			if not virtual_name_collision:
+				# AmbiguousPackageName inherits from ValueError,
+				# for backward compatibility with calling code
+				# that already handles ValueError.
+				raise AmbiguousPackageName(matches)
+		elif matches:
+			mykey=matches[0]
+
+		if not mykey and not isinstance(mydb, list):
+			if hasattr(mydb, "vartree"):
+				settings._populate_treeVirtuals_if_needed(mydb.vartree)
+			virts_p = settings.get_virts_p().get(myp)
+			if virts_p:
+				mykey = virts_p[0]
+			#again, we only perform virtual expansion if we have a dbapi (not a list)
+		if not mykey:
+			mykey="null/"+myp
+	if mysplit:
+		if mysplit[2]=="r0":
+			return mykey+"-"+mysplit[1]
+		else:
+			return mykey+"-"+mysplit[1]+"-"+mysplit[2]
+	else:
+		return mykey

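A few illustrative calls (assuming "vardb" is a dbapi with the usual
packages installed; the outputs are examples, not guaranteed):

    from portage.dbapi.cpv_expand import cpv_expand

    cpv_expand("bash-4.1", mydb=vardb)   # -> "app-shells/bash-4.1"
    cpv_expand("portage", mydb=vardb)    # -> "sys-apps/portage"
    cpv_expand("nosuchpkg", mydb=vardb)  # -> "null/nosuchpkg"
    # raises AmbiguousPackageName if several categories match
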
diff --git a/portage_with_autodep/pym/portage/dbapi/dep_expand.py b/portage_with_autodep/pym/portage/dbapi/dep_expand.py
new file mode 100644
index 0000000..ac8ccf4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/dep_expand.py
@@ -0,0 +1,56 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["dep_expand"]
+
+import re
+
+from portage.dbapi.cpv_expand import cpv_expand
+from portage.dep import Atom, isvalidatom
+from portage.exception import InvalidAtom
+from portage.versions import catsplit
+
+def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
+	'''
+	@rtype: Atom
+	'''
+	orig_dep = mydep
+	if isinstance(orig_dep, Atom):
+		has_cat = True
+	else:
+		if not mydep:
+			return mydep
+		if mydep[0] == "*":
+			mydep = mydep[1:]
+			orig_dep = mydep
+		has_cat = '/' in orig_dep
+		if not has_cat:
+			alphanum = re.search(r'\w', orig_dep)
+			if alphanum:
+				mydep = orig_dep[:alphanum.start()] + "null/" + \
+					orig_dep[alphanum.start():]
+		try:
+			mydep = Atom(mydep, allow_repo=True)
+		except InvalidAtom:
+			# Missing '=' prefix is allowed for backward compatibility.
+			if not isvalidatom("=" + mydep, allow_repo=True):
+				raise
+			mydep = Atom('=' + mydep, allow_repo=True)
+			orig_dep = '=' + orig_dep
+		if not has_cat:
+			null_cat, pn = catsplit(mydep.cp)
+			mydep = pn
+
+	if has_cat:
+		# Optimize most common cases to avoid calling cpv_expand.
+		if not mydep.cp.startswith("virtual/"):
+			return mydep
+		if not hasattr(mydb, "cp_list") or \
+			mydb.cp_list(mydep.cp):
+			return mydep
+		# Fallback to legacy cpv_expand for old-style PROVIDE virtuals.
+		mydep = mydep.cp
+
+	expanded = cpv_expand(mydep, mydb=mydb,
+		use_cache=use_cache, settings=settings)
+	return Atom(orig_dep.replace(mydep, expanded, 1), allow_repo=True)

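And the corresponding sketch for dep_expand() (same assumptions as
above; note that it returns Atom instances rather than plain strings):

    from portage.dbapi.dep_expand import dep_expand

    dep_expand(">=bash-4.1", mydb=vardb)
    # -> Atom(">=app-shells/bash-4.1")
    dep_expand("app-shells/bash", mydb=vardb)
    # category already present: returned unchanged as an Atom
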
diff --git a/portage_with_autodep/pym/portage/dbapi/porttree.py b/portage_with_autodep/pym/portage/dbapi/porttree.py
new file mode 100644
index 0000000..ecf275c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/porttree.py
@@ -0,0 +1,1168 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = [
+	"close_portdbapi_caches", "FetchlistDict", "portagetree", "portdbapi"
+]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.checksum',
+	'portage.data:portage_gid,secpass',
+	'portage.dbapi.dep_expand:dep_expand',
+	'portage.dep:Atom,dep_getkey,match_from_list,use_reduce',
+	'portage.package.ebuild.doebuild:doebuild',
+	'portage.util:ensure_dirs,shlex_split,writemsg,writemsg_level',
+	'portage.util.listdir:listdir',
+	'portage.versions:best,catpkgsplit,_pkgsplit@pkgsplit,ver_regexp',
+)
+
+from portage.cache import metadata_overlay, volatile
+from portage.cache.cache_errors import CacheError
+from portage.cache.mappings import Mapping
+from portage.dbapi import dbapi
+from portage.exception import PortageException, \
+	FileNotFound, InvalidAtom, InvalidDependString, InvalidPackageName
+from portage.localization import _
+from portage.manifest import Manifest
+
+from portage import eclass_cache, auxdbkeys, \
+	eapi_is_supported, dep_check, \
+	_eapi_is_deprecated
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import OrderedDict
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+from _emerge.PollScheduler import PollScheduler
+
+import os as _os
+import io
+import stat
+import sys
+import traceback
+import warnings
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+	long = int
+
+class _repo_info(object):
+	__slots__ = ('name', 'path', 'eclass_db', 'portdir', 'portdir_overlay')
+	def __init__(self, name, path, eclass_db):
+		self.name = name
+		self.path = path
+		self.eclass_db = eclass_db
+		self.portdir = eclass_db.porttrees[0]
+		self.portdir_overlay = ' '.join(eclass_db.porttrees[1:])
+
+class portdbapi(dbapi):
+	"""this tree will scan a portage directory located at root (passed to init)"""
+	portdbapi_instances = []
+	_use_mutable = True
+
+	@property
+	def _categories(self):
+		return self.settings.categories
+
+	@property
+	def porttree_root(self):
+		return self.settings.repositories.mainRepoLocation()
+
+	def __init__(self, _unused_param=None, mysettings=None):
+		"""
+		@param _unused_param: deprecated, use mysettings['PORTDIR'] instead
+		@type _unused_param: None
+		@param mysettings: an immutable config instance
+		@type mysettings: portage.config
+		"""
+		portdbapi.portdbapi_instances.append(self)
+
+		from portage import config
+		if mysettings:
+			self.settings = mysettings
+		else:
+			from portage import settings
+			self.settings = config(clone=settings)
+
+		if _unused_param is not None:
+			warnings.warn("The first parameter of the " + \
+				"portage.dbapi.porttree.portdbapi" + \
+				" constructor is unused since portage-2.1.8. " + \
+				"mysettings['PORTDIR'] is used instead.",
+				DeprecationWarning, stacklevel=2)
+
+		self.repositories = self.settings.repositories
+		self.treemap = self.repositories.treemap
+
+		# This is strictly for use in aux_get() doebuild calls when metadata
+		# is generated by the depend phase.  It's safest to use a clone for
+		# this purpose because doebuild makes many changes to the config
+		# instance that is passed in.
+		self.doebuild_settings = config(clone=self.settings)
+		self.depcachedir = os.path.realpath(self.settings.depcachedir)
+		
+		if os.environ.get("SANDBOX_ON") == "1":
+			# Make api consumers exempt from sandbox violations
+			# when doing metadata cache updates.
+			sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
+			if self.depcachedir not in sandbox_write:
+				sandbox_write.append(self.depcachedir)
+				os.environ["SANDBOX_WRITE"] = \
+					":".join(filter(None, sandbox_write))
+
+		self.porttrees = list(self.settings.repositories.repoLocationList())
+		self.eclassdb = eclass_cache.cache(self.settings.repositories.mainRepoLocation())
+
+		# This is used as sanity check for aux_get(). If there is no
+		# root eclass dir, we assume that PORTDIR is invalid or
+		# missing. This check allows aux_get() to detect a missing
+		# portage tree and return early by raising a KeyError.
+		self._have_root_eclass_dir = os.path.isdir(
+			os.path.join(self.settings.repositories.mainRepoLocation(), "eclass"))
+
+		self.metadbmodule = self.settings.load_best_module("portdbapi.metadbmodule")
+
+		#if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
+		self.xcache = {}
+		self.frozen = 0
+
+		#Create eclass dbs
+		self._repo_info = {}
+		eclass_dbs = {self.settings.repositories.mainRepoLocation() : self.eclassdb}
+		for repo in self.repositories:
+			if repo.location in self._repo_info:
+				continue
+
+			eclass_db = None
+			for eclass_location in repo.eclass_locations:
+				tree_db = eclass_dbs.get(eclass_location)
+				if tree_db is None:
+					tree_db = eclass_cache.cache(eclass_location)
+					eclass_dbs[eclass_location] = tree_db
+				if eclass_db is None:
+					eclass_db = tree_db.copy()
+				else:
+					eclass_db.append(tree_db)
+
+			self._repo_info[repo.location] = _repo_info(repo.name, repo.location, eclass_db)
+
+		#Keep a list of repo names, sorted by priority (highest priority first).
+		self._ordered_repo_name_list = tuple(reversed(self.repositories.prepos_order))
+
+		self.auxdbmodule = self.settings.load_best_module("portdbapi.auxdbmodule")
+		self.auxdb = {}
+		self._pregen_auxdb = {}
+		self._init_cache_dirs()
+		depcachedir_w_ok = os.access(self.depcachedir, os.W_OK)
+		cache_kwargs = {
+			'gid'     : portage_gid,
+			'perms'   : 0o664
+		}
+
+		# XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
+		# ~harring
+		filtered_auxdbkeys = [x for x in auxdbkeys if not x.startswith("UNUSED_0")]
+		filtered_auxdbkeys.sort()
+		# If secpass < 1, we don't want to write to the cache
+		# since then we won't be able to apply group permissions
+		# to the cache entries/directories.
+		if secpass < 1 or not depcachedir_w_ok:
+			for x in self.porttrees:
+				try:
+					db_ro = self.auxdbmodule(self.depcachedir, x,
+						filtered_auxdbkeys, readonly=True, **cache_kwargs)
+				except CacheError:
+					self.auxdb[x] = volatile.database(
+						self.depcachedir, x, filtered_auxdbkeys,
+						**cache_kwargs)
+				else:
+					self.auxdb[x] = metadata_overlay.database(
+						self.depcachedir, x, filtered_auxdbkeys,
+						db_rw=volatile.database, db_ro=db_ro,
+						**cache_kwargs)
+		else:
+			for x in self.porttrees:
+				if x in self.auxdb:
+					continue
+				# location, label, auxdbkeys
+				self.auxdb[x] = self.auxdbmodule(
+					self.depcachedir, x, filtered_auxdbkeys, **cache_kwargs)
+				if self.auxdbmodule is metadata_overlay.database:
+					self.auxdb[x].db_ro.ec = self._repo_info[x].eclass_db
+		if "metadata-transfer" not in self.settings.features:
+			for x in self.porttrees:
+				if x in self._pregen_auxdb:
+					continue
+				if os.path.isdir(os.path.join(x, "metadata", "cache")):
+					self._pregen_auxdb[x] = self.metadbmodule(
+						x, "metadata/cache", filtered_auxdbkeys, readonly=True)
+					try:
+						self._pregen_auxdb[x].ec = self._repo_info[x].eclass_db
+					except AttributeError:
+						pass
+		# Selectively cache metadata in order to optimize dep matching.
+		self._aux_cache_keys = set(
+			["DEPEND", "EAPI", "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
+			"PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND", "repository",
+			"RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"])
+
+		self._aux_cache = {}
+		self._broken_ebuilds = set()
+
+	def _init_cache_dirs(self):
+		"""Create /var/cache/edb/dep and adjust permissions for the portage
+		group."""
+
+		dirmode  = 0o2070
+		filemode =   0o60
+		modemask =    0o2
+
+		try:
+			ensure_dirs(self.depcachedir, gid=portage_gid,
+				mode=dirmode, mask=modemask)
+		except PortageException as e:
+			pass
+
+	def close_caches(self):
+		if not hasattr(self, "auxdb"):
+			# unhandled exception thrown from constructor
+			return
+		for x in self.auxdb:
+			self.auxdb[x].sync()
+		self.auxdb.clear()
+
+	def flush_cache(self):
+		for x in self.auxdb.values():
+			x.sync()
+
+	def findLicensePath(self, license_name):
+		for x in reversed(self.porttrees):
+			license_path = os.path.join(x, "licenses", license_name)
+			if os.access(license_path, os.R_OK):
+				return license_path
+		return None
+
+	def findname(self,mycpv, mytree = None, myrepo = None):
+		return self.findname2(mycpv, mytree, myrepo)[0]
+
+	def getRepositoryPath(self, repository_id):
+		"""
+		This function is required for GLEP 42 compliance; given a valid repository ID
+		it must return a path to the repository.
+		TreeMap = { id:path }
+		"""
+		return self.treemap.get(repository_id)
+
+	def getRepositoryName(self, canonical_repo_path):
+		"""
+		This is the inverse of getRepositoryPath().
+		@param canonical_repo_path: the canonical path of a repository, as
+			resolved by os.path.realpath()
+		@type canonical_repo_path: String
+		@returns: The repo_name for the corresponding repository, or None
+			if the path does not correspond to a known repository
+		@rtype: String or None
+		"""
+		try:
+			return self.repositories.get_name_for_location(canonical_repo_path)
+		except KeyError:
+			return None
+
+	def getRepositories(self):
+		"""
+		This function is required for GLEP 42 compliance; it will return a list of
+		repository IDs
+		TreeMap = {id: path}
+		"""
+		return self._ordered_repo_name_list
+
+	def getMissingRepoNames(self):
+		"""
+		Returns a list of repository paths that lack profiles/repo_name.
+		"""
+		return self.settings.repositories.missing_repo_names
+
+	def getIgnoredRepos(self):
+		"""
+		Returns a list of repository paths that have been ignored, because
+		another repo with the same name exists.
+		"""
+		return self.settings.repositories.ignored_repos
+
+	def findname2(self, mycpv, mytree=None, myrepo = None):
+		"""
+		Returns the location of the CPV, and what overlay it was in.
+		Searches overlays first, then PORTDIR; this allows us to return the
+		first matching file. If we started in PORTDIR and did the overlays
+		second, we would have to exhaustively search the overlays until we
+		found the file we wanted.
+		If myrepo is not None it will find packages from this repository (overlay).
+		"""
+		if not mycpv:
+			return (None, 0)
+
+		if myrepo is not None:
+			mytree = self.treemap.get(myrepo)
+			if mytree is None:
+				return (None, 0)
+
+		mysplit = mycpv.split("/")
+		psplit = pkgsplit(mysplit[1])
+		if psplit is None or len(mysplit) != 2:
+			raise InvalidPackageName(mycpv)
+
+		# For optimal performance in this hot spot, we do manual unicode
+		# handling here instead of using the wrapped os module.
+		encoding = _encodings['fs']
+		errors = 'strict'
+
+		if mytree:
+			mytrees = [mytree]
+		else:
+			mytrees = reversed(self.porttrees)
+
+		relative_path = mysplit[0] + _os.sep + psplit[0] + _os.sep + \
+			mysplit[1] + ".ebuild"
+
+		for x in mytrees:
+			filename = x + _os.sep + relative_path
+			if _os.access(_unicode_encode(filename,
+				encoding=encoding, errors=errors), _os.R_OK):
+				return (filename, x)
+		return (None, 0)
+
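+	# Illustrative sketch (not upstream documentation): looking up an ebuild
+	# with findname2(); "sys-apps/foo-1.0" is a hypothetical cpv and portdb
+	# an already-constructed portdbapi instance.
+	#
+	#     ebuild_path, tree = portdb.findname2("sys-apps/foo-1.0")
+	#     if ebuild_path is not None:
+	#         print("found %s in tree %s" % (ebuild_path, tree))
+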
+	def _metadata_process(self, cpv, ebuild_path, repo_path):
+		"""
+		Create an EbuildMetadataPhase instance to generate metadata for the
+		given ebuild.
+		@rtype: EbuildMetadataPhase
+		@returns: A new EbuildMetadataPhase instance, or None if the
+			metadata cache is already valid.
+		"""
+		metadata, st, emtime = self._pull_valid_cache(cpv, ebuild_path, repo_path)
+		if metadata is not None:
+			return None
+
+		process = EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
+			ebuild_mtime=emtime, metadata_callback=self._metadata_callback,
+			portdb=self, repo_path=repo_path, settings=self.doebuild_settings)
+		return process
+
+	def _metadata_callback(self, cpv, ebuild_path, repo_path, metadata, mtime):
+
+		i = metadata
+		if hasattr(metadata, "items"):
+			i = iter(metadata.items())
+		metadata = dict(i)
+
+		if metadata.get("INHERITED", False):
+			metadata["_eclasses_"] = self._repo_info[repo_path
+				].eclass_db.get_eclass_data(metadata["INHERITED"].split())
+		else:
+			metadata["_eclasses_"] = {}
+
+		metadata.pop("INHERITED", None)
+		metadata["_mtime_"] = mtime
+
+		eapi = metadata.get("EAPI")
+		if not eapi or not eapi.strip():
+			eapi = "0"
+			metadata["EAPI"] = eapi
+		if not eapi_is_supported(eapi):
+			for k in set(metadata).difference(("_mtime_", "_eclasses_")):
+				metadata[k] = ""
+			metadata["EAPI"] = "-" + eapi.lstrip("-")
+
+		try:
+			self.auxdb[repo_path][cpv] = metadata
+		except CacheError:
+			# Normally this shouldn't happen, so we'll show
+			# a traceback for debugging purposes.
+			traceback.print_exc()
+		return metadata
+
+	def _pull_valid_cache(self, cpv, ebuild_path, repo_path):
+		try:
+			# Don't use unicode-wrapped os module, for better performance.
+			st = _os.stat(_unicode_encode(ebuild_path,
+				encoding=_encodings['fs'], errors='strict'))
+			emtime = st[stat.ST_MTIME]
+		except OSError:
+			writemsg(_("!!! aux_get(): ebuild for " \
+				"'%s' does not exist at:\n") % (cpv,), noiselevel=-1)
+			writemsg("!!!            %s\n" % ebuild_path, noiselevel=-1)
+			raise KeyError(cpv)
+
+		# Pull pre-generated metadata from the metadata/cache/
+		# directory if it exists and is valid, otherwise fall
+		# back to the normal writable cache.
+		auxdbs = []
+		pregen_auxdb = self._pregen_auxdb.get(repo_path)
+		if pregen_auxdb is not None:
+			auxdbs.append(pregen_auxdb)
+		auxdbs.append(self.auxdb[repo_path])
+		eclass_db = self._repo_info[repo_path].eclass_db
+
+		doregen = True
+		for auxdb in auxdbs:
+			try:
+				metadata = auxdb[cpv]
+			except KeyError:
+				pass
+			except CacheError:
+				if auxdb is not pregen_auxdb:
+					try:
+						del auxdb[cpv]
+					except KeyError:
+						pass
+					except CacheError:
+						pass
+			else:
+				eapi = metadata.get('EAPI', '').strip()
+				if not eapi:
+					eapi = '0'
+				if not (eapi[:1] == '-' and eapi_is_supported(eapi[1:])) and \
+					emtime == metadata['_mtime_'] and \
+					eclass_db.is_eclass_data_valid(metadata['_eclasses_']):
+					doregen = False
+
+			if not doregen:
+				break
+
+		if doregen:
+			metadata = None
+
+		return (metadata, st, emtime)
+
+	def aux_get(self, mycpv, mylist, mytree=None, myrepo=None):
+		"stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
+		'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
+		'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
+		cache_me = False
+		if myrepo is not None:
+			mytree = self.treemap.get(myrepo)
+			if mytree is None:
+				raise KeyError(myrepo)
+
+		if not mytree:
+			cache_me = True
+		if not mytree and not self._known_keys.intersection(
+			mylist).difference(self._aux_cache_keys):
+			aux_cache = self._aux_cache.get(mycpv)
+			if aux_cache is not None:
+				return [aux_cache.get(x, "") for x in mylist]
+			cache_me = True
+		global auxdbkeys, auxdbkeylen
+		try:
+			cat, pkg = mycpv.split("/", 1)
+		except ValueError:
+			# Missing slash. Can't find ebuild so raise KeyError.
+			raise KeyError(mycpv)
+
+		myebuild, mylocation = self.findname2(mycpv, mytree)
+
+		if not myebuild:
+			writemsg("!!! aux_get(): %s\n" % \
+				_("ebuild not found for '%s'") % mycpv, noiselevel=1)
+			raise KeyError(mycpv)
+
+		mydata, st, emtime = self._pull_valid_cache(mycpv, myebuild, mylocation)
+		doregen = mydata is None
+
+		if doregen:
+			if myebuild in self._broken_ebuilds:
+				raise KeyError(mycpv)
+
+			self.doebuild_settings.setcpv(mycpv)
+			eapi = None
+
+			if eapi is None and \
+				'parse-eapi-ebuild-head' in self.doebuild_settings.features:
+				eapi = portage._parse_eapi_ebuild_head(io.open(
+					_unicode_encode(myebuild,
+					encoding=_encodings['fs'], errors='strict'),
+					mode='r', encoding=_encodings['repo.content'],
+					errors='replace'))
+
+			if eapi is not None:
+				self.doebuild_settings.configdict['pkg']['EAPI'] = eapi
+
+			if eapi is not None and not portage.eapi_is_supported(eapi):
+				mydata = self._metadata_callback(
+					mycpv, myebuild, mylocation, {'EAPI':eapi}, emtime)
+			else:
+				proc = EbuildMetadataPhase(cpv=mycpv, ebuild_path=myebuild,
+					ebuild_mtime=emtime,
+					metadata_callback=self._metadata_callback, portdb=self,
+					repo_path=mylocation,
+					scheduler=PollScheduler().sched_iface,
+					settings=self.doebuild_settings)
+
+				proc.start()
+				proc.wait()
+
+				if proc.returncode != os.EX_OK:
+					self._broken_ebuilds.add(myebuild)
+					raise KeyError(mycpv)
+
+				mydata = proc.metadata
+
+		# do we have an origin repository name for the current package
+		mydata["repository"] = self.repositories.get_name_for_location(mylocation)
+		mydata["INHERITED"] = ' '.join(mydata.get("_eclasses_", []))
+		mydata["_mtime_"] = st[stat.ST_MTIME]
+
+		eapi = mydata.get("EAPI")
+		if not eapi:
+			eapi = "0"
+			mydata["EAPI"] = eapi
+		if not eapi_is_supported(eapi):
+			for k in set(mydata).difference(("_mtime_", "_eclasses_")):
+				mydata[k] = ""
+			mydata["EAPI"] = "-" + eapi.lstrip("-")
+
+		#finally, we look at our internal cache entry and return the requested data.
+		returnme = [mydata.get(x, "") for x in mylist]
+
+		if cache_me:
+			aux_cache = {}
+			for x in self._aux_cache_keys:
+				aux_cache[x] = mydata.get(x, "")
+			self._aux_cache[mycpv] = aux_cache
+
+		return returnme
+
+	def getFetchMap(self, mypkg, useflags=None, mytree=None):
+		"""
+		Get the SRC_URI metadata as a dict which maps each file name to a
+		set of alternative URIs.
+
+		@param mypkg: cpv for an ebuild
+		@type mypkg: String
+		@param useflags: a collection of enabled USE flags, for evaluation of
+			conditionals
+		@type useflags: set, or None to enable all conditionals
+		@param mytree: The canonical path of the tree in which the ebuild
+			is located, or None for automatic lookup
+		@type mypkg: String
+		@returns: A dict which maps each file name to a set of alternative
+			URIs.
+		@rtype: dict
+		"""
+
+		try:
+			eapi, myuris = self.aux_get(mypkg,
+				["EAPI", "SRC_URI"], mytree=mytree)
+		except KeyError:
+			# Convert this to an InvalidDependString exception since callers
+			# already handle it.
+			raise portage.exception.InvalidDependString(
+				"getFetchMap(): aux_get() error reading "+mypkg+"; aborting.")
+
+		if not eapi_is_supported(eapi):
+			# Convert this to an InvalidDependString exception
+			# since callers already handle it.
+			raise portage.exception.InvalidDependString(
+				"getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
+				(mypkg, eapi.lstrip("-")))
+
+		return _parse_uri_map(mypkg, {'EAPI':eapi,'SRC_URI':myuris},
+			use=useflags)
+
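+	# Illustrative sketch of the returned mapping, assuming the hypothetical
+	# cpv "sys-apps/foo-1.0" exists in the tree: getFetchMap() maps each
+	# distfile name to a set of alternative URIs.
+	#
+	#     fetch_map = portdb.getFetchMap("sys-apps/foo-1.0")
+	#     # e.g. {'foo-1.0.tar.gz': set(['http://example.org/foo-1.0.tar.gz'])}
+	#     for distfile, uris in fetch_map.items():
+	#         print(distfile, sorted(uris))
+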
+	def getfetchsizes(self, mypkg, useflags=None, debug=0, myrepo=None):
+		# returns a filename:size dictionary of remaining downloads
+		myebuild, mytree = self.findname2(mypkg, myrepo=myrepo)
+		if myebuild is None:
+			raise AssertionError(_("ebuild not found for '%s'") % mypkg)
+		pkgdir = os.path.dirname(myebuild)
+		mf = Manifest(pkgdir, self.settings["DISTDIR"])
+		checksums = mf.getDigests()
+		if not checksums:
+			if debug: 
+				writemsg(_("[empty/missing/bad digest]: %s\n") % (mypkg,))
+			return {}
+		filesdict={}
+		myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
+		#XXX: maybe this should be improved: take partial downloads
+		# into account? check checksums?
+		for myfile in myfiles:
+			try:
+				fetch_size = int(checksums[myfile]["size"])
+			except (KeyError, ValueError):
+				if debug:
+					writemsg(_("[bad digest]: missing %(file)s for %(pkg)s\n") % {"file":myfile, "pkg":mypkg})
+				continue
+			file_path = os.path.join(self.settings["DISTDIR"], myfile)
+			mystat = None
+			try:
+				mystat = os.stat(file_path)
+			except OSError as e:
+				pass
+			if mystat is None:
+				existing_size = 0
+				ro_distdirs = self.settings.get("PORTAGE_RO_DISTDIRS")
+				if ro_distdirs is not None:
+					for x in shlex_split(ro_distdirs):
+						try:
+							mystat = os.stat(os.path.join(x, myfile))
+						except OSError:
+							pass
+						else:
+							if mystat.st_size == fetch_size:
+								existing_size = fetch_size
+								break
+			else:
+				existing_size = mystat.st_size
+			remaining_size = fetch_size - existing_size
+			if remaining_size > 0:
+				# Assume the download is resumable.
+				filesdict[myfile] = remaining_size
+			elif remaining_size < 0:
+				# The existing file is too large and therefore corrupt.
+				filesdict[myfile] = int(checksums[myfile]["size"])
+		return filesdict
+
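+	# Illustrative sketch: summing the bytes still to be fetched for a
+	# hypothetical package, using the filename:size dict returned above.
+	#
+	#     remaining = portdb.getfetchsizes("sys-apps/foo-1.0")
+	#     total_bytes = sum(remaining.values())
+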
+	def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False, myrepo=None):
+		"""
+		TODO: account for PORTAGE_RO_DISTDIRS
+		"""
+		if all:
+			useflags = None
+		elif useflags is None:
+			if mysettings:
+				useflags = mysettings["USE"].split()
+		if myrepo is not None:
+			mytree = self.treemap.get(myrepo)
+			if mytree is None:
+				return False
+		else:
+			mytree = None
+
+		myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
+		myebuild = self.findname(mypkg, myrepo=myrepo)
+		if myebuild is None:
+			raise AssertionError(_("ebuild not found for '%s'") % mypkg)
+		pkgdir = os.path.dirname(myebuild)
+		mf = Manifest(pkgdir, self.settings["DISTDIR"])
+		mysums = mf.getDigests()
+
+		failures = {}
+		for x in myfiles:
+			if not mysums or x not in mysums:
+				ok     = False
+				reason = _("digest missing")
+			else:
+				try:
+					ok, reason = portage.checksum.verify_all(
+						os.path.join(self.settings["DISTDIR"], x), mysums[x])
+				except FileNotFound as e:
+					ok = False
+					reason = _("File Not Found: '%s'") % (e,)
+			if not ok:
+				failures[x] = reason
+		if failures:
+			return False
+		return True
+
+	def cpv_exists(self, mykey, myrepo=None):
+		"Tells us whether an actual ebuild exists on disk (no masking)"
+		cps2 = mykey.split("/")
+		cps = catpkgsplit(mykey, silent=0)
+		if not cps:
+			#invalid cat/pkg-v
+			return 0
+		if self.findname(cps[0] + "/" + cps2[1], myrepo=myrepo):
+			return 1
+		else:
+			return 0
+
+	def cp_all(self, categories=None, trees=None):
+		"""
+		This returns a list of all keys in our tree or trees.
+		@param categories: optional list of categories to search;
+			defaults to self.settings.categories
+		@param trees: optional list of trees to search the categories in;
+			defaults to self.porttrees
+		@rtype: list of [cat/pkg,...]
+		"""
+		d = {}
+		if categories is None:
+			categories = self.settings.categories
+		if trees is None:
+			trees = self.porttrees
+		for x in categories:
+			for oroot in trees:
+				for y in listdir(oroot+"/"+x, EmptyOnError=1, ignorecvs=1, dirsonly=1):
+					try:
+						atom = Atom("%s/%s" % (x, y))
+					except InvalidAtom:
+						continue
+					if atom != atom.cp:
+						continue
+					d[atom.cp] = None
+		l = list(d)
+		l.sort()
+		return l
+
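+	# Illustrative sketch: restricting cp_all() to a single category;
+	# "app-editors" is just an example.
+	#
+	#     for cp in portdb.cp_all(categories=["app-editors"]):
+	#         print(cp)  # e.g. "app-editors/vim"
+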
+	def cp_list(self, mycp, use_cache=1, mytree=None):
+		if self.frozen and mytree is None:
+			cachelist = self.xcache["cp-list"].get(mycp)
+			if cachelist is not None:
+				# Try to propagate this to the match-all cache here for
+				# repoman since it uses separate match-all caches for each
+				# profile (due to old-style virtuals). Do not propagate
+				# old-style virtuals since cp_list() doesn't expand them.
+				if not (not cachelist and mycp.startswith("virtual/")):
+					self.xcache["match-all"][mycp] = cachelist
+				return cachelist[:]
+		mysplit = mycp.split("/")
+		invalid_category = mysplit[0] not in self._categories
+		d={}
+		if mytree is not None:
+			if isinstance(mytree, basestring):
+				mytrees = [mytree]
+			else:
+				# assume it's iterable
+				mytrees = mytree
+		else:
+			mytrees = self.porttrees
+		for oroot in mytrees:
+			try:
+				file_list = os.listdir(os.path.join(oroot, mycp))
+			except OSError:
+				continue
+			for x in file_list:
+				pf = None
+				if x[-7:] == '.ebuild':
+					pf = x[:-7]
+
+				if pf is not None:
+					ps = pkgsplit(pf)
+					if not ps:
+						writemsg(_("\nInvalid ebuild name: %s\n") % \
+							os.path.join(oroot, mycp, x), noiselevel=-1)
+						continue
+					if ps[0] != mysplit[1]:
+						writemsg(_("\nInvalid ebuild name: %s\n") % \
+							os.path.join(oroot, mycp, x), noiselevel=-1)
+						continue
+					ver_match = ver_regexp.match("-".join(ps[1:]))
+					if ver_match is None or not ver_match.groups():
+						writemsg(_("\nInvalid ebuild version: %s\n") % \
+							os.path.join(oroot, mycp, x), noiselevel=-1)
+						continue
+					d[mysplit[0]+"/"+pf] = None
+		if invalid_category and d:
+			writemsg(_("\n!!! '%s' has a category that is not listed in " \
+				"%setc/portage/categories\n") % \
+				(mycp, self.settings["PORTAGE_CONFIGROOT"]), noiselevel=-1)
+			mylist = []
+		else:
+			mylist = list(d)
+		# Always sort in ascending order here since it's handy
+		# and the result can be easily cached and reused.
+		self._cpv_sort_ascending(mylist)
+		if self.frozen and mytree is None:
+			cachelist = mylist[:]
+			self.xcache["cp-list"][mycp] = cachelist
+			# Do not propagate old-style virtuals since
+			# cp_list() doesn't expand them.
+			if not (not cachelist and mycp.startswith("virtual/")):
+				self.xcache["match-all"][mycp] = cachelist
+		return mylist
+
+	def freeze(self):
+		for x in "bestmatch-visible", "cp-list", "list-visible", "match-all", \
+			"match-all-cpv-only", "match-visible", "minimum-all", \
+			"minimum-visible":
+			self.xcache[x]={}
+		self.frozen=1
+
+	def melt(self):
+		self.xcache = {}
+		self.frozen = 0
+
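+	# Illustrative sketch: freeze() enables the xcache so repeated match
+	# queries are served from memory; melt() discards it. The atom is
+	# hypothetical.
+	#
+	#     portdb.freeze()
+	#     try:
+	#         matches = portdb.xmatch("match-all", ">=sys-apps/foo-1.0")
+	#     finally:
+	#         portdb.melt()
+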
+	def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
+		"caching match function; very tricky stuff"
+		#if no updates are being made to the tree, we can consult our xcache...
+		if self.frozen:
+			try:
+				return self.xcache[level][origdep][:]
+			except KeyError:
+				pass
+
+		if mydep is None:
+			#this stuff only runs on the first call of xmatch()
+			#create mydep, mykey from origdep
+			mydep = dep_expand(origdep, mydb=self, settings=self.settings)
+			mykey = mydep.cp
+
+		myval = None
+		mytree = None
+		if mydep.repo is not None:
+			mytree = self.treemap.get(mydep.repo)
+			if mytree is None:
+				myval = []
+
+		if myval is not None:
+			# Unknown repo, empty result.
+			pass
+		elif level == "match-all-cpv-only":
+			# match *all* packages, only against the cpv, in order
+			# to bypass unnecessary cache access for things like IUSE
+			# and SLOT.
+			if mydep == mykey:
+				# Share cache with match-all/cp_list when the result is the
+				# same. Note that this requires that mydep.repo is None and
+				# thus mytree is also None.
+				level = "match-all"
+				myval = self.cp_list(mykey, mytree=mytree)
+			else:
+				myval = match_from_list(mydep,
+					self.cp_list(mykey, mytree=mytree))
+
+		elif level == "list-visible":
+			#a list of all visible packages, not called directly (just by xmatch())
+			#myval = self.visible(self.cp_list(mykey))
+
+			myval = self.gvisible(self.visible(
+				self.cp_list(mykey, mytree=mytree)))
+		elif level == "minimum-all":
+			# Find the minimum matching version. This is optimized to
+			# minimize the number of metadata accesses (improves performance
+			# especially in cases where metadata needs to be generated).
+			if mydep == mykey:
+				cpv_iter = iter(self.cp_list(mykey, mytree=mytree))
+			else:
+				cpv_iter = self._iter_match(mydep,
+					self.cp_list(mykey, mytree=mytree))
+			try:
+				myval = next(cpv_iter)
+			except StopIteration:
+				myval = ""
+
+		elif level in ("minimum-visible", "bestmatch-visible"):
+			# Find the minimum matching visible version. This is optimized to
+			# minimize the number of metadata accesses (improves performance
+			# especially in cases where metadata needs to be generated).
+			if mydep == mykey:
+				mylist = self.cp_list(mykey, mytree=mytree)
+			else:
+				mylist = match_from_list(mydep,
+					self.cp_list(mykey, mytree=mytree))
+			myval = ""
+			settings = self.settings
+			local_config = settings.local_config
+			aux_keys = list(self._aux_cache_keys)
+			if level == "minimum-visible":
+				iterfunc = iter
+			else:
+				iterfunc = reversed
+			for cpv in iterfunc(mylist):
+				try:
+					metadata = dict(zip(aux_keys,
+						self.aux_get(cpv, aux_keys)))
+				except KeyError:
+					# ebuild masked by corruption
+					continue
+				if not eapi_is_supported(metadata["EAPI"]):
+					continue
+				if mydep.slot and mydep.slot != metadata["SLOT"]:
+					continue
+				if settings._getMissingKeywords(cpv, metadata):
+					continue
+				if settings._getMaskAtom(cpv, metadata):
+					continue
+				if settings._getProfileMaskAtom(cpv, metadata):
+					continue
+				if local_config:
+					metadata["USE"] = ""
+					if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]:
+						self.doebuild_settings.setcpv(cpv, mydb=metadata)
+						metadata["USE"] = self.doebuild_settings.get("USE", "")
+					try:
+						if settings._getMissingLicenses(cpv, metadata):
+							continue
+						if settings._getMissingProperties(cpv, metadata):
+							continue
+					except InvalidDependString:
+						continue
+				if mydep.use:
+					has_iuse = False
+					for has_iuse in self._iter_match_use(mydep, [cpv]):
+						break
+					if not has_iuse:
+						continue
+				myval = cpv
+				break
+		elif level == "bestmatch-list":
+			#dep match -- find best match but restrict search to sublist
+			#no point in calling xmatch again since we're not caching list deps
+
+			myval = best(list(self._iter_match(mydep, mylist)))
+		elif level == "match-list":
+			#dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
+
+			myval = list(self._iter_match(mydep, mylist))
+		elif level == "match-visible":
+			#dep match -- find all visible matches
+			#get all visible packages, then get the matching ones
+			myval = list(self._iter_match(mydep,
+				self.xmatch("list-visible", mykey, mydep=Atom(mykey), mykey=mykey)))
+		elif level == "match-all":
+			#match *all* visible *and* masked packages
+			if mydep == mykey:
+				myval = self.cp_list(mykey, mytree=mytree)
+			else:
+				myval = list(self._iter_match(mydep,
+					self.cp_list(mykey, mytree=mytree)))
+		else:
+			raise AssertionError(
+				"Invalid level argument: '%s'" % level)
+
+		if self.frozen and (level not in ["match-list", "bestmatch-list"]):
+			self.xcache[level][mydep] = myval
+			if origdep and origdep != mydep:
+				self.xcache[level][origdep] = myval
+		return myval[:]
+
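+	# Illustrative sketch: the level argument selects the match strategy;
+	# "sys-apps/foo" is a hypothetical atom.
+	#
+	#     best_visible = portdb.xmatch("bestmatch-visible", "sys-apps/foo")
+	#     all_cpvs = portdb.xmatch("match-all", "sys-apps/foo")
+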
+	def match(self, mydep, use_cache=1):
+		return self.xmatch("match-visible", mydep)
+
+	def visible(self, mylist):
+		"""two functions in one.  Accepts a list of cpv values and uses the package.mask *and*
+		packages file to remove invisible entries, returning remaining items.  This function assumes
+		that all entries in mylist have the same category and package name."""
+		if not mylist:
+			return []
+
+		db_keys = ["SLOT"]
+		visible = []
+		getMaskAtom = self.settings._getMaskAtom
+		getProfileMaskAtom = self.settings._getProfileMaskAtom
+		for cpv in mylist:
+			try:
+				metadata = dict(zip(db_keys, self.aux_get(cpv, db_keys)))
+			except KeyError:
+				# masked by corruption
+				continue
+			if not metadata["SLOT"]:
+				continue
+			if getMaskAtom(cpv, metadata):
+				continue
+			if getProfileMaskAtom(cpv, metadata):
+				continue
+			visible.append(cpv)
+		return visible
+
+	def gvisible(self,mylist):
+		"strip out group-masked (not in current group) entries"
+
+		if mylist is None:
+			return []
+		newlist=[]
+		aux_keys = list(self._aux_cache_keys)
+		metadata = {}
+		local_config = self.settings.local_config
+		chost = self.settings.get('CHOST', '')
+		accept_chost = self.settings._accept_chost
+		for mycpv in mylist:
+			metadata.clear()
+			try:
+				metadata.update(zip(aux_keys, self.aux_get(mycpv, aux_keys)))
+			except KeyError:
+				continue
+			except PortageException as e:
+				writemsg("!!! Error: aux_get('%s', %s)\n" % (mycpv, aux_keys),
+					noiselevel=-1)
+				writemsg("!!! %s\n" % (e,), noiselevel=-1)
+				del e
+				continue
+			eapi = metadata["EAPI"]
+			if not eapi_is_supported(eapi):
+				continue
+			if _eapi_is_deprecated(eapi):
+				continue
+			if self.settings._getMissingKeywords(mycpv, metadata):
+				continue
+			if local_config:
+				metadata['CHOST'] = chost
+				if not accept_chost(mycpv, metadata):
+					continue
+				metadata["USE"] = ""
+				if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]:
+					self.doebuild_settings.setcpv(mycpv, mydb=metadata)
+					metadata['USE'] = self.doebuild_settings['PORTAGE_USE']
+				try:
+					if self.settings._getMissingLicenses(mycpv, metadata):
+						continue
+					if self.settings._getMissingProperties(mycpv, metadata):
+						continue
+				except InvalidDependString:
+					continue
+			newlist.append(mycpv)
+		return newlist
+
+def close_portdbapi_caches():
+	for i in portdbapi.portdbapi_instances:
+		i.close_caches()
+
+portage.process.atexit_register(portage.portageexit)
+
+class portagetree(object):
+	def __init__(self, root=None, virtual=None, settings=None):
+		"""
+		Constructor for a PortageTree
+		
+		@param root: deprecated, defaults to settings['ROOT']
+		@type root: String/Path
+		@param virtual: UNUSED
+		@type virtual: No Idea
+		@param settings: Portage Configuration object (portage.settings)
+		@type settings: Instance of portage.config
+		"""
+
+		if settings is None:
+			settings = portage.settings
+		self.settings = settings
+
+		if root is not None and root != settings['ROOT']:
+			warnings.warn("The root parameter of the " + \
+				"portage.dbapi.porttree.portagetree" + \
+				" constructor is now unused. Use " + \
+				"settings['ROOT'] instead.",
+				DeprecationWarning, stacklevel=2)
+
+		self.portroot = settings["PORTDIR"]
+		self.virtual = virtual
+		self.dbapi = portdbapi(mysettings=settings)
+
+	@property
+	def root(self):
+		warnings.warn("The root attribute of " + \
+			"portage.dbapi.porttree.portagetree" + \
+			" is deprecated. Use " + \
+			"settings['ROOT'] instead.",
+			DeprecationWarning, stacklevel=2)
+		return self.settings['ROOT']
+
+	def dep_bestmatch(self,mydep):
+		"compatibility method"
+		mymatch = self.dbapi.xmatch("bestmatch-visible",mydep)
+		if mymatch is None:
+			return ""
+		return mymatch
+
+	def dep_match(self,mydep):
+		"compatibility method"
+		mymatch = self.dbapi.xmatch("match-visible",mydep)
+		if mymatch is None:
+			return []
+		return mymatch
+
+	def exists_specific(self,cpv):
+		return self.dbapi.cpv_exists(cpv)
+
+	def getallnodes(self):
+		"""new behavior: these are all *unmasked* nodes.  There may or may not be
+		masked packages available for the nodes in this list."""
+		return self.dbapi.cp_all()
+
+	def getname(self, pkgname):
+		"returns file location for this particular package (DEPRECATED)"
+		if not pkgname:
+			return ""
+		mysplit = pkgname.split("/")
+		psplit = pkgsplit(mysplit[1])
+		return "/".join([self.portroot, mysplit[0], psplit[0], mysplit[1]])+".ebuild"
+
+	def depcheck(self, mycheck, use="yes", myusesplit=None):
+		return dep_check(mycheck, self.dbapi, use=use, myuse=myusesplit)
+
+	def getslot(self,mycatpkg):
+		"Get a slot for a catpkg; assume it exists."
+		myslot = ""
+		try:
+			myslot = self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
+		except SystemExit as e:
+			raise
+		except Exception as e:
+			pass
+		return myslot
+
+class FetchlistDict(Mapping):
+	"""
+	This provides a mapping interface to retrieve fetch lists. It's used
+	to allow portage.manifest.Manifest to access fetch lists via a standard
+	mapping interface rather than using the dbapi directly.
+	"""
+	def __init__(self, pkgdir, settings, mydbapi):
+		"""pkgdir is a directory containing ebuilds and settings is passed into
+		portdbapi.getFetchMap for __getitem__ calls."""
+		self.pkgdir = pkgdir
+		self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
+		self.settings = settings
+		self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
+		self.portdb = mydbapi
+
+	def __getitem__(self, pkg_key):
+		"""Returns the complete fetch list for a given package."""
+		return list(self.portdb.getFetchMap(pkg_key, mytree=self.mytree))
+
+	def __contains__(self, cpv):
+		return cpv in self.__iter__()
+
+	def has_key(self, pkg_key):
+		"""Returns true if the given package exists within pkgdir."""
+		warnings.warn("portage.dbapi.porttree.FetchlistDict.has_key() is "
+			"deprecated, use the 'in' operator instead",
+			DeprecationWarning, stacklevel=2)
+		return pkg_key in self
+
+	def __iter__(self):
+		return iter(self.portdb.cp_list(self.cp, mytree=self.mytree))
+
+	def __len__(self):
+		"""This needs to be implemented in order to avoid
+		infinite recursion in some cases."""
+		return len(self.portdb.cp_list(self.cp, mytree=self.mytree))
+
+	def keys(self):
+		"""Returns keys for all packages within pkgdir"""
+		return self.portdb.cp_list(self.cp, mytree=self.mytree)
+
+	if sys.hexversion >= 0x3000000:
+		keys = __iter__
+
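+# Illustrative sketch: Manifest can consume FetchlistDict through the plain
+# mapping protocol; pkgdir, settings and portdb are assumed to exist.
+#
+#     fetchlists = FetchlistDict(pkgdir, settings, portdb)
+#     for cpv in fetchlists:
+#         distfiles = fetchlists[cpv]
+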
+def _parse_uri_map(cpv, metadata, use=None):
+
+	myuris = use_reduce(metadata.get('SRC_URI', ''),
+		uselist=use, matchall=(use is None),
+		is_src_uri=True,
+		eapi=metadata['EAPI'])
+
+	uri_map = OrderedDict()
+
+	myuris.reverse()
+	while myuris:
+		uri = myuris.pop()
+		if myuris and myuris[-1] == "->":
+			operator = myuris.pop()
+			distfile = myuris.pop()
+		else:
+			distfile = os.path.basename(uri)
+			if not distfile:
+				raise portage.exception.InvalidDependString(
+					("getFetchMap(): '%s' SRC_URI has no file " + \
+					"name: '%s'") % (cpv, uri))
+
+		uri_set = uri_map.get(distfile)
+		if uri_set is None:
+			uri_set = set()
+			uri_map[distfile] = uri_set
+		uri_set.add(uri)
+		uri = None
+		operator = None
+
+	return uri_map
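+
+# Illustrative sketch of how the SRC_URI "->" rename operator is flattened
+# by _parse_uri_map(); the metadata below is hypothetical.
+#
+#     metadata = {'EAPI': '2',
+#         'SRC_URI': 'http://example.org/foo.tgz -> foo-1.0.tgz'}
+#     uri_map = _parse_uri_map('sys-apps/foo-1.0', metadata)
+#     # uri_map == {'foo-1.0.tgz': set(['http://example.org/foo.tgz'])}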

diff --git a/portage_with_autodep/pym/portage/dbapi/vartree.py b/portage_with_autodep/pym/portage/dbapi/vartree.py
new file mode 100644
index 0000000..7f7873b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/vartree.py
@@ -0,0 +1,4527 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = [
+	"vardbapi", "vartree", "dblink"] + \
+	["write_contents", "tar_contents"]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.checksum:_perform_md5_merge@perform_md5',
+	'portage.data:portage_gid,portage_uid,secpass',
+	'portage.dbapi.dep_expand:dep_expand',
+	'portage.dbapi._MergeProcess:MergeProcess',
+	'portage.dep:dep_getkey,isjustname,match_from_list,' + \
+	 	'use_reduce,_slot_re',
+	'portage.elog:collect_ebuild_messages,collect_messages,' + \
+		'elog_process,_merge_logentries',
+	'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
+	'portage.output:bold,colorize',
+	'portage.package.ebuild.doebuild:doebuild_environment,' + \
+		'_spawn_phase',
+	'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+	'portage.update:fixdbentries',
+	'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
+		'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
+		'grabdict,normalize_path,new_protect_filename',
+	'portage.util.digraph:digraph',
+	'portage.util.env_update:env_update',
+	'portage.util.listdir:dircache,listdir',
+	'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
+	'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
+	'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,pkgcmp,' + \
+		'_pkgsplit@pkgsplit',
+)
+
+from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
+	PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
+from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_PRESERVE_LIBS
+from portage.dbapi import dbapi
+from portage.exception import CommandNotFound, \
+	InvalidData, InvalidLocation, InvalidPackageName, \
+	FileNotFound, PermissionDenied, UnsupportedAPIException
+from portage.localization import _
+from portage.util.movefile import movefile
+
+from portage import abssymlink, _movefile, bsd_chflags
+
+# This is a special version of the os module, wrapped for unicode support.
+from portage import os
+from portage import _encodings
+from portage import _os_merge
+from portage import _selinux_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.emergelog import emergelog
+from _emerge.PollScheduler import PollScheduler
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+
+import errno
+import gc
+import io
+from itertools import chain
+import logging
+import os as _os
+import re
+import shutil
+import stat
+import sys
+import tempfile
+import textwrap
+import time
+import warnings
+
+try:
+	import cPickle as pickle
+except ImportError:
+	import pickle
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+	long = int
+
+class vardbapi(dbapi):
+
+	_excluded_dirs = ["CVS", "lost+found"]
+	_excluded_dirs = [re.escape(x) for x in _excluded_dirs]
+	_excluded_dirs = re.compile(r'^(\..*|-MERGING-.*|' + \
+		"|".join(_excluded_dirs) + r')$')
+
+	_aux_cache_version        = "1"
+	_owners_cache_version     = "1"
+
+	# Number of uncached packages to trigger cache update, since
+	# it's wasteful to update it for every vdb change.
+	_aux_cache_threshold = 5
+
+	_aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
+	_aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
+
+	def __init__(self, _unused_param=None, categories=None, settings=None, vartree=None):
+		"""
+		The categories parameter is unused since the dbapi class
+		now has a categories property that is generated from the
+		available packages.
+		"""
+
+		# Used by emerge to check whether any packages
+		# have been added or removed.
+		self._pkgs_changed = False
+
+		# The _aux_cache_threshold doesn't work as designed
+		# if the cache is flushed from a subprocess, so we
+		# use this to avoid wasteful vdb cache updates.
+		self._flush_cache_enabled = True
+
+		#cache for category directory mtimes
+		self.mtdircache = {}
+
+		#cache for dependency checks
+		self.matchcache = {}
+
+		#cache for cp_list results
+		self.cpcache = {}
+
+		self.blockers = None
+		if settings is None:
+			settings = portage.settings
+		self.settings = settings
+		self.root = settings['ROOT']
+
+		if _unused_param is not None and _unused_param != self.root:
+			warnings.warn("The first parameter of the " + \
+				"portage.dbapi.vartree.vardbapi" + \
+				" constructor is now unused. Use " + \
+				"settings['ROOT'] instead.",
+				DeprecationWarning, stacklevel=2)
+
+		self._eroot = settings['EROOT']
+		self._dbroot = self._eroot + VDB_PATH
+		self._lock = None
+		self._lock_count = 0
+
+		self._conf_mem_file = self._eroot + CONFIG_MEMORY_FILE
+		self._fs_lock_obj = None
+		self._fs_lock_count = 0
+
+		if vartree is None:
+			vartree = portage.db[self.root]["vartree"]
+		self.vartree = vartree
+		self._aux_cache_keys = set(
+			["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
+			"EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
+			"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
+			"repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
+			"REQUIRED_USE"])
+		self._aux_cache_obj = None
+		self._aux_cache_filename = os.path.join(self._eroot,
+			CACHE_PATH, "vdb_metadata.pickle")
+		self._counter_path = os.path.join(self._eroot,
+			CACHE_PATH, "counter")
+
+		self._plib_registry = None
+		if _ENABLE_PRESERVE_LIBS:
+			self._plib_registry = PreservedLibsRegistry(self.root,
+				os.path.join(self._eroot, PRIVATE_PATH,
+				"preserved_libs_registry"))
+
+		self._linkmap = None
+		if _ENABLE_DYN_LINK_MAP:
+			self._linkmap = LinkageMap(self)
+		self._owners = self._owners_db(self)
+
+		self._cached_counter = None
+
+	def getpath(self, mykey, filename=None):
+		# This is an optimized hotspot, so don't use unicode-wrapped
+		# os module and don't use os.path.join().
+		rValue = self._eroot + VDB_PATH + _os.sep + mykey
+		if filename is not None:
+			# If filename is always relative, we can do just
+			# rValue += _os.sep + filename
+			rValue = _os.path.join(rValue, filename)
+		return rValue
+
+	def lock(self):
+		"""
+		Acquire a reentrant lock, blocking, for cooperation with concurrent
+		processes. State is inherited by subprocesses, allowing subprocesses
+		to reenter a lock that was acquired by a parent process. However,
+		a lock can be released only by the same process that acquired it.
+		"""
+		if self._lock_count:
+			self._lock_count += 1
+		else:
+			if self._lock is not None:
+				raise AssertionError("already locked")
+			# At least the parent needs to exist for the lock file.
+			ensure_dirs(self._dbroot)
+			self._lock = lockdir(self._dbroot)
+			self._lock_count += 1
+
+	def unlock(self):
+		"""
+		Release a lock, decrementing the recursion level. Each unlock() call
+		must be matched with a prior lock() call; an AssertionError is raised
+		if unlock() is called while not locked.
+		"""
+		if self._lock_count > 1:
+			self._lock_count -= 1
+		else:
+			if self._lock is None:
+				raise AssertionError("not locked")
+			self._lock_count = 0
+			unlockdir(self._lock)
+			self._lock = None
+
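+	# Illustrative sketch: lock() and unlock() nest, so every lock() must be
+	# balanced by an unlock() from the same process.
+	#
+	#     vardb.lock()
+	#     try:
+	#         ...  # inspect or modify the vdb
+	#     finally:
+	#         vardb.unlock()
+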
+	def _fs_lock(self):
+		"""
+		Acquire a reentrant lock, blocking, for cooperation with concurrent
+		processes.
+		"""
+		if self._fs_lock_count < 1:
+			if self._fs_lock_obj is not None:
+				raise AssertionError("already locked")
+			try:
+				self._fs_lock_obj = lockfile(self._conf_mem_file)
+			except InvalidLocation:
+				self.settings._init_dirs()
+				self._fs_lock_obj = lockfile(self._conf_mem_file)
+		self._fs_lock_count += 1
+
+	def _fs_unlock(self):
+		"""
+		Release a lock, decrementing the recursion level.
+		"""
+		if self._fs_lock_count <= 1:
+			if self._fs_lock_obj is None:
+				raise AssertionError("not locked")
+			unlockfile(self._fs_lock_obj)
+			self._fs_lock_obj = None
+		self._fs_lock_count -= 1
+
+	def _bump_mtime(self, cpv):
+		"""
+		This is called before and after any modifications, so that consumers
+		can use directory mtimes to validate caches. See bug #290428.
+		"""
+		base = self._eroot + VDB_PATH
+		cat = catsplit(cpv)[0]
+		catdir = base + _os.sep + cat
+		t = time.time()
+		t = (t, t)
+		try:
+			for x in (catdir, base):
+				os.utime(x, t)
+		except OSError:
+			ensure_dirs(catdir)
+
+	def cpv_exists(self, mykey, myrepo=None):
+		"Tells us whether an actual ebuild exists on disk (no masking)"
+		return os.path.exists(self.getpath(mykey))
+
+	def cpv_counter(self, mycpv):
+		"This method will grab the COUNTER. Returns a counter value."
+		try:
+			return long(self.aux_get(mycpv, ["COUNTER"])[0])
+		except (KeyError, ValueError):
+			pass
+		writemsg_level(_("portage: COUNTER for %s was corrupted; " \
+			"resetting to value of 0\n") % (mycpv,),
+			level=logging.ERROR, noiselevel=-1)
+		return 0
+
+	def cpv_inject(self, mycpv):
+		"injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
+		ensure_dirs(self.getpath(mycpv))
+		counter = self.counter_tick(mycpv=mycpv)
+		# write local package counter so that emerge clean does the right thing
+		write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
+
+	def isInjected(self, mycpv):
+		if self.cpv_exists(mycpv):
+			if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
+				return True
+			if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
+				return True
+		return False
+
+	def move_ent(self, mylist, repo_match=None):
+		origcp = mylist[1]
+		newcp = mylist[2]
+
+		# sanity check
+		for atom in (origcp, newcp):
+			if not isjustname(atom):
+				raise InvalidPackageName(str(atom))
+		origmatches = self.match(origcp, use_cache=0)
+		moves = 0
+		if not origmatches:
+			return moves
+		for mycpv in origmatches:
+			mycpv_cp = cpv_getkey(mycpv)
+			if mycpv_cp != origcp:
+				# Ignore PROVIDE virtual match.
+				continue
+			if repo_match is not None \
+				and not repo_match(self.aux_get(mycpv, ['repository'])[0]):
+				continue
+			mynewcpv = mycpv.replace(mycpv_cp, str(newcp), 1)
+			mynewcat = catsplit(newcp)[0]
+			origpath = self.getpath(mycpv)
+			if not os.path.exists(origpath):
+				continue
+			moves += 1
+			if not os.path.exists(self.getpath(mynewcat)):
+				#create the directory
+				ensure_dirs(self.getpath(mynewcat))
+			newpath = self.getpath(mynewcpv)
+			if os.path.exists(newpath):
+				#dest already exists; keep this puppy where it is.
+				continue
+			_movefile(origpath, newpath, mysettings=self.settings)
+			self._clear_pkg_cache(self._dblink(mycpv))
+			self._clear_pkg_cache(self._dblink(mynewcpv))
+
+			# We need to rename the ebuild now.
+			old_pf = catsplit(mycpv)[1]
+			new_pf = catsplit(mynewcpv)[1]
+			if new_pf != old_pf:
+				try:
+					os.rename(os.path.join(newpath, old_pf + ".ebuild"),
+						os.path.join(newpath, new_pf + ".ebuild"))
+				except EnvironmentError as e:
+					if e.errno != errno.ENOENT:
+						raise
+					del e
+			write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
+			write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
+			fixdbentries([mylist], newpath)
+		return moves
+
+	def cp_list(self, mycp, use_cache=1):
+		mysplit=catsplit(mycp)
+		if mysplit[0] == '*':
+			mysplit[0] = mysplit[0][1:]
+		try:
+			mystat = os.stat(self.getpath(mysplit[0])).st_mtime
+		except OSError:
+			mystat = 0
+		if use_cache and mycp in self.cpcache:
+			cpc = self.cpcache[mycp]
+			if cpc[0] == mystat:
+				return cpc[1][:]
+		cat_dir = self.getpath(mysplit[0])
+		try:
+			dir_list = os.listdir(cat_dir)
+		except EnvironmentError as e:
+			if e.errno == PermissionDenied.errno:
+				raise PermissionDenied(cat_dir)
+			del e
+			dir_list = []
+
+		returnme = []
+		for x in dir_list:
+			if self._excluded_dirs.match(x) is not None:
+				continue
+			ps = pkgsplit(x)
+			if not ps:
+				self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
+				continue
+			if len(mysplit) > 1:
+				if ps[0] == mysplit[1]:
+					returnme.append(mysplit[0]+"/"+x)
+		self._cpv_sort_ascending(returnme)
+		if use_cache:
+			self.cpcache[mycp] = [mystat, returnme[:]]
+		elif mycp in self.cpcache:
+			del self.cpcache[mycp]
+		return returnme
+
+	def cpv_all(self, use_cache=1):
+		"""
+		Set use_cache=0 to bypass the portage.cachedir() cache in cases
+		when the accuracy of mtime staleness checks should not be trusted
+		(generally this is only necessary in critical sections that
+		involve merge or unmerge of packages).
+		"""
+		returnme = []
+		basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
+
+		if use_cache:
+			from portage import listdir
+		else:
+			def listdir(p, **kwargs):
+				try:
+					return [x for x in os.listdir(p) \
+						if os.path.isdir(os.path.join(p, x))]
+				except EnvironmentError as e:
+					if e.errno == PermissionDenied.errno:
+						raise PermissionDenied(p)
+					del e
+					return []
+
+		for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
+			if self._excluded_dirs.match(x) is not None:
+				continue
+			if not self._category_re.match(x):
+				continue
+			for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
+				if self._excluded_dirs.match(y) is not None:
+					continue
+				subpath = x + "/" + y
+				# -MERGING- should never be a cpv, nor should files.
+				try:
+					if catpkgsplit(subpath) is None:
+						self.invalidentry(self.getpath(subpath))
+						continue
+				except InvalidData:
+					self.invalidentry(self.getpath(subpath))
+					continue
+				returnme.append(subpath)
+
+		return returnme
+
+	def cp_all(self, use_cache=1):
+		mylist = self.cpv_all(use_cache=use_cache)
+		d={}
+		for y in mylist:
+			if y[0] == '*':
+				y = y[1:]
+			try:
+				mysplit = catpkgsplit(y)
+			except InvalidData:
+				self.invalidentry(self.getpath(y))
+				continue
+			if not mysplit:
+				self.invalidentry(self.getpath(y))
+				continue
+			d[mysplit[0]+"/"+mysplit[1]] = None
+		return list(d)
+
+	def checkblockers(self, origdep):
+		pass
+
+	def _clear_cache(self):
+		self.mtdircache.clear()
+		self.matchcache.clear()
+		self.cpcache.clear()
+		self._aux_cache_obj = None
+
+	def _add(self, pkg_dblink):
+		self._pkgs_changed = True
+		self._clear_pkg_cache(pkg_dblink)
+
+	def _remove(self, pkg_dblink):
+		self._pkgs_changed = True
+		self._clear_pkg_cache(pkg_dblink)
+
+	def _clear_pkg_cache(self, pkg_dblink):
+		# Due to 1 second mtime granularity in <python-2.5, mtime checks
+		# are not always sufficient to invalidate vardbapi caches. Therefore,
+		# the caches need to be actively invalidated here.
+		self.mtdircache.pop(pkg_dblink.cat, None)
+		self.matchcache.pop(pkg_dblink.cat, None)
+		self.cpcache.pop(pkg_dblink.mysplit[0], None)
+		dircache.pop(pkg_dblink.dbcatdir, None)
+
+	def match(self, origdep, use_cache=1):
+		"caching match function"
+		mydep = dep_expand(
+			origdep, mydb=self, use_cache=use_cache, settings=self.settings)
+		mykey = dep_getkey(mydep)
+		mycat = catsplit(mykey)[0]
+		if not use_cache:
+			if mycat in self.matchcache:
+				del self.mtdircache[mycat]
+				del self.matchcache[mycat]
+			return list(self._iter_match(mydep,
+				self.cp_list(mydep.cp, use_cache=use_cache)))
+		try:
+			curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
+		except (IOError, OSError):
+			curmtime=0
+
+		if mycat not in self.matchcache or \
+			self.mtdircache[mycat] != curmtime:
+			# clear cache entry
+			self.mtdircache[mycat] = curmtime
+			self.matchcache[mycat] = {}
+		if mydep not in self.matchcache[mycat]:
+			mymatch = list(self._iter_match(mydep,
+				self.cp_list(mydep.cp, use_cache=use_cache)))
+			self.matchcache[mycat][mydep] = mymatch
+		return self.matchcache[mycat][mydep][:]
+
+	def findname(self, mycpv, myrepo=None):
+		return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
+
+	def flush_cache(self):
+		"""If the current user has permission and the internal aux_get cache has
+		been updated, save it to disk and mark it unmodified.  This is called
+		by emerge after it has loaded the full vdb for use in dependency
+		calculations.  Currently, the cache is only written if the user has
+		superuser privileges (since that's required to obtain a lock), but all
+		users have read access and benefit from faster metadata lookups (as
+		long as at least part of the cache is still valid)."""
+		if self._flush_cache_enabled and \
+			self._aux_cache is not None and \
+			len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
+			secpass >= 2:
+			self._owners.populate() # index any unindexed contents
+			valid_nodes = set(self.cpv_all())
+			for cpv in list(self._aux_cache["packages"]):
+				if cpv not in valid_nodes:
+					del self._aux_cache["packages"][cpv]
+			del self._aux_cache["modified"]
+			try:
+				f = atomic_ofstream(self._aux_cache_filename, 'wb')
+				pickle.dump(self._aux_cache, f, protocol=2)
+				f.close()
+				apply_secpass_permissions(
+					self._aux_cache_filename, gid=portage_gid, mode=0o644)
+			except (IOError, OSError) as e:
+				pass
+			self._aux_cache["modified"] = set()
+
+	@property
+	def _aux_cache(self):
+		if self._aux_cache_obj is None:
+			self._aux_cache_init()
+		return self._aux_cache_obj
+
+	def _aux_cache_init(self):
+		aux_cache = None
+		open_kwargs = {}
+		if sys.hexversion >= 0x3000000:
+			# Buffered io triggers extreme performance issues in
+			# Unpickler.load() (problem observed with python-3.0.1).
+			# Unfortunately, performance is still poor relative to
+			# python-2.x, but buffering makes it much worse.
+			open_kwargs["buffering"] = 0
+		try:
+			f = open(_unicode_encode(self._aux_cache_filename,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='rb', **open_kwargs)
+			mypickle = pickle.Unpickler(f)
+			try:
+				mypickle.find_global = None
+			except AttributeError:
+				# TODO: If py3k, override Unpickler.find_class().
+				pass
+			aux_cache = mypickle.load()
+			f.close()
+			del f
+		except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
+			if isinstance(e, pickle.UnpicklingError):
+				writemsg(_unicode_decode(_("!!! Error loading '%s': %s\n")) % \
+					(self._aux_cache_filename, e), noiselevel=-1)
+			del e
+
+		if not aux_cache or \
+			not isinstance(aux_cache, dict) or \
+			aux_cache.get("version") != self._aux_cache_version or \
+			not aux_cache.get("packages"):
+			aux_cache = {"version": self._aux_cache_version}
+			aux_cache["packages"] = {}
+
+		owners = aux_cache.get("owners")
+		if owners is not None:
+			if not isinstance(owners, dict):
+				owners = None
+			elif "version" not in owners:
+				owners = None
+			elif owners["version"] != self._owners_cache_version:
+				owners = None
+			elif "base_names" not in owners:
+				owners = None
+			elif not isinstance(owners["base_names"], dict):
+				owners = None
+
+		if owners is None:
+			owners = {
+				"base_names" : {},
+				"version"    : self._owners_cache_version
+			}
+			aux_cache["owners"] = owners
+
+		aux_cache["modified"] = set()
+		self._aux_cache_obj = aux_cache
+
+	def aux_get(self, mycpv, wants, myrepo = None):
+		"""This automatically caches selected keys that are frequently needed
+		by emerge for dependency calculations.  The cached metadata is
+		considered valid if the mtime of the package directory has not changed
+		since the data was cached.  The cache is stored in a pickled dict
+		object with the following format:
+
+		{"version": "1", "packages": {cpv1: (mtime, {k1: v1, k2: v2, ...}), cpv2: ...}}
+
+		If an error occurs while loading the cache pickle or the version is
+		unrecognized, the cache will simply be recreated from scratch (it is
+		completely disposable).
+		"""
+		cache_these_wants = self._aux_cache_keys.intersection(wants)
+		for x in wants:
+			if self._aux_cache_keys_re.match(x) is not None:
+				cache_these_wants.add(x)
+
+		if not cache_these_wants:
+			return self._aux_get(mycpv, wants)
+
+		cache_these = set(self._aux_cache_keys)
+		cache_these.update(cache_these_wants)
+
+		mydir = self.getpath(mycpv)
+		mydir_stat = None
+		try:
+			mydir_stat = os.stat(mydir)
+		except OSError as e:
+			if e.errno != errno.ENOENT:
+				raise
+			raise KeyError(mycpv)
+		mydir_mtime = mydir_stat[stat.ST_MTIME]
+		pkg_data = self._aux_cache["packages"].get(mycpv)
+		pull_me = cache_these.union(wants)
+		mydata = {"_mtime_" : mydir_mtime}
+		cache_valid = False
+		cache_incomplete = False
+		cache_mtime = None
+		metadata = None
+		if pkg_data is not None:
+			if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
+				pkg_data = None
+			else:
+				cache_mtime, metadata = pkg_data
+				if not isinstance(cache_mtime, (long, int)) or \
+					not isinstance(metadata, dict):
+					pkg_data = None
+
+		if pkg_data:
+			cache_mtime, metadata = pkg_data
+			cache_valid = cache_mtime == mydir_mtime
+		if cache_valid:
+			# Migrate old metadata to unicode.
+			for k, v in metadata.items():
+				metadata[k] = _unicode_decode(v,
+					encoding=_encodings['repo.content'], errors='replace')
+
+			mydata.update(metadata)
+			pull_me.difference_update(mydata)
+
+		if pull_me:
+			# pull any needed data and cache it
+			aux_keys = list(pull_me)
+			for k, v in zip(aux_keys,
+				self._aux_get(mycpv, aux_keys, st=mydir_stat)):
+				mydata[k] = v
+			if not cache_valid or cache_these.difference(metadata):
+				cache_data = {}
+				if cache_valid and metadata:
+					cache_data.update(metadata)
+				for aux_key in cache_these:
+					cache_data[aux_key] = mydata[aux_key]
+				self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
+				self._aux_cache["modified"].add(mycpv)
+
+		if _slot_re.match(mydata['SLOT']) is None:
+			# Empty or invalid slot triggers InvalidAtom exceptions when
+			# generating slot atoms for packages, so translate it to '0' here.
+			mydata['SLOT'] = _unicode_decode('0')
+
+		return [mydata[x] for x in wants]
+
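+	# Illustrative sketch: fetching a few cached keys for a hypothetical
+	# installed package; both keys are in _aux_cache_keys, so repeated
+	# lookups are served from the pickled cache while its mtime is valid.
+	#
+	#     slot, counter = vardb.aux_get("sys-apps/foo-1.0",
+	#         ["SLOT", "COUNTER"])
+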
+	def _aux_get(self, mycpv, wants, st=None):
+		mydir = self.getpath(mycpv)
+		if st is None:
+			try:
+				st = os.stat(mydir)
+			except OSError as e:
+				if e.errno == errno.ENOENT:
+					raise KeyError(mycpv)
+				elif e.errno == PermissionDenied.errno:
+					raise PermissionDenied(mydir)
+				else:
+					raise
+		if not stat.S_ISDIR(st.st_mode):
+			raise KeyError(mycpv)
+		results = []
+		for x in wants:
+			if x == "_mtime_":
+				results.append(st[stat.ST_MTIME])
+				continue
+			try:
+				myf = io.open(
+					_unicode_encode(os.path.join(mydir, x),
+					encoding=_encodings['fs'], errors='strict'),
+					mode='r', encoding=_encodings['repo.content'],
+					errors='replace')
+				try:
+					myd = myf.read()
+				finally:
+					myf.close()
+				# Preserve \n for metadata that is known to
+				# contain multiple lines.
+				if self._aux_multi_line_re.match(x) is None:
+					myd = " ".join(myd.split())
+			except IOError:
+				myd = _unicode_decode('')
+			if x == "EAPI" and not myd:
+				results.append(_unicode_decode('0'))
+			else:
+				results.append(myd)
+		return results
+
+	def aux_update(self, cpv, values):
+		mylink = self._dblink(cpv)
+		if not mylink.exists():
+			raise KeyError(cpv)
+		self._bump_mtime(cpv)
+		self._clear_pkg_cache(mylink)
+		for k, v in values.items():
+			if v:
+				mylink.setfile(k, v)
+			else:
+				try:
+					os.unlink(os.path.join(self.getpath(cpv), k))
+				except EnvironmentError:
+					pass
+		self._bump_mtime(cpv)
+
+	def counter_tick(self, myroot=None, mycpv=None):
+		"""
+		@param myroot: ignored, self._eroot is used instead
+		"""
+		return self.counter_tick_core(incrementing=1, mycpv=mycpv)
+
+	def get_counter_tick_core(self, myroot=None, mycpv=None):
+		"""
+		Use this method to retrieve the counter instead
+		of trusting the value of a global counter file,
+		which can lead to invalid COUNTER generation.
+		When the cache is valid, the package COUNTER
+		files are not read and we rely on the timestamp of
+		the package directory to validate the cache. The stat
+		calls should only take a short time, so performance
+		is sufficient without having to rely on a potentially
+		corrupt global counter file.
+
+		The global counter file located at
+		$CACHE_PATH/counter serves to record the
+		counter of the last installed package and
+		it also corresponds to the total number of
+		installation actions that have occurred in
+		the history of this package database.
+
+		@param myroot: ignored, self._eroot is used instead
+		"""
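+		# Illustration (hypothetical values): if the global COUNTER file
+		# reads "42" and matches self._cached_counter, this returns 43.
+		# Otherwise the COUNTER of every installed package is scanned and
+		# max(file value, per-package values) + 1 is returned instead.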
+		myroot = None
+		new_vdb = False
+		counter = -1
+		try:
+			cfile = io.open(
+				_unicode_encode(self._counter_path,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['repo.content'],
+				errors='replace')
+		except EnvironmentError as e:
+			new_vdb = not bool(self.cpv_all())
+			if not new_vdb:
+				writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
+					self._counter_path, noiselevel=-1)
+				writemsg("!!! %s\n" % str(e), noiselevel=-1)
+			del e
+		else:
+			try:
+				try:
+					counter = long(cfile.readline().strip())
+				finally:
+					cfile.close()
+			except (OverflowError, ValueError) as e:
+				writemsg(_("!!! COUNTER file is corrupt: '%s'\n") % \
+					self._counter_path, noiselevel=-1)
+				writemsg("!!! %s\n" % str(e), noiselevel=-1)
+				del e
+
+		if self._cached_counter == counter:
+			max_counter = counter
+		else:
+			# We must ensure that we return a counter
+			# value that is at least as large as the
+			# highest one from the installed packages,
+			# since having a corrupt value that is too low
+			# can trigger incorrect AUTOCLEAN behavior due
+			# to newly installed packages having lower
+			# COUNTERs than the previous version in the
+			# same slot.
+			max_counter = counter
+			for cpv in self.cpv_all():
+				try:
+					pkg_counter = int(self.aux_get(cpv, ["COUNTER"])[0])
+				except (KeyError, OverflowError, ValueError):
+					continue
+				if pkg_counter > max_counter:
+					max_counter = pkg_counter
+
+		if counter < 0 and not new_vdb:
+			writemsg(_("!!! Initializing COUNTER to " \
+				"value of %d\n") % max_counter, noiselevel=-1)
+
+		return max_counter + 1
+
+	def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
+		"""
+		This method will grab the next COUNTER value and record it back
+		to the global file. Note that every package install must have
+		a unique counter, since a slotmove update can move two packages
+		into the same SLOT and in that case it's important that both
+		packages have different COUNTER metadata.
+
+		@param myroot: ignored, self._eroot is used instead
+		@param mycpv: ignored
+		@rtype: int
+		@returns: new counter value
+		"""
+		myroot = None
+		mycpv = None
+		self.lock()
+		try:
+			counter = self.get_counter_tick_core() - 1
+			if incrementing:
+				#increment counter
+				counter += 1
+				# update new global counter file
+				try:
+					write_atomic(self._counter_path, str(counter))
+				except InvalidLocation:
+					self.settings._init_dirs()
+					write_atomic(self._counter_path, str(counter))
+			self._cached_counter = counter
+
+			# Since we hold a lock, this is a good opportunity
+			# to flush the cache. Note that this will only
+			# flush the cache periodically in the main process
+			# when _aux_cache_threshold is exceeded.
+			self.flush_cache()
+		finally:
+			self.unlock()
+
+		return counter
+
+	def _dblink(self, cpv):
+		category, pf = catsplit(cpv)
+		return dblink(category, pf, settings=self.settings,
+			vartree=self.vartree, treetype="vartree")
+
+	def removeFromContents(self, pkg, paths, relative_paths=True):
+		"""
+		@param pkg: cpv for an installed package
+		@type pkg: string
+		@param paths: paths of files to remove from contents
+		@type paths: iterable
+		"""
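+		# Hypothetical usage, with paths given relative to ROOT:
+		#   vardb.removeFromContents(pkg, ["/usr/lib/libfoo.so.1"])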
+		if not hasattr(pkg, "getcontents"):
+			pkg = self._dblink(pkg)
+		root = self.settings['ROOT']
+		root_len = len(root) - 1
+		new_contents = pkg.getcontents().copy()
+		removed = 0
+
+		for filename in paths:
+			filename = _unicode_decode(filename,
+				encoding=_encodings['content'], errors='strict')
+			filename = normalize_path(filename)
+			if relative_paths:
+				relative_filename = filename
+			else:
+				relative_filename = filename[root_len:]
+			contents_key = pkg._match_contents(relative_filename)
+			if contents_key:
+				del new_contents[contents_key]
+				removed += 1
+
+		if removed:
+			self._bump_mtime(pkg.mycpv)
+			f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
+			write_contents(new_contents, root, f)
+			f.close()
+			self._bump_mtime(pkg.mycpv)
+			pkg._clear_contents_cache()
+
+	class _owners_cache(object):
+		"""
+		This class maintains a hash table that serves to index package
+		contents by mapping the basename of a file to a list of possible
+		packages that own it. This is used to optimize owner lookups
+		by narrowing the search down to a smaller number of packages.
+		"""
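+		# Sketch of the cache layout this class maintains (hypothetical
+		# data): a 16-bit basename hash maps to a dict keyed by
+		# (cpv, COUNTER, mtime) package hashes, e.g.
+		#   base_names[_hash_str("libfoo.so.1")] = \
+		#       {("app-misc/foo-1.0", 42, 1302000000): None}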
+		try:
+			from hashlib import md5 as _new_hash
+		except ImportError:
+			from md5 import new as _new_hash
+
+		_hash_bits = 16
+		_hex_chars = int(_hash_bits / 4)
+
+		def __init__(self, vardb):
+			self._vardb = vardb
+
+		def add(self, cpv):
+			eroot_len = len(self._vardb._eroot)
+			contents = self._vardb._dblink(cpv).getcontents()
+			pkg_hash = self._hash_pkg(cpv)
+			if not contents:
+				# Empty path is a code used to represent empty contents.
+				self._add_path("", pkg_hash)
+
+			for x in contents:
+				self._add_path(x[eroot_len:], pkg_hash)
+
+			self._vardb._aux_cache["modified"].add(cpv)
+
+		def _add_path(self, path, pkg_hash):
+			"""
+			Empty path is a code that represents empty contents.
+			"""
+			if path:
+				name = os.path.basename(path.rstrip(os.path.sep))
+				if not name:
+					return
+			else:
+				name = path
+			name_hash = self._hash_str(name)
+			base_names = self._vardb._aux_cache["owners"]["base_names"]
+			pkgs = base_names.get(name_hash)
+			if pkgs is None:
+				pkgs = {}
+				base_names[name_hash] = pkgs
+			pkgs[pkg_hash] = None
+
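+		# The following helper hashes a basename to a small bucket key,
+		# e.g. (hypothetical) _hash_str("libfoo.so.1") yields an int in
+		# [0, 2**16), i.e. the last _hex_chars hex digits of the md5
+		# digest interpreted as an integer.
+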
+		def _hash_str(self, s):
+			h = self._new_hash()
+			# Always use a constant utf_8 encoding here, since
+			# the "default" encoding can change.
+			h.update(_unicode_encode(s,
+				encoding=_encodings['repo.content'],
+				errors='backslashreplace'))
+			h = h.hexdigest()
+			h = h[-self._hex_chars:]
+			h = int(h, 16)
+			return h
+
+		def _hash_pkg(self, cpv):
+			counter, mtime = self._vardb.aux_get(
+				cpv, ["COUNTER", "_mtime_"])
+			try:
+				counter = int(counter)
+			except ValueError:
+				counter = 0
+			return (cpv, counter, mtime)
+
+	class _owners_db(object):
+
+		def __init__(self, vardb):
+			self._vardb = vardb
+
+		def populate(self):
+			self._populate()
+
+		def _populate(self):
+			owners_cache = vardbapi._owners_cache(self._vardb)
+			cached_hashes = set()
+			base_names = self._vardb._aux_cache["owners"]["base_names"]
+
+			# Take inventory of all cached package hashes.
+			for name, hash_values in list(base_names.items()):
+				if not isinstance(hash_values, dict):
+					del base_names[name]
+					continue
+				cached_hashes.update(hash_values)
+
+			# Create sets of valid package hashes and uncached packages.
+			uncached_pkgs = set()
+			hash_pkg = owners_cache._hash_pkg
+			valid_pkg_hashes = set()
+			for cpv in self._vardb.cpv_all():
+				hash_value = hash_pkg(cpv)
+				valid_pkg_hashes.add(hash_value)
+				if hash_value not in cached_hashes:
+					uncached_pkgs.add(cpv)
+
+			# Cache any missing packages.
+			for cpv in uncached_pkgs:
+				owners_cache.add(cpv)
+
+			# Delete any stale cache.
+			stale_hashes = cached_hashes.difference(valid_pkg_hashes)
+			if stale_hashes:
+				for base_name_hash, bucket in list(base_names.items()):
+					for hash_value in stale_hashes.intersection(bucket):
+						del bucket[hash_value]
+					if not bucket:
+						del base_names[base_name_hash]
+
+			return owners_cache
+
+		def get_owners(self, path_iter):
+			"""
+			@return: the owners as a dblink -> set(files) mapping.
+			"""
+			owners = {}
+			for owner, f in self.iter_owners(path_iter):
+				owned_files = owners.get(owner)
+				if owned_files is None:
+					owned_files = set()
+					owners[owner] = owned_files
+				owned_files.add(f)
+			return owners
+
+		def getFileOwnerMap(self, path_iter):
+			owners = self.get_owners(path_iter)
+			file_owners = {}
+			for pkg_dblink, files in owners.items():
+				for f in files:
+					owner_set = file_owners.get(f)
+					if owner_set is None:
+						owner_set = set()
+						file_owners[f] = owner_set
+					owner_set.add(pkg_dblink)
+			return file_owners
+
+		def iter_owners(self, path_iter):
+			"""
+			Iterate over tuples of (dblink, path). In order to avoid
+			consuming too many resources for too long, resources
+			are only allocated for the duration of a given iter_owners()
+			call. Therefore, to maximize reuse of resources when searching
+			for multiple files, it's best to search for them all in a single
+			call.
+			"""
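+			# Hypothetical usage, batching all lookups into one call
+			# (assuming the vardbapi instance exposes this db as
+			# vardb._owners); paths without a leading os.sep are
+			# treated as basenames:
+			#   for dblnk, relpath in vardb._owners.iter_owners(
+			#       ["/usr/bin/foo", "libbar.so.1"]):
+			#       print("%s owns %s" % (dblnk.mycpv, relpath))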
+
+			if not isinstance(path_iter, list):
+				path_iter = list(path_iter)
+			owners_cache = self._populate()
+			vardb = self._vardb
+			root = vardb._eroot
+			hash_pkg = owners_cache._hash_pkg
+			hash_str = owners_cache._hash_str
+			base_names = self._vardb._aux_cache["owners"]["base_names"]
+
+			dblink_cache = {}
+
+			def dblink(cpv):
+				x = dblink_cache.get(cpv)
+				if x is None:
+					if len(dblink_cache) > 20:
+						# Ensure that we don't run out of memory.
+						raise StopIteration()
+					x = self._vardb._dblink(cpv)
+					dblink_cache[cpv] = x
+				return x
+
+			while path_iter:
+
+				path = path_iter.pop()
+				is_basename = os.sep != path[:1]
+				if is_basename:
+					name = path
+				else:
+					name = os.path.basename(path.rstrip(os.path.sep))
+
+				if not name:
+					continue
+
+				name_hash = hash_str(name)
+				pkgs = base_names.get(name_hash)
+				owners = []
+				if pkgs is not None:
+					try:
+						for hash_value in pkgs:
+							if not isinstance(hash_value, tuple) or \
+								len(hash_value) != 3:
+								continue
+							cpv, counter, mtime = hash_value
+							if not isinstance(cpv, basestring):
+								continue
+							try:
+								current_hash = hash_pkg(cpv)
+							except KeyError:
+								continue
+
+							if current_hash != hash_value:
+								continue
+
+							if is_basename:
+								for p in dblink(cpv).getcontents():
+									if os.path.basename(p) == name:
+										owners.append((cpv, p[len(root):]))
+							else:
+								if dblink(cpv).isowner(path):
+									owners.append((cpv, path))
+
+					except StopIteration:
+						path_iter.append(path)
+						del owners[:]
+						dblink_cache.clear()
+						gc.collect()
+						for x in self._iter_owners_low_mem(path_iter):
+							yield x
+						return
+					else:
+						for cpv, p in owners:
+							yield (dblink(cpv), p)
+
+		def _iter_owners_low_mem(self, path_list):
+			"""
+			This implementation will make a short-lived dblink instance (and
+			parse CONTENTS) for every single installed package. This is
+			slower but uses less memory than the method which uses the
+			basename cache.
+			"""
+
+			if not path_list:
+				return
+
+			path_info_list = []
+			for path in path_list:
+				is_basename = os.sep != path[:1]
+				if is_basename:
+					name = path
+				else:
+					name = os.path.basename(path.rstrip(os.path.sep))
+				path_info_list.append((path, name, is_basename))
+
+			root = self._vardb._eroot
+			for cpv in self._vardb.cpv_all():
+				dblnk =  self._vardb._dblink(cpv)
+
+				for path, name, is_basename in path_info_list:
+					if is_basename:
+						for p in dblnk.getcontents():
+							if os.path.basename(p) == name:
+								yield dblnk, p[len(root):]
+					else:
+						if dblnk.isowner(path):
+							yield dblnk, path
+
+class vartree(object):
+	"this tree will scan a var/db/pkg database located at root (passed to init)"
+	def __init__(self, root=None, virtual=None, categories=None,
+		settings=None):
+
+		if settings is None:
+			settings = portage.settings
+		self.root = settings['ROOT']
+
+		if root is not None and root != self.root:
+			warnings.warn("The 'root' parameter of the " + \
+				"portage.dbapi.vartree.vartree" + \
+				" constructor is now unused. Use " + \
+				"settings['ROOT'] instead.",
+				DeprecationWarning, stacklevel=2)
+
+		self.settings = settings
+		self.dbapi = vardbapi(settings=settings, vartree=self)
+		self.populated = 1
+
+	def getpath(self, mykey, filename=None):
+		return self.dbapi.getpath(mykey, filename=filename)
+
+	def zap(self, mycpv):
+		return
+
+	def inject(self, mycpv):
+		return
+
+	def get_provide(self, mycpv):
+		myprovides = []
+		mylines = None
+		try:
+			mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
+			if mylines:
+				myuse = myuse.split()
+				mylines = use_reduce(mylines, uselist=myuse, flat=True)
+				for myprovide in mylines:
+					mys = catpkgsplit(myprovide)
+					if not mys:
+						mys = myprovide.split("/")
+					myprovides += [mys[0] + "/" + mys[1]]
+			return myprovides
+		except SystemExit as e:
+			raise
+		except Exception as e:
+			mydir = self.dbapi.getpath(mycpv)
+			writemsg(_("\nParse Error reading PROVIDE and USE in '%s'\n") % mydir,
+				noiselevel=-1)
+			if mylines:
+				writemsg(_("Possibly Invalid: '%s'\n") % str(mylines),
+					noiselevel=-1)
+			writemsg(_("Exception: %s\n\n") % str(e), noiselevel=-1)
+			return []
+
+	def get_all_provides(self):
+		myprovides = {}
+		for node in self.getallcpv():
+			for mykey in self.get_provide(node):
+				if mykey in myprovides:
+					myprovides[mykey] += [node]
+				else:
+					myprovides[mykey] = [node]
+		return myprovides
+
+	def dep_bestmatch(self, mydep, use_cache=1):
+		"compatibility method -- all matches, not just visible ones"
+		#mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
+		mymatch = best(self.dbapi.match(
+			dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
+			use_cache=use_cache))
+		if mymatch is None:
+			return ""
+		else:
+			return mymatch
+
+	def dep_match(self, mydep, use_cache=1):
+		"compatibility method -- we want to see all matches, not just visible ones"
+		#mymatch = match(mydep,self.dbapi)
+		mymatch = self.dbapi.match(mydep, use_cache=use_cache)
+		if mymatch is None:
+			return []
+		else:
+			return mymatch
+
+	def exists_specific(self, cpv):
+		return self.dbapi.cpv_exists(cpv)
+
+	def getallcpv(self):
+		"""temporary function, probably to be renamed --- Gets a list of all
+		category/package-versions installed on the system."""
+		return self.dbapi.cpv_all()
+
+	def getallnodes(self):
+		"""new behavior: these are all *unmasked* nodes. There may or may not
+		be masked packages available for the nodes in this list."""
+		return self.dbapi.cp_all()
+
+	def getebuildpath(self, fullpackage):
+		cat, package = catsplit(fullpackage)
+		return self.getpath(fullpackage, filename=package+".ebuild")
+
+	def getslot(self, mycatpkg):
+		"Get a slot for a catpkg; assume it exists."
+		try:
+			return self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
+		except KeyError:
+			return ""
+
+	def populate(self):
+		self.populated=1
+
+class dblink(object):
+	"""
+	This class provides an interface to the installed package database.
+	At present this is implemented as a text backend in /var/db/pkg.
+	"""
+
+	import re
+	_normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')
+
+	_contents_re = re.compile(r'^(' + \
+		r'(?P<dir>(dev|dir|fif) (.+))|' + \
+		r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
+		r'(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>(' + \
+		r'\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))' + \
+		r')$'
+	)
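+	# Hypothetical CONTENTS lines matched by _contents_re above:
+	#   dir /usr/share/doc/foo-1.0
+	#   obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1302000000
+	#   sym /usr/lib/libfoo.so -> libfoo.so.1 1302000000
+	# "dev" and "fif" entries share the <dir> pattern, and the <oldsym>
+	# branch accepts a legacy tuple-style symlink format (bug #351814).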
+
+	def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
+		vartree=None, blockers=None, scheduler=None, pipe=None):
+		"""
+		Creates a DBlink object for a given CPV.
+		The given CPV may not be present in the database already.
+		
+		@param cat: Category
+		@type cat: String
+		@param pkg: Package (PV)
+		@type pkg: String
+		@param myroot: ignored, settings['ROOT'] is used instead
+		@type myroot: String (Path)
+		@param settings: Typically portage.settings
+		@type settings: portage.config
+		@param treetype: one of ['porttree','bintree','vartree']
+		@type treetype: String
+		@param vartree: an instance of vartree corresponding to myroot.
+		@type vartree: vartree
+		"""
+
+		if settings is None:
+			raise TypeError("settings argument is required")
+
+		mysettings = settings
+		myroot = settings['ROOT']
+		self.cat = cat
+		self.pkg = pkg
+		self.mycpv = self.cat + "/" + self.pkg
+		self.mysplit = list(catpkgsplit(self.mycpv)[1:])
+		self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
+		self.treetype = treetype
+		if vartree is None:
+			vartree = portage.db[myroot]["vartree"]
+		self.vartree = vartree
+		self._blockers = blockers
+		self._scheduler = scheduler
+
+		# WARNING: EROOT support is experimental and may be incomplete
+		# for cases in which EPREFIX is non-empty.
+		self._eroot = mysettings['EROOT']
+		self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
+		self.dbcatdir = self.dbroot+"/"+cat
+		self.dbpkgdir = self.dbcatdir+"/"+pkg
+		self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
+		self.dbdir = self.dbpkgdir
+		self.settings = mysettings
+		self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
+
+		self.myroot=myroot
+		self._installed_instance = None
+		self.contentscache = None
+		self._contents_inodes = None
+		self._contents_basenames = None
+		self._linkmap_broken = False
+		self._md5_merge_map = {}
+		self._hash_key = (self.myroot, self.mycpv)
+		self._protect_obj = None
+		self._pipe = pipe
+
+	def __hash__(self):
+		return hash(self._hash_key)
+
+	def __eq__(self, other):
+		return isinstance(other, dblink) and \
+			self._hash_key == other._hash_key
+
+	def _get_protect_obj(self):
+
+		if self._protect_obj is None:
+			self._protect_obj = ConfigProtect(self._eroot,
+			portage.util.shlex_split(
+				self.settings.get("CONFIG_PROTECT", "")),
+			portage.util.shlex_split(
+				self.settings.get("CONFIG_PROTECT_MASK", "")))
+
+		return self._protect_obj
+
+	def isprotected(self, obj):
+		return self._get_protect_obj().isprotected(obj)
+
+	def updateprotect(self):
+		self._get_protect_obj().updateprotect()
+
+	def lockdb(self):
+		self.vartree.dbapi.lock()
+
+	def unlockdb(self):
+		self.vartree.dbapi.unlock()
+
+	def getpath(self):
+		"return path to location of db information (for >>> informational display)"
+		return self.dbdir
+
+	def exists(self):
+		"does the db entry exist?  boolean."
+		return os.path.exists(self.dbdir)
+
+	def delete(self):
+		"""
+		Remove this entry from the database
+		"""
+		if not os.path.exists(self.dbdir):
+			return
+
+		# Check validity of self.dbdir before attempting to remove it.
+		if not self.dbdir.startswith(self.dbroot):
+			writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
+				self.dbdir, noiselevel=-1)
+			return
+
+		shutil.rmtree(self.dbdir)
+		# If empty, remove parent category directory.
+		try:
+			os.rmdir(os.path.dirname(self.dbdir))
+		except OSError:
+			pass
+		self.vartree.dbapi._remove(self)
+
+	def clearcontents(self):
+		"""
+		For a given db entry (self), erase the CONTENTS values.
+		"""
+		self.lockdb()
+		try:
+			if os.path.exists(self.dbdir+"/CONTENTS"):
+				os.unlink(self.dbdir+"/CONTENTS")
+		finally:
+			self.unlockdb()
+
+	def _clear_contents_cache(self):
+		self.contentscache = None
+		self._contents_inodes = None
+		self._contents_basenames = None
+
+	def getcontents(self):
+		"""
+		Get the installed files of a given package (aka what that package installed)
+		"""
+		contents_file = os.path.join(self.dbdir, "CONTENTS")
+		if self.contentscache is not None:
+			return self.contentscache
+		pkgfiles = {}
+		try:
+			myc = io.open(_unicode_encode(contents_file,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['repo.content'],
+				errors='replace')
+		except EnvironmentError as e:
+			if e.errno != errno.ENOENT:
+				raise
+			del e
+			self.contentscache = pkgfiles
+			return pkgfiles
+		mylines = myc.readlines()
+		myc.close()
+		null_byte = "\0"
+		normalize_needed = self._normalize_needed
+		contents_re = self._contents_re
+		obj_index = contents_re.groupindex['obj']
+		dir_index = contents_re.groupindex['dir']
+		sym_index = contents_re.groupindex['sym']
+		# The old symlink format may exist on systems that have packages
+		# which were installed many years ago (see bug #351814).
+		oldsym_index = contents_re.groupindex['oldsym']
+		# CONTENTS files already contain EPREFIX
+		myroot = self.settings['ROOT']
+		if myroot == os.path.sep:
+			myroot = None
+		# used to generate parent dir entries
+		dir_entry = (_unicode_decode("dir"),)
+		eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
+		pos = 0
+		errors = []
+		for pos, line in enumerate(mylines):
+			if null_byte in line:
+				# Null bytes are a common indication of corruption.
+				errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
+				continue
+			line = line.rstrip("\n")
+			m = contents_re.match(line)
+			if m is None:
+				errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
+				continue
+
+			if m.group(obj_index) is not None:
+				base = obj_index
+				#format: type, mtime, md5sum
+				data = (m.group(base+1), m.group(base+4), m.group(base+3))
+			elif m.group(dir_index) is not None:
+				base = dir_index
+				#format: type
+				data = (m.group(base+1),)
+			elif m.group(sym_index) is not None:
+				base = sym_index
+				if m.group(oldsym_index) is None:
+					mtime = m.group(base+5)
+				else:
+					mtime = m.group(base+8)
+				#format: type, mtime, dest
+				data = (m.group(base+1), mtime, m.group(base+3))
+			else:
+				# This won't happen as long as the regular expression
+				# is written to only match valid entries.
+				raise AssertionError(_("required group not found " + \
+					"in CONTENTS entry: '%s'") % line)
+
+			path = m.group(base+2)
+			if normalize_needed.search(path) is not None:
+				path = normalize_path(path)
+				if not path.startswith(os.path.sep):
+					path = os.path.sep + path
+
+			if myroot is not None:
+				path = os.path.join(myroot, path.lstrip(os.path.sep))
+
+			# Implicitly add parent directories, since we can't necessarily
+			# assume that they are explicitly listed in CONTENTS, and it's
+			# useful for callers if they can rely on parent directory entries
+			# being generated here (crucial for things like dblink.isowner()).
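+			# For example (hypothetical): an entry for
+			# /usr/share/doc/foo/README also yields synthesized "dir"
+			# entries for /usr/share/doc/foo, /usr/share/doc, /usr/share
+			# and /usr, stopping once the EROOT prefix is reached.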
+			path_split = path.split(os.sep)
+			path_split.pop()
+			while len(path_split) > eroot_split_len:
+				parent = os.sep.join(path_split)
+				if parent in pkgfiles:
+					break
+				pkgfiles[parent] = dir_entry
+				path_split.pop()
+
+			pkgfiles[path] = data
+
+		if errors:
+			writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
+			for pos, e in errors:
+				writemsg(_("!!!   line %d: %s\n") % (pos, e), noiselevel=-1)
+		self.contentscache = pkgfiles
+		return pkgfiles
+
+	def _prune_plib_registry(self, unmerge=False,
+		needed=None, preserve_paths=None):
+		# remove preserved libraries that don't have any consumers left
+		if not (self._linkmap_broken or
+			self.vartree.dbapi._linkmap is None or
+			self.vartree.dbapi._plib_registry is None):
+			self.vartree.dbapi._fs_lock()
+			plib_registry = self.vartree.dbapi._plib_registry
+			plib_registry.lock()
+			try:
+				plib_registry.load()
+
+				unmerge_with_replacement = \
+					unmerge and preserve_paths is not None
+				if unmerge_with_replacement:
+					# If self.mycpv is about to be unmerged and we
+					# have a replacement package, we want to exclude
+					# the irrelevant NEEDED data that belongs to
+					# files which are being unmerged now.
+					exclude_pkgs = (self.mycpv,)
+				else:
+					exclude_pkgs = None
+
+				self._linkmap_rebuild(exclude_pkgs=exclude_pkgs,
+					include_file=needed, preserve_paths=preserve_paths)
+
+				if unmerge:
+					unmerge_preserve = None
+					if not unmerge_with_replacement:
+						unmerge_preserve = \
+							self._find_libs_to_preserve(unmerge=True)
+					counter = self.vartree.dbapi.cpv_counter(self.mycpv)
+					plib_registry.unregister(self.mycpv,
+						self.settings["SLOT"], counter)
+					if unmerge_preserve:
+						for path in sorted(unmerge_preserve):
+							contents_key = self._match_contents(path)
+							if not contents_key:
+								continue
+							obj_type = self.getcontents()[contents_key][0]
+							self._display_merge(_(">>> needed   %s %s\n") % \
+								(obj_type, contents_key), noiselevel=-1)
+						plib_registry.register(self.mycpv,
+							self.settings["SLOT"], counter, unmerge_preserve)
+						# Remove the preserved files from our contents
+						# so that they won't be unmerged.
+						self.vartree.dbapi.removeFromContents(self,
+							unmerge_preserve)
+
+				unmerge_no_replacement = \
+					unmerge and not unmerge_with_replacement
+				cpv_lib_map = self._find_unused_preserved_libs(
+					unmerge_no_replacement)
+				if cpv_lib_map:
+					self._remove_preserved_libs(cpv_lib_map)
+					self.vartree.dbapi.lock()
+					try:
+						for cpv, removed in cpv_lib_map.items():
+							if not self.vartree.dbapi.cpv_exists(cpv):
+								continue
+							self.vartree.dbapi.removeFromContents(cpv, removed)
+					finally:
+						self.vartree.dbapi.unlock()
+
+				plib_registry.store()
+			finally:
+				plib_registry.unlock()
+				self.vartree.dbapi._fs_unlock()
+
+	def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True,
+		ldpath_mtimes=None, others_in_slot=None, needed=None,
+		preserve_paths=None):
+		"""
+		Calls prerm.
+		Unmerges a given package (CPV).
+		Calls postrm.
+		Calls cleanrm.
+		Calls env_update.
+		
+		@param pkgfiles: files to unmerge (generally self.getcontents() )
+		@type pkgfiles: Dictionary
+		@param trimworld: Unused
+		@type trimworld: Boolean
+		@param cleanup: cleanup to pass to doebuild (see doebuild)
+		@type cleanup: Boolean
+		@param ldpath_mtimes: mtimes to pass to env_update (see env_update)
+		@type ldpath_mtimes: Dictionary
+		@param others_in_slot: all dblink instances in this slot, excluding self
+		@type others_in_slot: list
+		@param needed: Filename containing libraries needed after unmerge.
+		@type needed: String
+		@param preserve_paths: Libraries preserved by a package instance that
+			is currently being merged. They need to be explicitly passed to the
+			LinkageMap, since they are not registered in the
+			PreservedLibsRegistry yet.
+		@type preserve_paths: set
+		@rtype: Integer
+		@returns:
+		1. os.EX_OK if everything went well.
+		2. return code of the failed phase (for prerm, postrm, cleanrm)
+		"""
+
+		if trimworld is not None:
+			warnings.warn("The trimworld parameter of the " + \
+				"portage.dbapi.vartree.dblink.unmerge()" + \
+				" method is now unused.",
+				DeprecationWarning, stacklevel=2)
+
+		background = False
+		log_path = self.settings.get("PORTAGE_LOG_FILE")
+		if self._scheduler is None:
+			# We create a scheduler instance and use it to
+			# log unmerge output separately from merge output.
+			self._scheduler = PollScheduler().sched_iface
+		if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
+			if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
+				self.settings["PORTAGE_BACKGROUND"] = "1"
+				self.settings.backup_changes("PORTAGE_BACKGROUND")
+				background = True
+			elif self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "0":
+				self.settings["PORTAGE_BACKGROUND"] = "0"
+				self.settings.backup_changes("PORTAGE_BACKGROUND")
+		elif self.settings.get("PORTAGE_BACKGROUND") == "1":
+			background = True
+
+		self.vartree.dbapi._bump_mtime(self.mycpv)
+		showMessage = self._display_merge
+		if self.vartree.dbapi._categories is not None:
+			self.vartree.dbapi._categories = None
+		# When others_in_slot is supplied, the security check has already been
+		# done for this slot, so it shouldn't be repeated until the next
+		# replacement or unmerge operation.
+		if others_in_slot is None:
+			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
+			slot_matches = self.vartree.dbapi.match(
+				"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
+			others_in_slot = []
+			for cur_cpv in slot_matches:
+				if cur_cpv == self.mycpv:
+					continue
+				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+					settings=self.settings, vartree=self.vartree,
+					treetype="vartree", pipe=self._pipe))
+
+			retval = self._security_check([self] + others_in_slot)
+			if retval:
+				return retval
+
+		contents = self.getcontents()
+		# Now, don't assume that the name of the ebuild is the same as the
+		# name of the dir; the package may have been moved.
+		myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
+		failures = 0
+		ebuild_phase = "prerm"
+		mystuff = os.listdir(self.dbdir)
+		for x in mystuff:
+			if x.endswith(".ebuild"):
+				if x[:-7] != self.pkg:
+					# Clean up after vardbapi.move_ent() breakage in
+					# portage versions before 2.1.2
+					os.rename(os.path.join(self.dbdir, x), myebuildpath)
+					write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
+				break
+
+		if self.mycpv != self.settings.mycpv or \
+			"EAPI" not in self.settings.configdict["pkg"]:
+			# We avoid a redundant setcpv call here when
+			# the caller has already taken care of it.
+			self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
+
+		eapi_unsupported = False
+		try:
+			doebuild_environment(myebuildpath, "prerm",
+				settings=self.settings, db=self.vartree.dbapi)
+		except UnsupportedAPIException as e:
+			eapi_unsupported = e
+
+		self._prune_plib_registry(unmerge=True, needed=needed,
+			preserve_paths=preserve_paths)
+
+		builddir_lock = None
+		scheduler = self._scheduler
+		retval = os.EX_OK
+		try:
+			# Only create builddir_lock if the caller
+			# has not already acquired the lock.
+			if "PORTAGE_BUILDIR_LOCKED" not in self.settings:
+				builddir_lock = EbuildBuildDir(
+					scheduler=scheduler,
+					settings=self.settings)
+				builddir_lock.lock()
+				prepare_build_dirs(settings=self.settings, cleanup=True)
+				log_path = self.settings.get("PORTAGE_LOG_FILE")
+
+			# Log the error after PORTAGE_LOG_FILE is initialized
+			# by prepare_build_dirs above.
+			if eapi_unsupported:
+				# Sometimes this happens due to corruption of the EAPI file.
+				failures += 1
+				showMessage(_("!!! FAILED prerm: %s\n") % \
+					os.path.join(self.dbdir, "EAPI"),
+					level=logging.ERROR, noiselevel=-1)
+				showMessage(_unicode_decode("%s\n") % (eapi_unsupported,),
+					level=logging.ERROR, noiselevel=-1)
+			elif os.path.isfile(myebuildpath):
+				phase = EbuildPhase(background=background,
+					phase=ebuild_phase, scheduler=scheduler,
+					settings=self.settings)
+				phase.start()
+				retval = phase.wait()
+
+				# XXX: Decide how to handle failures here.
+				if retval != os.EX_OK:
+					failures += 1
+					showMessage(_("!!! FAILED prerm: %s\n") % retval,
+						level=logging.ERROR, noiselevel=-1)
+
+			self.vartree.dbapi._fs_lock()
+			try:
+				self._unmerge_pkgfiles(pkgfiles, others_in_slot)
+			finally:
+				self.vartree.dbapi._fs_unlock()
+			self._clear_contents_cache()
+
+			if not eapi_unsupported and os.path.isfile(myebuildpath):
+				ebuild_phase = "postrm"
+				phase = EbuildPhase(background=background,
+					phase=ebuild_phase, scheduler=scheduler,
+					settings=self.settings)
+				phase.start()
+				retval = phase.wait()
+
+				# XXX: Decide how to handle failures here.
+				if retval != os.EX_OK:
+					failures += 1
+					showMessage(_("!!! FAILED postrm: %s\n") % retval,
+						level=logging.ERROR, noiselevel=-1)
+
+		finally:
+			self.vartree.dbapi._bump_mtime(self.mycpv)
+			try:
+				if not eapi_unsupported and os.path.isfile(myebuildpath):
+					if retval != os.EX_OK:
+						msg_lines = []
+						msg = _("The '%(ebuild_phase)s' "
+						"phase of the '%(cpv)s' package "
+						"has failed with exit value %(retval)s.") % \
+						{"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
+						"retval":retval}
+						from textwrap import wrap
+						msg_lines.extend(wrap(msg, 72))
+						msg_lines.append("")
+
+						ebuild_name = os.path.basename(myebuildpath)
+						ebuild_dir = os.path.dirname(myebuildpath)
+						msg = _("The problem occurred while executing "
+						"the ebuild file named '%(ebuild_name)s' "
+						"located in the '%(ebuild_dir)s' directory. "
+						"If necessary, manually remove "
+						"the environment.bz2 file and/or the "
+						"ebuild file located in that directory.") % \
+						{"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
+						msg_lines.extend(wrap(msg, 72))
+						msg_lines.append("")
+
+						msg = _("Removal "
+						"of the environment.bz2 file is "
+						"preferred since it may allow the "
+						"removal phases to execute successfully. "
+						"The ebuild will be "
+						"sourced and the eclasses "
+						"from the current portage tree will be used "
+						"when necessary. Removal of "
+						"the ebuild file will cause the "
+						"pkg_prerm() and pkg_postrm() removal "
+						"phases to be skipped entirely.")
+						msg_lines.extend(wrap(msg, 72))
+
+						self._eerror(ebuild_phase, msg_lines)
+
+				self._elog_process(phasefilter=("prerm", "postrm"))
+
+				if retval == os.EX_OK:
+					try:
+						doebuild_environment(myebuildpath, "cleanrm",
+							settings=self.settings, db=self.vartree.dbapi)
+					except UnsupportedAPIException:
+						pass
+					phase = EbuildPhase(background=background,
+						phase="cleanrm", scheduler=scheduler,
+						settings=self.settings)
+					phase.start()
+					retval = phase.wait()
+			finally:
+				if builddir_lock is not None:
+					builddir_lock.unlock()
+
+		if log_path is not None:
+
+			if not failures and 'unmerge-logs' not in self.settings.features:
+				try:
+					os.unlink(log_path)
+				except OSError:
+					pass
+
+			try:
+				st = os.stat(log_path)
+			except OSError:
+				pass
+			else:
+				if st.st_size == 0:
+					try:
+						os.unlink(log_path)
+					except OSError:
+						pass
+
+		if log_path is not None and os.path.exists(log_path):
+			# Restore this since it gets lost somewhere above and it
+			# needs to be set for _display_merge() to be able to log.
+			# Note that the log isn't necessarily supposed to exist:
+			# if PORT_LOGDIR is unset then it's a temp file, so it
+			# gets cleaned up above.
+			self.settings["PORTAGE_LOG_FILE"] = log_path
+		else:
+			self.settings.pop("PORTAGE_LOG_FILE", None)
+
+		# Lock the config memory file to prevent symlink creation
+		# in merge_contents from overlapping with env-update.
+		self.vartree.dbapi._fs_lock()
+		try:
+			env_update(target_root=self.settings['ROOT'],
+				prev_mtimes=ldpath_mtimes,
+				contents=contents, env=self.settings.environ(),
+				writemsg_level=self._display_merge)
+		finally:
+			self.vartree.dbapi._fs_unlock()
+
+		return os.EX_OK
+
+	def _display_merge(self, msg, level=0, noiselevel=0):
+		if not self._verbose and noiselevel >= 0 and level < logging.WARN:
+			return
+		if self._scheduler is None:
+			writemsg_level(msg, level=level, noiselevel=noiselevel)
+		else:
+			log_path = None
+			if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+				log_path = self.settings.get("PORTAGE_LOG_FILE")
+			background = self.settings.get("PORTAGE_BACKGROUND") == "1"
+
+			if background and log_path is None:
+				if level >= logging.WARN:
+					writemsg_level(msg, level=level, noiselevel=noiselevel)
+			else:
+				self._scheduler.output(msg,
+					log_path=log_path, background=background,
+					level=level, noiselevel=noiselevel)
+
+	def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
+		"""
+		
+		Unmerges the contents of a package from the liveFS.
+		Removes the VDB entry for self.
+		
+		@param pkgfiles: typically self.getcontents()
+		@type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
+		@param others_in_slot: all dblink instances in this slot, excluding self
+		@type others_in_slot: list
+		@rtype: None
+		"""
+
+		os = _os_merge
+		perf_md5 = perform_md5
+		showMessage = self._display_merge
+
+		if not pkgfiles:
+			showMessage(_("No package files given... Grabbing a set.\n"))
+			pkgfiles = self.getcontents()
+
+		if others_in_slot is None:
+			others_in_slot = []
+			slot = self.vartree.dbapi.aux_get(self.mycpv, ["SLOT"])[0]
+			slot_matches = self.vartree.dbapi.match(
+				"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
+			for cur_cpv in slot_matches:
+				if cur_cpv == self.mycpv:
+					continue
+				others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+					settings=self.settings,
+					vartree=self.vartree, treetype="vartree", pipe=self._pipe))
+
+		dest_root = self._eroot
+		dest_root_len = len(dest_root) - 1
+
+		cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
+		stale_confmem = []
+		protected_symlinks = {}
+
+		unmerge_orphans = "unmerge-orphans" in self.settings.features
+		calc_prelink = "prelink-checksums" in self.settings.features
+
+		if pkgfiles:
+			self.updateprotect()
+			mykeys = list(pkgfiles)
+			mykeys.sort()
+			mykeys.reverse()
+
+			#process symlinks second-to-last, directories last.
+			mydirs = set()
+			ignored_unlink_errnos = (
+				errno.EBUSY, errno.ENOENT,
+				errno.ENOTDIR, errno.EISDIR)
+			ignored_rmdir_errnos = (
+				errno.EEXIST, errno.ENOTEMPTY,
+				errno.EBUSY, errno.ENOENT,
+				errno.ENOTDIR, errno.EISDIR,
+				errno.EPERM)
+			modprotect = os.path.join(self._eroot, "lib/modules/")
+
+			def unlink(file_name, lstatobj):
+				if bsd_chflags:
+					if lstatobj.st_flags != 0:
+						bsd_chflags.lchflags(file_name, 0)
+					parent_name = os.path.dirname(file_name)
+					# Use normal stat/chflags for the parent since we want to
+					# follow any symlinks to the real parent directory.
+					pflags = os.stat(parent_name).st_flags
+					if pflags != 0:
+						bsd_chflags.chflags(parent_name, 0)
+				try:
+					if not stat.S_ISLNK(lstatobj.st_mode):
+						# Remove permissions to ensure that any hardlinks to
+						# suid/sgid files are rendered harmless.
+						os.chmod(file_name, 0)
+					os.unlink(file_name)
+				except OSError as ose:
+					# If the chmod or unlink fails, you are in trouble.
+					# With Prefix this can be because the file is owned
+					# by someone else (a screwup by root?), on a normal
+					# system maybe filesystem corruption.  In any case,
+					# if we backtrace and die here, we leave the system
+					# in a totally undefined state, hence we just bleed
+					# like hell and continue to hopefully finish all our
+					# administrative and pkg_postinst stuff.
+					self._eerror("postrm", 
+						["Could not chmod or unlink '%s': %s" % \
+						(file_name, ose)])
+				finally:
+					if bsd_chflags and pflags != 0:
+						# Restore the parent flags we saved before unlinking
+						bsd_chflags.chflags(parent_name, pflags)
+
+			def show_unmerge(zing, desc, file_type, file_name):
+				showMessage("%s %s %s %s\n" % \
+					(zing, desc.ljust(8), file_type, file_name))
+
+			unmerge_desc = {}
+			unmerge_desc["cfgpro"] = _("cfgpro")
+			unmerge_desc["replaced"] = _("replaced")
+			unmerge_desc["!dir"] = _("!dir")
+			unmerge_desc["!empty"] = _("!empty")
+			unmerge_desc["!fif"] = _("!fif")
+			unmerge_desc["!found"] = _("!found")
+			unmerge_desc["!md5"] = _("!md5")
+			unmerge_desc["!mtime"] = _("!mtime")
+			unmerge_desc["!obj"] = _("!obj")
+			unmerge_desc["!sym"] = _("!sym")
+
+			real_root = self.settings['ROOT']
+			real_root_len = len(real_root) - 1
+			eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
+
+			# These files are generated by emerge, so we need to remove
+			# them when they are the only thing left in a directory.
+			infodir_cleanup = frozenset(["dir", "dir.old"])
+			infodirs = frozenset(infodir for infodir in chain(
+				self.settings.get("INFOPATH", "").split(":"),
+				self.settings.get("INFODIR", "").split(":")) if infodir)
+			infodirs_inodes = set()
+			for infodir in infodirs:
+				infodir = os.path.join(real_root, infodir.lstrip(os.sep))
+				try:
+					statobj = os.stat(infodir)
+				except OSError:
+					pass
+				else:
+					infodirs_inodes.add((statobj.st_dev, statobj.st_ino))
+
+			for i, objkey in enumerate(mykeys):
+
+				obj = normalize_path(objkey)
+				if os is _os_merge:
+					try:
+						_unicode_encode(obj,
+							encoding=_encodings['merge'], errors='strict')
+					except UnicodeEncodeError:
+						# The package appears to have been merged with a 
+						# different value of sys.getfilesystemencoding(),
+						# so fall back to utf_8 if appropriate.
+						try:
+							_unicode_encode(obj,
+								encoding=_encodings['fs'], errors='strict')
+						except UnicodeEncodeError:
+							pass
+						else:
+							os = portage.os
+							perf_md5 = portage.checksum.perform_md5
+
+				file_data = pkgfiles[objkey]
+				file_type = file_data[0]
+				statobj = None
+				try:
+					statobj = os.stat(obj)
+				except OSError:
+					pass
+				lstatobj = None
+				try:
+					lstatobj = os.lstat(obj)
+				except (OSError, AttributeError):
+					pass
+				islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
+				if lstatobj is None:
+					show_unmerge("---", unmerge_desc["!found"], file_type, obj)
+					continue
+				# don't use EROOT, CONTENTS entries already contain EPREFIX
+				if obj.startswith(real_root):
+					relative_path = obj[real_root_len:]
+					is_owned = False
+					for dblnk in others_in_slot:
+						if dblnk.isowner(relative_path):
+							is_owned = True
+							break
+
+					if file_type == "sym" and is_owned and \
+						(islink and statobj and stat.S_ISDIR(statobj.st_mode)):
+						# A new instance of this package claims the file, so
+						# don't unmerge it. If the file is symlink to a
+						# directory and the unmerging package installed it as
+						# a symlink, but the new owner has it listed as a
+						# directory, then we'll produce a warning since the
+						# symlink is a sort of orphan in this case (see
+						# bug #326685).
+						symlink_orphan = False
+						for dblnk in others_in_slot:
+							parent_contents_key = \
+								dblnk._match_contents(relative_path)
+							if not parent_contents_key:
+								continue
+							if not parent_contents_key.startswith(
+								real_root):
+								continue
+							if dblnk.getcontents()[
+								parent_contents_key][0] == "dir":
+								symlink_orphan = True
+								break
+
+						if symlink_orphan:
+							protected_symlinks.setdefault(
+								(statobj.st_dev, statobj.st_ino),
+								[]).append(relative_path)
+
+					if is_owned:
+						show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
+						continue
+					elif relative_path in cfgfiledict:
+						stale_confmem.append(relative_path)
+				# next line includes a tweak to protect modules from being unmerged,
+				# but we don't protect modules from being overwritten if they are
+				# upgraded. We effectively only want one half of the config protection
+				# functionality for /lib/modules. For portage-ng both capabilities
+				# should be able to be independently specified.
+				# TODO: For rebuilds, re-parent previous modules to the new
+				# installed instance (so they are not orphans). For normal
+				# uninstall (not rebuild/reinstall), remove the modules along
+				# with all other files (leave no orphans).
+				if obj.startswith(modprotect):
+					show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
+					continue
+
+				# Don't unlink symlinks to directories here since that can
+				# remove /lib and /usr/lib symlinks.
+				if unmerge_orphans and \
+					lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
+					not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
+					not self.isprotected(obj):
+					try:
+						unlink(obj, lstatobj)
+					except EnvironmentError as e:
+						if e.errno not in ignored_unlink_errnos:
+							raise
+						del e
+					show_unmerge("<<<", "", file_type, obj)
+					continue
+
+				lmtime = str(lstatobj[stat.ST_MTIME])
+				if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
+					show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
+					continue
+
+				if pkgfiles[objkey][0] == "dir":
+					if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
+						show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
+						continue
+					mydirs.add((obj, (lstatobj.st_dev, lstatobj.st_ino)))
+				elif pkgfiles[objkey][0] == "sym":
+					if not islink:
+						show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
+						continue
+
+					# If this symlink points to a directory then we don't want
+					# to unmerge it if there are any other packages that
+					# installed files into the directory via this symlink
+					# (see bug #326685).
+					# TODO: Resolving a symlink to a directory will require
+					# simulation if $ROOT != / and the link is not relative.
+					if islink and statobj and stat.S_ISDIR(statobj.st_mode) \
+						and obj.startswith(real_root):
+
+						relative_path = obj[real_root_len:]
+						try:
+							target_dir_contents = os.listdir(obj)
+						except OSError:
+							pass
+						else:
+							if target_dir_contents:
+								# If all the children are regular files owned
+								# by this package, then the symlink should be
+								# safe to unmerge.
+								all_owned = True
+								for child in target_dir_contents:
+									child = os.path.join(relative_path, child)
+									if not self.isowner(child):
+										all_owned = False
+										break
+									try:
+										child_lstat = os.lstat(os.path.join(
+											real_root, child.lstrip(os.sep)))
+									except OSError:
+										continue
+
+									if not stat.S_ISREG(child_lstat.st_mode):
+										# Nested symlinks or directories make
+										# the issue very complex, so just
+										# preserve the symlink in order to be
+										# on the safe side.
+										all_owned = False
+										break
+
+								if not all_owned:
+									protected_symlinks.setdefault(
+										(statobj.st_dev, statobj.st_ino),
+										[]).append(relative_path)
+									show_unmerge("---", unmerge_desc["!empty"],
+										file_type, obj)
+									continue
+
+					# Go ahead and unlink symlinks to directories here when
+					# they're actually recorded as symlinks in the contents.
+					# Normally, symlinks such as /lib -> lib64 are not recorded
+					# as symlinks in the contents of a package.  If a package
+					# installs something into ${D}/lib/, it is recorded in the
+					# contents as a directory even if it happens to correspond
+					# to a symlink when it's merged to the live filesystem.
+					try:
+						unlink(obj, lstatobj)
+						show_unmerge("<<<", "", file_type, obj)
+					except (OSError, IOError) as e:
+						if e.errno not in ignored_unlink_errnos:
+							raise
+						del e
+						show_unmerge("!!!", "", file_type, obj)
+				elif pkgfiles[objkey][0] == "obj":
+					if statobj is None or not stat.S_ISREG(statobj.st_mode):
+						show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
+						continue
+					mymd5 = None
+					try:
+						mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
+					except FileNotFound as e:
+						# the file has disappeared between now and our stat call
+						show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
+						continue
+
+					# The stored md5 is lowercased because db entries used to be
+					# written in upper-case; lowering it here preserves backwards
+					# compatibility with those old entries.
+					if mymd5 != pkgfiles[objkey][2].lower():
+						show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
+						continue
+					try:
+						unlink(obj, lstatobj)
+					except (OSError, IOError) as e:
+						if e.errno not in ignored_unlink_errnos:
+							raise
+						del e
+					show_unmerge("<<<", "", file_type, obj)
+				elif pkgfiles[objkey][0] == "fif":
+					if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
+						show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
+						continue
+					show_unmerge("---", "", file_type, obj)
+				elif pkgfiles[objkey][0] == "dev":
+					show_unmerge("---", "", file_type, obj)
+
+			mydirs = sorted(mydirs)
+			mydirs.reverse()
+
+			for obj, inode_key in mydirs:
+				# Treat any directory named "info" as a candidate here,
+				# since it might have been in INFOPATH previously even
+				# though it may not be there now.
+				if inode_key in infodirs_inodes or \
+					os.path.basename(obj) == "info":
+					try:
+						remaining = os.listdir(obj)
+					except OSError:
+						pass
+					else:
+						cleanup_info_dir = ()
+						if remaining and \
+							len(remaining) <= len(infodir_cleanup):
+							if not set(remaining).difference(infodir_cleanup):
+								cleanup_info_dir = remaining
+
+						for child in cleanup_info_dir:
+							child = os.path.join(obj, child)
+							try:
+								lstatobj = os.lstat(child)
+								if stat.S_ISREG(lstatobj.st_mode):
+									unlink(child, lstatobj)
+									show_unmerge("<<<", "", "obj", child)
+							except EnvironmentError as e:
+								if e.errno not in ignored_unlink_errnos:
+									raise
+								del e
+								show_unmerge("!!!", "", "obj", child)
+				try:
+					if bsd_chflags:
+						lstatobj = os.lstat(obj)
+						if lstatobj.st_flags != 0:
+							bsd_chflags.lchflags(obj, 0)
+						parent_name = os.path.dirname(obj)
+						# Use normal stat/chflags for the parent since we want to
+						# follow any symlinks to the real parent directory.
+						pflags = os.stat(parent_name).st_flags
+						if pflags != 0:
+							bsd_chflags.chflags(parent_name, 0)
+					try:
+						os.rmdir(obj)
+					finally:
+						if bsd_chflags and pflags != 0:
+							# Restore the parent flags we saved before unlinking
+							bsd_chflags.chflags(parent_name, pflags)
+					show_unmerge("<<<", "", "dir", obj)
+				except EnvironmentError as e:
+					if e.errno not in ignored_rmdir_errnos:
+						raise
+					if e.errno != errno.ENOENT:
+						show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
+					del e
+				else:
+					# When a directory is successfully removed, there's
+					# no need to protect symlinks that point to it.
+					unmerge_syms = protected_symlinks.pop(inode_key, None)
+					if unmerge_syms is not None:
+						for relative_path in unmerge_syms:
+							obj = os.path.join(real_root,
+								relative_path.lstrip(os.sep))
+							try:
+								unlink(obj, os.lstat(obj))
+								show_unmerge("<<<", "", "sym", obj)
+							except (OSError, IOError) as e:
+								if e.errno not in ignored_unlink_errnos:
+									raise
+								del e
+								show_unmerge("!!!", "", "sym", obj)
+
+		if protected_symlinks:
+			msg = "One or more symlinks to directories have been " + \
+				"preserved in order to ensure that files installed " + \
+				"via these symlinks remain accessible:"
+			lines = textwrap.wrap(msg, 72)
+			lines.append("")
+			flat_list = set()
+			flat_list.update(*protected_symlinks.values())
+			flat_list = sorted(flat_list)
+			for f in flat_list:
+				lines.append("\t%s" % (os.path.join(real_root,
+					f.lstrip(os.sep))))
+			lines.append("")
+			self._elog("eerror", "postrm", lines)
+
+		# Remove stale entries from config memory.
+		if stale_confmem:
+			for filename in stale_confmem:
+				del cfgfiledict[filename]
+			writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+
+		#remove self from vartree database so that our own virtual gets zapped if we're the last node
+		self.vartree.zap(self.mycpv)
+
+	def isowner(self, filename, destroot=None):
+		""" 
+		Check if a file belongs to this package. This may
+		result in a stat call for the parent directory of
+		every installed file, since the inode numbers are
+		used to work around the problem of ambiguous paths
+		caused by symlinked directories. The results of
+		stat calls are cached to optimize multiple calls
+		to this method.
+
+		@param filename:
+		@type filename:
+		@param destroot:
+		@type destroot:
+		@rtype: Boolean
+		@returns:
+		1. True if this package owns the file.
+		2. False if this package does not own the file.
+		"""
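+		# Hypothetical usage (path given relative to ROOT, with a leading
+		# slash as in CONTENTS entries):
+		#   if dblnk.isowner("/usr/bin/foo"):
+		#       ...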
+
+		if destroot is not None and destroot != self._eroot:
+			warnings.warn("The second parameter of the " + \
+				"portage.dbapi.vartree.dblink.isowner()" + \
+				" is now unused. Instead " + \
+				"self.settings['EROOT'] will be used.",
+				DeprecationWarning, stacklevel=2)
+
+		return bool(self._match_contents(filename))
+
+	def _match_contents(self, filename, destroot=None):
+		"""
+		The matching contents entry is returned, which is useful
+		since the path may differ from the one given by the caller,
+		due to symlinks.
+
+		@rtype: String
+		@return: the contents entry corresponding to the given path, or False
+			if the file is not owned by this package.
+		"""
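+		# e.g. (hypothetical): _match_contents("/usr/bin/foo") returns the
+		# normalized contents key "/usr/bin/foo" if this package owns it
+		# (possibly resolved via parent-directory inodes), or False.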
+
+		filename = _unicode_decode(filename,
+			encoding=_encodings['content'], errors='strict')
+
+		if destroot is not None and destroot != self._eroot:
+			warnings.warn("The second parameter of the " + \
+				"portage.dbapi.vartree.dblink._match_contents()" + \
+				" is now unused. Instead " + \
+				"self.settings['ROOT'] will be used.",
+				DeprecationWarning, stacklevel=2)
+
+		# don't use EROOT here, image already contains EPREFIX
+		destroot = self.settings['ROOT']
+
+		# The given filename argument might have a different encoding than
+		# the filenames contained in the contents, so use separate wrapped os
+		# modules for each. The basename is more likely to contain non-ascii
+		# characters than the directory path, so use os_filename_arg for all
+		# operations involving the basename of the filename arg.
+		os_filename_arg = _os_merge
+		os = _os_merge
+
+		try:
+			_unicode_encode(filename,
+				encoding=_encodings['merge'], errors='strict')
+		except UnicodeEncodeError:
+			# The package appears to have been merged with a
+			# different value of sys.getfilesystemencoding(),
+			# so fall back to utf_8 if appropriate.
+			try:
+				_unicode_encode(filename,
+					encoding=_encodings['fs'], errors='strict')
+			except UnicodeEncodeError:
+				pass
+			else:
+				os_filename_arg = portage.os
+
+		destfile = normalize_path(
+			os_filename_arg.path.join(destroot,
+			filename.lstrip(os_filename_arg.path.sep)))
+
+		pkgfiles = self.getcontents()
+		if pkgfiles and destfile in pkgfiles:
+			return destfile
+		if pkgfiles:
+			basename = os_filename_arg.path.basename(destfile)
+			if self._contents_basenames is None:
+
+				try:
+					for x in pkgfiles:
+						_unicode_encode(x,
+							encoding=_encodings['merge'],
+							errors='strict')
+				except UnicodeEncodeError:
+					# The package appears to have been merged with a
+					# different value of sys.getfilesystemencoding(),
+					# so fall back to utf_8 if appropriate.
+					try:
+						for x in pkgfiles:
+							_unicode_encode(x,
+								encoding=_encodings['fs'],
+								errors='strict')
+					except UnicodeEncodeError:
+						pass
+					else:
+						os = portage.os
+
+				self._contents_basenames = set(
+					os.path.basename(x) for x in pkgfiles)
+			if basename not in self._contents_basenames:
+				# This is a shortcut that, in most cases, allows us to
+				# eliminate this package as an owner without the need
+				# to examine inode numbers of parent directories.
+				return False
+
+			# Use stat rather than lstat since we want to follow
+			# any symlinks to the real parent directory.
+			parent_path = os_filename_arg.path.dirname(destfile)
+			try:
+				parent_stat = os_filename_arg.stat(parent_path)
+			except EnvironmentError as e:
+				if e.errno != errno.ENOENT:
+					raise
+				del e
+				return False
+			if self._contents_inodes is None:
+
+				if os is _os_merge:
+					try:
+						for x in pkgfiles:
+							_unicode_encode(x,
+								encoding=_encodings['merge'],
+								errors='strict')
+					except UnicodeEncodeError:
+						# The package appears to have been merged with a 
+						# different value of sys.getfilesystemencoding(),
+						# so fall back to utf_8 if appropriate.
+						try:
+							for x in pkgfiles:
+								_unicode_encode(x,
+									encoding=_encodings['fs'],
+									errors='strict')
+						except UnicodeEncodeError:
+							pass
+						else:
+							os = portage.os
+
+				self._contents_inodes = {}
+				parent_paths = set()
+				for x in pkgfiles:
+					p_path = os.path.dirname(x)
+					if p_path in parent_paths:
+						continue
+					parent_paths.add(p_path)
+					try:
+						s = os.stat(p_path)
+					except OSError:
+						pass
+					else:
+						inode_key = (s.st_dev, s.st_ino)
+						# Use lists of paths in case multiple
+						# paths reference the same inode.
+						p_path_list = self._contents_inodes.get(inode_key)
+						if p_path_list is None:
+							p_path_list = []
+							self._contents_inodes[inode_key] = p_path_list
+						if p_path not in p_path_list:
+							p_path_list.append(p_path)
+
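+			# self._contents_inodes now maps each parent directory's
+			# (st_dev, st_ino) to the list of contents paths that share that
+			# inode, e.g. {(2049, 131073): ['/usr/lib', '/usr/lib64']}.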
+			p_path_list = self._contents_inodes.get(
+				(parent_stat.st_dev, parent_stat.st_ino))
+			if p_path_list:
+				for p_path in p_path_list:
+					x = os_filename_arg.path.join(p_path, basename)
+					if x in pkgfiles:
+						return x
+
+		return False
+
+	def _linkmap_rebuild(self, **kwargs):
+		"""
+		Rebuild self._linkmap unless it is broken due to a missing
+		scanelf binary. Also, return early if preserve-libs is disabled
+		and the preserve-libs registry is empty.
+		"""
+		if self._linkmap_broken or \
+			self.vartree.dbapi._linkmap is None or \
+			self.vartree.dbapi._plib_registry is None or \
+			("preserve-libs" not in self.settings.features and \
+			not self.vartree.dbapi._plib_registry.hasEntries()):
+			return
+		try:
+			self.vartree.dbapi._linkmap.rebuild(**kwargs)
+		except CommandNotFound as e:
+			self._linkmap_broken = True
+			self._display_merge(_("!!! Disabling preserve-libs " \
+				"due to error: Command Not Found: %s\n") % (e,),
+				level=logging.ERROR, noiselevel=-1)
+
+	def _find_libs_to_preserve(self, unmerge=False):
+		"""
+		Get set of relative paths for libraries to be preserved. When
+		unmerge is False, file paths to preserve are selected from
+		self._installed_instance. Otherwise, paths are selected from
+		self.
+		"""
+		if self._linkmap_broken or \
+			self.vartree.dbapi._linkmap is None or \
+			self.vartree.dbapi._plib_registry is None or \
+			(not unmerge and self._installed_instance is None) or \
+			"preserve-libs" not in self.settings.features:
+			return set()
+
+		os = _os_merge
+		linkmap = self.vartree.dbapi._linkmap
+		if unmerge:
+			installed_instance = self
+		else:
+			installed_instance = self._installed_instance
+		old_contents = installed_instance.getcontents()
+		root = self.settings['ROOT']
+		root_len = len(root) - 1
+		lib_graph = digraph()
+		path_node_map = {}
+
+		def path_to_node(path):
+			node = path_node_map.get(path)
+			if node is None:
+				node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
+				alt_path_node = lib_graph.get(node)
+				if alt_path_node is not None:
+					node = alt_path_node
+				node.alt_paths.add(path)
+				path_node_map[path] = node
+			return node
+
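+		# path_to_node() collapses alternative paths (e.g. a symlink and its
+		# target) that resolve to the same object key into one graph node,
+		# accumulating every spelling in node.alt_paths.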
+		consumer_map = {}
+		provider_nodes = set()
+		# Create provider nodes and add them to the graph.
+		for f_abs in old_contents:
+
+			if os is _os_merge:
+				try:
+					_unicode_encode(f_abs,
+						encoding=_encodings['merge'], errors='strict')
+				except UnicodeEncodeError:
+					# The package appears to have been merged with a 
+					# different value of sys.getfilesystemencoding(),
+					# so fall back to utf_8 if appropriate.
+					try:
+						_unicode_encode(f_abs,
+							encoding=_encodings['fs'], errors='strict')
+					except UnicodeEncodeError:
+						pass
+					else:
+						os = portage.os
+
+			f = f_abs[root_len:]
+			if not unmerge and self.isowner(f):
+				# We have an identically named replacement file,
+				# so we don't try to preserve the old copy.
+				continue
+			try:
+				consumers = linkmap.findConsumers(f,
+					exclude_providers=(installed_instance.isowner,))
+			except KeyError:
+				continue
+			if not consumers:
+				continue
+			provider_node = path_to_node(f)
+			lib_graph.add(provider_node, None)
+			provider_nodes.add(provider_node)
+			consumer_map[provider_node] = consumers
+
+		# Create consumer nodes and add them to the graph.
+		# Note that consumers can also be providers.
+		for provider_node, consumers in consumer_map.items():
+			for c in consumers:
+				consumer_node = path_to_node(c)
+				if installed_instance.isowner(c) and \
+					consumer_node not in provider_nodes:
+					# This is not a provider, so it will be uninstalled.
+					continue
+				lib_graph.add(provider_node, consumer_node)
+
+		# Locate nodes which should be preserved. They consist of all
+		# providers that are reachable from consumers that are not
+		# providers themselves.
+		preserve_nodes = set()
+		for consumer_node in lib_graph.root_nodes():
+			if consumer_node in provider_nodes:
+				continue
+			# Preserve all providers that are reachable from this consumer.
+			node_stack = lib_graph.child_nodes(consumer_node)
+			while node_stack:
+				provider_node = node_stack.pop()
+				if provider_node in preserve_nodes:
+					continue
+				preserve_nodes.add(provider_node)
+				node_stack.extend(lib_graph.child_nodes(provider_node))
+
+		preserve_paths = set()
+		for preserve_node in preserve_nodes:
+			# Preserve the library itself, and also preserve the
+			# soname symlink which is the only symlink that is
+			# strictly required.
+			hardlinks = set()
+			soname_symlinks = set()
+			soname = linkmap.getSoname(next(iter(preserve_node.alt_paths)))
+			for f in preserve_node.alt_paths:
+				f_abs = os.path.join(root, f.lstrip(os.sep))
+				try:
+					if stat.S_ISREG(os.lstat(f_abs).st_mode):
+						hardlinks.add(f)
+					elif os.path.basename(f) == soname:
+						soname_symlinks.add(f)
+				except OSError:
+					pass
+
+			if hardlinks:
+				preserve_paths.update(hardlinks)
+				preserve_paths.update(soname_symlinks)
+
+		return preserve_paths
+
+	def _add_preserve_libs_to_contents(self, preserve_paths):
+		"""
+		Preserve libs returned from _find_libs_to_preserve().
+		"""
+
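+		# Sketch of the effect, assuming the usual CONTENTS syntax: a
+		# preserved '/usr/lib/libfoo.so.1' keeps its old entry, e.g.
+		#   obj /usr/lib/libfoo.so.1 <md5> <mtime>
+		# and any parent directories missing from the new CONTENTS gain
+		# 'dir' entries.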
+		if not preserve_paths:
+			return
+
+		os = _os_merge
+		showMessage = self._display_merge
+		root = self.settings['ROOT']
+
+		# Copy contents entries from the old package to the new one.
+		new_contents = self.getcontents().copy()
+		old_contents = self._installed_instance.getcontents()
+		for f in sorted(preserve_paths):
+			f = _unicode_decode(f,
+				encoding=_encodings['content'], errors='strict')
+			f_abs = os.path.join(root, f.lstrip(os.sep))
+			contents_entry = old_contents.get(f_abs)
+			if contents_entry is None:
+				# This will probably never happen, but it might if one of the
+				# paths returned from findConsumers() refers to one of the libs
+				# that should be preserved yet the path is not listed in the
+				# contents. Such a path might belong to some other package, so
+				# it shouldn't be preserved here.
+				showMessage(_("!!! File '%s' will not be preserved "
+					"due to missing contents entry\n") % (f_abs,),
+					level=logging.ERROR, noiselevel=-1)
+				preserve_paths.remove(f)
+				continue
+			new_contents[f_abs] = contents_entry
+			obj_type = contents_entry[0]
+			showMessage(_(">>> needed    %s %s\n") % (obj_type, f_abs),
+				noiselevel=-1)
+			# Add parent directories to contents if necessary.
+			parent_dir = os.path.dirname(f_abs)
+			while len(parent_dir) > len(root):
+				new_contents[parent_dir] = ["dir"]
+				prev = parent_dir
+				parent_dir = os.path.dirname(parent_dir)
+				if prev == parent_dir:
+					break
+		outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
+		write_contents(new_contents, root, outfile)
+		outfile.close()
+		self._clear_contents_cache()
+
+	def _find_unused_preserved_libs(self, unmerge_no_replacement):
+		"""
+		Find preserved libraries that don't have any consumers left.
+		"""
+
+		if self._linkmap_broken or \
+			self.vartree.dbapi._linkmap is None or \
+			self.vartree.dbapi._plib_registry is None or \
+			not self.vartree.dbapi._plib_registry.hasEntries():
+			return {}
+
+		# Since preserved libraries can be consumers of other preserved
+		# libraries, use a graph to track consumer relationships.
+		plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
+		linkmap = self.vartree.dbapi._linkmap
+		lib_graph = digraph()
+		preserved_nodes = set()
+		preserved_paths = set()
+		path_cpv_map = {}
+		path_node_map = {}
+		root = self.settings['ROOT']
+
+		def path_to_node(path):
+			node = path_node_map.get(path)
+			if node is None:
+				node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
+				alt_path_node = lib_graph.get(node)
+				if alt_path_node is not None:
+					node = alt_path_node
+				node.alt_paths.add(path)
+				path_node_map[path] = node
+			return node
+
+		for cpv, plibs in plib_dict.items():
+			for f in plibs:
+				path_cpv_map[f] = cpv
+				preserved_node = path_to_node(f)
+				if not preserved_node.file_exists():
+					continue
+				lib_graph.add(preserved_node, None)
+				preserved_paths.add(f)
+				preserved_nodes.add(preserved_node)
+				for c in self.vartree.dbapi._linkmap.findConsumers(f):
+					consumer_node = path_to_node(c)
+					if not consumer_node.file_exists():
+						continue
+					# Note that consumers may also be providers.
+					lib_graph.add(preserved_node, consumer_node)
+
+		# Eliminate consumers having providers with the same soname as an
+		# installed library that is not preserved. This eliminates
+		# libraries that are erroneously preserved due to a move from one
+		# directory to another.
+		# Also eliminate consumers that are going to be unmerged if
+		# unmerge_no_replacement is True.
+		provider_cache = {}
+		for preserved_node in preserved_nodes:
+			soname = linkmap.getSoname(preserved_node)
+			for consumer_node in lib_graph.parent_nodes(preserved_node):
+				if consumer_node in preserved_nodes:
+					continue
+				if unmerge_no_replacement:
+					will_be_unmerged = True
+					for path in consumer_node.alt_paths:
+						if not self.isowner(path):
+							will_be_unmerged = False
+							break
+					if will_be_unmerged:
+						# This consumer is not preserved and it is
+						# being unmerged, so drop this edge.
+						lib_graph.remove_edge(preserved_node, consumer_node)
+						continue
+
+				providers = provider_cache.get(consumer_node)
+				if providers is None:
+					providers = linkmap.findProviders(consumer_node)
+					provider_cache[consumer_node] = providers
+				providers = providers.get(soname)
+				if providers is None:
+					continue
+				for provider in providers:
+					if provider in preserved_paths:
+						continue
+					provider_node = path_to_node(provider)
+					if not provider_node.file_exists():
+						continue
+					if provider_node in preserved_nodes:
+						continue
+					# An alternative provider seems to be
+					# installed, so drop this edge.
+					lib_graph.remove_edge(preserved_node, consumer_node)
+					break
+
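+		# Repeatedly peel preserved root nodes (libs with no remaining
+		# consumers) off the graph; everything peeled is unused and is
+		# grouped by the cpv that registered it.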
+		cpv_lib_map = {}
+		while lib_graph:
+			root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
+			if not root_nodes:
+				break
+			lib_graph.difference_update(root_nodes)
+			unlink_list = set()
+			for node in root_nodes:
+				unlink_list.update(node.alt_paths)
+			unlink_list = sorted(unlink_list)
+			for obj in unlink_list:
+				cpv = path_cpv_map.get(obj)
+				if cpv is None:
+					# This means that a symlink is in the preserved libs
+					# registry, but the actual lib it points to is not.
+					self._display_merge(_("!!! symlink to lib is preserved, "
+						"but not the lib itself:\n!!! '%s'\n") % (obj,),
+						level=logging.ERROR, noiselevel=-1)
+					continue
+				removed = cpv_lib_map.get(cpv)
+				if removed is None:
+					removed = set()
+					cpv_lib_map[cpv] = removed
+				removed.add(obj)
+
+		return cpv_lib_map
+
+	def _remove_preserved_libs(self, cpv_lib_map):
+		"""
+		Remove files returned from _find_unused_preserved_libs().
+		"""
+
+		os = _os_merge
+
+		files_to_remove = set()
+		for files in cpv_lib_map.values():
+			files_to_remove.update(files)
+		files_to_remove = sorted(files_to_remove)
+		showMessage = self._display_merge
+		root = self.settings['ROOT']
+
+		parent_dirs = set()
+		for obj in files_to_remove:
+			obj = os.path.join(root, obj.lstrip(os.sep))
+			parent_dirs.add(os.path.dirname(obj))
+			if os.path.islink(obj):
+				obj_type = _("sym")
+			else:
+				obj_type = _("obj")
+			try:
+				os.unlink(obj)
+			except OSError as e:
+				if e.errno != errno.ENOENT:
+					raise
+				del e
+			else:
+				showMessage(_("<<< !needed  %s %s\n") % (obj_type, obj),
+					noiselevel=-1)
+
+		# Remove empty parent directories if possible.
+		while parent_dirs:
+			x = parent_dirs.pop()
+			while True:
+				try:
+					os.rmdir(x)
+				except OSError:
+					break
+				prev = x
+				x = os.path.dirname(x)
+				if x == prev:
+					break
+
+		self.vartree.dbapi._plib_registry.pruneNonExisting()
+
+	def _collision_protect(self, srcroot, destroot, mypkglist,
+		file_list, symlink_list):
+
+			os = _os_merge
+
+			collision_ignore = set([normalize_path(myignore) for myignore in \
+				portage.util.shlex_split(
+				self.settings.get("COLLISION_IGNORE", ""))])
+
+			# For collisions with preserved libraries, the current package
+			# will assume ownership and the libraries will be unregistered.
+			if self.vartree.dbapi._plib_registry is None:
+				# preserve-libs is entirely disabled
+				plib_cpv_map = None
+				plib_paths = None
+				plib_inodes = {}
+			else:
+				plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
+				plib_cpv_map = {}
+				plib_paths = set()
+				for cpv, paths in plib_dict.items():
+					plib_paths.update(paths)
+					for f in paths:
+						plib_cpv_map[f] = cpv
+				plib_inodes = self._lstat_inode_map(plib_paths)
+
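+			# plib_inodes maps (st_dev, st_ino) -> set of preserved-lib
+			# paths; see _lstat_inode_map() below for the construction.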
+			plib_collisions = {}
+
+			showMessage = self._display_merge
+			stopmerge = False
+			collisions = []
+			symlink_collisions = []
+			destroot = self.settings['ROOT']
+			showMessage(_(" %s checking %d files for package collisions\n") % \
+				(colorize("GOOD", "*"), len(file_list) + len(symlink_list)))
+			for i, (f, f_type) in enumerate(chain(
+				((f, "reg") for f in file_list),
+				((f, "sym") for f in symlink_list))):
+				if i % 1000 == 0 and i != 0:
+					showMessage(_("%d files checked ...\n") % i)
+
+				dest_path = normalize_path(
+					os.path.join(destroot, f.lstrip(os.path.sep)))
+				try:
+					dest_lstat = os.lstat(dest_path)
+				except EnvironmentError as e:
+					if e.errno == errno.ENOENT:
+						del e
+						continue
+					elif e.errno == errno.ENOTDIR:
+						del e
+						# A non-directory is in a location where this package
+						# expects to have a directory.
+						dest_lstat = None
+						parent_path = dest_path
+						while len(parent_path) > len(destroot):
+							parent_path = os.path.dirname(parent_path)
+							try:
+								dest_lstat = os.lstat(parent_path)
+								break
+							except EnvironmentError as e:
+								if e.errno != errno.ENOTDIR:
+									raise
+								del e
+						if not dest_lstat:
+							raise AssertionError(
+								"unable to find non-directory " + \
+								"parent for '%s'" % dest_path)
+						dest_path = parent_path
+						f = os.path.sep + dest_path[len(destroot):]
+						if f in collisions:
+							continue
+					else:
+						raise
+				if f[0] != "/":
+					f="/"+f
+
+				if stat.S_ISDIR(dest_lstat.st_mode):
+					if f_type == "sym":
+						# This case is explicitly banned
+						# by PMS (see bug #326685).
+						symlink_collisions.append(f)
+						collisions.append(f)
+						continue
+
+				plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
+				if plibs:
+					for path in plibs:
+						cpv = plib_cpv_map[path]
+						paths = plib_collisions.get(cpv)
+						if paths is None:
+							paths = set()
+							plib_collisions[cpv] = paths
+						paths.add(path)
+					# The current package will assume ownership and the
+					# libraries will be unregistered, so exclude this
+					# path from the normal collisions.
+					continue
+
+				isowned = False
+				full_path = os.path.join(destroot, f.lstrip(os.path.sep))
+				for ver in mypkglist:
+					if ver.isowner(f):
+						isowned = True
+						break
+				if not isowned and self.isprotected(full_path):
+					isowned = True
+				if not isowned:
+					stopmerge = True
+					if collision_ignore:
+						if f in collision_ignore:
+							stopmerge = False
+						else:
+							for myignore in collision_ignore:
+								if f.startswith(myignore + os.path.sep):
+									stopmerge = False
+									break
+					if stopmerge:
+						collisions.append(f)
+			return collisions, symlink_collisions, plib_collisions
+
+	def _lstat_inode_map(self, path_iter):
+		"""
+		Use lstat to create a map of the form:
+		  {(st_dev, st_ino) : set([path1, path2, ...])}
+		Multiple paths may reference the same inode due to hardlinks.
+		All lstat() calls are relative to self.myroot.
+		"""
+
+		os = _os_merge
+
+		root = self.settings['ROOT']
+		inode_map = {}
+		for f in path_iter:
+			path = os.path.join(root, f.lstrip(os.sep))
+			try:
+				st = os.lstat(path)
+			except OSError as e:
+				if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+					raise
+				del e
+				continue
+			key = (st.st_dev, st.st_ino)
+			paths = inode_map.get(key)
+			if paths is None:
+				paths = set()
+				inode_map[key] = paths
+			paths.add(f)
+		return inode_map
+
+	def _security_check(self, installed_instances):
+		if not installed_instances:
+			return 0
+
+		os = _os_merge
+
+		showMessage = self._display_merge
+
+		file_paths = set()
+		for dblnk in installed_instances:
+			file_paths.update(dblnk.getcontents())
+		inode_map = {}
+		real_paths = set()
+		for i, path in enumerate(file_paths):
+
+			if os is _os_merge:
+				try:
+					_unicode_encode(path,
+						encoding=_encodings['merge'], errors='strict')
+				except UnicodeEncodeError:
+					# The package appears to have been merged with a 
+					# different value of sys.getfilesystemencoding(),
+					# so fall back to utf_8 if appropriate.
+					try:
+						_unicode_encode(path,
+							encoding=_encodings['fs'], errors='strict')
+					except UnicodeEncodeError:
+						pass
+					else:
+						os = portage.os
+
+			try:
+				s = os.lstat(path)
+			except OSError as e:
+				if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+					raise
+				del e
+				continue
+			if not stat.S_ISREG(s.st_mode):
+				continue
+			path = os.path.realpath(path)
+			if path in real_paths:
+				continue
+			real_paths.add(path)
+			if s.st_nlink > 1 and \
+				s.st_mode & (stat.S_ISUID | stat.S_ISGID):
+				k = (s.st_dev, s.st_ino)
+				inode_map.setdefault(k, []).append((path, s))
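+		# A path list is suspicious when the on-disk link count exceeds the
+		# number of owned paths: some path outside these packages still
+		# hardlinks the suid/sgid file and would survive the merge.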
+		suspicious_hardlinks = []
+		for path_list in inode_map.values():
+			path, s = path_list[0]
+			if len(path_list) == s.st_nlink:
+				# All hardlinks seem to be owned by this package.
+				continue
+			suspicious_hardlinks.append(path_list)
+		if not suspicious_hardlinks:
+			return 0
+
+		msg = []
+		msg.append(_("suid/sgid file(s) "
+			"with suspicious hardlink(s):"))
+		msg.append("")
+		for path_list in suspicious_hardlinks:
+			for path, s in path_list:
+				msg.append("\t%s" % path)
+		msg.append("")
+		msg.append(_("See the Gentoo Security Handbook " 
+			"guide for advice on how to proceed."))
+
+		self._eerror("preinst", msg)
+
+		return 1
+
+	def _eqawarn(self, phase, lines):
+		self._elog("eqawarn", phase, lines)
+
+	def _eerror(self, phase, lines):
+		self._elog("eerror", phase, lines)
+
+	def _elog(self, funcname, phase, lines):
+		func = getattr(portage.elog.messages, funcname)
+		if self._scheduler is None:
+			for l in lines:
+				func(l, phase=phase, key=self.mycpv)
+		else:
+			background = self.settings.get("PORTAGE_BACKGROUND") == "1"
+			log_path = None
+			if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+				log_path = self.settings.get("PORTAGE_LOG_FILE")
+			out = io.StringIO()
+			for line in lines:
+				func(line, phase=phase, key=self.mycpv, out=out)
+			msg = out.getvalue()
+			self._scheduler.output(msg,
+				background=background, log_path=log_path)
+
+	def _elog_process(self, phasefilter=None):
+		cpv = self.mycpv
+		if self._pipe is None:
+			elog_process(cpv, self.settings, phasefilter=phasefilter)
+		else:
+			logdir = os.path.join(self.settings["T"], "logging")
+			ebuild_logentries = collect_ebuild_messages(logdir)
+			py_logentries = collect_messages(key=cpv).get(cpv, {})
+			logentries = _merge_logentries(py_logentries, ebuild_logentries)
+			funcnames = {
+				"INFO": "einfo",
+				"LOG": "elog",
+				"WARN": "ewarn",
+				"QA": "eqawarn",
+				"ERROR": "eerror"
+			}
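+			# Each message is serialized for the IPC pipe as one
+			# space-joined line, e.g. (hypothetical):
+			#   "eerror postinst sys-apps/foo-1.0 some message"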
+			str_buffer = []
+			for phase, messages in logentries.items():
+				for key, lines in messages:
+					funcname = funcnames[key]
+					if isinstance(lines, basestring):
+						lines = [lines]
+					for line in lines:
+						fields = (funcname, phase, cpv, line.rstrip('\n'))
+						str_buffer.append(' '.join(fields))
+						str_buffer.append('\n')
+			if str_buffer:
+				os.write(self._pipe, _unicode_encode(''.join(str_buffer)))
+
+	def _emerge_log(self, msg):
+		emergelog(False, msg)
+
+	def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
+		mydbapi=None, prev_mtimes=None, counter=None):
+		"""
+		
+		This function does the following:
+		
+		calls self._preserve_libs if FEATURES=preserve-libs
+		calls self._collision_protect if FEATURES=collision-protect
+		calls doebuild(mydo=pkg_preinst)
+		Merges the package to the livefs
+		unmerges old version (if required)
+		calls doebuild(mydo=pkg_postinst)
+		calls env_update
+		
+		@param srcroot: Typically this is ${D}
+		@type srcroot: String (Path)
+		@param destroot: ignored, self.settings['ROOT'] is used instead
+		@type destroot: String (Path)
+		@param inforoot: root of the vardb entry ?
+		@type inforoot: String (Path)
+		@param myebuild: path to the ebuild that we are processing
+		@type myebuild: String (Path)
+		@param mydbapi: dbapi which is handed to doebuild.
+		@type mydbapi: portdbapi instance
+		@param prev_mtimes: { Filename:mtime } mapping for env_update
+		@type prev_mtimes: Dictionary
+		@rtype: Boolean
+		@returns:
+		1. 0 on success
+		2. 1 on failure
+		
+		secondhand is a list of symlinks that have been skipped due to their target
+		not existing; we will merge these symlinks at a later time.
+		"""
+
+		os = _os_merge
+
+		srcroot = _unicode_decode(srcroot,
+			encoding=_encodings['content'], errors='strict')
+		destroot = self.settings['ROOT']
+		inforoot = _unicode_decode(inforoot,
+			encoding=_encodings['content'], errors='strict')
+		myebuild = _unicode_decode(myebuild,
+			encoding=_encodings['content'], errors='strict')
+
+		showMessage = self._display_merge
+		srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
+
+		if not os.path.isdir(srcroot):
+			showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot,
+				level=logging.ERROR, noiselevel=-1)
+			return 1
+
+		slot = ''
+		for var_name in ('CHOST', 'SLOT'):
+			if var_name == 'CHOST' and self.cat == 'virtual':
+				try:
+					os.unlink(os.path.join(inforoot, var_name))
+				except OSError:
+					pass
+				continue
+
+			try:
+				val = io.open(_unicode_encode(
+					os.path.join(inforoot, var_name),
+					encoding=_encodings['fs'], errors='strict'),
+					mode='r', encoding=_encodings['repo.content'],
+					errors='replace').readline().strip()
+			except EnvironmentError as e:
+				if e.errno != errno.ENOENT:
+					raise
+				del e
+				val = ''
+
+			if var_name == 'SLOT':
+				slot = val
+
+				if not slot.strip():
+					slot = self.settings.get(var_name, '')
+					if not slot.strip():
+						showMessage(_("!!! SLOT is undefined\n"),
+							level=logging.ERROR, noiselevel=-1)
+						return 1
+					write_atomic(os.path.join(inforoot, var_name), slot + '\n')
+
+			if val != self.settings.get(var_name, ''):
+				self._eqawarn('preinst',
+					[_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
+					{"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
+
+		def eerror(lines):
+			self._eerror("preinst", lines)
+
+		if not os.path.exists(self.dbcatdir):
+			ensure_dirs(self.dbcatdir)
+
+		otherversions = []
+		for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
+			otherversions.append(v.split("/")[1])
+
+		cp = self.mysplit[0]
+		slot_atom = "%s:%s" % (cp, slot)
+
+		# filter any old-style virtual matches
+		slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom) \
+			if cpv_getkey(cpv) == cp]
+
+		if self.mycpv not in slot_matches and \
+			self.vartree.dbapi.cpv_exists(self.mycpv):
+			# handle multislot or unapplied slotmove
+			slot_matches.append(self.mycpv)
+
+		others_in_slot = []
+		from portage import config
+		for cur_cpv in slot_matches:
+			# Clone the config in case one of these has to be unmerged since
+			# we need it to have private ${T} etc... for things like elog.
+			settings_clone = config(clone=self.settings)
+			settings_clone.pop("PORTAGE_BUILDIR_LOCKED", None)
+			settings_clone.reset()
+			others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+				settings=settings_clone,
+				vartree=self.vartree, treetype="vartree",
+				scheduler=self._scheduler, pipe=self._pipe))
+
+		retval = self._security_check(others_in_slot)
+		if retval:
+			return retval
+
+		if slot_matches:
+			# Used by self.isprotected().
+			max_dblnk = None
+			max_counter = -1
+			for dblnk in others_in_slot:
+				cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
+				if cur_counter > max_counter:
+					max_counter = cur_counter
+					max_dblnk = dblnk
+			self._installed_instance = max_dblnk
+
+		# We check for unicode encoding issues after src_install. However,
+		# the check must be repeated here for binary packages (it's
+		# inexpensive since we call os.walk() here anyway).
+		unicode_errors = []
+
+		while True:
+
+			unicode_error = False
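+			# If any name under srcroot fails to decode, it is renamed on
+			# disk to an escaped (backslashreplace) form and the walk is
+			# restarted, so the final lists contain only decodable paths.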
+
+			myfilelist = []
+			mylinklist = []
+			paths_with_newlines = []
+			srcroot_len = len(srcroot)
+			def onerror(e):
+				raise
+			for parent, dirs, files in os.walk(srcroot, onerror=onerror):
+				try:
+					parent = _unicode_decode(parent,
+						encoding=_encodings['merge'], errors='strict')
+				except UnicodeDecodeError:
+					new_parent = _unicode_decode(parent,
+						encoding=_encodings['merge'], errors='replace')
+					new_parent = _unicode_encode(new_parent,
+						encoding=_encodings['merge'], errors='backslashreplace')
+					new_parent = _unicode_decode(new_parent,
+						encoding=_encodings['merge'], errors='replace')
+					os.rename(parent, new_parent)
+					unicode_error = True
+					unicode_errors.append(new_parent[srcroot_len:])
+					break
+
+				for fname in files:
+					try:
+						fname = _unicode_decode(fname,
+							encoding=_encodings['merge'], errors='strict')
+					except UnicodeDecodeError:
+						fpath = portage._os.path.join(
+							parent.encode(_encodings['merge']), fname)
+						new_fname = _unicode_decode(fname,
+							encoding=_encodings['merge'], errors='replace')
+						new_fname = _unicode_encode(new_fname,
+							encoding=_encodings['merge'], errors='backslashreplace')
+						new_fname = _unicode_decode(new_fname,
+							encoding=_encodings['merge'], errors='replace')
+						new_fpath = os.path.join(parent, new_fname)
+						os.rename(fpath, new_fpath)
+						unicode_error = True
+						unicode_errors.append(new_fpath[srcroot_len:])
+						fname = new_fname
+						fpath = new_fpath
+					else:
+						fpath = os.path.join(parent, fname)
+
+					relative_path = fpath[srcroot_len:]
+
+					if "\n" in relative_path:
+						paths_with_newlines.append(relative_path)
+
+					file_mode = os.lstat(fpath).st_mode
+					if stat.S_ISREG(file_mode):
+						myfilelist.append(relative_path)
+					elif stat.S_ISLNK(file_mode):
+						# Note: os.walk puts symlinks to directories in the "dirs"
+						# list and it does not traverse them since that could lead
+						# to an infinite recursion loop.
+						mylinklist.append(relative_path)
+
+				if unicode_error:
+					break
+
+			if not unicode_error:
+				break
+
+		if unicode_errors:
+			eerror(portage._merge_unicode_error(unicode_errors))
+
+		if paths_with_newlines:
+			msg = []
+			msg.append(_("This package installs one or more files containing a newline (\\n) character:"))
+			msg.append("")
+			paths_with_newlines.sort()
+			for f in paths_with_newlines:
+				msg.append("\t/%s" % (f.replace("\n", "\\n")))
+			msg.append("")
+			msg.append(_("package %s NOT merged") % self.mycpv)
+			msg.append("")
+			eerror(msg)
+			return 1
+
+		# If there are no files to merge, and an installed package in the same
+		# slot has files, it probably means that something went wrong.
+		if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
+			not myfilelist and not mylinklist and others_in_slot:
+			installed_files = None
+			for other_dblink in others_in_slot:
+				installed_files = other_dblink.getcontents()
+				if not installed_files:
+					continue
+				from textwrap import wrap
+				wrap_width = 72
+				msg = []
+				d = {
+					"new_cpv":self.mycpv,
+					"old_cpv":other_dblink.mycpv
+				}
+				msg.extend(wrap(_("The '%(new_cpv)s' package will not install "
+					"any files, but the currently installed '%(old_cpv)s'"
+					" package has the following files: ") % d, wrap_width))
+				msg.append("")
+				msg.extend(sorted(installed_files))
+				msg.append("")
+				msg.append(_("package %s NOT merged") % self.mycpv)
+				msg.append("")
+				msg.extend(wrap(
+					_("Manually run `emerge --unmerge =%s` if you "
+					"really want to remove the above files. Set "
+					"PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
+					"/etc/make.conf if you do not want to "
+					"abort in cases like this.") % other_dblink.mycpv,
+					wrap_width))
+				eerror(msg)
+			if installed_files:
+				return 1
+
+		# check for package collisions
+		blockers = self._blockers
+		if blockers is None:
+			blockers = []
+		collisions, symlink_collisions, plib_collisions = \
+			self._collision_protect(srcroot, destroot,
+			others_in_slot + blockers, myfilelist, mylinklist)
+
+		# Make sure the ebuild environment is initialized and that ${T}/elog
+		# exists for logging of collision-protect eerror messages.
+		if myebuild is None:
+			myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
+		doebuild_environment(myebuild, "preinst",
+			settings=self.settings, db=mydbapi)
+		self.settings["REPLACING_VERSIONS"] = " ".join(
+			[portage.versions.cpv_getversion(other.mycpv)
+			for other in others_in_slot])
+		prepare_build_dirs(settings=self.settings, cleanup=cleanup)
+
+		if collisions:
+			collision_protect = "collision-protect" in self.settings.features
+			protect_owned = "protect-owned" in self.settings.features
+			msg = _("This package will overwrite one or more files that"
+			" may belong to other packages (see list below).")
+			if not (collision_protect or protect_owned):
+				msg += _(" Add either \"collision-protect\" or" 
+				" \"protect-owned\" to FEATURES in"
+				" make.conf if you would like the merge to abort"
+				" in cases like this. See the make.conf man page for"
+				" more information about these features.")
+			if self.settings.get("PORTAGE_QUIET") != "1":
+				msg += _(" You can use a command such as"
+				" `portageq owners / <filename>` to identify the"
+				" installed package that owns a file. If portageq"
+				" reports that only one package owns a file then do NOT"
+				" file a bug report. A bug report is only useful if it"
+				" identifies at least two or more packages that are known"
+				" to install the same file(s)."
+				" If a collision occurs and you"
+				" can not explain where the file came from then you"
+				" should simply ignore the collision since there is not"
+				" enough information to determine if a real problem"
+				" exists. Please do NOT file a bug report at"
+				" http://bugs.gentoo.org unless you report exactly which"
+				" two packages install the same file(s). Once again,"
+				" please do NOT file a bug report unless you have"
+				" completely understood the above message.")
+
+			self.settings["EBUILD_PHASE"] = "preinst"
+			from textwrap import wrap
+			msg = wrap(msg, 70)
+			if collision_protect:
+				msg.append("")
+				msg.append(_("package %s NOT merged") % self.settings.mycpv)
+			msg.append("")
+			msg.append(_("Detected file collision(s):"))
+			msg.append("")
+
+			for f in collisions:
+				msg.append("\t%s" % \
+					os.path.join(destroot, f.lstrip(os.path.sep)))
+
+			eerror(msg)
+
+			owners = None
+			if collision_protect or protect_owned or symlink_collisions:
+				msg = []
+				msg.append("")
+				msg.append(_("Searching all installed"
+					" packages for file collisions..."))
+				msg.append("")
+				msg.append(_("Press Ctrl-C to Stop"))
+				msg.append("")
+				eerror(msg)
+
+				if len(collisions) > 20:
+					# get_owners is slow for large numbers of files, so
+					# don't look them all up.
+					collisions = collisions[:20]
+				self.lockdb()
+				try:
+					owners = self.vartree.dbapi._owners.get_owners(collisions)
+					self.vartree.dbapi.flush_cache()
+				finally:
+					self.unlockdb()
+
+				for pkg, owned_files in owners.items():
+					cpv = pkg.mycpv
+					msg = []
+					msg.append("%s" % cpv)
+					for f in sorted(owned_files):
+						msg.append("\t%s" % os.path.join(destroot,
+							f.lstrip(os.path.sep)))
+					msg.append("")
+					eerror(msg)
+
+				if not owners:
+					eerror([_("None of the installed"
+						" packages claim the file(s)."), ""])
+
+			# The explanation about the collision and how to solve
+			# it may not be visible via a scrollback buffer, especially
+			# if the number of file collisions is large. Therefore,
+			# show a summary at the end.
+			abort = False
+			if collision_protect:
+				abort = True
+				msg = _("Package '%s' NOT merged due to file collisions.") % \
+					self.settings.mycpv
+			elif protect_owned and owners:
+				abort = True
+				msg = _("Package '%s' NOT merged due to file collisions.") % \
+					self.settings.mycpv
+			elif symlink_collisions:
+				abort = True
+				msg = _("Package '%s' NOT merged due to collision " + \
+				"between a symlink and a directory which is explicitly " + \
+				"forbidden by PMS (see bug #326685).") % \
+				(self.settings.mycpv,)
+			else:
+				msg = _("Package '%s' merged despite file collisions.") % \
+					self.settings.mycpv
+			msg += _(" If necessary, refer to your elog "
+				"messages for the whole content of the above message.")
+			eerror(wrap(msg, 70))
+
+			if abort:
+				return 1
+
+		# The merge process may move files out of the image directory,
+		# which causes invalidation of the .installed flag.
+		try:
+			os.unlink(os.path.join(
+				os.path.dirname(normalize_path(srcroot)), ".installed"))
+		except OSError as e:
+			if e.errno != errno.ENOENT:
+				raise
+			del e
+
+		self.dbdir = self.dbtmpdir
+		self.delete()
+		ensure_dirs(self.dbtmpdir)
+
+		# run preinst script
+		showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % \
+			{"cpv":self.mycpv, "destroot":destroot})
+		phase = EbuildPhase(background=False, phase="preinst",
+			scheduler=self._scheduler, settings=self.settings)
+		phase.start()
+		a = phase.wait()
+
+		# XXX: Decide how to handle failures here.
+		if a != os.EX_OK:
+			showMessage(_("!!! FAILED preinst: ")+str(a)+"\n",
+				level=logging.ERROR, noiselevel=-1)
+			return a
+
+		# copy "info" files (like SLOT, CFLAGS, etc.) into the database
+		for x in os.listdir(inforoot):
+			self.copyfile(inforoot+"/"+x)
+
+		# write local package counter for recording
+		if counter is None:
+			counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
+		io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
+			encoding=_encodings['fs'], errors='strict'),
+			mode='w', encoding=_encodings['repo.content'],
+			errors='backslashreplace').write(_unicode_decode(str(counter)))
+
+		self.updateprotect()
+
+		#if we have a file containing previously-merged config file md5sums, grab it.
+		self.vartree.dbapi._fs_lock()
+		try:
+			cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
+			if "NOCONFMEM" in self.settings:
+				cfgfiledict["IGNORE"]=1
+			else:
+				cfgfiledict["IGNORE"]=0
+
+			# Always behave like --noconfmem is enabled for downgrades
+			# so that people who don't know about this option are less
+			# likely to get confused when doing upgrade/downgrade cycles.
+			pv_split = catpkgsplit(self.mycpv)[1:]
+			for other in others_in_slot:
+				if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
+					cfgfiledict["IGNORE"] = 1
+					break
+
+			rval = self._merge_contents(srcroot, destroot, cfgfiledict)
+			if rval != os.EX_OK:
+				return rval
+		finally:
+			self.vartree.dbapi._fs_unlock()
+
+		# These caches are populated during collision-protect and the data
+		# they contain is now invalid. It's very important to invalidate
+		# the contents_inodes cache so that FEATURES=unmerge-orphans
+		# doesn't unmerge anything that belongs to this package that has
+		# just been merged.
+		for dblnk in others_in_slot:
+			dblnk._clear_contents_cache()
+		self._clear_contents_cache()
+
+		linkmap = self.vartree.dbapi._linkmap
+		plib_registry = self.vartree.dbapi._plib_registry
+		# We initialize preserve_paths to an empty set rather
+		# than None here because it plays an important role
+		# in prune_plib_registry logic by serving to indicate
+		# that we have a replacement for a package that's
+		# being unmerged.
+
+		preserve_paths = set()
+		needed = None
+		if not (self._linkmap_broken or linkmap is None or
+			plib_registry is None):
+			self.vartree.dbapi._fs_lock()
+			plib_registry.lock()
+			try:
+				plib_registry.load()
+				needed = os.path.join(inforoot, linkmap._needed_aux_key)
+				self._linkmap_rebuild(include_file=needed)
+
+				# Preserve old libs if they are still in use
+				# TODO: Handle cases where the previous instance
+				# has already been uninstalled but it still has some
+				# preserved libraries in the registry that we may
+				# want to preserve here.
+				preserve_paths = self._find_libs_to_preserve()
+			finally:
+				plib_registry.unlock()
+				self.vartree.dbapi._fs_unlock()
+
+			if preserve_paths:
+				self._add_preserve_libs_to_contents(preserve_paths)
+
+		# If portage is reinstalling itself, remove the old
+		# version now since we want to use the temporary
+		# PORTAGE_BIN_PATH that will be removed when we return.
+		reinstall_self = False
+		if self.myroot == "/" and \
+			match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
+			reinstall_self = True
+
+		emerge_log = self._emerge_log
+
+		# If we have any preserved libraries then autoclean
+		# is forced so that preserve-libs logic doesn't have
+		# to account for the additional complexity of the
+		# AUTOCLEAN=no mode.
+		autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes" \
+			or preserve_paths
+
+		if autoclean:
+			emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
+
+		others_in_slot.append(self)  # self has just been merged
+		for dblnk in list(others_in_slot):
+			if dblnk is self:
+				continue
+			if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
+				continue
+			showMessage(_(">>> Safely unmerging already-installed instance...\n"))
+			emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
+			others_in_slot.remove(dblnk) # dblnk will unmerge itself now
+			dblnk._linkmap_broken = self._linkmap_broken
+			dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(self.mycpv)
+			dblnk.settings.backup_changes("REPLACED_BY_VERSION")
+			unmerge_rval = dblnk.unmerge(ldpath_mtimes=prev_mtimes,
+				others_in_slot=others_in_slot, needed=needed,
+				preserve_paths=preserve_paths)
+			dblnk.settings.pop("REPLACED_BY_VERSION", None)
+
+			if unmerge_rval == os.EX_OK:
+				emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
+			else:
+				emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
+
+			self.lockdb()
+			try:
+				# TODO: Check status and abort if necessary.
+				dblnk.delete()
+			finally:
+				self.unlockdb()
+			showMessage(_(">>> Original instance of package unmerged safely.\n"))
+
+		if len(others_in_slot) > 1:
+			showMessage(colorize("WARN", _("WARNING:"))
+				+ _(" AUTOCLEAN is disabled.  This can cause serious"
+				" problems due to overlapping packages.\n"),
+				level=logging.WARN, noiselevel=-1)
+
+		# We hold both directory locks.
+		self.dbdir = self.dbpkgdir
+		self.lockdb()
+		try:
+			self.delete()
+			_movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+		finally:
+			self.unlockdb()
+
+		# Check for file collisions with blocking packages
+		# and remove any colliding files from their CONTENTS
+		# since they now belong to this package.
+		self._clear_contents_cache()
+		contents = self.getcontents()
+		destroot_len = len(destroot) - 1
+		self.lockdb()
+		try:
+			for blocker in blockers:
+				self.vartree.dbapi.removeFromContents(blocker, iter(contents),
+					relative_paths=False)
+		finally:
+			self.unlockdb()
+
+		plib_registry = self.vartree.dbapi._plib_registry
+		if plib_registry:
+			self.vartree.dbapi._fs_lock()
+			plib_registry.lock()
+			try:
+				plib_registry.load()
+
+				if preserve_paths:
+					# keep track of the libs we preserved
+					plib_registry.register(self.mycpv, slot, counter,
+						sorted(preserve_paths))
+
+				# Unregister any preserved libs that this package has overwritten
+				# and update the contents of the packages that owned them.
+				plib_dict = plib_registry.getPreservedLibs()
+				for cpv, paths in plib_collisions.items():
+					if cpv not in plib_dict:
+						continue
+					has_vdb_entry = False
+					if cpv != self.mycpv:
+						# If we've replaced another instance with the
+						# same cpv then the vdb entry no longer belongs
+						# to it, so we'll have to get the slot and counter
+						# from plib_registry._data instead.
+						self.vartree.dbapi.lock()
+						try:
+							try:
+								slot, counter = self.vartree.dbapi.aux_get(
+									cpv, ["SLOT", "COUNTER"])
+							except KeyError:
+								pass
+							else:
+								has_vdb_entry = True
+								self.vartree.dbapi.removeFromContents(
+									cpv, paths)
+						finally:
+							self.vartree.dbapi.unlock()
+
+					if not has_vdb_entry:
+						# It's possible for previously unmerged packages
+						# to have preserved libs in the registry, so try
+						# to retrieve the slot and counter from there.
+						has_registry_entry = False
+						for plib_cps, (plib_cpv, plib_counter, plib_paths) in \
+							plib_registry._data.items():
+							if plib_cpv != cpv:
+								continue
+							try:
+								cp, slot = plib_cps.split(":", 1)
+							except ValueError:
+								continue
+							counter = plib_counter
+							has_registry_entry = True
+							break
+
+						if not has_registry_entry:
+							continue
+
+					remaining = [f for f in plib_dict[cpv] if f not in paths]
+					plib_registry.register(cpv, slot, counter, remaining)
+
+				plib_registry.store()
+			finally:
+				plib_registry.unlock()
+				self.vartree.dbapi._fs_unlock()
+
+		self.vartree.dbapi._add(self)
+		contents = self.getcontents()
+
+		#do postinst script
+		self.settings["PORTAGE_UPDATE_ENV"] = \
+			os.path.join(self.dbpkgdir, "environment.bz2")
+		self.settings.backup_changes("PORTAGE_UPDATE_ENV")
+		try:
+			phase = EbuildPhase(background=False, phase="postinst",
+				scheduler=self._scheduler, settings=self.settings)
+			phase.start()
+			a = phase.wait()
+			if a == os.EX_OK:
+				showMessage(_(">>> %s merged.\n") % self.mycpv)
+		finally:
+			self.settings.pop("PORTAGE_UPDATE_ENV", None)
+
+		if a != os.EX_OK:
+			# It's stupid to bail out here, so keep going regardless of
+			# phase return code.
+			showMessage(_("!!! FAILED postinst: ")+str(a)+"\n",
+				level=logging.ERROR, noiselevel=-1)
+
+		downgrade = False
+		for v in otherversions:
+			if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
+				downgrade = True
+
+		# Lock the config memory file to prevent symlink creation
+		# in merge_contents from overlapping with env-update.
+		self.vartree.dbapi._fs_lock()
+		try:
+			#update environment settings, library paths. DO NOT change symlinks.
+			env_update(makelinks=(not downgrade),
+				target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
+				contents=contents, env=self.settings.environ(),
+				writemsg_level=self._display_merge)
+		finally:
+			self.vartree.dbapi._fs_unlock()
+
+		# For gcc upgrades, preserved libs have to be removed after the
+		# library path has been updated.
+		self._prune_plib_registry()
+
+		return os.EX_OK
+
+	def _new_backup_path(self, p):
+		"""
+		This works for any type of path, such as a regular file, symlink,
+		or directory. The parent directory is assumed to exist.
+		The returned filename is of the form p + '.backup.' + x, where
+		x guarantees that the returned path does not exist yet.
+		"""
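+		# e.g. p='/etc/foo' -> '/etc/foo.backup.0000', or '.backup.0001'
+		# if that already exists, and so on.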
+		os = _os_merge
+
+		x = -1
+		while True:
+			x += 1
+			backup_p = p + '.backup.' + str(x).rjust(4, '0')
+			try:
+				os.lstat(backup_p)
+			except OSError:
+				break
+
+		return backup_p
+
+	def _merge_contents(self, srcroot, destroot, cfgfiledict):
+
+		cfgfiledict_orig = cfgfiledict.copy()
+
+		# open CONTENTS file (possibly overwriting old one) for recording
+		# Use atomic_ofstream for automatic coercion of raw bytes to
+		# unicode, in order to prevent TypeError when writing raw bytes
+		# to TextIOWrapper with python2.
+		outfile = atomic_ofstream(_unicode_encode(
+			os.path.join(self.dbtmpdir, 'CONTENTS'),
+			encoding=_encodings['fs'], errors='strict'),
+			mode='w', encoding=_encodings['repo.content'],
+			errors='backslashreplace')
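+		# Entry formats that mergeme() writes below (one per line):
+		#   dir <path>
+		#   obj <path> <md5> <mtime>
+		#   sym <path> -> <target> <mtime>
+		#   fif <path>  /  dev <path>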
+
+		# Don't bump mtimes on merge since some applications require
+		# preservation of timestamps.  This means that the unmerge phase must
+		# check to see if file belongs to an installed instance in the same
+		# slot.
+		mymtime = None
+
+		# set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
+		prevmask = os.umask(0)
+		secondhand = []
+
+		# we do a first merge; this will recurse through all files in our srcroot but also build up a
+		# "second hand" of symlinks to merge later
+		if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
+			return 1
+
+		# now, it's time for dealing with our second hand; we'll loop until we can't merge anymore. The rest are
+		# broken symlinks.  We'll merge them too.
+		lastlen = 0
+		while len(secondhand) and len(secondhand)!=lastlen:
+			# clear the thirdhand.	Anything from our second hand that
+			# couldn't get merged will be added to thirdhand.
+
+			thirdhand = []
+			if self.mergeme(srcroot, destroot, outfile, thirdhand,
+				secondhand, cfgfiledict, mymtime):
+				return 1
+
+			#swap hands
+			lastlen = len(secondhand)
+
+			# our thirdhand now becomes our secondhand.  It's ok to throw
+			# away secondhand since thirdhand contains all the stuff that
+			# couldn't be merged.
+			secondhand = thirdhand
+
+		if len(secondhand):
+			# force merge of remaining symlinks (broken or circular; oh well)
+			if self.mergeme(srcroot, destroot, outfile, None,
+				secondhand, cfgfiledict, mymtime):
+				return 1
+
+		#restore umask
+		os.umask(prevmask)
+
+		#if we opened it, close it
+		outfile.flush()
+		outfile.close()
+
+		# write out our collection of md5sums
+		if cfgfiledict != cfgfiledict_orig:
+			cfgfiledict.pop("IGNORE", None)
+			try:
+				writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+			except InvalidLocation:
+				self.settings._init_dirs()
+				writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+
+		return os.EX_OK
+
+	def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
+		"""
+		
+		This function handles actual merging of the package contents to the livefs.
+		It also handles config protection.
+		
+		@param srcroot: Where are we copying files from (usually ${D})
+		@type srcroot: String (Path)
+		@param destroot: Typically ${ROOT}
+		@type destroot: String (Path)
+		@param outfile: File to log operations to
+		@type outfile: File Object
+		@param secondhand: A set of items to merge in pass two (usually
+		symlinks that point to non-existing files that may get merged later)
+		@type secondhand: List
+		@param stufftomerge: Either a directory to merge, or a list of items.
+		@type stufftomerge: String or List
+		@param cfgfiledict: { File:mtime } mapping for config_protected files
+		@type cfgfiledict: Dictionary
+		@param thismtime: The current time (typically long(time.time()))
+		@type thismtime: Long
+		@rtype: None or Boolean
+		@returns:
+		1. True on failure
+		2. None otherwise
+		
+		"""
+
+		showMessage = self._display_merge
+		writemsg = self._display_merge
+
+		os = _os_merge
+		sep = os.sep
+		join = os.path.join
+		srcroot = normalize_path(srcroot).rstrip(sep) + sep
+		destroot = normalize_path(destroot).rstrip(sep) + sep
+		calc_prelink = "prelink-checksums" in self.settings.features
+
+		# this is supposed to merge a list of files.  There will be 2 forms of argument passing.
+		if isinstance(stufftomerge, basestring):
+			#A directory is specified.  Figure out protection paths, listdir() it and process it.
+			mergelist = os.listdir(join(srcroot, stufftomerge))
+			offset = stufftomerge
+		else:
+			mergelist = stufftomerge
+			offset = ""
+
+		for i, x in enumerate(mergelist):
+
+			mysrc = join(srcroot, offset, x)
+			mydest = join(destroot, offset, x)
+			# myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
+			myrealdest = join(sep, offset, x)
+			# stat file once, test using S_* macros many times (faster that way)
+			mystat = os.lstat(mysrc)
+			mymode = mystat[stat.ST_MODE]
+			# handy variables; mydest is the target object on the live filesystems;
+			# mysrc is the source object in the temporary install dir
+			try:
+				mydstat = os.lstat(mydest)
+				mydmode = mydstat.st_mode
+			except OSError as e:
+				if e.errno != errno.ENOENT:
+					raise
+				del e
+				#dest file doesn't exist
+				mydstat = None
+				mydmode = None
+
+			if stat.S_ISLNK(mymode):
+				# we are merging a symbolic link
+				myabsto = abssymlink(mysrc)
+				if myabsto.startswith(srcroot):
+					myabsto = myabsto[len(srcroot):]
+				myabsto = myabsto.lstrip(sep)
+				myto = os.readlink(mysrc)
+				if self.settings and self.settings["D"]:
+					if myto.startswith(self.settings["D"]):
+						myto = myto[len(self.settings["D"]):]
+				# myrealto contains the path of the real file to which this symlink points.
+				# we can simply test for existence of this file to see if the target has been merged yet
+				myrealto = normalize_path(os.path.join(destroot, myabsto))
+				if mydmode!=None:
+					#destination exists
+					if stat.S_ISDIR(mydmode):
+						# we can't merge a symlink over a directory
+						newdest = self._new_backup_path(mydest)
+						msg = []
+						msg.append("")
+						msg.append(_("Installation of a symlink is blocked by a directory:"))
+						msg.append("  '%s'" % mydest)
+						msg.append(_("This symlink will be merged with a different name:"))
+						msg.append("  '%s'" % newdest)
+						msg.append("")
+						self._eerror("preinst", msg)
+						mydest = newdest
+
+					elif not stat.S_ISLNK(mydmode):
+						if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
+							# Kill file blocking installation of symlink to dir #71787
+							pass
+						elif self.isprotected(mydest):
+							# Use md5 of the target in ${D} if it exists...
+							try:
+								newmd5 = perform_md5(join(srcroot, myabsto))
+							except FileNotFound:
+								# Maybe the target is merged already.
+								try:
+									newmd5 = perform_md5(myrealto)
+								except FileNotFound:
+									newmd5 = None
+							mydest = new_protect_filename(mydest, newmd5=newmd5)
+
+				# if secondhand is None it means we're operating in "force" mode and should not create a second hand.
+				if (secondhand != None) and (not os.path.exists(myrealto)):
+					# either the target directory doesn't exist yet or the target file doesn't exist -- or
+					# the target is a broken symlink.  We will add this file to our "second hand" and merge
+					# it later.
+					secondhand.append(mysrc[len(srcroot):])
+					continue
+				# unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
+				mymtime = movefile(mysrc, mydest, newmtime=thismtime,
+					sstat=mystat, mysettings=self.settings,
+					encoding=_encodings['merge'])
+				if mymtime != None:
+					showMessage(">>> %s -> %s\n" % (mydest, myto))
+					outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
+				else:
+					showMessage(_("!!! Failed to move file.\n"),
+						level=logging.ERROR, noiselevel=-1)
+					showMessage("!!! %s -> %s\n" % (mydest, myto),
+						level=logging.ERROR, noiselevel=-1)
+					return 1
+			elif stat.S_ISDIR(mymode):
+				# we are merging a directory
+				if mydmode != None:
+					# destination exists
+
+					if bsd_chflags:
+						# Save then clear flags on dest.
+						dflags = mydstat.st_flags
+						if dflags != 0:
+							bsd_chflags.lchflags(mydest, 0)
+
+					if not os.access(mydest, os.W_OK):
+						pkgstuff = pkgsplit(self.pkg)
+						writemsg(_("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1)
+						writemsg(_("!!! Please check permissions and directories for broken symlinks.\n"))
+						writemsg(_("!!! You may start the merge process again by using ebuild:\n"))
+						writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
+						writemsg(_("!!! And finish by running this: env-update\n\n"))
+						return 1
+
+					if stat.S_ISDIR(mydmode) or \
+						(stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
+						# a symlink to an existing directory will work for us; keep it:
+						showMessage("--- %s/\n" % mydest)
+						if bsd_chflags:
+							bsd_chflags.lchflags(mydest, dflags)
+					else:
+						# a non-directory and non-symlink-to-directory.  Won't work for us.  Move out of the way.
+						backup_dest = self._new_backup_path(mydest)
+						msg = []
+						msg.append("")
+						msg.append(_("Installation of a directory is blocked by a file:"))
+						msg.append("  '%s'" % mydest)
+						msg.append(_("This file will be renamed to a different name:"))
+						msg.append("  '%s'" % backup_dest)
+						msg.append("")
+						self._eerror("preinst", msg)
+						if movefile(mydest, backup_dest,
+							mysettings=self.settings,
+							encoding=_encodings['merge']) is None:
+							return 1
+						showMessage(_("bak %s %s.backup\n") % (mydest, mydest),
+							level=logging.ERROR, noiselevel=-1)
+						#now create our directory
+						try:
+							if self.settings.selinux_enabled():
+								_selinux_merge.mkdir(mydest, mysrc)
+							else:
+								os.mkdir(mydest)
+						except OSError as e:
+							# Error handling should be equivalent to
+							# portage.util.ensure_dirs() for cases
+							# like bug #187518.
+							if e.errno in (errno.EEXIST,):
+								pass
+							elif os.path.isdir(mydest):
+								pass
+							else:
+								raise
+							del e
+
+						if bsd_chflags:
+							bsd_chflags.lchflags(mydest, dflags)
+						os.chmod(mydest, mystat[0])
+						os.chown(mydest, mystat[4], mystat[5])
+						showMessage(">>> %s/\n" % mydest)
+				else:
+					try:
+						#destination doesn't exist
+						if self.settings.selinux_enabled():
+							_selinux_merge.mkdir(mydest, mysrc)
+						else:
+							os.mkdir(mydest)
+					except OSError as e:
+						# Error handling should be equivalent to
+						# portage.util.ensure_dirs() for cases
+						# like bug #187518.
+						if e.errno in (errno.EEXIST,):
+							pass
+						elif os.path.isdir(mydest):
+							pass
+						else:
+							raise
+						del e
+					os.chmod(mydest, mystat[0])
+					os.chown(mydest, mystat[4], mystat[5])
+					showMessage(">>> %s/\n" % mydest)
+				outfile.write("dir "+myrealdest+"\n")
+				# recurse and merge this directory
+				if self.mergeme(srcroot, destroot, outfile, secondhand,
+					join(offset, x), cfgfiledict, thismtime):
+					return 1
+			elif stat.S_ISREG(mymode):
+				# we are merging a regular file
+				mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
+				# calculate config file protection stuff
+				mydestdir = os.path.dirname(mydest)
+				moveme = 1
+				zing = "!!!"
+				mymtime = None
+				protected = self.isprotected(mydest)
+				if mydmode != None:
+					# destination file exists
+					
+					if stat.S_ISDIR(mydmode):
+						# install of destination is blocked by an existing directory with the same name
+						newdest = self._new_backup_path(mydest)
+						msg = []
+						msg.append("")
+						msg.append(_("Installation of a regular file is blocked by a directory:"))
+						msg.append("  '%s'" % mydest)
+						msg.append(_("This file will be merged with a different name:"))
+						msg.append("  '%s'" % newdest)
+						msg.append("")
+						self._eerror("preinst", msg)
+						mydest = newdest
+
+					elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
+						# install of destination is blocked by an existing regular file,
+						# or by a symlink to an existing regular file;
+						# now, config file management may come into play.
+						# we only need to tweak mydest if cfg file management is in play.
+						if protected:
+							# we have a protection path; enable config file management.
+							cfgprot = 0
+							destmd5 = perform_md5(mydest, calc_prelink=calc_prelink)
+							if mymd5 == destmd5:
+								#file already in place; simply update mtimes of destination
+								moveme = 1
+							else:
+								if mymd5 == cfgfiledict.get(myrealdest, [None])[0]:
+									""" An identical update has previously been
+									merged.  Skip it unless the user has chosen
+									--noconfmem."""
+									moveme = cfgfiledict["IGNORE"]
+									cfgprot = cfgfiledict["IGNORE"]
+									if not moveme:
+										zing = "---"
+										mymtime = mystat[stat.ST_MTIME]
+								else:
+									moveme = 1
+									cfgprot = 1
+							if moveme:
+								# Merging a new file, so update confmem.
+								cfgfiledict[myrealdest] = [mymd5]
+							elif destmd5 == cfgfiledict.get(myrealdest, [None])[0]:
+								"""A previously remembered update has been
+								accepted, so it is removed from confmem."""
+								del cfgfiledict[myrealdest]
+
+							if cfgprot:
+								mydest = new_protect_filename(mydest, newmd5=mymd5)
+
+				# With or without config protection, we merge the new file the
+				# same way, unless moveme=0 (an identical config update was
+				# previously merged and confmem tells us to skip it).
+				if moveme:
+					# Create hardlinks only for source files that already exist
+					# as hardlinks (having identical st_dev and st_ino).
+					hardlink_key = (mystat.st_dev, mystat.st_ino)
+
+					hardlink_candidates = self._md5_merge_map.get(hardlink_key)
+					if hardlink_candidates is None:
+						hardlink_candidates = []
+						self._md5_merge_map[hardlink_key] = hardlink_candidates
+
+					mymtime = movefile(mysrc, mydest, newmtime=thismtime,
+						sstat=mystat, mysettings=self.settings,
+						hardlink_candidates=hardlink_candidates,
+						encoding=_encodings['merge'])
+					if mymtime is None:
+						return 1
+					if hardlink_candidates is not None:
+						hardlink_candidates.append(mydest)
+					zing = ">>>"
+
+				if mymtime is not None:
+					outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
+				showMessage("%s %s\n" % (zing,mydest))
+			else:
+				# we are merging a fifo or device node
+				zing = "!!!"
+				if mydmode is None:
+					# destination doesn't exist
+					if movefile(mysrc, mydest, newmtime=thismtime,
+						sstat=mystat, mysettings=self.settings,
+						encoding=_encodings['merge']) is not None:
+						zing = ">>>"
+					else:
+						return 1
+				if stat.S_ISFIFO(mymode):
+					outfile.write("fif %s\n" % myrealdest)
+				else:
+					outfile.write("dev %s\n" % myrealdest)
+				showMessage(zing + " " + mydest + "\n")
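+
+	# An illustrative summary of the config-protection bookkeeping above
+	# (hypothetical file name): if a protected /etc/foo.conf differs in md5
+	# from the copy being merged, the new file is written to a
+	# new_protect_filename() path such as ._cfg0000_foo.conf, and its md5 is
+	# remembered in cfgfiledict so that an identical update seen again later
+	# is skipped, unless --noconfmem has set cfgfiledict["IGNORE"].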
+
+	def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
+		mydbapi=None, prev_mtimes=None, counter=None):
+		"""
+		@param myroot: ignored, self._eroot is used instead
+		"""
+		myroot = None
+		retval = -1
+		parallel_install = "parallel-install" in self.settings.features
+		if not parallel_install:
+			self.lockdb()
+		self.vartree.dbapi._bump_mtime(self.mycpv)
+		if self._scheduler is None:
+			self._scheduler = PollScheduler().sched_iface
+		try:
+			retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
+				cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,
+				counter=counter)
+
+			# If PORTAGE_BUILDDIR doesn't exist, then it probably means
+			# fail-clean is enabled, and the success/die hooks have
+			# already been called by EbuildPhase.
+			if os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
+
+				if retval == os.EX_OK:
+					phase = 'success_hooks'
+				else:
+					phase = 'die_hooks'
+
+				ebuild_phase = MiscFunctionsProcess(
+					background=False, commands=[phase],
+					scheduler=self._scheduler, settings=self.settings)
+				ebuild_phase.start()
+				ebuild_phase.wait()
+				self._elog_process()
+
+				if 'noclean' not in self.settings.features and \
+					(retval == os.EX_OK or \
+					'fail-clean' in self.settings.features):
+					if myebuild is None:
+						myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
+
+					doebuild_environment(myebuild, "clean",
+						settings=self.settings, db=mydbapi)
+					phase = EbuildPhase(background=False, phase="clean",
+						scheduler=self._scheduler, settings=self.settings)
+					phase.start()
+					phase.wait()
+		finally:
+			self.settings.pop('REPLACING_VERSIONS', None)
+			if self.vartree.dbapi._linkmap is None:
+				# preserve-libs is entirely disabled
+				pass
+			else:
+				self.vartree.dbapi._linkmap._clear_cache()
+			self.vartree.dbapi._bump_mtime(self.mycpv)
+			if not parallel_install:
+				self.unlockdb()
+		return retval
+
+	def getstring(self,name):
+		"returns contents of a file with whitespace converted to spaces"
+		if not os.path.exists(self.dbdir+"/"+name):
+			return ""
+		mydata = io.open(
+			_unicode_encode(os.path.join(self.dbdir, name),
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['repo.content'], errors='replace'
+			).read().split()
+		return " ".join(mydata)
+
+	def copyfile(self,fname):
+		shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
+
+	def getfile(self,fname):
+		if not os.path.exists(self.dbdir+"/"+fname):
+			return ""
+		return io.open(_unicode_encode(os.path.join(self.dbdir, fname),
+			encoding=_encodings['fs'], errors='strict'), 
+			mode='r', encoding=_encodings['repo.content'], errors='replace'
+			).read()
+
+	def setfile(self,fname,data):
+		kwargs = {}
+		if fname == 'environment.bz2' or not isinstance(data, basestring):
+			kwargs['mode'] = 'wb'
+		else:
+			kwargs['mode'] = 'w'
+			kwargs['encoding'] = _encodings['repo.content']
+		write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
+
+	def getelements(self,ename):
+		if not os.path.exists(self.dbdir+"/"+ename):
+			return []
+		mylines = io.open(_unicode_encode(
+			os.path.join(self.dbdir, ename),
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['repo.content'], errors='replace'
+			).readlines()
+		myreturn = []
+		for x in mylines:
+			for y in x[:-1].split():
+				myreturn.append(y)
+		return myreturn
+
+	def setelements(self,mylist,ename):
+		myelement = io.open(_unicode_encode(
+			os.path.join(self.dbdir, ename),
+			encoding=_encodings['fs'], errors='strict'),
+			mode='w', encoding=_encodings['repo.content'],
+			errors='backslashreplace')
+		for x in mylist:
+			myelement.write(_unicode_decode(x+"\n"))
+		myelement.close()
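+
+	# For illustration: setelements(["x86", "~amd64"], "KEYWORDS") writes one
+	# entry per line, and getelements("KEYWORDS") reads them back as
+	# ["x86", "~amd64"] regardless of how the whitespace was laid out.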
+
+	def isregular(self):
+		"Is this a regular package (does it have a CATEGORY file?  A dblink can be virtual *and* regular)"
+		return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
+
+def merge(mycat, mypkg, pkgloc, infloc,
+	myroot=None, settings=None, myebuild=None,
+	mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
+	scheduler=None):
+	"""
+	@param myroot: ignored, settings['EROOT'] is used instead
+	"""
+	myroot = None
+	if settings is None:
+		raise TypeError("settings argument is required")
+	if not os.access(settings['EROOT'], os.W_OK):
+		writemsg(_("Permission denied: access('%s', W_OK)\n") % settings['EROOT'],
+			noiselevel=-1)
+		return errno.EACCES
+	background = (settings.get('PORTAGE_BACKGROUND') == '1')
+	merge_task = MergeProcess(
+		mycat=mycat, mypkg=mypkg, settings=settings,
+		treetype=mytree, vartree=vartree,
+		scheduler=(scheduler or PollScheduler().sched_iface),
+		background=background, blockers=blockers, pkgloc=pkgloc,
+		infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
+		prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'))
+	merge_task.start()
+	retcode = merge_task.wait()
+	return retcode
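+
+# Illustrative call sketch (mysettings, myvardb and myvartree are placeholder
+# names, not defined in this module):
+#
+#	retval = merge("app-misc", "foo-1.0", "/tmp/pkg-image", "/tmp/pkg-info",
+#		settings=mysettings, mydbapi=myvardb, vartree=myvartree,
+#		mytree="vartree")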
+
+def unmerge(cat, pkg, myroot=None, settings=None,
+	mytrimworld=None, vartree=None,
+	ldpath_mtimes=None, scheduler=None):
+	"""
+	@param myroot: ignored, settings['EROOT'] is used instead
+	@param mytrimworld: ignored
+	"""
+	myroot = None
+	if settings is None:
+		raise TypeError("settings argument is required")
+	mylink = dblink(cat, pkg, settings=settings, treetype="vartree",
+		vartree=vartree, scheduler=scheduler)
+	vartree = mylink.vartree
+	parallel_install = "parallel-install" in settings.features
+	if not parallel_install:
+		mylink.lockdb()
+	try:
+		if mylink.exists():
+			retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
+			if retval == os.EX_OK:
+				mylink.lockdb()
+				try:
+					mylink.delete()
+				finally:
+					mylink.unlockdb()
+			return retval
+		return os.EX_OK
+	finally:
+		if vartree.dbapi._linkmap is None:
+			# preserve-libs is entirely disabled
+			pass
+		else:
+			vartree.dbapi._linkmap._clear_cache()
+		if not parallel_install:
+			mylink.unlockdb()
+
+def write_contents(contents, root, f):
+	"""
+	Write contents to any file like object. The file will be left open.
+	"""
+	root_len = len(root) - 1
+	for filename in sorted(contents):
+		entry_data = contents[filename]
+		entry_type = entry_data[0]
+		relative_filename = filename[root_len:]
+		if entry_type == "obj":
+			entry_type, mtime, md5sum = entry_data
+			line = "%s %s %s %s\n" % \
+				(entry_type, relative_filename, md5sum, mtime)
+		elif entry_type == "sym":
+			entry_type, mtime, link = entry_data
+			line = "%s %s -> %s %s\n" % \
+				(entry_type, relative_filename, link, mtime)
+		else: # dir, dev, fif
+			line = "%s %s\n" % (entry_type, relative_filename)
+		f.write(line)
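+
+# A sketch of the output format, using made-up paths and checksum: for a
+# CONTENTS dict such as
+#
+#	contents = {
+#		"/usr/bin": ("dir",),
+#		"/usr/bin/foo": ("obj", "1313937487", "d41d8cd98f00b204e9800998ecf8427e"),
+#		"/usr/bin/foo-link": ("sym", "1313937487", "foo"),
+#	}
+#
+# write_contents(contents, "/", sys.stdout) would emit:
+#
+#	dir /usr/bin
+#	obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1313937487
+#	sym /usr/bin/foo-link -> foo 1313937487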
+
+def tar_contents(contents, root, tar, protect=None, onProgress=None):
+	os = _os_merge
+
+	try:
+		for x in contents:
+			_unicode_encode(x,
+				encoding=_encodings['merge'],
+				errors='strict')
+	except UnicodeEncodeError:
+		# The package appears to have been merged with a
+		# different value of sys.getfilesystemencoding(),
+		# so fall back to utf_8 if appropriate.
+		try:
+			for x in contents:
+				_unicode_encode(x,
+					encoding=_encodings['fs'],
+					errors='strict')
+		except UnicodeEncodeError:
+			pass
+		else:
+			os = portage.os
+
+	root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
+	id_strings = {}
+	maxval = len(contents)
+	curval = 0
+	if onProgress:
+		onProgress(maxval, 0)
+	paths = list(contents)
+	paths.sort()
+	for path in paths:
+		curval += 1
+		try:
+			lst = os.lstat(path)
+		except OSError as e:
+			if e.errno != errno.ENOENT:
+				raise
+			del e
+			if onProgress:
+				onProgress(maxval, curval)
+			continue
+		contents_type = contents[path][0]
+		if path.startswith(root):
+			arcname = path[len(root):]
+		else:
+			raise ValueError("invalid root argument: '%s'" % root)
+		live_path = path
+		if 'dir' == contents_type and \
+			not stat.S_ISDIR(lst.st_mode) and \
+			os.path.isdir(live_path):
+			# Even though this was a directory in the original ${D}, it exists
+			# as a symlink to a directory in the live filesystem.  It must be
+			# recorded as a real directory in the tar file to ensure that tar
+			# can properly extract its children.
+			live_path = os.path.realpath(live_path)
+		tarinfo = tar.gettarinfo(live_path, arcname)
+
+		if stat.S_ISREG(lst.st_mode):
+			if protect and protect(path):
+				# Create an empty file as a place holder in order to avoid
+				# potential collision-protect issues.
+				f = tempfile.TemporaryFile()
+				f.write(_unicode_encode(
+					"# empty file because --include-config=n " + \
+					"when `quickpkg` was used\n"))
+				f.flush()
+				f.seek(0)
+				tarinfo.size = os.fstat(f.fileno()).st_size
+				tar.addfile(tarinfo, f)
+				f.close()
+			else:
+				f = open(_unicode_encode(path,
+					encoding=object.__getattribute__(os, '_encoding'),
+					errors='strict'), 'rb')
+				try:
+					tar.addfile(tarinfo, f)
+				finally:
+					f.close()
+		else:
+			tar.addfile(tarinfo)
+		if onProgress:
+			onProgress(maxval, curval)
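+
+# Typical usage sketch (assumed, in the spirit of quickpkg; contents and
+# settings are placeholders here): pack an installed package's CONTENTS
+# into a bzip2 tarball.
+#
+#	import tarfile
+#	tar = tarfile.open("/tmp/app-misc_foo-1.0.tar.bz2", "w:bz2")
+#	tar_contents(contents, settings["ROOT"], tar)
+#	tar.close()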

diff --git a/portage_with_autodep/pym/portage/dbapi/virtual.py b/portage_with_autodep/pym/portage/dbapi/virtual.py
new file mode 100644
index 0000000..ec97ffe
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/virtual.py
@@ -0,0 +1,131 @@
+# Copyright 1998-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+from portage.dbapi import dbapi
+from portage import cpv_getkey
+
+class fakedbapi(dbapi):
+	"""A fake dbapi that allows consumers to inject/remove packages to/from it.
+	portage.settings is required to maintain the dbAPI.
+	"""
+	def __init__(self, settings=None, exclusive_slots=True):
+		"""
+		@param exclusive_slots: When True, injecting a package with SLOT
+			metadata causes an existing package in the same slot to be
+			automatically removed (default is True).
+		@type exclusive_slots: Boolean
+		"""
+		self._exclusive_slots = exclusive_slots
+		self.cpvdict = {}
+		self.cpdict = {}
+		if settings is None:
+			from portage import settings
+		self.settings = settings
+		self._match_cache = {}
+
+	def _clear_cache(self):
+		if self._categories is not None:
+			self._categories = None
+		if self._match_cache:
+			self._match_cache = {}
+
+	def match(self, origdep, use_cache=1):
+		result = self._match_cache.get(origdep, None)
+		if result is not None:
+			return result[:]
+		result = dbapi.match(self, origdep, use_cache=use_cache)
+		self._match_cache[origdep] = result
+		return result[:]
+
+	def cpv_exists(self, mycpv, myrepo=None):
+		return mycpv in self.cpvdict
+
+	def cp_list(self, mycp, use_cache=1, myrepo=None):
+		cachelist = self._match_cache.get(mycp)
+		# cp_list() doesn't expand old-style virtuals
+		if cachelist and cachelist[0].startswith(mycp):
+			return cachelist[:]
+		cpv_list = self.cpdict.get(mycp)
+		if cpv_list is None:
+			cpv_list = []
+		self._cpv_sort_ascending(cpv_list)
+		if not (not cpv_list and mycp.startswith("virtual/")):
+			self._match_cache[mycp] = cpv_list
+		return cpv_list[:]
+
+	def cp_all(self):
+		return list(self.cpdict)
+
+	def cpv_all(self):
+		return list(self.cpvdict)
+
+	def cpv_inject(self, mycpv, metadata=None):
+		"""Adds a cpv to the list of available packages. See the
+		exclusive_slots constructor parameter for behavior with
+		respect to SLOT metadata.
+		@param mycpv: cpv for the package to inject
+		@type mycpv: str
+		@param metadata: dictionary of raw metadata for aux_get() calls
+		@type metadata: dict
+		"""
+		self._clear_cache()
+		mycp = cpv_getkey(mycpv)
+		self.cpvdict[mycpv] = metadata
+		myslot = None
+		if self._exclusive_slots and metadata:
+			myslot = metadata.get("SLOT", None)
+		if myslot and mycp in self.cpdict:
+			# If necessary, remove another package in the same SLOT.
+			for cpv in self.cpdict[mycp]:
+				if mycpv != cpv:
+					other_metadata = self.cpvdict[cpv]
+					if other_metadata:
+						if myslot == other_metadata.get("SLOT", None):
+							self.cpv_remove(cpv)
+							break
+		if mycp not in self.cpdict:
+			self.cpdict[mycp] = []
+		if mycpv not in self.cpdict[mycp]:
+			self.cpdict[mycp].append(mycpv)
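+
+	# Illustrative example of the exclusive_slots behavior (hypothetical
+	# package names; a real settings object is not needed for this path):
+	#
+	#	>>> fakedb = fakedbapi(settings=object())
+	#	>>> fakedb.cpv_inject("sys-apps/foo-1", metadata={"SLOT": "0"})
+	#	>>> fakedb.cpv_inject("sys-apps/foo-2", metadata={"SLOT": "0"})
+	#	>>> fakedb.cp_list("sys-apps/foo")
+	#	['sys-apps/foo-2']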
+
+	def cpv_remove(self,mycpv):
+		"""Removes a cpv from the list of available packages."""
+		self._clear_cache()
+		mycp = cpv_getkey(mycpv)
+		if mycpv in self.cpvdict:
+			del self.cpvdict[mycpv]
+		if mycp not in self.cpdict:
+			return
+		while mycpv in self.cpdict[mycp]:
+			del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
+		if not len(self.cpdict[mycp]):
+			del self.cpdict[mycp]
+
+	def aux_get(self, mycpv, wants, myrepo=None):
+		if not self.cpv_exists(mycpv):
+			raise KeyError(mycpv)
+		metadata = self.cpvdict[mycpv]
+		if not metadata:
+			return ["" for x in wants]
+		return [metadata.get(x, "") for x in wants]
+
+	def aux_update(self, cpv, values):
+		self._clear_cache()
+		self.cpvdict[cpv].update(values)
+
+class testdbapi(object):
+	"""A dbapi instance with completely fake functions to get by without hitting disk.
+	TODO(antarus):
+	This class really needs to be rewritten to have better stubs; but these work for now.
+	The dbapi classes themselves need unit tests...and that will be a lot of work.
+	"""
+
+	def __init__(self):
+		self.cpvs = {}
+		def f(*args, **kwargs):
+			return True
+		fake_api = dir(dbapi)
+		for call in fake_api:
+			if not hasattr(self, call):
+				setattr(self, call, f)

diff --git a/portage_with_autodep/pym/portage/debug.py b/portage_with_autodep/pym/portage/debug.py
new file mode 100644
index 0000000..ce642fe
--- /dev/null
+++ b/portage_with_autodep/pym/portage/debug.py
@@ -0,0 +1,120 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import sys
+
+try:
+	import threading
+except ImportError:
+	import dummy_threading as threading
+
+import portage.const
+from portage.util import writemsg
+
+def set_trace(on=True):
+	if on:
+		t = trace_handler()
+		threading.settrace(t.event_handler)
+		sys.settrace(t.event_handler)
+	else:
+		sys.settrace(None)
+		threading.settrace(None)
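+
+# Typical usage sketch (assumed): bracket a suspect code path with tracing.
+#
+#	portage.debug.set_trace(True)
+#	suspect_function()  # hypothetical call to be traced
+#	portage.debug.set_trace(False)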
+
+class trace_handler(object):
+
+	def __init__(self):
+		python_system_paths = []
+		for x in sys.path:
+			if os.path.basename(x).startswith("python2."):
+				python_system_paths.append(x)
+
+		self.ignore_prefixes = []
+		for x in python_system_paths:
+			self.ignore_prefixes.append(x + os.sep)
+
+		self.trim_filename = prefix_trimmer(os.path.join(portage.const.PORTAGE_BASE_PATH, "pym") + os.sep).trim
+		self.show_local_lines = False
+		self.max_repr_length = 200
+
+	def event_handler(self, *args):
+		frame, event, arg = args
+		if "line" == event:
+			if self.show_local_lines:
+				self.trace_line(*args)
+		else:
+			if not self.ignore_filename(frame.f_code.co_filename):
+				self.trace_event(*args)
+				return self.event_handler
+
+	def trace_event(self, frame, event, arg):
+		writemsg("%s line=%d name=%s event=%s %slocals=%s\n" % \
+		(self.trim_filename(frame.f_code.co_filename),
+		frame.f_lineno,
+		frame.f_code.co_name,
+		event,
+		self.arg_repr(frame, event, arg),
+		self.locals_repr(frame, event, arg)))
+
+	def arg_repr(self, frame, event, arg):
+		my_repr = None
+		if "return" == event:
+			my_repr = repr(arg)
+			if len(my_repr) > self.max_repr_length:
+				my_repr = "'omitted'"
+			return "value=%s " % my_repr
+		elif "exception" == event:
+			my_repr = repr(arg[1])
+			if len(my_repr) > self.max_repr_length:
+				my_repr = "'omitted'"
+			return "type=%s value=%s " % (arg[0], my_repr)
+
+		return ""
+
+	def trace_line(self, frame, event, arg):
+		writemsg("%s line=%d\n" % (self.trim_filename(frame.f_code.co_filename), frame.f_lineno))
+
+	def ignore_filename(self, filename):
+		if filename:
+			for x in self.ignore_prefixes:
+				if filename.startswith(x):
+					return True
+		return False
+
+	def locals_repr(self, frame, event, arg):
+		"""Create a representation of the locals dict that is suitable for
+		tracing output."""
+
+		my_locals = frame.f_locals.copy()
+
+		# prevent unsafe __repr__ call on self when __init__ is called
+		# (method calls aren't safe until after __init__ has completed).
+		if frame.f_code.co_name == "__init__" and "self" in my_locals:
+			my_locals["self"] = "omitted"
+
+		# We omit items that will lead to unreasonable bloat of the trace
+		# output (and resulting log file).
+		for k, v in my_locals.items():
+			my_repr = repr(v)
+			if len(my_repr) > self.max_repr_length:
+				my_locals[k] = "omitted"
+		return my_locals
+
+class prefix_trimmer(object):
+	def __init__(self, prefix):
+		self.prefix = prefix
+		self.cut_index = len(prefix)
+		self.previous = None
+		self.previous_trimmed = None
+
+	def trim(self, s):
+		"""Remove a prefix from the string and return the result.
+		The previous result is automatically cached."""
+		if s == self.previous:
+			return self.previous_trimmed
+		else:
+			# Remember the input as well; otherwise the cache check
+			# above could never hit on the next call.
+			self.previous = s
+			if s.startswith(self.prefix):
+				self.previous_trimmed = s[self.cut_index:]
+			else:
+				self.previous_trimmed = s
+			return self.previous_trimmed
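+
+# A small illustrative example (paths are hypothetical):
+#
+#	>>> trimmer = prefix_trimmer("/usr/lib/portage/pym/")
+#	>>> trimmer.trim("/usr/lib/portage/pym/portage/dep/__init__.py")
+#	'portage/dep/__init__.py'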

diff --git a/portage_with_autodep/pym/portage/dep/__init__.py b/portage_with_autodep/pym/portage/dep/__init__.py
new file mode 100644
index 0000000..fd5ad30
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dep/__init__.py
@@ -0,0 +1,2432 @@
+# deps.py -- Portage dependency resolution functions
+# Copyright 2003-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = [
+	'Atom', 'best_match_to_list', 'cpvequal',
+	'dep_getcpv', 'dep_getkey', 'dep_getslot',
+	'dep_getusedeps', 'dep_opconvert', 'flatten',
+	'get_operator', 'isjustname', 'isspecific',
+	'isvalidatom', 'match_from_list', 'match_to_list',
+	'paren_enclose', 'paren_normalize', 'paren_reduce',
+	'remove_slot', 'strip_empty', 'use_reduce', 
+	'_repo_separator', '_slot_separator',
+]
+
+# DEPEND SYNTAX:
+#
+# 'use?' only affects the immediately following word!
+# Nesting is the only legal way to form multiple '[!]use?' requirements.
+#
+# Where: 'a' and 'b' are use flags, and 'z' is a depend atom.
+#
+# "a? z"           -- If 'a' in [use], then b is valid.
+# "a? ( z )"       -- Syntax with parenthesis.
+# "a? b? z"        -- Deprecated.
+# "a? ( b? z )"    -- Valid
+# "a? ( b? ( z ) ) -- Valid
+#
+
+import re, sys
+import warnings
+from itertools import chain
+from portage import _unicode_decode
+from portage.eapi import eapi_has_slot_deps, eapi_has_src_uri_arrows, \
+	eapi_has_use_deps, eapi_has_strong_blocks, eapi_has_use_dep_defaults
+from portage.exception import InvalidAtom, InvalidData, InvalidDependString
+from portage.localization import _
+from portage.versions import catpkgsplit, catsplit, \
+	pkgcmp, ververify, _cp, _cpv
+import portage.cache.mappings
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+# API consumers included in portage should set this to True.
+# Once the relevant API changes are in a portage release with
+# stable keywords, make these warnings unconditional.
+_internal_warnings = False
+
+def cpvequal(cpv1, cpv2):
+	"""
+	
+	@param cpv1: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1"
+	@type cpv1: String
+	@param cpv2: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1"
+	@type cpv2: String
+	@rtype: Boolean
+	@return:
+	1.  True if cpv1 == cpv2
+	2.  False otherwise
+	3.  Raises PortageException if cpv1 or cpv2 is not a CPV
+
+	Example Usage:
+	>>> from portage.dep import cpvequal
+	>>> cpvequal("sys-apps/portage-2.1","sys-apps/portage-2.1")
+	True
+
+	"""
+
+	split1 = catpkgsplit(cpv1)
+	split2 = catpkgsplit(cpv2)
+	
+	if not split1 or not split2:
+		raise portage.exception.PortageException(_("Invalid data '%s, %s', parameter was not a CPV") % (cpv1, cpv2))
+	
+	if split1[0] != split2[0]:
+		return False
+	
+	return (pkgcmp(split1[1:], split2[1:]) == 0)
+
+def strip_empty(myarr):
+	"""
+	Strip all empty elements from an array
+
+	@param myarr: The list of elements
+	@type myarr: List
+	@rtype: Array
+	@return: The array with empty elements removed
+	"""
+	warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+		('portage.dep.strip_empty',), DeprecationWarning, stacklevel=2)
+	return [x for x in myarr if x]
+
+def paren_reduce(mystr):
+	"""
+	Take a string and convert all paren enclosed entities into sublists and
+	split the list elements by spaces. All redundant brackets are removed.
+
+	Example usage:
+		>>> paren_reduce('foobar foo? ( bar baz )')
+		['foobar', 'foo?', ['bar', 'baz']]
+
+	@param mystr: The string to reduce
+	@type mystr: String
+	@rtype: Array
+	@return: The reduced string in an array
+	"""
+	if _internal_warnings:
+		warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+			('portage.dep.paren_reduce',), DeprecationWarning, stacklevel=2)
+	mysplit = mystr.split()
+	level = 0
+	stack = [[]]
+	need_bracket = False
+
+	for token in mysplit:
+		if token == "(":
+			need_bracket = False
+			stack.append([])
+			level += 1
+		elif token == ")":
+			if need_bracket:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % mystr)
+			if level > 0:
+				level -= 1
+				l = stack.pop()
+				is_single = (len(l) == 1 or (len(l)==2 and (l[0] == "||" or l[0][-1] == "?")))
+
+				def ends_in_any_of_dep(k):
+					return k>=0 and stack[k] and stack[k][-1] == "||"
+
+				def ends_in_operator(k):
+					return k>=0 and stack[k] and (stack[k][-1] == "||" or stack[k][-1][-1] == "?")
+
+				def special_append():
+					"""
+					Use extend instead of append if possible. This kills all redundant brackets.
+					"""
+					if is_single and (not stack[level] or not stack[level][-1][-1] == "?"):
+						if len(l) == 1 and isinstance(l[0], list):
+							# l = [[...]]
+							stack[level].extend(l[0])
+						else:
+							stack[level].extend(l)
+					else:	
+						stack[level].append(l)
+
+				if l:
+					if not ends_in_any_of_dep(level-1) and not ends_in_operator(level):
+						#Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+						stack[level].extend(l)
+					elif not stack[level]:
+						#An '||' in the level above forces us to keep the brackets.
+						special_append()
+					elif len(l) == 1 and ends_in_any_of_dep(level):
+						#Optimize: || ( A ) -> A
+						stack[level].pop()
+						special_append()
+					elif len(l) == 2 and (l[0] == "||" or l[0][-1] == "?") and stack[level][-1] in (l[0], "||"):
+						#Optimize: 	|| ( || ( ... ) ) -> || ( ... )
+						#			foo? ( foo? ( ... ) ) -> foo? ( ... )
+						#			|| ( foo? ( ... ) ) -> foo? ( ... )
+						stack[level].pop()
+						special_append()
+					else:
+						special_append()
+				else:
+					if stack[level] and (stack[level][-1] == "||" or stack[level][-1][-1] == "?"):
+						stack[level].pop()
+			else:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % mystr)
+		elif token == "||":
+			if need_bracket:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % mystr)
+			need_bracket = True
+			stack[level].append(token)
+		else:
+			if need_bracket:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % mystr)
+
+			if token[-1] == "?":
+				need_bracket = True
+			
+			stack[level].append(token)
+
+	if level != 0 or need_bracket:
+		raise InvalidDependString(
+			_("malformed syntax: '%s'") % mystr)
+	
+	return stack[0]
+
+class paren_normalize(list):
+	"""Take a dependency structure as returned by paren_reduce or use_reduce
+	and generate an equivalent structure that has no redundant lists."""
+	def __init__(self, src):
+		if _internal_warnings:
+			warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+				('portage.dep.paren_normalize',), DeprecationWarning, stacklevel=2)
+		list.__init__(self)
+		self._zap_parens(src, self)
+
+	def _zap_parens(self, src, dest, disjunction=False):
+		if not src:
+			return dest
+		i = iter(src)
+		for x in i:
+			if isinstance(x, basestring):
+				if x in ('||', '^^'):
+					y = self._zap_parens(next(i), [], disjunction=True)
+					if len(y) == 1:
+						dest.append(y[0])
+					else:
+						dest.append(x)
+						dest.append(y)
+				elif x.endswith("?"):
+					dest.append(x)
+					dest.append(self._zap_parens(next(i), []))
+				else:
+					dest.append(x)
+			else:
+				if disjunction:
+					x = self._zap_parens(x, [])
+					if len(x) == 1:
+						dest.append(x[0])
+					else:
+						dest.append(x)
+				else:
+					self._zap_parens(x, dest)
+		return dest
+
+def paren_enclose(mylist, unevaluated_atom=False):
+	"""
+	Convert a list to a string with sublists enclosed with parens.
+
+	Example usage:
+		>>> test = ['foobar','foo',['bar','baz']]
+		>>> paren_enclose(test)
+		'foobar foo ( bar baz )'
+
+	@param mylist: The list
+	@type mylist: List
+	@rtype: String
+	@return: The paren enclosed string
+	"""
+	mystrparts = []
+	for x in mylist:
+		if isinstance(x, list):
+			mystrparts.append("( "+paren_enclose(x)+" )")
+		else:
+			if unevaluated_atom:
+				x = getattr(x, 'unevaluated_atom', x)
+			mystrparts.append(x)
+	return " ".join(mystrparts)
+
+def use_reduce(depstr, uselist=[], masklist=[], matchall=False, excludeall=[], is_src_uri=False, \
+	eapi=None, opconvert=False, flat=False, is_valid_flag=None, token_class=None, matchnone=False):
+	"""
+	Takes a dep string and reduces the use? conditionals out, leaving an array
+	with subarrays. All redundant brackets are removed.
+
+	@param depstr: depstring
+	@type depstr: String
+	@param uselist: List of use enabled flags
+	@type uselist: List
+	@param masklist: List of masked flags (always treated as disabled)
+	@type masklist: List
+	@param matchall: Treat all conditionals as active. Used by repoman. 
+	@type matchall: Bool
+	@param excludeall: List of flags for which negated conditionals are always treated as inactive.
+	@type excludeall: List
+	@param is_src_uri: Indicates if depstr represents a SRC_URI
+	@type is_src_uri: Bool
+	@param eapi: Indicates the EAPI the dep string has to comply to
+	@type eapi: String
+	@param opconvert: Put every operator as first element into its argument list
+	@type opconvert: Bool
+	@param flat: Create a flat list of all tokens
+	@type flat: Bool
+	@param is_valid_flag: Function that decides if a given use flag might be used in use conditionals
+	@type is_valid_flag: Function
+	@param token_class: Convert all non operator tokens into this class
+	@type token_class: Class
+	@param matchnone: Treat all conditionals as inactive. Used by digestgen(). 
+	@type matchnone: Bool
+	@rtype: List
+	@return: The use reduced depend array
+	"""
+	if isinstance(depstr, list):
+		if _internal_warnings:
+			warnings.warn(_("Passing paren_reduced dep arrays to %s is deprecated. " + \
+				"Pass the original dep string instead.") % \
+				('portage.dep.use_reduce',), DeprecationWarning, stacklevel=2)
+		depstr = paren_enclose(depstr)
+
+	if opconvert and flat:
+		raise ValueError("portage.dep.use_reduce: 'opconvert' and 'flat' are mutually exclusive")
+
+	if matchall and matchnone:
+		raise ValueError("portage.dep.use_reduce: 'matchall' and 'matchnone' are mutually exclusive")
+
+	useflag_re = _get_useflag_re(eapi)
+
+	def is_active(conditional):
+		"""
+		Decides if a given use conditional is active.
+		"""
+		if conditional.startswith("!"):
+			flag = conditional[1:-1]
+			is_negated = True
+		else:
+			flag = conditional[:-1]
+			is_negated = False
+		
+		if is_valid_flag:
+			if not is_valid_flag(flag):
+				msg = _("USE flag '%s' referenced in " + \
+					"conditional '%s' is not in IUSE") \
+					% (flag, conditional)
+				e = InvalidData(msg, category='IUSE.missing')
+				raise InvalidDependString(msg, errors=(e,))
+		else:
+			if useflag_re.match(flag) is None:
+				raise InvalidDependString(
+					_("invalid use flag '%s' in conditional '%s'") % (flag, conditional))
+
+		if is_negated and flag in excludeall:
+			return False
+
+		if flag in masklist:
+			return is_negated
+
+		if matchall:
+			return True
+
+		if matchnone:
+			return False
+
+		return (flag in uselist and not is_negated) or \
+			(flag not in uselist and is_negated)
+
+	def missing_white_space_check(token, pos):
+		"""
+		Used to generate good error messages for invalid tokens.
+		"""
+		for x in (")", "(", "||"):
+			if token.startswith(x) or token.endswith(x):
+				raise InvalidDependString(
+					_("missing whitespace around '%s' at '%s', token %s") % (x, token, pos+1))
+
+	mysplit = depstr.split()
+	#Count the bracket level.
+	level = 0
+	#We parse into a stack. Every time we hit a '(', a new empty list is appended to the stack.
+	#When we hit a ')', the last list in the stack is merged with list one level up.
+	stack = [[]]
+	#Set need_bracket to True after use conditionals or ||. Other tokens need to ensure
+	#that need_bracket is not True.
+	need_bracket = False
+	#Set need_simple_token to True after a SRC_URI arrow. Other tokens need to ensure
+	#that need_simple_token is not True.
+	need_simple_token = False
+
+	for pos, token in enumerate(mysplit):
+		if token == "(":
+			if need_simple_token:
+				raise InvalidDependString(
+					_("expected: file name, got: '%s', token %s") % (token, pos+1))
+			if len(mysplit) >= pos+2 and mysplit[pos+1] == ")":
+				raise InvalidDependString(
+					_("expected: dependency string, got: ')', token %s") % (pos+1,))
+			need_bracket = False
+			stack.append([])
+			level += 1
+		elif token == ")":
+			if need_bracket:
+				raise InvalidDependString(
+					_("expected: '(', got: '%s', token %s") % (token, pos+1))
+			if need_simple_token:
+				raise InvalidDependString(
+					_("expected: file name, got: '%s', token %s") % (token, pos+1))
+			if level > 0:
+				level -= 1
+				l = stack.pop()
+
+				is_single = len(l) == 1 or \
+					(opconvert and l and l[0] == "||") or \
+					(not opconvert and len(l)==2 and l[0] == "||")
+				ignore = False
+
+				if flat:
+					#In 'flat' mode, we simply merge all lists into a single large one.
+					if stack[level] and stack[level][-1][-1] == "?":
+						#The last token before the '(' that matches the current ')'
+						#was a use conditional. The conditional is removed in any case.
+						#Merge the current list if needed.
+						if is_active(stack[level][-1]):
+							stack[level].pop()
+							stack[level].extend(l)
+						else:
+							stack[level].pop()
+					else:
+						stack[level].extend(l)
+					continue
+
+				if stack[level]:
+					if stack[level][-1] == "||" and not l:
+						#Optimize: || ( ) -> .
+						stack[level].pop()
+					elif stack[level][-1][-1] == "?":
+						#The last token before the '(' that matches the current ')'
+						#was a use conditional, remove it and decide if we
+						#have to keep the current list.
+						if not is_active(stack[level][-1]):
+							ignore = True
+						stack[level].pop()
+
+				def ends_in_any_of_dep(k):
+					return k>=0 and stack[k] and stack[k][-1] == "||"
+
+				def starts_with_any_of_dep(k):
+					#'ends_in_any_of_dep' for opconvert
+					return k>=0 and stack[k] and stack[k][0] == "||"
+
+				def last_any_of_operator_level(k):
+					#Returns the level of the last || operator if it is in effect for
+					#the current level. It is not in effect, if there is a level, that
+					#ends in a non-operator. This is almost equivalent to stack[level][-1]=="||",
+					#except that it skips empty levels.
+					while k>=0:
+						if stack[k]:
+							if stack[k][-1] == "||":
+								return k
+							elif stack[k][-1][-1] != "?":
+								return -1
+						k -= 1
+					return -1
+
+				def special_append():
+					"""
+					Use extend instead of append if possible. This kills all redundant brackets.
+					"""
+					if is_single:
+						#Either [A], [[...]] or [|| [...]]
+						if l[0] == "||" and ends_in_any_of_dep(level-1):
+							if opconvert:
+								stack[level].extend(l[1:])
+							else:
+								stack[level].extend(l[1])
+						elif len(l) == 1 and isinstance(l[0], list):
+							# l = [[...]]
+							last = last_any_of_operator_level(level-1)
+							if last == -1:
+								if opconvert and isinstance(l[0], list) \
+									and l[0] and l[0][0] == '||':
+									stack[level].append(l[0])
+								else:
+									stack[level].extend(l[0])
+							else:
+								if opconvert and l[0] and l[0][0] == "||":
+									stack[level].extend(l[0][1:])
+								else:
+									stack[level].append(l[0])
+						else:
+							stack[level].extend(l)
+					else:
+						if opconvert and stack[level] and stack[level][-1] == '||':
+							stack[level][-1] = ['||'] + l
+						else:
+							stack[level].append(l)
+
+				if l and not ignore:
+					#The current list is not empty and we don't want to ignore it because
+					#of an inactive use conditional.
+					if not ends_in_any_of_dep(level-1) and not ends_in_any_of_dep(level):
+						#Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+						stack[level].extend(l)
+					elif not stack[level]:
+						#An '||' in the level above forces us to keep the brackets.
+						special_append()
+					elif is_single and ends_in_any_of_dep(level):
+						#Optimize: || ( A ) -> A,  || ( || ( ... ) ) -> || ( ... )
+						stack[level].pop()
+						special_append()
+					elif ends_in_any_of_dep(level) and ends_in_any_of_dep(level-1):
+						#Optimize: || ( A || ( B C ) ) -> || ( A B C )
+						stack[level].pop()
+						stack[level].extend(l)
+					else:
+						if opconvert and ends_in_any_of_dep(level):
+							#In opconvert mode, we have to move the operator from the level
+							#above into the current list.
+							stack[level].pop()
+							stack[level].append(["||"] + l)
+						else:
+							special_append()
+
+			else:
+				raise InvalidDependString(
+					_("no matching '%s' for '%s', token %s") % ("(", ")", pos+1))
+		elif token == "||":
+			if is_src_uri:
+				raise InvalidDependString(
+					_("any-of dependencies are not allowed in SRC_URI: token %s") % (pos+1,))
+			if need_bracket:
+				raise InvalidDependString(
+					_("expected: '(', got: '%s', token %s") % (token, pos+1))
+			need_bracket = True
+			stack[level].append(token)
+		elif token == "->":
+			if need_simple_token:
+				raise InvalidDependString(
+					_("expected: file name, got: '%s', token %s") % (token, pos+1))
+			if not is_src_uri:
+				raise InvalidDependString(
+					_("SRC_URI arrows are only allowed in SRC_URI: token %s") % (pos+1,))
+			if eapi is None or not eapi_has_src_uri_arrows(eapi):
+				raise InvalidDependString(
+					_("SRC_URI arrow not allowed in EAPI %s: token %s") % (eapi, pos+1))
+			need_simple_token = True
+			stack[level].append(token)	
+		else:
+			missing_white_space_check(token, pos)
+
+			if need_bracket:
+				raise InvalidDependString(
+					_("expected: '(', got: '%s', token %s") % (token, pos+1))
+
+			if need_simple_token and "/" in token:
+				#The last token was a SRC_URI arrow, make sure we have a simple file name.
+				raise InvalidDependString(
+					_("expected: file name, got: '%s', token %s") % (token, pos+1))
+
+			if token[-1] == "?":
+				need_bracket = True
+			else:
+				need_simple_token = False
+				if token_class and not is_src_uri:
+					#Add a hack for SRC_URI here, to avoid conditional code at the consumer level
+					try:
+						token = token_class(token, eapi=eapi,
+							is_valid_flag=is_valid_flag)
+					except InvalidAtom as e:
+						raise InvalidDependString(
+							_("Invalid atom (%s), token %s") \
+							% (e, pos+1), errors=(e,))
+					except SystemExit:
+						raise
+					except Exception as e:
+						raise InvalidDependString(
+							_("Invalid token '%s', token %s") % (token, pos+1))
+
+					if not matchall and \
+						hasattr(token, 'evaluate_conditionals'):
+						token = token.evaluate_conditionals(uselist)
+
+			stack[level].append(token)
+
+	if level != 0:
+		raise InvalidDependString(
+			_("Missing '%s' at end of string") % (")",))
+	
+	if need_bracket:
+		raise InvalidDependString(
+			_("Missing '%s' at end of string") % ("(",))
+			
+	if need_simple_token:
+		raise InvalidDependString(
+			_("Missing file name at end of string"))
+
+	return stack[0]
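+
+# Example usage (an illustrative sketch; the package atoms are hypothetical):
+#
+#	>>> use_reduce("a? ( app-misc/foo ) app-misc/bar", uselist=["a"])
+#	['app-misc/foo', 'app-misc/bar']
+#	>>> use_reduce("a? ( app-misc/foo ) app-misc/bar", uselist=[])
+#	['app-misc/bar']
+#	>>> use_reduce("|| ( app-misc/foo app-misc/bar )", opconvert=True)
+#	[['||', 'app-misc/foo', 'app-misc/bar']]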
+
+def dep_opconvert(deplist):
+	"""
+	Iterate recursively through a list of deps; if a
+	dep is a '||' operator, combine it with the
+	list of deps that follows.
+
+	Example usage:
+		>>> test = ["blah", "||", ["foo", "bar", "baz"]]
+		>>> dep_opconvert(test)
+		['blah', ['||', 'foo', 'bar', 'baz']]
+
+	@param deplist: A list of deps to format
+	@type deplist: List
+	@rtype: List
+	@return:
+		The new list with the new ordering
+	"""
+	if _internal_warnings:
+		warnings.warn(_("%s is deprecated. Use %s with the opconvert parameter set to True instead.") % \
+			('portage.dep.dep_opconvert', 'portage.dep.use_reduce'), DeprecationWarning, stacklevel=2)
+
+	retlist = []
+	x = 0
+	while x != len(deplist):
+		if isinstance(deplist[x], list):
+			retlist.append(dep_opconvert(deplist[x]))
+		elif deplist[x] == "||":
+			retlist.append([deplist[x]] + dep_opconvert(deplist[x+1]))
+			x += 1
+		else:
+			retlist.append(deplist[x])
+		x += 1
+	return retlist
+
+def flatten(mylist):
+	"""
+	Recursively traverse nested lists and return a single list containing
+	all non-list elements that are found.
+
+	Example usage:
+		>>> flatten([1, [2, 3, [4]]])
+		[1, 2, 3, 4]
+
+	@param mylist: A list containing nested lists and non-list elements.
+	@type mylist: List
+	@rtype: List
+	@return: A single list containing only non-list elements.
+	"""
+	if _internal_warnings:
+		warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+			('portage.dep.flatten',), DeprecationWarning, stacklevel=2)
+
+	newlist = []
+	for x in mylist:
+		if isinstance(x, list):
+			newlist.extend(flatten(x))
+		else:
+			newlist.append(x)
+	return newlist
+
+
+_usedep_re = {
+	"0":        re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"),
+	"4-python": re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@.-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"),
+}
+
+def _get_usedep_re(eapi):
+	"""
+	When eapi is None then validation is not as strict, since we want the
+	same to work for multiple EAPIs that may have slightly different rules.
+	@param eapi: The EAPI
+	@type eapi: String or None
+	@rtype: regular expression object
+	@return: A regular expression object that matches valid USE deps for the
+		given eapi.
+	"""
+	if eapi in (None, "4-python",):
+		return _usedep_re["4-python"]
+	else:
+		return _usedep_re["0"]
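+
+# For illustration (hypothetical flag name): the expressions above split a
+# USE-dep token into prefix, flag, default and suffix groups.
+#
+#	>>> m = _get_usedep_re("0").match("!bar(+)?")
+#	>>> m.group("prefix"), m.group("flag"), m.group("default"), m.group("suffix")
+#	('!', 'bar', '(+)', '?')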
+
+class _use_dep(object):
+
+	__slots__ = ("__weakref__", "eapi", "conditional", "missing_enabled", "missing_disabled",
+		"disabled", "enabled", "tokens", "required")
+
+	class _conditionals_class(object):
+		__slots__ = ("enabled", "disabled", "equal", "not_equal")
+
+		def items(self):
+			for k in self.__slots__:
+				v = getattr(self, k, None)
+				if v:
+					yield (k, v)
+
+		def values(self):
+			for k in self.__slots__:
+				v = getattr(self, k, None)
+				if v:
+					yield v
+
+	# used in InvalidAtom messages
+	_conditional_strings = {
+		'enabled' :     '%s?',
+		'disabled':    '!%s?',
+		'equal':        '%s=',
+		'not_equal':   '!%s=',
+	}
+
+	def __init__(self, use, eapi, enabled_flags=None, disabled_flags=None, missing_enabled=None, \
+		missing_disabled=None, conditional=None, required=None):
+
+		self.eapi = eapi
+
+		if enabled_flags is not None:
+			#A shortcut for the class's own methods.
+			self.tokens = use
+			if not isinstance(self.tokens, tuple):
+				self.tokens = tuple(self.tokens)
+
+			self.required = frozenset(required)
+			self.enabled = frozenset(enabled_flags)
+			self.disabled = frozenset(disabled_flags)
+			self.missing_enabled = frozenset(missing_enabled)
+			self.missing_disabled = frozenset(missing_disabled)
+			self.conditional = None
+
+			if conditional:
+				self.conditional = self._conditionals_class()
+				for k in "enabled", "disabled", "equal", "not_equal":
+					setattr(self.conditional, k, frozenset(conditional.get(k, [])))
+
+			return
+
+		enabled_flags = set()
+		disabled_flags = set()
+		missing_enabled = set()
+		missing_disabled = set()
+		no_default = set()
+
+		conditional = {}
+		usedep_re = _get_usedep_re(self.eapi)
+
+		for x in use:
+			m = usedep_re.match(x)
+			if m is None:
+				raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+
+			operator = m.group("prefix") + m.group("suffix")
+			flag = m.group("flag")
+			default = m.group("default")
+
+			if not operator:
+				enabled_flags.add(flag)
+			elif operator == "-":
+				disabled_flags.add(flag)
+			elif operator == "?":
+				conditional.setdefault("enabled", set()).add(flag)
+			elif operator == "=":
+				conditional.setdefault("equal", set()).add(flag)
+			elif operator == "!=":
+				conditional.setdefault("not_equal", set()).add(flag)
+			elif operator == "!?":
+				conditional.setdefault("disabled", set()).add(flag)
+			else:
+				raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+
+			if default:
+				if default == "(+)":
+					if flag in missing_disabled or flag in no_default:
+						raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+					missing_enabled.add(flag)
+				else:
+					if flag in missing_enabled or flag in no_default:
+						raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+					missing_disabled.add(flag)
+			else:
+				if flag in missing_enabled or flag in missing_disabled:
+					raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+				no_default.add(flag)
+
+		self.tokens = use
+		if not isinstance(self.tokens, tuple):
+			self.tokens = tuple(self.tokens)
+
+		self.required = frozenset(no_default)
+
+		self.enabled = frozenset(enabled_flags)
+		self.disabled = frozenset(disabled_flags)
+		self.missing_enabled = frozenset(missing_enabled)
+		self.missing_disabled = frozenset(missing_disabled)
+		self.conditional = None
+
+		if conditional:
+			self.conditional = self._conditionals_class()
+			for k in "enabled", "disabled", "equal", "not_equal":
+				setattr(self.conditional, k, frozenset(conditional.get(k, [])))
+
+	def __bool__(self):
+		return bool(self.tokens)
+
+	if sys.hexversion < 0x3000000:
+		__nonzero__ = __bool__
+
+	def __str__(self):
+		if not self.tokens:
+			return ""
+		return "[%s]" % (",".join(self.tokens),)
+
+	def __repr__(self):
+		return "portage.dep._use_dep(%s)" % repr(self.tokens)
+
+	def evaluate_conditionals(self, use):
+		"""
+		Create a new instance with conditionals evaluated.
+
+		Conditional evaluation behavior:
+
+			parent state   conditional   result
+
+			 x              x?            x
+			-x              x?
+			 x             !x?
+			-x             !x?           -x
+
+			 x              x=            x
+			-x              x=           -x
+			 x             !x=           -x
+			-x             !x=            x
+
+		Conditional syntax examples:
+
+			Compact Form        Equivalent Expanded Form
+
+			foo[bar?]           bar? ( foo[bar]  ) !bar? ( foo       )
+			foo[!bar?]          bar? ( foo       ) !bar? ( foo[-bar] )
+			foo[bar=]           bar? ( foo[bar]  ) !bar? ( foo[-bar] )
+			foo[!bar=]          bar? ( foo[-bar] ) !bar? ( foo[bar]  )
+
+		"""
+		enabled_flags = set(self.enabled)
+		disabled_flags = set(self.disabled)
+
+		tokens = []
+		usedep_re = _get_usedep_re(self.eapi)
+
+		for x in self.tokens:
+			m = usedep_re.match(x)
+
+			operator = m.group("prefix") + m.group("suffix")
+			flag = m.group("flag")
+			default = m.group("default")
+			if default is None:
+				default = ""
+
+			if operator == "?":
+				if flag in use:
+					enabled_flags.add(flag)
+					tokens.append(flag+default)
+			elif operator == "=":
+				if flag in use:
+					enabled_flags.add(flag)
+					tokens.append(flag+default)
+				else:
+					disabled_flags.add(flag)
+					tokens.append("-"+flag+default)
+			elif operator == "!=":
+				if flag in use:
+					disabled_flags.add(flag)
+					tokens.append("-"+flag+default)
+				else:
+					enabled_flags.add(flag)
+					tokens.append(flag+default)
+			elif operator == "!?":
+				if flag not in use:
+					disabled_flags.add(flag)
+					tokens.append("-"+flag+default)
+			else:
+				tokens.append(x)
+
+		return _use_dep(tokens, self.eapi, enabled_flags=enabled_flags, disabled_flags=disabled_flags, \
+			missing_enabled=self.missing_enabled, missing_disabled=self.missing_disabled, required=self.required)
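+
+	# A short illustrative evaluation (hypothetical flags), matching the
+	# table in the docstring above:
+	#
+	#	>>> str(_use_dep(["bar?", "baz="], None).evaluate_conditionals(["bar"]))
+	#	'[bar,-baz]'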
+
+	def violated_conditionals(self, other_use, is_valid_flag, parent_use=None):
+		"""
+		Create a new instance with satisfied use deps removed.
+		"""
+		if parent_use is None and self.conditional:
+			raise InvalidAtom("violated_conditionals needs 'parent_use'" + \
+				" parameter for conditional flags.")
+
+		enabled_flags = set()
+		disabled_flags = set()
+
+		conditional = {}
+		tokens = []
+
+		all_defaults = frozenset(chain(self.missing_enabled, self.missing_disabled))
+		
+		def validate_flag(flag):
+			return is_valid_flag(flag) or flag in all_defaults
+
+		usedep_re = _get_usedep_re(self.eapi)
+
+		for x in self.tokens:
+			m = usedep_re.match(x)
+
+			operator = m.group("prefix") + m.group("suffix")
+			flag = m.group("flag")
+
+			if not validate_flag(flag):
+				tokens.append(x)
+				if not operator:
+					enabled_flags.add(flag)
+				elif operator == "-":
+					disabled_flags.add(flag)
+				elif operator == "?":
+					conditional.setdefault("enabled", set()).add(flag)
+				elif operator == "=":
+					conditional.setdefault("equal", set()).add(flag)
+				elif operator == "!=":
+					conditional.setdefault("not_equal", set()).add(flag)
+				elif operator == "!?":
+					conditional.setdefault("disabled", set()).add(flag)
+
+				continue
+
+			if not operator:
+				if flag not in other_use:
+					if is_valid_flag(flag) or flag in self.missing_disabled:
+						tokens.append(x)
+						enabled_flags.add(flag)
+			elif operator == "-":
+				if flag not in other_use:
+					if not is_valid_flag(flag):
+						if flag in self.missing_enabled:
+							tokens.append(x)
+							disabled_flags.add(flag)
+				else:
+					tokens.append(x)
+					disabled_flags.add(flag)
+			elif operator == "?":
+				if flag not in parent_use or flag in other_use:
+					continue
+
+				if is_valid_flag(flag) or flag in self.missing_disabled:
+					tokens.append(x)
+					conditional.setdefault("enabled", set()).add(flag)
+			elif operator == "=":
+				if flag in parent_use and flag not in other_use:
+					if is_valid_flag(flag):
+						tokens.append(x)
+						conditional.setdefault("equal", set()).add(flag)
+					else:
+						if flag in self.missing_disabled:
+							tokens.append(x)
+							conditional.setdefault("equal", set()).add(flag)
+				elif flag not in parent_use:
+					if flag not in other_use:
+						if not is_valid_flag(flag):
+							if flag in self.missing_enabled:
+								tokens.append(x)
+								conditional.setdefault("equal", set()).add(flag)
+					else:
+						tokens.append(x)
+						conditional.setdefault("equal", set()).add(flag)
+			elif operator == "!=":
+				if flag not in parent_use and flag not in other_use:
+					if is_valid_flag(flag):
+						tokens.append(x)
+						conditional.setdefault("not_equal", set()).add(flag)
+					else:
+						if flag in self.missing_disabled:
+							tokens.append(x)
+							conditional.setdefault("not_equal", set()).add(flag)
+				elif flag in parent_use:
+					if flag not in other_use:
+						if not is_valid_flag(flag):
+							if flag in self.missing_enabled:
+								tokens.append(x)
+								conditional.setdefault("not_equal", set()).add(flag)
+					else:
+						tokens.append(x)
+						conditional.setdefault("not_equal", set()).add(flag)
+			elif operator == "!?":
+				if flag not in parent_use:
+					if flag not in other_use:
+						if not is_valid_flag(flag) and flag in self.missing_enabled:
+							tokens.append(x)
+							conditional.setdefault("disabled", set()).add(flag)
+					else:
+						tokens.append(x)
+						conditional.setdefault("disabled", set()).add(flag)
+
+		return _use_dep(tokens, self.eapi, enabled_flags=enabled_flags, disabled_flags=disabled_flags, \
+			missing_enabled=self.missing_enabled, missing_disabled=self.missing_disabled, \
+			conditional=conditional, required=self.required)
+
+	def _eval_qa_conditionals(self, use_mask, use_force):
+		"""
+		For repoman, evaluate all possible combinations within the constraints
+		of the given use.force and use.mask settings. The result may seem
+		ambiguous in the sense that the same flag can be in both the enabled
+		and disabled sets, but this is useful within the context of how its
+		and disabled sets, but this is useful within the context of how it's
+		already ensured that there is no intersection between the given
+		use_mask and use_force sets when necessary.
+		"""
+		enabled_flags = set(self.enabled)
+		disabled_flags = set(self.disabled)
+		missing_enabled = self.missing_enabled
+		missing_disabled = self.missing_disabled
+
+		tokens = []
+		usedep_re = _get_usedep_re(self.eapi)
+
+		for x in self.tokens:
+			m = usedep_re.match(x)
+
+			operator = m.group("prefix") + m.group("suffix")
+			flag = m.group("flag")
+			default = m.group("default")
+			if default is None:
+				default = ""
+
+			if operator == "?":
+				if flag not in use_mask:
+					enabled_flags.add(flag)
+					tokens.append(flag+default)
+			elif operator == "=":
+				if flag not in use_mask:
+					enabled_flags.add(flag)
+					tokens.append(flag+default)
+				if flag not in use_force:
+					disabled_flags.add(flag)
+					tokens.append("-"+flag+default)
+			elif operator == "!=":
+				if flag not in use_force:
+					enabled_flags.add(flag)
+					tokens.append(flag+default)
+				if flag not in use_mask:
+					disabled_flags.add(flag)
+					tokens.append("-"+flag+default)
+			elif operator == "!?":
+				if flag not in use_force:
+					disabled_flags.add(flag)
+					tokens.append("-"+flag+default)
+			else:
+				tokens.append(x)
+
+		return _use_dep(tokens, self.eapi, enabled_flags=enabled_flags, disabled_flags=disabled_flags, \
+			missing_enabled=missing_enabled, missing_disabled=missing_disabled, required=self.required)
+
+if sys.hexversion < 0x3000000:
+	_atom_base = unicode
+else:
+	_atom_base = str
+
+class Atom(_atom_base):
+
+	"""
+	For compatibility with existing atom string manipulation code, this
+	class emulates most of the str methods that are useful with atoms.
+	"""
+
+	class _blocker(object):
+		__slots__ = ("overlap",)
+
+		class _overlap(object):
+			__slots__ = ("forbid",)
+
+			def __init__(self, forbid=False):
+				self.forbid = forbid
+
+		def __init__(self, forbid_overlap=False):
+			self.overlap = self._overlap(forbid=forbid_overlap)
+
+	def __new__(cls, s, unevaluated_atom=None, allow_wildcard=False, allow_repo=False,
+		_use=None, eapi=None, is_valid_flag=None):
+		return _atom_base.__new__(cls, s)
+
+	def __init__(self, s, unevaluated_atom=None, allow_wildcard=False, allow_repo=False,
+		_use=None, eapi=None, is_valid_flag=None):
+		if isinstance(s, Atom):
+			# This is an efficiency assertion, to ensure that the Atom
+			# constructor is not called redundantly.
+			raise TypeError(_("Expected %s, got %s") % \
+				(_atom_base, type(s)))
+
+		if not isinstance(s, _atom_base):
+			# Avoid TypeError from _atom_base.__init__ with PyPy.
+			s = _unicode_decode(s)
+
+		_atom_base.__init__(s)
+
+		if "!" == s[:1]:
+			blocker = self._blocker(forbid_overlap=("!" == s[1:2]))
+			if blocker.overlap.forbid:
+				s = s[2:]
+			else:
+				s = s[1:]
+		else:
+			blocker = False
+		self.__dict__['blocker'] = blocker
+		m = _atom_re.match(s)
+		extended_syntax = False
+		if m is None:
+			if allow_wildcard:
+				m = _atom_wildcard_re.match(s)
+				if m is None:
+					raise InvalidAtom(self)
+				op = None
+				gdict = m.groupdict()
+				cpv = cp = gdict['simple']
+				if cpv.find("**") != -1:
+					raise InvalidAtom(self)
+				slot = gdict['slot']
+				repo = gdict['repo']
+				use_str = None
+				extended_syntax = True
+			else:
+				raise InvalidAtom(self)
+		elif m.group('op') is not None:
+			base = _atom_re.groupindex['op']
+			op = m.group(base + 1)
+			cpv = m.group(base + 2)
+			cp = m.group(base + 3)
+			slot = m.group(_atom_re.groups - 2)
+			repo = m.group(_atom_re.groups - 1)
+			use_str = m.group(_atom_re.groups)
+			if m.group(base + 4) is not None:
+				raise InvalidAtom(self)
+		elif m.group('star') is not None:
+			base = _atom_re.groupindex['star']
+			op = '=*'
+			cpv = m.group(base + 1)
+			cp = m.group(base + 2)
+			slot = m.group(_atom_re.groups - 2)
+			repo = m.group(_atom_re.groups - 1)
+			use_str = m.group(_atom_re.groups)
+			if m.group(base + 3) is not None:
+				raise InvalidAtom(self)
+		elif m.group('simple') is not None:
+			op = None
+			cpv = cp = m.group(_atom_re.groupindex['simple'] + 1)
+			slot = m.group(_atom_re.groups - 2)
+			repo = m.group(_atom_re.groups - 1)
+			use_str = m.group(_atom_re.groups)
+			if m.group(_atom_re.groupindex['simple'] + 2) is not None:
+				raise InvalidAtom(self)
+
+		else:
+			raise AssertionError(_("required group not found in atom: '%s'") % self)
+		self.__dict__['cp'] = cp
+		self.__dict__['cpv'] = cpv
+		self.__dict__['repo'] = repo
+		self.__dict__['slot'] = slot
+		self.__dict__['operator'] = op
+		self.__dict__['extended_syntax'] = extended_syntax
+
+		if not (repo is None or allow_repo):
+			raise InvalidAtom(self)
+
+		if use_str is not None:
+			if _use is not None:
+				use = _use
+			else:
+				use = _use_dep(use_str[1:-1].split(","), eapi)
+			without_use = Atom(m.group('without_use'), allow_repo=allow_repo)
+		else:
+			use = None
+			if unevaluated_atom is not None and \
+				unevaluated_atom.use is not None:
+				# unevaluated_atom.use is used for IUSE checks when matching
+				# packages, so it must not propagate to without_use
+				without_use = Atom(s, allow_wildcard=allow_wildcard,
+					allow_repo=allow_repo)
+			else:
+				without_use = self
+
+		self.__dict__['use'] = use
+		self.__dict__['without_use'] = without_use
+
+		if unevaluated_atom:
+			self.__dict__['unevaluated_atom'] = unevaluated_atom
+		else:
+			self.__dict__['unevaluated_atom'] = self
+
+		if eapi is not None:
+			if not isinstance(eapi, basestring):
+				raise TypeError('expected eapi argument of ' + \
+					'%s, got %s: %s' % (basestring, type(eapi), eapi,))
+			if self.slot and not eapi_has_slot_deps(eapi):
+				raise InvalidAtom(
+					_("Slot deps are not allowed in EAPI %s: '%s'") \
+					% (eapi, self), category='EAPI.incompatible')
+			if self.use:
+				if not eapi_has_use_deps(eapi):
+					raise InvalidAtom(
+						_("Use deps are not allowed in EAPI %s: '%s'") \
+						% (eapi, self), category='EAPI.incompatible')
+				elif not eapi_has_use_dep_defaults(eapi) and \
+					(self.use.missing_enabled or self.use.missing_disabled):
+					raise InvalidAtom(
+						_("Use dep defaults are not allowed in EAPI %s: '%s'") \
+						% (eapi, self), category='EAPI.incompatible')
+				if is_valid_flag is not None and self.use.conditional:
+					invalid_flag = None
+					try:
+						for conditional_type, flags in \
+							self.use.conditional.items():
+							for flag in flags:
+								if not is_valid_flag(flag):
+									invalid_flag = (conditional_type, flag)
+									raise StopIteration()
+					except StopIteration:
+						pass
+					if invalid_flag is not None:
+						conditional_type, flag = invalid_flag
+						conditional_str = _use_dep._conditional_strings[conditional_type]
+						msg = _("USE flag '%s' referenced in " + \
+							"conditional '%s' in atom '%s' is not in IUSE") \
+							% (flag, conditional_str % flag, self)
+						raise InvalidAtom(msg, category='IUSE.missing')
+			if self.blocker and self.blocker.overlap.forbid and not eapi_has_strong_blocks(eapi):
+				raise InvalidAtom(
+					_("Strong blocks are not allowed in EAPI %s: '%s'") \
+						% (eapi, self), category='EAPI.incompatible')
+
+	@property
+	def without_repo(self):
+		if self.repo is None:
+			return self
+		return Atom(self.replace(_repo_separator + self.repo, '', 1),
+			allow_wildcard=True)
+
+	@property
+	def without_slot(self):
+		if self.slot is None:
+			return self
+		return Atom(self.replace(_slot_separator + self.slot, '', 1),
+			allow_repo=True, allow_wildcard=True)
+
+	def __setattr__(self, name, value):
+		raise AttributeError("Atom instances are immutable",
+			self.__class__, name, value)
+
+	def intersects(self, other):
+		"""
+		Atoms with different cpv, operator or use attributes cause this method
+		to return False even though there may actually be some intersection.
+		TODO: Detect more forms of intersection.
+		@param other: The package atom to match
+		@type other: Atom
+		@rtype: Boolean
+		@return: True if this atom and the other atom intersect,
+			False otherwise.
+		"""
+		if not isinstance(other, Atom):
+			raise TypeError("expected %s, got %s" % \
+				(Atom, type(other)))
+
+		if self == other:
+			return True
+
+		if self.cp != other.cp or \
+			self.use != other.use or \
+			self.operator != other.operator or \
+			self.cpv != other.cpv:
+			return False
+
+		if self.slot is None or \
+			other.slot is None or \
+			self.slot == other.slot:
+			return True
+
+		return False
+
+	def evaluate_conditionals(self, use):
+		"""
+		Create an atom instance with any USE conditionals evaluated.
+		@param use: The set of enabled USE flags
+		@type use: set
+		@rtype: Atom
+		@return: an atom instance with any USE conditionals evaluated
+		"""
+		if not (self.use and self.use.conditional):
+			return self
+		atom = remove_slot(self)
+		if self.slot:
+			atom += ":%s" % self.slot
+		use_dep = self.use.evaluate_conditionals(use)
+		atom += str(use_dep)
+		return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+	def violated_conditionals(self, other_use, is_valid_flag, parent_use=None):
+		"""
+		Create an atom instance with any USE conditional removed, that is
+		satisfied by other_use.
+		@param other_use: The set of enabled USE flags
+		@type other_use: set
+		@param is_valid_flag: Function that decides if a use flag is referenceable in use deps
+		@type is_valid_flag: function
+		@param parent_use: Set of enabled use flags of the package requiring this atom
+		@type parent_use: set
+		@rtype: Atom
+		@return: an atom instance with any satisfied USE conditionals removed
+		"""
+		if not self.use:
+			return self
+		atom = remove_slot(self)
+		if self.slot:
+			atom += ":%s" % self.slot
+		use_dep = self.use.violated_conditionals(other_use, is_valid_flag, parent_use)
+		atom += str(use_dep)
+		return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+	def _eval_qa_conditionals(self, use_mask, use_force):
+		if not (self.use and self.use.conditional):
+			return self
+		atom = remove_slot(self)
+		if self.slot:
+			atom += ":%s" % self.slot
+		use_dep = self.use._eval_qa_conditionals(use_mask, use_force)
+		atom += str(use_dep)
+		return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+	def __copy__(self):
+		"""Immutable, so returns self."""
+		return self
+
+	def __deepcopy__(self, memo=None):
+		"""Immutable, so returns self."""
+		if memo is not None:
+			memo[id(self)] = self
+		return self
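+
+# A minimal illustrative sketch of Atom usage (the atom string is
+# hypothetical; the attribute values are what the parser above is
+# expected to produce):
+#
+#     a = Atom('>=dev-libs/openssl-1.0:0[ssl]')
+#     a.cp        # 'dev-libs/openssl'
+#     a.operator  # '>='
+#     a.slot      # '0'
+#     str(a.use)  # '[ssl]'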
+
+_extended_cp_re_cache = {}
+
+def extended_cp_match(extended_cp, other_cp):
+	"""
+	Checks if an extended syntax cp matches a non extended cp
+	"""
+	# Escape special '+' and '.' characters which are allowed in atoms,
+	# and convert '*' to regex equivalent.
+	global _extended_cp_re_cache
+	extended_cp_re = _extended_cp_re_cache.get(extended_cp)
+	if extended_cp_re is None:
+		extended_cp_re = re.compile("^" + re.escape(extended_cp).replace(
+			r'\*', '[^/]*') + "$")
+		_extended_cp_re_cache[extended_cp] = extended_cp_re
+	return extended_cp_re.match(other_cp) is not None
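+
+# Illustrative behaviour of extended_cp_match: '*' matches within a
+# single path component only, since it is translated to the regex
+# '[^/]*' above. For example:
+#     extended_cp_match('x11-libs/*', 'x11-libs/libX11')   # True
+#     extended_cp_match('*/libX11', 'x11-libs/libX11')     # True
+#     extended_cp_match('x11-libs/*', 'dev-libs/libX11')   # False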
+
+class ExtendedAtomDict(portage.cache.mappings.MutableMapping):
+	"""
+	dict() wrapper that supports extended atoms as keys and allows lookup
+	of a normal cp against other normal cp and extended cp.
+	The value type has to be given to __init__ and is assumed to be the same
+	for all values.
+	"""
+
+	__slots__ = ('_extended', '_normal', '_value_class')
+
+	def __init__(self, value_class):
+		self._extended = {}
+		self._normal = {}
+		self._value_class = value_class
+
+	def copy(self):
+		result = self.__class__(self._value_class)
+		result._extended.update(self._extended)
+		result._normal.update(self._normal)
+		return result
+
+	def __iter__(self):
+		for k in self._normal:
+			yield k
+		for k in self._extended:
+			yield k
+
+	def iteritems(self):
+		for item in self._normal.items():
+			yield item
+		for item in self._extended.items():
+			yield item
+
+	def __delitem__(self, cp):
+		if "*" in cp:
+			return self._extended.__delitem__(cp)
+		else:
+			return self._normal.__delitem__(cp)
+
+	if sys.hexversion >= 0x3000000:
+		keys = __iter__
+		items = iteritems
+
+	def __len__(self):
+		return len(self._normal) + len(self._extended)
+
+	def setdefault(self, cp, default=None):
+		if "*" in cp:
+			return self._extended.setdefault(cp, default)
+		else:
+			return self._normal.setdefault(cp, default)
+
+	def __getitem__(self, cp):
+
+		if not isinstance(cp, basestring):
+			raise KeyError(cp)
+
+		if '*' in cp:
+			return self._extended[cp]
+
+		ret = self._value_class()
+		normal_match = self._normal.get(cp)
+		match = False
+
+		if normal_match is not None:
+			match = True
+			if hasattr(ret, "update"):
+				ret.update(normal_match)
+			elif hasattr(ret, "extend"):
+				ret.extend(normal_match)
+			else:
+				raise NotImplementedError()
+
+		for extended_cp in self._extended:
+			if extended_cp_match(extended_cp, cp):
+				match = True
+				if hasattr(ret, "update"):
+					ret.update(self._extended[extended_cp])
+				elif hasattr(ret, "extend"):
+					ret.extend(self._extended[extended_cp])
+				else:
+					raise NotImplementedError()
+
+		if not match:
+			raise KeyError(cp)
+
+		return ret
+
+	def __setitem__(self, cp, val):
+		if "*" in cp:
+			self._extended[cp] = val
+		else:
+			self._normal[cp] = val
+
+	def __eq__(self, other):
+		return self._value_class == other._value_class and \
+			self._extended == other._extended and \
+			self._normal == other._normal
+
+	def clear(self):
+		self._extended.clear()
+		self._normal.clear()
+
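+# A minimal usage sketch for ExtendedAtomDict; the keys and values are
+# illustrative (list is a typical value_class):
+#
+#     d = ExtendedAtomDict(list)
+#     d['sys-apps/*'] = ['wildcard entry']
+#     d['sys-apps/portage'] = ['exact entry']
+#     d['sys-apps/portage']  # ['exact entry', 'wildcard entry']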
+
+def get_operator(mydep):
+	"""
+	Return the operator used in a depstring.
+
+	Example usage:
+		>>> from portage.dep import *
+		>>> get_operator(">=test-1.0")
+		'>='
+
+	@param mydep: The dep string to check
+	@type mydep: String
+	@rtype: String
+	@return: The operator. One of:
+		'~', '=', '>', '<', '=*', '>=', '<=',
+		or None if the atom has no operator (a simple atom)
+	"""
+	if not isinstance(mydep, Atom):
+		mydep = Atom(mydep)
+
+	return mydep.operator
+
+def dep_getcpv(mydep):
+	"""
+	Return the category-package-version with any operators/slot specifications stripped off
+
+	Example usage:
+		>>> dep_getcpv('>=media-libs/test-3.0')
+		'media-libs/test-3.0'
+
+	@param mydep: The depstring
+	@type mydep: String
+	@rtype: String
+	@return: The depstring with the operator removed
+	"""
+	if not isinstance(mydep, Atom):
+		mydep = Atom(mydep)
+
+	return mydep.cpv
+
+def dep_getslot(mydep):
+	"""
+	Retrieve the slot on a depend.
+
+	Example usage:
+		>>> dep_getslot('app-misc/test:3')
+		'3'
+
+	@param mydep: The depstring to retrieve the slot of
+	@type mydep: String
+	@rtype: String
+	@return: The slot
+	"""
+	slot = getattr(mydep, "slot", False)
+	if slot is not False:
+		return slot
+
+	#remove repo_name if present
+	mydep = mydep.split(_repo_separator)[0]
+
+	colon = mydep.find(_slot_separator)
+	if colon != -1:
+		bracket = mydep.find("[", colon)
+		if bracket == -1:
+			return mydep[colon+1:]
+		else:
+			return mydep[colon+1:bracket]
+	return None
+
+def dep_getrepo(mydep):
+	"""
+	Retrieve the repo on a depend.
+
+	Example usage:
+		>>> dep_getrepo('app-misc/test::repository')
+		'repository'
+
+	@param mydep: The depstring to retrieve the repository of
+	@type mydep: String
+	@rtype: String
+	@return: The repository name
+	"""
+	repo = getattr(mydep, "repo", False)
+	if repo is not False:
+		return repo
+
+	metadata = getattr(mydep, "metadata", False)
+	if metadata:
+		repo = metadata.get('repository', False)
+		if repo is not False:
+			return repo
+
+	colon = mydep.find(_repo_separator)
+	if colon != -1:
+		bracket = mydep.find("[", colon)
+		if bracket == -1:
+			return mydep[colon+2:]
+		else:
+			return mydep[colon+2:bracket]
+	return None
+
+def remove_slot(mydep):
+	"""
+	Removes dep components from the right side of an atom:
+		* slot
+		* use
+		* repo
+	"""
+	colon = mydep.find(_slot_separator)
+	if colon != -1:
+		mydep = mydep[:colon]
+	else:
+		bracket = mydep.find("[")
+		if bracket != -1:
+			mydep = mydep[:bracket]
+	return mydep
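+
+# For example (illustrative):
+#     remove_slot('app-misc/test:3[foo]')      # 'app-misc/test'
+#     remove_slot('>=app-misc/test-1.0[foo]')  # '>=app-misc/test-1.0'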
+
+def dep_getusedeps( depend ):
+	"""
+	Pull a listing of USE Dependencies out of a dep atom.
+	
+	Example usage:
+		>>> dep_getusedeps('app-misc/test:3[foo,-bar]')
+		('foo', '-bar')
+	
+	@param depend: The depstring to process
+	@type depend: String
+	@rtype: List
+	@return: List of use flags ( or [] if no flags exist )
+	"""
+	use_list = []
+	open_bracket = depend.find('[')
+	# -1 = failure (think c++ string::npos)
+	comma_separated = False
+	bracket_count = 0
+	while open_bracket != -1:
+		bracket_count += 1
+		if bracket_count > 1:
+			raise InvalidAtom(_("USE Dependency with more "
+				"than one set of brackets: %s") % (depend,))
+		close_bracket = depend.find(']', open_bracket)
+		if close_bracket == -1:
+			raise InvalidAtom(_("USE Dependency with no closing bracket: %s") % depend)
+		use = depend[open_bracket + 1: close_bracket]
+		# foo[1:1] may return '' instead of None, we don't want '' in the result
+		if not use:
+			raise InvalidAtom(_("USE Dependency with "
+				"no use flag ([]): %s") % depend)
+		if not comma_separated:
+			comma_separated = "," in use
+
+		if comma_separated and bracket_count > 1:
+			raise InvalidAtom(_("USE Dependency contains a mixture of "
+				"comma and bracket separators: %s") % depend)
+
+		if comma_separated:
+			for x in use.split(","):
+				if x:
+					use_list.append(x)
+				else:
+					raise InvalidAtom(_("USE Dependency with no use "
+						"flag next to comma: %s") % depend)
+		else:
+			use_list.append(use)
+
+		# Find next use flag
+		open_bracket = depend.find('[', open_bracket + 1)
+	return tuple(use_list)
+
+# \w is [a-zA-Z0-9_]
+
+# 2.1.3 A slot name may contain any of the characters [A-Za-z0-9+_.-].
+# It must not begin with a hyphen or a dot.
+_slot_separator = ":"
+_slot = r'([\w+][\w+.-]*)'
+_slot_re = re.compile('^' + _slot + '$', re.VERBOSE)
+
+_use = r'\[.*\]'
+_op = r'([=~]|[><]=?)'
+_repo_separator = "::"
+_repo_name = r'[\w][\w-]*'
+_repo = r'(?:' + _repo_separator + '(' + _repo_name + ')' + ')?'
+
+_atom_re = re.compile('^(?P<without_use>(?:' +
+	'(?P<op>' + _op + _cpv + ')|' +
+	'(?P<star>=' + _cpv + r'\*)|' +
+	'(?P<simple>' + _cp + '))' + 
+	'(' + _slot_separator + _slot + ')?' + _repo + ')(' + _use + ')?$', re.VERBOSE)
+
+_extended_cat = r'[\w+*][\w+.*-]*'
+_extended_pkg = r'[\w+*][\w+*-]*?'
+
+_atom_wildcard_re = re.compile('(?P<simple>(' + _extended_cat + ')/(' + _extended_pkg + '))(:(?P<slot>' + _slot + '))?(' + _repo_separator + '(?P<repo>' + _repo_name + '))?$')
+
+_useflag_re = {
+	"0":        re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@-]*$'),
+	"4-python": re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@.-]*$'),
+}
+
+def _get_useflag_re(eapi):
+	"""
+	When eapi is None then validation is not as strict, since we want the
+	same to work for multiple EAPIs that may have slightly different rules.
+	@param eapi: The EAPI
+	@type eapi: String or None
+	@rtype: regular expression object
+	@return: A regular expression object that matches valid USE flags for the
+		given eapi.
+	"""
+	if eapi in (None, "4-python",):
+		return _useflag_re["4-python"]
+	else:
+		return _useflag_re["0"]
+
+def isvalidatom(atom, allow_blockers=False, allow_wildcard=False, allow_repo=False):
+	"""
+	Check to see if a depend atom is valid
+
+	Example usage:
+		>>> isvalidatom('media-libs/test-3.0')
+		False
+		>>> isvalidatom('>=media-libs/test-3.0')
+		True
+
+	@param atom: The depend atom to check against
+	@type atom: String or Atom
+	@rtype: Boolean
+	@return: One of the following:
+		1) False if the atom is invalid
+		2) True if the atom is valid
+	"""
+	try:
+		if not isinstance(atom, Atom):
+			atom = Atom(atom, allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+		if not allow_blockers and atom.blocker:
+			return False
+		return True
+	except InvalidAtom:
+		return False
+
+def isjustname(mypkg):
+	"""
+	Checks to see if the atom is only the package name (no version parts).
+
+	Example usage:
+		>>> isjustname('=media-libs/test-3.0')
+		False
+		>>> isjustname('media-libs/test')
+		True
+
+	@param mypkg: The package atom to check
+	@type mypkg: String or Atom
+	@rtype: Boolean
+	@return: One of the following:
+		1) False if the package string is not just the package name
+		2) True if it is
+	"""
+	try:
+		if not isinstance(mypkg, Atom):
+			mypkg = Atom(mypkg)
+		return mypkg == mypkg.cp
+	except InvalidAtom:
+		pass
+
+	for x in mypkg.split('-')[-2:]:
+		if ververify(x):
+			return False
+	return True
+
+def isspecific(mypkg):
+	"""
+	Checks to see if a package is in =category/package-version or
+	package-version format.
+
+	Example usage:
+		>>> isspecific('media-libs/test')
+		False
+		>>> isspecific('=media-libs/test-3.0')
+		True
+
+	@param mypkg: The package depstring to check against
+	@type mypkg: String
+	@rtype: Boolean
+	@return: One of the following:
+		1) False if the package string is not specific
+		2) True if it is
+	"""
+	try:
+		if not isinstance(mypkg, Atom):
+			mypkg = Atom(mypkg)
+		return mypkg != mypkg.cp
+	except InvalidAtom:
+		pass
+
+	# Fall back to legacy code for backward compatibility.
+	return not isjustname(mypkg)
+
+def dep_getkey(mydep):
+	"""
+	Return the category/package-name of a depstring.
+
+	Example usage:
+		>>> dep_getkey('=media-libs/test-3.0')
+		'media-libs/test'
+
+	@param mydep: The depstring to retrieve the category/package-name of
+	@type mydep: String
+	@rtype: String
+	@return: The package category/package-name
+	"""
+	if not isinstance(mydep, Atom):
+		mydep = Atom(mydep, allow_wildcard=True, allow_repo=True)
+
+	return mydep.cp
+
+def match_to_list(mypkg, mylist):
+	"""
+	Searches list for entries that matches the package.
+
+	@param mypkg: The package atom to match
+	@type mypkg: String
+	@param mylist: The list of package atoms to compare against
+	@type mylist: List
+	@rtype: List
+	@return: A unique list of package atoms that match the given package atom
+	"""
+	return [ x for x in set(mylist) if match_from_list(x, [mypkg]) ]
+
+def best_match_to_list(mypkg, mylist):
+	"""
+	Returns the most specific entry that matches the package given.
+
+	@param mypkg: The package atom to check
+	@type mypkg: String
+	@param mylist: The list of package atoms to check against
+	@type mylist: List
+	@rtype: String
+	@return: The package atom which best matches given the following ordering:
+		- =cpv      6
+		- ~cpv      5
+		- =cpv*     4
+		- cp:slot   3
+		- >cpv      2
+		- <cpv      2
+		- >=cpv     2
+		- <=cpv     2
+		- cp        1
+		- cp:slot with extended syntax	0
+		- cp with extended syntax	-1
+	"""
+	operator_values = {'=':6, '~':5, '=*':4,
+		'>':2, '<':2, '>=':2, '<=':2, None:1}
+	maxvalue = -2
+	bestm = None
+	for x in match_to_list(mypkg, mylist):
+		if x.extended_syntax:
+			if dep_getslot(x) is not None:
+				if maxvalue < 0:
+					maxvalue = 0
+					bestm = x
+			else:
+				if maxvalue < -1:
+					maxvalue = -1
+					bestm = x
+			continue
+		if dep_getslot(x) is not None:
+			if maxvalue < 3:
+				maxvalue = 3
+				bestm = x
+		op_val = operator_values[x.operator]
+		if op_val > maxvalue:
+			maxvalue = op_val
+			bestm = x
+	return bestm
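+
+# Illustrative example (hypothetical atoms): for mypkg 'dev-libs/foo-1.0'
+# and mylist [Atom('dev-libs/foo'), Atom('dev-libs/foo:0'),
+# Atom('=dev-libs/foo-1.0')], best_match_to_list() returns
+# '=dev-libs/foo-1.0': the '=' operator (6) outranks cp:slot (3) and cp (1).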
+
+def match_from_list(mydep, candidate_list):
+	"""
+	Searches list for entries that matches the package.
+
+	@param mydep: The package atom to match
+	@type mydep: String
+	@param candidate_list: The list of package atoms to compare against
+	@type candidate_list: List
+	@rtype: List
+	@return: A list of package atoms that match the given package atom
+	"""
+
+	if not candidate_list:
+		return []
+
+	from portage.util import writemsg
+	if "!" == mydep[:1]:
+		if "!" == mydep[1:2]:
+			mydep = mydep[2:]
+		else:
+			mydep = mydep[1:]
+	if not isinstance(mydep, Atom):
+		mydep = Atom(mydep, allow_wildcard=True, allow_repo=True)
+
+	mycpv     = mydep.cpv
+	mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
+	slot      = mydep.slot
+
+	if not mycpv_cps:
+		cat, pkg = catsplit(mycpv)
+		ver      = None
+		rev      = None
+	else:
+		cat, pkg, ver, rev = mycpv_cps
+		if mydep == mycpv:
+			raise KeyError(_("Specific key requires an operator"
+				" (%s) (try adding an '=')") % (mydep))
+
+	if ver and rev:
+		operator = mydep.operator
+		if not operator:
+			writemsg(_("!!! Invalid atom: %s\n") % mydep, noiselevel=-1)
+			return []
+	else:
+		operator = None
+
+	mylist = []
+
+	if operator is None:
+		for x in candidate_list:
+			cp = getattr(x, "cp", None)
+			if cp is None:
+				mysplit = catpkgsplit(remove_slot(x))
+				if mysplit is not None:
+					cp = mysplit[0] + '/' + mysplit[1]
+
+			if cp is None:
+				continue
+
+			if cp == mycpv or (mydep.extended_syntax and \
+				extended_cp_match(mydep.cp, cp)):
+				mylist.append(x)
+
+	elif operator == "=": # Exact match
+		for x in candidate_list:
+			xcpv = getattr(x, "cpv", None)
+			if xcpv is None:
+				xcpv = remove_slot(x)
+			if not cpvequal(xcpv, mycpv):
+				continue
+			mylist.append(x)
+
+	elif operator == "=*": # glob match
+		# XXX: Nasty special casing for leading zeros
+		# Required as =* is a literal prefix match, so can't 
+		# use vercmp
+		mysplit = catpkgsplit(mycpv)
+		myver = mysplit[2].lstrip("0")
+		if not myver or not myver[0].isdigit():
+			myver = "0"+myver
+		mycpv = mysplit[0]+"/"+mysplit[1]+"-"+myver
+		for x in candidate_list:
+			xs = getattr(x, "cpv_split", None)
+			if xs is None:
+				xs = catpkgsplit(remove_slot(x))
+			myver = xs[2].lstrip("0")
+			if not myver or not myver[0].isdigit():
+				myver = "0"+myver
+			xcpv = xs[0]+"/"+xs[1]+"-"+myver
+			if xcpv.startswith(mycpv):
+				mylist.append(x)
+
+	elif operator == "~": # version, any revision, match
+		for x in candidate_list:
+			xs = getattr(x, "cpv_split", None)
+			if xs is None:
+				xs = catpkgsplit(remove_slot(x))
+			if xs is None:
+				raise InvalidData(x)
+			if not cpvequal(xs[0]+"/"+xs[1]+"-"+xs[2], mycpv_cps[0]+"/"+mycpv_cps[1]+"-"+mycpv_cps[2]):
+				continue
+			if xs[2] != ver:
+				continue
+			mylist.append(x)
+
+	elif operator in [">", ">=", "<", "<="]:
+		mysplit = ["%s/%s" % (cat, pkg), ver, rev]
+		for x in candidate_list:
+			xs = getattr(x, "cpv_split", None)
+			if xs is None:
+				xs = catpkgsplit(remove_slot(x))
+			xcat, xpkg, xver, xrev = xs
+			xs = ["%s/%s" % (xcat, xpkg), xver, xrev]
+			try:
+				result = pkgcmp(xs, mysplit)
+			except ValueError: # pkgcmp may return ValueError during int() conversion
+				writemsg(_("\nInvalid package name: %s\n") % x, noiselevel=-1)
+				raise
+			if result is None:
+				continue
+			elif operator == ">":
+				if result > 0:
+					mylist.append(x)
+			elif operator == ">=":
+				if result >= 0:
+					mylist.append(x)
+			elif operator == "<":
+				if result < 0:
+					mylist.append(x)
+			elif operator == "<=":
+				if result <= 0:
+					mylist.append(x)
+			else:
+				raise KeyError(_("Unknown operator: %s") % mydep)
+	else:
+		raise KeyError(_("Unknown operator: %s") % mydep)
+
+	if slot is not None and not mydep.extended_syntax:
+		candidate_list = mylist
+		mylist = []
+		for x in candidate_list:
+			xslot = getattr(x, "slot", False)
+			if xslot is False:
+				xslot = dep_getslot(x)
+			if xslot is not None and xslot != slot:
+				continue
+			mylist.append(x)
+
+	if mydep.unevaluated_atom.use:
+		candidate_list = mylist
+		mylist = []
+		for x in candidate_list:
+			use = getattr(x, "use", None)
+			if use is not None:
+				if mydep.unevaluated_atom.use and \
+					not x.iuse.is_valid_flag(
+					mydep.unevaluated_atom.use.required):
+					continue
+
+				if mydep.use:
+
+					missing_enabled = mydep.use.missing_enabled.difference(x.iuse.all)
+					missing_disabled = mydep.use.missing_disabled.difference(x.iuse.all)
+
+					if mydep.use.enabled:
+						if mydep.use.enabled.intersection(missing_disabled):
+							continue
+						need_enabled = mydep.use.enabled.difference(use.enabled)
+						if need_enabled:
+							need_enabled = need_enabled.difference(missing_enabled)
+							if need_enabled:
+								continue
+
+					if mydep.use.disabled:
+						if mydep.use.disabled.intersection(missing_enabled):
+							continue
+						need_disabled = mydep.use.disabled.intersection(use.enabled)
+						if need_disabled:
+							need_disabled = need_disabled.difference(missing_disabled)
+							if need_disabled:
+								continue
+
+			mylist.append(x)
+
+	if mydep.repo:
+		candidate_list = mylist
+		mylist = []
+		for x in candidate_list:
+			repo = getattr(x, "repo", False)
+			if repo is False:
+				repo = dep_getrepo(x)
+			if repo is not None and repo != mydep.repo:
+				continue
+			mylist.append(x)
+
+	return mylist
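+
+# Illustrative example (hypothetical cpvs):
+#     match_from_list('>=dev-libs/foo-1.0',
+#         ['dev-libs/foo-0.9', 'dev-libs/foo-1.1'])
+#     # ['dev-libs/foo-1.1']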
+
+def human_readable_required_use(required_use):
+	return required_use.replace("^^", "exactly-one-of").replace("||", "any-of")
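+
+# For example: human_readable_required_use('^^ ( foo bar )') returns
+# 'exactly-one-of ( foo bar )'.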
+
+def get_required_use_flags(required_use):
+	"""
+	Returns a set of use flags that are used in the given REQUIRED_USE string
+
+	@param required_use: REQUIRED_USE string
+	@type required_use: String
+	@rtype: Set
+	@return: Set of use flags that are used in the given REQUIRED_USE string
+	"""
+
+	mysplit = required_use.split()
+	level = 0
+	stack = [[]]
+	need_bracket = False
+
+	used_flags = set()
+
+	def register_token(token):
+		if token.endswith("?"):
+			token = token[:-1]
+		if token.startswith("!"):
+			token = token[1:]
+		used_flags.add(token)
+
+	for token in mysplit:
+		if token == "(":
+			need_bracket = False
+			stack.append([])
+			level += 1
+		elif token == ")":
+			if need_bracket:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % required_use)
+			if level > 0:
+				level -= 1
+				l = stack.pop()
+				ignore = False
+				if stack[level]:
+					if stack[level][-1] in ("||", "^^") or \
+						(not isinstance(stack[level][-1], bool) and \
+						stack[level][-1][-1] == "?"):
+						ignore = True
+						stack[level].pop()
+						stack[level].append(True)
+
+				if l and not ignore:
+					stack[level].append(all(x for x in l))
+			else:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % required_use)
+		elif token in ("||", "^^"):
+			if need_bracket:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % required_use)
+			need_bracket = True
+			stack[level].append(token)
+		else:
+			if need_bracket or "(" in token or ")" in token or \
+				"|" in token or "^" in token:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % required_use)
+
+			if token[-1] == "?":
+				need_bracket = True
+				stack[level].append(token)
+			else:
+				stack[level].append(True)
+
+			register_token(token)
+
+	if level != 0 or need_bracket:
+		raise InvalidDependString(
+			_("malformed syntax: '%s'") % required_use)
+
+	return frozenset(used_flags)
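+
+# Illustrative example: conditional markers and negations are stripped,
+# so every referenced flag is reported:
+#     get_required_use_flags('^^ ( foo bar ) baz? ( !qux )')
+#     # frozenset(['foo', 'bar', 'baz', 'qux'])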
+
+class _RequiredUseLeaf(object):
+
+	__slots__ = ('_satisfied', '_token')
+
+	def __init__(self, token, satisfied):
+		self._token = token
+		self._satisfied = satisfied
+
+	def tounicode(self):
+		return self._token
+
+class _RequiredUseBranch(object):
+
+	__slots__ = ('_children', '_operator', '_parent', '_satisfied')
+
+	def __init__(self, operator=None, parent=None):
+		self._children = []
+		self._operator = operator
+		self._parent = parent
+		self._satisfied = False
+
+	def __bool__(self):
+		return self._satisfied
+
+	def tounicode(self):
+
+		include_parens = self._parent is not None
+		tokens = []
+		if self._operator is not None:
+			tokens.append(self._operator)
+
+		if include_parens:
+			tokens.append("(")
+
+		complex_nesting = False
+		node = self
+		while node is not None and not complex_nesting:
+			if node._operator in ("||", "^^"):
+				complex_nesting = True
+			else:
+				node = node._parent
+
+		if complex_nesting:
+			for child in self._children:
+				tokens.append(child.tounicode())
+		else:
+			for child in self._children:
+				if not child._satisfied:
+					tokens.append(child.tounicode())
+
+		if include_parens:
+			tokens.append(")")
+
+		return " ".join(tokens)
+
+	if sys.hexversion < 0x3000000:
+		__nonzero__ = __bool__
+
+def check_required_use(required_use, use, iuse_match):
+	"""
+	Checks if the use flags listed in 'use' satisfy all
+	constraints specified in 'required_use'.
+
+	@param required_use: REQUIRED_USE string
+	@type required_use: String
+	@param use: Enabled use flags
+	@type use: List
+	@param iuse_match: Callable that takes a single flag argument and
+		returns True if the flag is matched, False otherwise
+	@type iuse_match: Callable
+	@rtype: _RequiredUseBranch (truth-evaluates to a Bool)
+	@return: Indicates if REQUIRED_USE constraints are satisfied
+	"""
+
+	def is_active(token):
+		if token.startswith("!"):
+			flag = token[1:]
+			is_negated = True
+		else:
+			flag = token
+			is_negated = False
+
+		if not flag or not iuse_match(flag):
+			msg = _("USE flag '%s' is not in IUSE") \
+				% (flag,)
+			e = InvalidData(msg, category='IUSE.missing')
+			raise InvalidDependString(msg, errors=(e,))
+
+		return (flag in use and not is_negated) or \
+			(flag not in use and is_negated)
+
+	def is_satisfied(operator, argument):
+		if not argument:
+			#|| ( ) -> True
+			return True
+
+		if operator == "||":
+			return (True in argument)
+		elif operator == "^^":
+			return (argument.count(True) == 1)
+		elif operator[-1] == "?":
+			return (False not in argument)
+
+	mysplit = required_use.split()
+	level = 0
+	stack = [[]]
+	tree = _RequiredUseBranch()
+	node = tree
+	need_bracket = False
+
+	for token in mysplit:
+		if token == "(":
+			if not need_bracket:
+				child = _RequiredUseBranch(parent=node)
+				node._children.append(child)
+				node = child
+
+			need_bracket = False
+			stack.append([])
+			level += 1
+		elif token == ")":
+			if need_bracket:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % required_use)
+			if level > 0:
+				level -= 1
+				l = stack.pop()
+				op = None
+				if stack[level]:
+					if stack[level][-1] in ("||", "^^"):
+						op = stack[level].pop()
+						satisfied = is_satisfied(op, l)
+						stack[level].append(satisfied)
+						node._satisfied = satisfied
+
+					elif not isinstance(stack[level][-1], bool) and \
+						stack[level][-1][-1] == "?":
+						op = stack[level].pop()
+						if is_active(op[:-1]):
+							satisfied = is_satisfied(op, l)
+							stack[level].append(satisfied)
+							node._satisfied = satisfied
+						else:
+							node._satisfied = True
+							last_node = node._parent._children.pop()
+							if last_node is not node:
+								raise AssertionError(
+									"node is not last child of parent")
+							node = node._parent
+							continue
+
+				if op is None:
+					satisfied = False not in l
+					node._satisfied = satisfied
+					if l:
+						stack[level].append(satisfied)
+
+					if len(node._children) <= 1 or \
+						node._parent._operator not in ("||", "^^"):
+						last_node = node._parent._children.pop()
+						if last_node is not node:
+							raise AssertionError(
+								"node is not last child of parent")
+						for child in node._children:
+							node._parent._children.append(child)
+							if isinstance(child, _RequiredUseBranch):
+								child._parent = node._parent
+
+				elif not node._children:
+					last_node = node._parent._children.pop()
+					if last_node is not node:
+						raise AssertionError(
+							"node is not last child of parent")
+
+				elif len(node._children) == 1 and op in ("||", "^^"):
+					last_node = node._parent._children.pop()
+					if last_node is not node:
+						raise AssertionError(
+							"node is not last child of parent")
+					node._parent._children.append(node._children[0])
+					if isinstance(node._children[0], _RequiredUseBranch):
+						node._children[0]._parent = node._parent
+						node = node._children[0]
+						if node._operator is None and \
+							node._parent._operator not in ("||", "^^"):
+							last_node = node._parent._children.pop()
+							if last_node is not node:
+								raise AssertionError(
+									"node is not last child of parent")
+							for child in node._children:
+								node._parent._children.append(child)
+								if isinstance(child, _RequiredUseBranch):
+									child._parent = node._parent
+
+				node = node._parent
+			else:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % required_use)
+		elif token in ("||", "^^"):
+			if need_bracket:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % required_use)
+			need_bracket = True
+			stack[level].append(token)
+			child = _RequiredUseBranch(operator=token, parent=node)
+			node._children.append(child)
+			node = child
+		else:
+			if need_bracket or "(" in token or ")" in token or \
+				"|" in token or "^" in token:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % required_use)
+
+			if token[-1] == "?":
+				need_bracket = True
+				stack[level].append(token)
+				child = _RequiredUseBranch(operator=token, parent=node)
+				node._children.append(child)
+				node = child
+			else:
+				satisfied = is_active(token)
+				stack[level].append(satisfied)
+				node._children.append(_RequiredUseLeaf(token, satisfied))
+
+	if level != 0 or need_bracket:
+		raise InvalidDependString(
+			_("malformed syntax: '%s'") % required_use)
+
+	tree._satisfied = False not in stack[0]
+	return tree
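+
+# A minimal illustrative call (the flags are assumed to be in IUSE):
+#     result = check_required_use('^^ ( foo bar )', ['foo'],
+#         lambda flag: flag in ('foo', 'bar'))
+#     bool(result)  # True: exactly one of foo/bar is enabled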
+
+def extract_affecting_use(mystr, atom, eapi=None):
+	"""
+	Take a dep string and an atom and return the use flags
+	that decide if the given atom is in effect.
+
+	Example usage:
+		>>> extract_affecting_use('sasl? ( dev-libs/cyrus-sasl ) \
+			!minimal? ( cxx? ( dev-libs/cyrus-sasl ) )', 'dev-libs/cyrus-sasl')
+		set(['sasl', 'minimal', 'cxx'])
+
+	@param mystr: The dependency string
+	@type mystr: String
+	@param atom: The atom to get into effect
+	@type atom: String
+	@param eapi: The EAPI in effect, or None
+	@type eapi: String or None
+	@rtype: Set of strings
+	@return: Set of use flags that decide whether the given atom is in effect
+	"""
+	useflag_re = _get_useflag_re(eapi)
+	mysplit = mystr.split()
+	level = 0
+	stack = [[]]
+	need_bracket = False
+	affecting_use = set()
+
+	def flag(conditional):
+		if conditional[0] == "!":
+			flag = conditional[1:-1]
+		else:
+			flag = conditional[:-1]
+
+		if useflag_re.match(flag) is None:
+			raise InvalidDependString(
+				_("invalid use flag '%s' in conditional '%s'") % \
+				(flag, conditional))
+
+		return flag
+
+	for token in mysplit:
+		if token == "(":
+			need_bracket = False
+			stack.append([])
+			level += 1
+		elif token == ")":
+			if need_bracket:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % mystr)
+			if level > 0:
+				level -= 1
+				l = stack.pop()
+				is_single = (len(l) == 1 or (len(l)==2 and (l[0] == "||" or l[0][-1] == "?")))
+
+				def ends_in_any_of_dep(k):
+					return k>=0 and stack[k] and stack[k][-1] == "||"
+
+				def ends_in_operator(k):
+					return k>=0 and stack[k] and (stack[k][-1] == "||" or stack[k][-1][-1] == "?")
+
+				def special_append():
+					"""
+					Use extend instead of append if possible. This kills all redundant brackets.
+					"""
+					if is_single and (not stack[level] or not stack[level][-1][-1] == "?"):
+						if len(l) == 1 and isinstance(l[0], list):
+							# l = [[...]]
+							stack[level].extend(l[0])
+						else:
+							stack[level].extend(l)
+					else:
+						stack[level].append(l)
+
+				if l:
+					if not ends_in_any_of_dep(level-1) and not ends_in_operator(level):
+						#Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+						stack[level].extend(l)
+					elif not stack[level]:
+						#An '||' in the level above forces us to keep to brackets.
+						special_append()
+					elif len(l) == 1 and ends_in_any_of_dep(level):
+						#Optimize: || ( A ) -> A
+						stack[level].pop()
+						special_append()
+					elif len(l) == 2 and (l[0] == "||" or l[0][-1] == "?") and stack[level][-1] in (l[0], "||"):
+						#Optimize: 	|| ( || ( ... ) ) -> || ( ... )
+						#			foo? ( foo? ( ... ) ) -> foo? ( ... )
+						#			|| ( foo? ( ... ) ) -> foo? ( ... )
+						stack[level].pop()
+						special_append()
+						if l[0][-1] == "?":
+							affecting_use.add(flag(l[0]))
+					else:
+						if stack[level] and stack[level][-1][-1] == "?":
+							affecting_use.add(flag(stack[level][-1]))
+						special_append()
+				else:
+					if stack[level] and (stack[level][-1] == "||" or stack[level][-1][-1] == "?"):
+						stack[level].pop()
+			else:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % mystr)
+		elif token == "||":
+			if need_bracket:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % mystr)
+			need_bracket = True
+			stack[level].append(token)
+		else:
+			if need_bracket:
+				raise InvalidDependString(
+					_("malformed syntax: '%s'") % mystr)
+
+			if token[-1] == "?":
+				need_bracket = True
+				stack[level].append(token)
+			elif token == atom:
+				stack[level].append(token)
+
+	if level != 0 or need_bracket:
+		raise InvalidDependString(
+			_("malformed syntax: '%s'") % mystr)
+
+	return affecting_use

diff --git a/portage_with_autodep/pym/portage/dep/dep_check.py b/portage_with_autodep/pym/portage/dep/dep_check.py
new file mode 100644
index 0000000..01d5021
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dep/dep_check.py
@@ -0,0 +1,679 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['dep_check', 'dep_eval', 'dep_wordreduce', 'dep_zapdeps']
+
+import logging
+
+import portage
+from portage import _unicode_decode
+from portage.dep import Atom, match_from_list, use_reduce
+from portage.exception import InvalidDependString, ParseError
+from portage.localization import _
+from portage.util import writemsg, writemsg_level
+from portage.versions import catpkgsplit, cpv_getkey, pkgcmp
+
+def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
+	trees=None, use_mask=None, use_force=None, **kwargs):
+	"""
+	In order to solve bug #141118, recursively expand new-style virtuals so
+	as to collapse one or more levels of indirection, generating an expanded
+	search space. In dep_zapdeps, new-style virtuals will be assigned
+	zero cost regardless of whether or not they are currently installed. Virtual
+	blockers are supported but only when the virtual expands to a single
+	atom because it wouldn't necessarily make sense to block all the components
+	of a compound virtual.  When more than one new-style virtual is matched,
+	the matches are sorted from highest to lowest versions and the atom is
+	expanded to || ( highest match ... lowest match )."""
+	newsplit = []
+	mytrees = trees[myroot]
+	portdb = mytrees["porttree"].dbapi
+	pkg_use_enabled = mytrees.get("pkg_use_enabled")
+	# Atoms are stored in the graph as (atom, id(atom)) tuples
+	# since each atom is considered to be a unique entity. For
+	# example, atoms that appear identical may behave differently
+	# in USE matching, depending on their unevaluated form. Also,
+	# specially generated virtual atoms may appear identical while
+	# having different _orig_atom attributes.
+	atom_graph = mytrees.get("atom_graph")
+	parent = mytrees.get("parent")
+	virt_parent = mytrees.get("virt_parent")
+	graph_parent = None
+	eapi = None
+	if parent is not None:
+		if virt_parent is not None:
+			graph_parent = virt_parent
+			parent = virt_parent
+		else:
+			graph_parent = parent
+		eapi = parent.metadata["EAPI"]
+	repoman = not mysettings.local_config
+	if kwargs["use_binaries"]:
+		portdb = trees[myroot]["bintree"].dbapi
+	pprovideddict = mysettings.pprovideddict
+	myuse = kwargs["myuse"]
+	for x in mysplit:
+		if x == "||":
+			newsplit.append(x)
+			continue
+		elif isinstance(x, list):
+			newsplit.append(_expand_new_virtuals(x, edebug, mydbapi,
+				mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
+				use_force=use_force, **kwargs))
+			continue
+
+		if not isinstance(x, Atom):
+			raise ParseError(
+				_("invalid token: '%s'") % x)
+
+		if repoman:
+			x = x._eval_qa_conditionals(use_mask, use_force)
+
+		mykey = x.cp
+		if not mykey.startswith("virtual/"):
+			newsplit.append(x)
+			if atom_graph is not None:
+				atom_graph.add((x, id(x)), graph_parent)
+			continue
+
+		if x.blocker:
+			# Virtual blockers are no longer expanded here since
+			# the un-expanded virtual atom is more useful for
+			# maintaining a cache of blocker atoms.
+			newsplit.append(x)
+			if atom_graph is not None:
+				atom_graph.add((x, id(x)), graph_parent)
+			continue
+
+		if repoman or not hasattr(portdb, 'match_pkgs') or \
+			pkg_use_enabled is None:
+			if portdb.cp_list(x.cp):
+				newsplit.append(x)
+			else:
+				# TODO: Add PROVIDE check for repoman.
+				a = []
+				myvartree = mytrees.get("vartree")
+				if myvartree is not None:
+					mysettings._populate_treeVirtuals_if_needed(myvartree)
+				mychoices = mysettings.getvirtuals().get(mykey, [])
+				for y in mychoices:
+					a.append(Atom(x.replace(x.cp, y.cp, 1)))
+				if not a:
+					newsplit.append(x)
+				elif len(a) == 1:
+					newsplit.append(a[0])
+				else:
+					newsplit.append(['||'] + a)
+			continue
+
+		pkgs = []
+		# Ignore USE deps here, since otherwise we might not
+		# get any matches. Choices with correct USE settings
+		# will be preferred in dep_zapdeps().
+		matches = portdb.match_pkgs(x.without_use)
+		# Use descending order to prefer higher versions.
+		matches.reverse()
+		for pkg in matches:
+			# only use new-style matches
+			if pkg.cp.startswith("virtual/"):
+				pkgs.append(pkg)
+
+		mychoices = []
+		if not pkgs and not portdb.cp_list(x.cp):
+			myvartree = mytrees.get("vartree")
+			if myvartree is not None:
+				mysettings._populate_treeVirtuals_if_needed(myvartree)
+			mychoices = mysettings.getvirtuals().get(mykey, [])
+
+		if not (pkgs or mychoices):
+			# This one couldn't be expanded as a new-style virtual.  Old-style
+			# virtuals have already been expanded by dep_virtual, so this one
+			# is unavailable and dep_zapdeps will identify it as such.  The
+			# atom is not eliminated here since it may still represent a
+			# dependency that needs to be satisfied.
+			newsplit.append(x)
+			if atom_graph is not None:
+				atom_graph.add((x, id(x)), graph_parent)
+			continue
+
+		a = []
+		for pkg in pkgs:
+			virt_atom = '=' + pkg.cpv
+			if x.unevaluated_atom.use:
+				virt_atom += str(x.unevaluated_atom.use)
+				virt_atom = Atom(virt_atom)
+				if parent is None:
+					if myuse is None:
+						virt_atom = virt_atom.evaluate_conditionals(
+							mysettings.get("PORTAGE_USE", "").split())
+					else:
+						virt_atom = virt_atom.evaluate_conditionals(myuse)
+				else:
+					virt_atom = virt_atom.evaluate_conditionals(
+						pkg_use_enabled(parent))
+			else:
+				virt_atom = Atom(virt_atom)
+
+			# Allow the depgraph to map this atom back to the
+			# original, in order to avoid distortion in places
+			# like display or conflict resolution code.
+			virt_atom.__dict__['_orig_atom'] = x
+
+			# According to GLEP 37, RDEPEND is the only dependency
+			# type that is valid for new-style virtuals. Repoman
+			# should enforce this.
+			depstring = pkg.metadata['RDEPEND']
+			pkg_kwargs = kwargs.copy()
+			pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
+			if edebug:
+				writemsg_level(_("Virtual Parent:      %s\n") \
+					% (pkg,), noiselevel=-1, level=logging.DEBUG)
+				writemsg_level(_("Virtual Depstring:   %s\n") \
+					% (depstring,), noiselevel=-1, level=logging.DEBUG)
+
+			# Set EAPI used for validation in dep_check() recursion.
+			mytrees["virt_parent"] = pkg
+
+			try:
+				mycheck = dep_check(depstring, mydbapi, mysettings,
+					myroot=myroot, trees=trees, **pkg_kwargs)
+			finally:
+				# Restore previous EAPI after recursion.
+				if virt_parent is not None:
+					mytrees["virt_parent"] = virt_parent
+				else:
+					del mytrees["virt_parent"]
+
+			if not mycheck[0]:
+				raise ParseError(_unicode_decode("%s: %s '%s'") % \
+					(pkg, mycheck[1], depstring))
+
+			# pull in the new-style virtual
+			mycheck[1].append(virt_atom)
+			a.append(mycheck[1])
+			if atom_graph is not None:
+				virt_atom_node = (virt_atom, id(virt_atom))
+				atom_graph.add(virt_atom_node, graph_parent)
+				atom_graph.add(pkg, virt_atom_node)
+		# Plain old-style virtuals.  New-style virtuals are preferred.
+		if not pkgs:
+			for y in mychoices:
+				new_atom = Atom(x.replace(x.cp, y.cp, 1))
+				matches = portdb.match(new_atom)
+				# portdb is an instance of depgraph._dep_check_composite_db, so
+				# USE conditionals are already evaluated.
+				if matches and mykey in \
+					portdb.aux_get(matches[-1], ['PROVIDE'])[0].split():
+					a.append(new_atom)
+					if atom_graph is not None:
+						atom_graph.add((new_atom, id(new_atom)),
+							graph_parent)
+
+		if not a and mychoices:
+			# Check for a virtual package.provided match.
+			for y in mychoices:
+				new_atom = Atom(x.replace(x.cp, y.cp, 1))
+				if match_from_list(new_atom,
+					pprovideddict.get(new_atom.cp, [])):
+					a.append(new_atom)
+					if atom_graph is not None:
+						atom_graph.add((new_atom, id(new_atom)), graph_parent)
+
+		if not a:
+			newsplit.append(x)
+			if atom_graph is not None:
+				atom_graph.add((x, id(x)), graph_parent)
+		elif len(a) == 1:
+			newsplit.append(a[0])
+		else:
+			newsplit.append(['||'] + a)
+
+	return newsplit
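+
+# Rough illustration (hypothetical packages): an atom 'virtual/editor'
+# matched by the new-style virtuals virtual/editor-1 and virtual/editor-0
+# is replaced here by an any-of choice over each virtual's RDEPEND atoms
+# plus the corresponding '=virtual/editor-N' atom, highest version first.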
+
+def dep_eval(deplist):
+	if not deplist:
+		return 1
+	if deplist[0]=="||":
+		#or list; we just need one "1"
+		for x in deplist[1:]:
+			if isinstance(x, list):
+				if dep_eval(x)==1:
+					return 1
+			elif x==1:
+					return 1
+		#XXX: unless there's no available atoms in the list
+		#in which case we need to assume that everything is
+		#okay as some ebuilds are relying on an old bug.
+		if len(deplist) == 1:
+			return 1
+		return 0
+	else:
+		for x in deplist:
+			if isinstance(x, list):
+				if dep_eval(x)==0:
+					return 0
+			elif x==0 or x==2:
+				return 0
+		return 1
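+
+# Illustrative evaluation of reduced deplists (1 = satisfied):
+#     dep_eval(['||', 0, 1])       # 1: any-of with one member satisfied
+#     dep_eval([1, ['||', 0, 0]])  # 0: all-of with a failing nested any-of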
+
+def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
+	"""
+	Takes an unreduced and reduced deplist and removes satisfied dependencies.
+	Returned deplist contains steps that must be taken to satisfy dependencies.
+	"""
+	if trees is None:
+		trees = portage.db
+	writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
+	if not reduced or unreduced == ["||"] or dep_eval(reduced):
+		return []
+
+	if unreduced[0] != "||":
+		unresolved = []
+		for x, satisfied in zip(unreduced, reduced):
+			if isinstance(x, list):
+				unresolved += dep_zapdeps(x, satisfied, myroot,
+					use_binaries=use_binaries, trees=trees)
+			elif not satisfied:
+				unresolved.append(x)
+		return unresolved
+
+	# We're at a ( || atom ... ) type level and need to make a choice
+	deps = unreduced[1:]
+	satisfieds = reduced[1:]
+
+	# Our preference order is for the first item that:
+	# a) contains all unmasked packages with the same key as installed packages
+	# b) contains all unmasked packages
+	# c) contains masked installed packages
+	# d) is the first item
+
+	preferred_installed = []
+	preferred_in_graph = []
+	preferred_any_slot = []
+	preferred_non_installed = []
+	unsat_use_in_graph = []
+	unsat_use_installed = []
+	unsat_use_non_installed = []
+	other_installed = []
+	other_installed_some = []
+	other = []
+
+	# unsat_use_* must come after preferred_non_installed
+	# for correct ordering in cases like || ( foo[a] foo[b] ).
+	choice_bins = (
+		preferred_in_graph,
+		preferred_installed,
+		preferred_any_slot,
+		preferred_non_installed,
+		unsat_use_in_graph,
+		unsat_use_installed,
+		unsat_use_non_installed,
+		other_installed,
+		other_installed_some,
+		other,
+	)
+
+	# Alias the trees we'll be checking availability against
+	parent   = trees[myroot].get("parent")
+	priority = trees[myroot].get("priority")
+	graph_db = trees[myroot].get("graph_db")
+	graph    = trees[myroot].get("graph")
+	vardb = None
+	if "vartree" in trees[myroot]:
+		vardb = trees[myroot]["vartree"].dbapi
+	if use_binaries:
+		mydbapi = trees[myroot]["bintree"].dbapi
+	else:
+		mydbapi = trees[myroot]["porttree"].dbapi
+
+	# Sort the deps into installed, not installed but already
+	# in the graph, and other (not installed and not in the graph),
+	# with values of [[required_atom], availability]
+	for x, satisfied in zip(deps, satisfieds):
+		if isinstance(x, list):
+			atoms = dep_zapdeps(x, satisfied, myroot,
+				use_binaries=use_binaries, trees=trees)
+		else:
+			atoms = [x]
+		if vardb is None:
+			# When called by repoman, we can simply return the first choice
+			# because dep_eval() handles preference selection.
+			return atoms
+
+		all_available = True
+		all_use_satisfied = True
+		slot_map = {}
+		cp_map = {}
+		for atom in atoms:
+			if atom.blocker:
+				continue
+			# Ignore USE dependencies here since we don't want USE
+			# settings to adversely affect || preference evaluation.
+			avail_pkg = mydbapi.match(atom.without_use)
+			if avail_pkg:
+				avail_pkg = avail_pkg[-1] # highest (ascending order)
+				avail_slot = Atom("%s:%s" % (atom.cp,
+					mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
+			if not avail_pkg:
+				all_available = False
+				all_use_satisfied = False
+				break
+
+			if atom.use:
+				avail_pkg_use = mydbapi.match(atom)
+				if not avail_pkg_use:
+					all_use_satisfied = False
+				else:
+					# highest (ascending order)
+					avail_pkg_use = avail_pkg_use[-1]
+					if avail_pkg_use != avail_pkg:
+						avail_pkg = avail_pkg_use
+						avail_slot = Atom("%s:%s" % (atom.cp,
+							mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
+
+			slot_map[avail_slot] = avail_pkg
+			pkg_cp = cpv_getkey(avail_pkg)
+			highest_cpv = cp_map.get(pkg_cp)
+			if highest_cpv is None or \
+				pkgcmp(catpkgsplit(avail_pkg)[1:],
+				catpkgsplit(highest_cpv)[1:]) > 0:
+				cp_map[pkg_cp] = avail_pkg
+
+		this_choice = (atoms, slot_map, cp_map, all_available)
+		if all_available:
+			# The "all installed" criterion is not version or slot specific.
+			# If any version of a package is already in the graph then we
+			# assume that it is preferred over other possible packages choices.
+			all_installed = True
+			for atom in set(Atom(atom.cp) for atom in atoms \
+				if not atom.blocker):
+				# New-style virtuals have zero cost to install.
+				if not vardb.match(atom) and not atom.startswith("virtual/"):
+					all_installed = False
+					break
+			all_installed_slots = False
+			if all_installed:
+				all_installed_slots = True
+				for slot_atom in slot_map:
+					# New-style virtuals have zero cost to install.
+					if not vardb.match(slot_atom) and \
+						not slot_atom.startswith("virtual/"):
+						all_installed_slots = False
+						break
+			if graph_db is None:
+				if all_use_satisfied:
+					if all_installed:
+						if all_installed_slots:
+							preferred_installed.append(this_choice)
+						else:
+							preferred_any_slot.append(this_choice)
+					else:
+						preferred_non_installed.append(this_choice)
+				else:
+					if all_installed_slots:
+						unsat_use_installed.append(this_choice)
+					else:
+						unsat_use_non_installed.append(this_choice)
+			else:
+				all_in_graph = True
+				for slot_atom in slot_map:
+					# New-style virtuals have zero cost to install.
+					if slot_atom.startswith("virtual/"):
+						continue
+					# We check if the matched package has actually been
+					# added to the digraph, in order to distinguish between
+					# those packages and installed packages that may need
+					# to be uninstalled in order to resolve blockers.
+					graph_matches = graph_db.match_pkgs(slot_atom)
+					if not graph_matches or graph_matches[-1] not in graph:
+						all_in_graph = False
+						break
+				circular_atom = None
+				if all_in_graph:
+					if parent is None or priority is None:
+						pass
+					elif priority.buildtime and \
+						not (priority.satisfied or priority.optional):
+						# Check if the atom would result in a direct circular
+						# dependency and try to avoid that if it seems likely
+						# to be unresolvable. This is only relevant for
+						# buildtime deps that aren't already satisfied by an
+						# installed package.
+						cpv_slot_list = [parent]
+						for atom in atoms:
+							if atom.blocker:
+								continue
+							if vardb.match(atom):
+								# If the atom is satisfied by an installed
+								# version then it's not a circular dep.
+								continue
+							if atom.cp != parent.cp:
+								continue
+							if match_from_list(atom, cpv_slot_list):
+								circular_atom = atom
+								break
+				if circular_atom is not None:
+					other.append(this_choice)
+				else:
+					if all_use_satisfied:
+						if all_in_graph:
+							preferred_in_graph.append(this_choice)
+						elif all_installed:
+							if all_installed_slots:
+								preferred_installed.append(this_choice)
+							else:
+								preferred_any_slot.append(this_choice)
+						else:
+							preferred_non_installed.append(this_choice)
+					else:
+						if all_in_graph:
+							unsat_use_in_graph.append(this_choice)
+						elif all_installed_slots:
+							unsat_use_installed.append(this_choice)
+						else:
+							unsat_use_non_installed.append(this_choice)
+		else:
+			all_installed = True
+			some_installed = False
+			for atom in atoms:
+				if not atom.blocker:
+					if vardb.match(atom):
+						some_installed = True
+					else:
+						all_installed = False
+
+			if all_installed:
+				other_installed.append(this_choice)
+			elif some_installed:
+				other_installed_some.append(this_choice)
+			else:
+				other.append(this_choice)
+
+	# Prefer choices which contain upgrades to higher slots. This helps
+	# for deps such as || ( foo:1 foo:2 ), where we want to prefer the
+	# atom which matches the higher version rather than the atom furthest
+	# to the left. Sorting is done separately for each of choice_bins, so
+	# as not to interfere with the ordering of the bins. Because of the
+	# bin separation, the main function of this code is to allow
+	# --depclean to remove old slots (rather than to pull in new slots).
+	for choices in choice_bins:
+		if len(choices) < 2:
+			continue
+		for choice_1 in choices[1:]:
+			atoms_1, slot_map_1, cp_map_1, all_available_1 = choice_1
+			cps = set(cp_map_1)
+			for choice_2 in choices:
+				if choice_1 is choice_2:
+					# choice_1 will not be promoted, so move on
+					break
+				atoms_2, slot_map_2, cp_map_2, all_available_2 = choice_2
+				intersecting_cps = cps.intersection(cp_map_2)
+				if not intersecting_cps:
+					continue
+				has_upgrade = False
+				has_downgrade = False
+				for cp in intersecting_cps:
+					version_1 = cp_map_1[cp]
+					version_2 = cp_map_2[cp]
+					difference = pkgcmp(catpkgsplit(version_1)[1:],
+						catpkgsplit(version_2)[1:])
+					if difference != 0:
+						if difference > 0:
+							has_upgrade = True
+						else:
+							has_downgrade = True
+							break
+				if has_upgrade and not has_downgrade:
+					# promote choice_1 in front of choice_2
+					choices.remove(choice_1)
+					index_2 = choices.index(choice_2)
+					choices.insert(index_2, choice_1)
+					break
+
+	for allow_masked in (False, True):
+		for choices in choice_bins:
+			for atoms, slot_map, cp_map, all_available in choices:
+				if all_available or allow_masked:
+					return atoms
+
+	assert False  # This point should not be reachable
+
+def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
+	use_cache=1, use_binaries=0, myroot="/", trees=None):
+	"""Takes a depend string and parses the condition."""
+	edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
+	#check_config_instance(mysettings)
+	if trees is None:
+		trees = globals()["db"]
+	if use=="yes":
+		if myuse is None:
+			#default behavior
+			myusesplit = mysettings["PORTAGE_USE"].split()
+		else:
+			myusesplit = myuse
+			# We've been given useflags to use.
+			#print "USE FLAGS PASSED IN."
+			#print myuse
+			#if "bindist" in myusesplit:
+			#	print "BINDIST is set!"
+			#else:
+			#	print "BINDIST NOT set."
+	else:
+		#we are being run by autouse(), don't consult USE vars yet.
+		# WE ALSO CANNOT USE SETTINGS
+		myusesplit=[]
+
+	mymasks = set()
+	useforce = set()
+	useforce.add(mysettings["ARCH"])
+	if use == "all":
+		# This masking/forcing is only for repoman.  In other cases, relevant
+		# masking/forcing should have already been applied via
+		# config.regenerate().  Also, binary or installed packages may have
+		# been built with flags that are now masked, and it would be
+		# inconsistent to mask them now.  Additionally, myuse may consist of
+		# flags from a parent package that is being merged to a $ROOT that is
+		# different from the one that mysettings represents.
+		mymasks.update(mysettings.usemask)
+		mymasks.update(mysettings.archlist())
+		mymasks.discard(mysettings["ARCH"])
+		useforce.update(mysettings.useforce)
+		useforce.difference_update(mymasks)
+
+	# eapi code borrowed from _expand_new_virtuals()
+	mytrees = trees[myroot]
+	parent = mytrees.get("parent")
+	virt_parent = mytrees.get("virt_parent")
+	current_parent = None
+	eapi = None
+	if parent is not None:
+		if virt_parent is not None:
+			current_parent = virt_parent
+		else:
+			current_parent = parent
+
+	if current_parent is not None:
+		# Don't pass the eapi argument to use_reduce() for installed packages
+		# since previous validation will have already marked them as invalid
+		# when necessary and now we're more interested in evaluating
+		# dependencies so that things like --depclean work as well as possible
+		# in spite of partial invalidity.
+		if not current_parent.installed:
+			eapi = current_parent.metadata['EAPI']
+
+	try:
+		mysplit = use_reduce(depstring, uselist=myusesplit, masklist=mymasks, \
+			matchall=(use=="all"), excludeall=useforce, opconvert=True, \
+			token_class=Atom, eapi=eapi)
+	except InvalidDependString as e:
+		return [0, _unicode_decode("%s") % (e,)]
+
+	if mysplit == []:
+		#dependencies were reduced to nothing
+		return [1,[]]
+
+	# Recursively expand new-style virtuals so as to
+	# collapse one or more levels of indirection.
+	try:
+		mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
+			use=use, mode=mode, myuse=myuse,
+			use_force=useforce, use_mask=mymasks, use_cache=use_cache,
+			use_binaries=use_binaries, myroot=myroot, trees=trees)
+	except ParseError as e:
+		return [0, _unicode_decode("%s") % (e,)]
+
+	mysplit2=mysplit[:]
+	mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
+	if mysplit2 is None:
+		return [0, _("Invalid token")]
+
+	writemsg("\n\n\n", 1)
+	writemsg("mysplit:  %s\n" % (mysplit), 1)
+	writemsg("mysplit2: %s\n" % (mysplit2), 1)
+
+	selected_atoms = dep_zapdeps(mysplit, mysplit2, myroot,
+		use_binaries=use_binaries, trees=trees)
+
+	return [1, selected_atoms]
+
+def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
+	"Reduces the deplist to ones and zeros"
+	deplist=mydeplist[:]
+	for mypos, token in enumerate(deplist):
+		if isinstance(deplist[mypos], list):
+			#recurse
+			deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
+		elif deplist[mypos]=="||":
+			pass
+		elif token[:1] == "!":
+			deplist[mypos] = False
+		else:
+			mykey = deplist[mypos].cp
+			if mysettings and mykey in mysettings.pprovideddict and \
+			        match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
+				deplist[mypos]=True
+			elif mydbapi is None:
+				# Assume nothing is satisfied.  This forces dep_zapdeps to
+				# return all of the deps that have been selected
+				# (excluding those satisfied by package.provided).
+				deplist[mypos] = False
+			else:
+				if mode:
+					x = mydbapi.xmatch(mode, deplist[mypos])
+					if mode.startswith("minimum-"):
+						mydep = []
+						if x:
+							mydep.append(x)
+					else:
+						mydep = x
+				else:
+					mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
+				if mydep is not None:
+					tmp=(len(mydep)>=1)
+					if deplist[mypos][0]=="!":
+						tmp=False
+					deplist[mypos]=tmp
+				else:
+					#encountered invalid string
+					return None
+	return deplist

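To illustrate the reduction dep_wordreduce() performs, here is a toy,
self-contained re-implementation over plain strings instead of the real
Atom objects, assuming a simple set of installed packages in place of a
dbapi (the package names are hypothetical):

    def toy_wordreduce(deplist, installed):
        # Mirror dep_wordreduce's structure: recurse into nested lists,
        # keep "||" operators, and map every other token to True/False.
        result = deplist[:]
        for pos, token in enumerate(result):
            if isinstance(token, list):
                result[pos] = toy_wordreduce(token, installed)
            elif token == "||":
                pass
            elif token.startswith("!"):
                result[pos] = False
            else:
                result[pos] = token in installed
        return result

    print(toy_wordreduce(
        ["||", ["sys-libs/db", "sys-libs/gdbm"], "app-arch/tar"],
        {"sys-libs/gdbm", "app-arch/tar"}))
    # -> ['||', [False, True], True]

dep_zapdeps() then uses this boolean structure to pick one alternative
from each "||" group.
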
diff --git a/portage_with_autodep/pym/portage/dispatch_conf.py b/portage_with_autodep/pym/portage/dispatch_conf.py
new file mode 100644
index 0000000..4991020
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dispatch_conf.py
@@ -0,0 +1,188 @@
+# dispatch_conf.py -- functionality common to archive-conf and dispatch-conf
+# Copyright 2003-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+# Library by Wayne Davison <gentoo@blorf.net>, derived from code
+# written by Jeremy Wohl (http://igmus.org)
+
+from __future__ import print_function
+
+import os, sys, shutil
+
+import portage
+from portage.env.loaders import KeyValuePairFileLoader
+from portage.localization import _
+
+RCS_BRANCH = '1.1.1'
+RCS_LOCK = 'rcs -ko -M -l'
+RCS_PUT = 'ci -t-"Archived config file." -m"dispatch-conf update."'
+RCS_GET = 'co'
+RCS_MERGE = "rcsmerge -p -r" + RCS_BRANCH + " '%s' > '%s'"
+
+DIFF3_MERGE = "diff3 -mE '%s' '%s' '%s' > '%s'"
+
+def diffstatusoutput_len(cmd):
+    """
+    Execute the string cmd in a shell with getstatusoutput() and return a
+    2-tuple (status, output_length). If getstatusoutput() raises
+    UnicodeDecodeError (known to happen with python3.1), return a
+    2-tuple (1, 1). This provides a simple way to check for non-zero
+    output length of diff commands, while providing simple handling of
+    UnicodeDecodeError when necessary.
+    """
+    try:
+        status, output = portage.subprocess_getstatusoutput(cmd)
+        return (status, len(output))
+    except UnicodeDecodeError:
+        return (1, 1)
+
+def read_config(mandatory_opts):
+    loader = KeyValuePairFileLoader(
+        '/etc/dispatch-conf.conf', None)
+    opts, errors = loader.load()
+    if not opts:
+        print(_('dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'), file=sys.stderr)
+        sys.exit(1)
+
+    # Handle quote removal here, since KeyValuePairFileLoader doesn't do that.
+    quotes = "\"'"
+    for k, v in opts.items():
+        if v[:1] in quotes and v[:1] == v[-1:]:
+            opts[k] = v[1:-1]
+
+    for key in mandatory_opts:
+        if key not in opts:
+            if key == "merge":
+                opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
+            else:
+                print(_('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal') % (key,), file=sys.stderr)
+                sys.exit(1)
+
+    if not os.path.exists(opts['archive-dir']):
+        os.mkdir(opts['archive-dir'])
+        # Use restrictive permissions by default, in order to protect
+        # against vulnerabilities (like bug #315603 involving rcs).
+        os.chmod(opts['archive-dir'], 0o700)
+    elif not os.path.isdir(opts['archive-dir']):
+        print(_('dispatch-conf: Config archive dir [%s] must exist; fatal') % (opts['archive-dir'],), file=sys.stderr)
+        sys.exit(1)
+
+    return opts
+
+
+def rcs_archive(archive, curconf, newconf, mrgconf):
+    """Archive existing config in rcs (on trunk). Then, if mrgconf is
+    specified and an old branch version exists, merge the user's changes
+    and the distributed changes and put the result into mrgconf.  Lastly,
+    if newconf was specified, leave it in the archive dir with a .dist.new
+    suffix along with the last 1.1.1 branch version with a .dist suffix."""
+
+    try:
+        os.makedirs(os.path.dirname(archive))
+    except OSError:
+        pass
+
+    if os.path.isfile(curconf):
+        try:
+            shutil.copy2(curconf, archive)
+        except (IOError, os.error) as why:
+            print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
+                {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+    if os.path.exists(archive + ',v'):
+        os.system(RCS_LOCK + ' ' + archive)
+    os.system(RCS_PUT + ' ' + archive)
+
+    ret = 0
+    if newconf != '':
+        os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
+        has_branch = os.path.exists(archive)
+        if has_branch:
+            os.rename(archive, archive + '.dist')
+
+        try:
+            shutil.copy2(newconf, archive)
+        except (IOError, os.error) as why:
+            print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
+                  {"newconf": newconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+        if has_branch:
+            if mrgconf != '':
+                # This puts the results of the merge into mrgconf.
+                ret = os.system(RCS_MERGE % (archive, mrgconf))
+                mystat = os.lstat(newconf)
+                os.chmod(mrgconf, mystat.st_mode)
+                os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
+        os.rename(archive, archive + '.dist.new')
+    return ret
+
+
+def file_archive(archive, curconf, newconf, mrgconf):
+    """Archive existing config to the archive-dir, bumping old versions
+    out of the way into .# versions (log-rotate style). Then, if mrgconf
+    was specified and there is a .dist version, merge the user's changes
+    and the distributed changes and put the result into mrgconf.  Lastly,
+    if newconf was specified, archive it as a .dist.new version (which
+    gets moved to the .dist version at the end of the processing)."""
+
+    try:
+        os.makedirs(os.path.dirname(archive))
+    except OSError:
+        pass
+
+    # Archive the current config file if it isn't already saved
+    if os.path.exists(archive) \
+     and diffstatusoutput_len("diff -aq '%s' '%s'" % (curconf,archive))[1] != 0:
+        suf = 1
+        while suf < 9 and os.path.exists(archive + '.' + str(suf)):
+            suf += 1
+
+        while suf > 1:
+            os.rename(archive + '.' + str(suf-1), archive + '.' + str(suf))
+            suf -= 1
+
+        os.rename(archive, archive + '.1')
+
+    if os.path.isfile(curconf):
+        try:
+            shutil.copy2(curconf, archive)
+        except (IOError, os.error) as why:
+            print(_('dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal') % \
+                {"curconf": curconf, "archive": archive, "reason": str(why)}, file=sys.stderr)
+
+    if newconf != '':
+        # Save off new config file in the archive dir with .dist.new suffix
+        try:
+            shutil.copy2(newconf, archive + '.dist.new')
+        except (IOError, os.error) as why:
+            print(_('dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal') % \
+                  {"newconf": newconf, "archive": archive + '.dist.new', "reason": str(why)}, file=sys.stderr)
+
+        ret = 0
+        if mrgconf != '' and os.path.exists(archive + '.dist'):
+            # This puts the results of the merge into mrgconf.
+            ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
+            mystat = os.lstat(newconf)
+            os.chmod(mrgconf, mystat.st_mode)
+            os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
+
+        return ret
+
+
+def rcs_archive_post_process(archive):
+    """Check in the archive file with the .dist.new suffix on the branch
+    and remove the one with the .dist suffix."""
+    os.rename(archive + '.dist.new', archive)
+    if os.path.exists(archive + '.dist'):
+        # Commit the last-distributed version onto the branch.
+        os.system(RCS_LOCK + RCS_BRANCH + ' ' + archive)
+        os.system(RCS_PUT + ' -r' + RCS_BRANCH + ' ' + archive)
+        os.unlink(archive + '.dist')
+    else:
+        # Forcefully commit the last-distributed version onto the branch.
+        os.system(RCS_PUT + ' -f -r' + RCS_BRANCH + ' ' + archive)
+
+
+def file_archive_post_process(archive):
+    """Rename the archive file with the .dist.new suffix to a .dist suffix"""
+    os.rename(archive + '.dist.new', archive + '.dist')

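read_config() above pulls its settings from /etc/dispatch-conf.conf via
KeyValuePairFileLoader and strips surrounding quotes itself. A minimal
configuration satisfying the code above might look like this (the
archive path is hypothetical, and "merge" is optional since a default
is filled in when it is missing):

    archive-dir="/etc/config-archive"
    merge="sdiff --suppress-common-lines --output='%s' '%s' '%s'"
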
diff --git a/portage_with_autodep/pym/portage/eapi.py b/portage_with_autodep/pym/portage/eapi.py
new file mode 100644
index 0000000..da5fd8c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/eapi.py
@@ -0,0 +1,50 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+def eapi_has_iuse_defaults(eapi):
+	return eapi != "0"
+
+def eapi_has_slot_deps(eapi):
+	return eapi != "0"
+
+def eapi_has_src_uri_arrows(eapi):
+	return eapi not in ("0", "1")
+
+def eapi_has_use_deps(eapi):
+	return eapi not in ("0", "1")
+
+def eapi_has_strong_blocks(eapi):
+	return eapi not in ("0", "1")
+
+def eapi_has_src_prepare_and_src_configure(eapi):
+	return eapi not in ("0", "1")
+
+def eapi_supports_prefix(eapi):
+	return eapi not in ("0", "1", "2")
+
+def eapi_exports_AA(eapi):
+	return eapi in ("0", "1", "2", "3")
+
+def eapi_exports_KV(eapi):
+	return eapi in ("0", "1", "2", "3")
+
+def eapi_exports_merge_type(eapi):
+	return eapi not in ("0", "1", "2", "3")
+
+def eapi_exports_replace_vars(eapi):
+	return eapi not in ("0", "1", "2", "3")
+
+def eapi_has_pkg_pretend(eapi):
+	return eapi not in ("0", "1", "2", "3")
+
+def eapi_has_implicit_rdepend(eapi):
+	return eapi in ("0", "1", "2", "3")
+
+def eapi_has_dosed_dohard(eapi):
+	return eapi in ("0", "1", "2", "3")
+
+def eapi_has_required_use(eapi):
+	return eapi not in ("0", "1", "2", "3")
+
+def eapi_has_use_dep_defaults(eapi):
+	return eapi not in ("0", "1", "2", "3")

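These predicates key off the raw EAPI string, so any value outside the
hard-coded tuples is treated as a newer EAPI. A quick sketch, assuming
this commit's pym directory is on sys.path:

    from portage.eapi import eapi_has_slot_deps, eapi_has_use_deps

    for eapi in ("0", "1", "2"):
        # slot deps arrive in EAPI 1, use deps in EAPI 2
        print(eapi, eapi_has_slot_deps(eapi), eapi_has_use_deps(eapi))
    # 0 False False
    # 1 True False
    # 2 True True
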
diff --git a/portage_with_autodep/pym/portage/eclass_cache.py b/portage_with_autodep/pym/portage/eclass_cache.py
new file mode 100644
index 0000000..1374f1d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/eclass_cache.py
@@ -0,0 +1,123 @@
+# Copyright 2005-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Nicholas Carpaski (carpaski@gentoo.org), Brian Harring (ferringb@gentoo.org)
+
+__all__ = ["cache"]
+
+import stat
+import sys
+from portage.util import normalize_path
+import errno
+from portage.exception import PermissionDenied
+from portage import os
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+class cache(object):
+	"""
+	Maintains the cache information about eclasses used in ebuild.
+	"""
+	def __init__(self, porttree_root, overlays=[]):
+
+		self.eclasses = {} # {"Name": ("location","_mtime_")}
+		self._eclass_locations = {}
+
+		# screw with the porttree ordering, w/out having bash inherit match it, and I'll hurt you.
+		# ~harring
+		if porttree_root:
+			self.porttree_root = porttree_root
+			self.porttrees = [self.porttree_root] + overlays
+			self.porttrees = tuple(map(normalize_path, self.porttrees))
+			self._master_eclass_root = os.path.join(self.porttrees[0], "eclass")
+			self.update_eclasses()
+		else:
+			self.porttree_root = None
+			self.porttrees = ()
+			self._master_eclass_root = None
+
+	def copy(self):
+		return self.__copy__()
+
+	def __copy__(self):
+		result = self.__class__(None)
+		result.eclasses = self.eclasses.copy()
+		result._eclass_locations = self._eclass_locations.copy()
+		result.porttree_root = self.porttree_root
+		result.porttrees = self.porttrees
+		result._master_eclass_root = self._master_eclass_root
+		return result
+
+	def append(self, other):
+		"""
+		Append another instance to this instance. This will cause eclasses
+		from the other instance to override any eclasses from this instance
+		that have the same name.
+		"""
+		if not isinstance(other, self.__class__):
+			raise TypeError(
+				"expected type %s, got %s" % (self.__class__, type(other)))
+		self.porttrees = self.porttrees + other.porttrees
+		self.eclasses.update(other.eclasses)
+		self._eclass_locations.update(other._eclass_locations)
+
+	def update_eclasses(self):
+		self.eclasses = {}
+		self._eclass_locations = {}
+		master_eclasses = {}
+		eclass_len = len(".eclass")
+		ignored_listdir_errnos = (errno.ENOENT, errno.ENOTDIR)
+		for x in [normalize_path(os.path.join(y,"eclass")) for y in self.porttrees]:
+			try:
+				eclass_filenames = os.listdir(x)
+			except OSError as e:
+				if e.errno in ignored_listdir_errnos:
+					del e
+					continue
+				elif e.errno == PermissionDenied.errno:
+					raise PermissionDenied(x)
+				raise
+			for y in eclass_filenames:
+				if not y.endswith(".eclass"):
+					continue
+				try:
+					mtime = os.stat(os.path.join(x, y))[stat.ST_MTIME]
+				except OSError:
+					continue
+				ys=y[:-eclass_len]
+				if x == self._master_eclass_root:
+					master_eclasses[ys] = mtime
+					self.eclasses[ys] = (x, mtime)
+					self._eclass_locations[ys] = x
+					continue
+
+				master_mtime = master_eclasses.get(ys)
+				if master_mtime is not None:
+					if master_mtime == mtime:
+						# It appears to be identical to the master,
+						# so prefer the master entry.
+						continue
+
+				self.eclasses[ys] = (x, mtime)
+				self._eclass_locations[ys] = x
+
+	def is_eclass_data_valid(self, ec_dict):
+		if not isinstance(ec_dict, dict):
+			return False
+		for eclass, tup in ec_dict.items():
+			cached_data = self.eclasses.get(eclass, None)
+			# Only use the mtime for validation since the probability of a
+			# collision is small and, depending on the cache implementation,
+			# the path may not be specified (cache from rsync mirrors, for
+			# example).
+			if cached_data is None or tup[1] != cached_data[1]:
+				return False
+
+		return True
+
+	def get_eclass_data(self, inherits):
+		ec_dict = {}
+		for x in inherits:
+			ec_dict[x] = self.eclasses[x]
+
+		return ec_dict

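A minimal sketch of how this cache is consumed, assuming /usr/portage
and the overlay path exist on disk, that an eutils.eclass is present,
and that this commit's pym directory is on sys.path:

    from portage.eclass_cache import cache

    ec = cache("/usr/portage", overlays=["/var/lib/layman/myoverlay"])
    location, mtime = ec.eclasses["eutils"]   # where eutils.eclass won
    ec_dict = ec.get_eclass_data(["eutils"])  # {"eutils": (location, mtime)}
    print(ec.is_eclass_data_valid(ec_dict))   # True while the mtime matches
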
diff --git a/portage_with_autodep/pym/portage/elog/__init__.py b/portage_with_autodep/pym/portage/elog/__init__.py
new file mode 100644
index 0000000..1a8309d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/__init__.py
@@ -0,0 +1,182 @@
+# elog/__init__.py - elog core functions
+# Copyright 2006-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.util:writemsg',
+)
+
+from portage.const import EBUILD_PHASES
+from portage.exception import AlarmSignal, PortageException
+from portage.process import atexit_register
+from portage.elog.messages import collect_ebuild_messages, collect_messages
+from portage.elog.filtering import filter_loglevels
+from portage.localization import _
+from portage import os
+
+def _preload_elog_modules(settings):
+	logsystems = settings.get("PORTAGE_ELOG_SYSTEM", "").split()
+	for s in logsystems:
+		# allow per module overrides of PORTAGE_ELOG_CLASSES
+		if ":" in s:
+			s, levels = s.split(":", 1)
+			levels = levels.split(",")
+		# - is nicer than _ for module names, so allow people to use it.
+		s = s.replace("-", "_")
+		try:
+			_load_mod("portage.elog.mod_" + s)
+		except ImportError:
+			pass
+
+def _merge_logentries(a, b):
+	rValue = {}
+	phases = set(a)
+	phases.update(b)
+	for p in phases:
+		merged_msgs = []
+		rValue[p] = merged_msgs
+		for d in a, b:
+			msgs = d.get(p)
+			if msgs:
+				merged_msgs.extend(msgs)
+	return rValue
+
+def _combine_logentries(logentries):
+	# generate a single string with all log messages
+	rValue = []
+	for phase in EBUILD_PHASES:
+		if phase not in logentries:
+			continue
+		previous_type = None
+		for msgtype, msgcontent in logentries[phase]:
+			if previous_type != msgtype:
+				previous_type = msgtype
+				rValue.append("%s: %s\n" % (msgtype, phase))
+			for line in msgcontent:
+				rValue.append(line)
+			rValue.append("\n")
+	return "".join(rValue)
+
+_elog_mod_imports = {}
+def _load_mod(name):
+	global _elog_mod_imports
+	m = _elog_mod_imports.get(name)
+	if m is None:
+		m = __import__(name)
+		for comp in name.split(".")[1:]:
+			m = getattr(m, comp)
+		_elog_mod_imports[name] = m
+	return m
+
+_elog_listeners = []
+def add_listener(listener):
+	'''
+	Listeners should accept four arguments: settings, key, logentries and logtext
+	'''
+	_elog_listeners.append(listener)
+
+def remove_listener(listener):
+	'''
+	Remove previously added listener
+	'''
+	_elog_listeners.remove(listener)
+
+_elog_atexit_handlers = []
+
+def elog_process(cpv, mysettings, phasefilter=None):
+	global _elog_atexit_handlers
+	
+	logsystems = mysettings.get("PORTAGE_ELOG_SYSTEM","").split()
+	for s in logsystems:
+		# allow per module overrides of PORTAGE_ELOG_CLASSES
+		if ":" in s:
+			s, levels = s.split(":", 1)
+			levels = levels.split(",")
+		# - is nicer than _ for module names, so allow people to use it.
+		s = s.replace("-", "_")
+		try:
+			_load_mod("portage.elog.mod_" + s)
+		except ImportError:
+			pass
+
+	if "T" in mysettings:
+		ebuild_logentries = collect_ebuild_messages(
+			os.path.join(mysettings["T"], "logging"))
+	else:
+		# A build dir isn't necessarily required since the messages.e*
+		# functions allow messages to be generated in-memory.
+		ebuild_logentries = {}
+	all_logentries = collect_messages(key=cpv, phasefilter=phasefilter)
+	if cpv in all_logentries:
+		# Messages generated by the python elog implementation are assumed
+		# to come first. For example, this ensures correct order for einfo
+		# messages that are generated prior to the setup phase.
+		all_logentries[cpv] = \
+			_merge_logentries(all_logentries[cpv], ebuild_logentries)
+	else:
+		all_logentries[cpv] = ebuild_logentries
+
+	my_elog_classes = set(mysettings.get("PORTAGE_ELOG_CLASSES", "").split())
+	logsystems = {}
+	for token in mysettings.get("PORTAGE_ELOG_SYSTEM", "").split():
+		if ":" in token:
+			s, levels = token.split(":", 1)
+			levels = levels.split(",")
+		else:
+			s = token
+			levels = ()
+		levels_set = logsystems.get(s)
+		if levels_set is None:
+			levels_set = set()
+			logsystems[s] = levels_set
+		levels_set.update(levels)
+
+	for key in all_logentries:
+		default_logentries = filter_loglevels(all_logentries[key], my_elog_classes)
+
+		# in case the filters matched all messages and no module overrides exist
+		if len(default_logentries) == 0 and (":" not in mysettings.get("PORTAGE_ELOG_SYSTEM", "")):
+			continue
+
+		default_fulllog = _combine_logentries(default_logentries)
+
+		# call listeners
+		for listener in _elog_listeners:
+			listener(mysettings, str(key), default_logentries, default_fulllog)
+
+		# pass the processing to the individual modules
+		for s, levels in logsystems.items():
+			# allow per module overrides of PORTAGE_ELOG_CLASSES
+			if levels:
+				mod_logentries = filter_loglevels(all_logentries[key], levels)
+				mod_fulllog = _combine_logentries(mod_logentries)
+			else:
+				mod_logentries = default_logentries
+				mod_fulllog = default_fulllog
+			if len(mod_logentries) == 0:
+				continue
+			# - is nicer than _ for module names, so allow people to use it.
+			s = s.replace("-", "_")
+			try:
+				m = _load_mod("portage.elog.mod_" + s)
+				# Timeout after one minute (in case something like the mail
+				# module gets hung).
+				try:
+					AlarmSignal.register(60)
+					m.process(mysettings, str(key), mod_logentries, mod_fulllog)
+				finally:
+					AlarmSignal.unregister()
+				if hasattr(m, "finalize") and m.finalize not in _elog_atexit_handlers:
+					_elog_atexit_handlers.append(m.finalize)
+					atexit_register(m.finalize)
+			except (ImportError, AttributeError) as e:
+				writemsg(_("!!! Error while importing logging modules "
+					"while loading \"mod_%s\":\n") % str(s))
+				writemsg("%s\n" % str(e), noiselevel=-1)
+			except AlarmSignal:
+				writemsg("Timeout in elog_process for system '%s'\n" % s,
+					noiselevel=-1)
+			except PortageException as e:
+				writemsg("%s\n" % str(e), noiselevel=-1)
+

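The listener hook above is the simplest way to tap into elog output:
elog_process() calls every registered listener with (settings, key,
logentries, logtext). A hedged sketch:

    from portage.elog import add_listener, remove_listener

    def my_listener(settings, key, logentries, logtext):
        # key is the cpv the messages belong to; logtext is the
        # combined, human-readable form of logentries.
        print("elog messages for %s:\n%s" % (key, logtext))

    add_listener(my_listener)
    # ... elog_process() fires during merges ...
    remove_listener(my_listener)
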
diff --git a/portage_with_autodep/pym/portage/elog/filtering.py b/portage_with_autodep/pym/portage/elog/filtering.py
new file mode 100644
index 0000000..82181a4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/filtering.py
@@ -0,0 +1,15 @@
+# elog/filtering.py - elog message filtering functions
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+def filter_loglevels(logentries, loglevels):
+	# remove unwanted entries from all logentries
+	rValue = {}
+	loglevels = [x.upper() for x in loglevels]
+	for phase in logentries:
+		for msgtype, msgcontent in logentries[phase]:
+			if msgtype.upper() in loglevels or "*" in loglevels:
+				if phase not in rValue:
+					rValue[phase] = []
+				rValue[phase].append((msgtype, msgcontent))
+	return rValue

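filter_loglevels() compares message classes case-insensitively, and "*"
keeps everything. For example:

    from portage.elog.filtering import filter_loglevels

    entries = {"setup": [("INFO", ["configuring"]),
                         ("WARN", ["deprecated option"])]}
    print(filter_loglevels(entries, ["warn"]))
    # {'setup': [('WARN', ['deprecated option'])]}
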
diff --git a/portage_with_autodep/pym/portage/elog/messages.py b/portage_with_autodep/pym/portage/elog/messages.py
new file mode 100644
index 0000000..6c1580a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/messages.py
@@ -0,0 +1,172 @@
+# elog/messages.py - elog core functions
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.output:colorize',
+	'portage.util:writemsg',
+)
+
+from portage.const import EBUILD_PHASES
+from portage.localization import _
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import _unicode_decode
+
+import io
+import sys
+
+def collect_ebuild_messages(path):
+	""" Collect elog messages generated by the bash logging function stored 
+		at 'path'.
+	"""
+	mylogfiles = None
+	try:
+		mylogfiles = os.listdir(path)
+	except OSError:
+		pass
+	# shortcut for packages without any messages
+	if not mylogfiles:
+		return {}
+	# exploit listdir() file order so we process log entries in chronological order
+	mylogfiles.reverse()
+	logentries = {}
+	for msgfunction in mylogfiles:
+		filename = os.path.join(path, msgfunction)
+		if msgfunction not in EBUILD_PHASES:
+			writemsg(_("!!! can't process invalid log file: %s\n") % filename,
+				noiselevel=-1)
+			continue
+		if msgfunction not in logentries:
+			logentries[msgfunction] = []
+		lastmsgtype = None
+		msgcontent = []
+		for l in io.open(_unicode_encode(filename,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['repo.content'], errors='replace'):
+			if not l:
+				continue
+			try:
+				msgtype, msg = l.split(" ", 1)
+			except ValueError:
+				writemsg(_("!!! malformed entry in "
+					"log file: '%s'\n") % filename, noiselevel=-1)
+				continue
+
+			if lastmsgtype is None:
+				lastmsgtype = msgtype
+			
+			if msgtype == lastmsgtype:
+				msgcontent.append(msg)
+			else:
+				if msgcontent:
+					logentries[msgfunction].append((lastmsgtype, msgcontent))
+				msgcontent = [msg]
+			lastmsgtype = msgtype
+		if msgcontent:
+			logentries[msgfunction].append((lastmsgtype, msgcontent))
+
+	# clean logfiles to avoid repetitions
+	for f in mylogfiles:
+		try:
+			os.unlink(os.path.join(path, f))
+		except OSError:
+			pass
+	return logentries
+
+_msgbuffer = {}
+def _elog_base(level, msg, phase="other", key=None, color=None, out=None):
+	""" Backend for the other messaging functions, should not be called 
+	    directly.
+	"""
+
+	# TODO: Have callers pass in a more unique 'key' parameter than a plain
+	# cpv, in order to ensure that messages are properly grouped together
+	# for a given package instance, and also to ensure that each elog module's
+	# process() function is only called once for each unique package. This is
+	# needed not only when building packages in parallel, but also to preserve
+	# continuity in messages when a package is simply updated, since we don't
+	# want the elog_process() call from the uninstall of the old version to
+	# cause discontinuity in the elog messages of the new one being installed.
+
+	global _msgbuffer
+
+	if out is None:
+		out = sys.stdout
+
+	if color is None:
+		color = "GOOD"
+
+	msg = _unicode_decode(msg,
+		encoding=_encodings['content'], errors='replace')
+
+	formatted_msg = colorize(color, " * ") + msg + "\n"
+
+	# avoid potential UnicodeEncodeError
+	if out in (sys.stdout, sys.stderr):
+		formatted_msg = _unicode_encode(formatted_msg,
+			encoding=_encodings['stdio'], errors='backslashreplace')
+		if sys.hexversion >= 0x3000000:
+			out = out.buffer
+
+	out.write(formatted_msg)
+
+	if key not in _msgbuffer:
+		_msgbuffer[key] = {}
+	if phase not in _msgbuffer[key]:
+		_msgbuffer[key][phase] = []
+	_msgbuffer[key][phase].append((level, msg))
+
+	#raise NotImplementedError()
+
+def collect_messages(key=None, phasefilter=None):
+	global _msgbuffer
+
+	if key is None:
+		rValue = _msgbuffer
+		_reset_buffer()
+	else:
+		rValue = {}
+		if key in _msgbuffer:
+			if phasefilter is None:
+				rValue[key] = _msgbuffer.pop(key)
+			else:
+				rValue[key] = {}
+				for phase in phasefilter:
+					try:
+						rValue[key][phase] = _msgbuffer[key].pop(phase)
+					except KeyError:
+						pass
+				if not _msgbuffer[key]:
+					del _msgbuffer[key]
+	return rValue
+
+def _reset_buffer():
+	""" Reset the internal message buffer when it has been processed, 
+	    should not be called directly.
+	"""
+	global _msgbuffer
+	
+	_msgbuffer = {}
+
+# creating and exporting the actual messaging functions
+_functions = { "einfo": ("INFO", "GOOD"),
+		"elog": ("LOG", "GOOD"),
+		"ewarn": ("WARN", "WARN"),
+		"eqawarn": ("QA", "WARN"),
+		"eerror": ("ERROR", "BAD"),
+}
+
+def _make_msgfunction(level, color):
+	def _elog(msg, phase="other", key=None, out=None):
+		""" Display and log a message assigned to the given key/cpv 
+		    (or unassigned if no key is given).
+		""" 
+		_elog_base(level, msg, phase=phase, key=key, color=color, out=out)
+	return _elog
+
+for f in _functions:
+	setattr(sys.modules[__name__], f, _make_msgfunction(_functions[f][0], _functions[f][1]))
+del f, _functions

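Since einfo, ewarn and friends are generated at import time via
setattr(), static analysis won't see them, but they are ordinary module
attributes. A small sketch of the buffer round-trip (the cpv is
hypothetical, and each call also echoes its message to stdout):

    from portage.elog import messages

    messages.einfo("checking configuration",
        phase="setup", key="app-misc/foo-1")
    messages.ewarn("legacy option used",
        phase="setup", key="app-misc/foo-1")
    print(messages.collect_messages(key="app-misc/foo-1"))
    # {'app-misc/foo-1': {'setup': [('INFO', 'checking configuration'),
    #                               ('WARN', 'legacy option used')]}}
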
diff --git a/portage_with_autodep/pym/portage/elog/mod_custom.py b/portage_with_autodep/pym/portage/elog/mod_custom.py
new file mode 100644
index 0000000..e1a5223
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_custom.py
@@ -0,0 +1,19 @@
+# elog/mod_custom.py - elog dispatch module
+# Copyright 2006-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage.elog.mod_save, portage.process, portage.exception
+
+def process(mysettings, key, logentries, fulltext):
+	elogfilename = portage.elog.mod_save.process(mysettings, key, logentries, fulltext)
+	
+	if not mysettings.get("PORTAGE_ELOG_COMMAND"):
+		raise portage.exception.MissingParameter("!!! Custom logging requested but PORTAGE_ELOG_COMMAND is not defined")
+	else:
+		mylogcmd = mysettings["PORTAGE_ELOG_COMMAND"]
+		mylogcmd = mylogcmd.replace("${LOGFILE}", elogfilename)
+		mylogcmd = mylogcmd.replace("${PACKAGE}", key)
+		retval = portage.process.spawn_bash(mylogcmd)
+		if retval != 0:
+			raise portage.exception.PortageException("!!! PORTAGE_ELOG_COMMAND failed with exitcode %d" % retval)
+	return

diff --git a/portage_with_autodep/pym/portage/elog/mod_echo.py b/portage_with_autodep/pym/portage/elog/mod_echo.py
new file mode 100644
index 0000000..5de25bf
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_echo.py
@@ -0,0 +1,46 @@
+# elog/mod_echo.py - elog dispatch module
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+from portage.output import EOutput, colorize
+from portage.const import EBUILD_PHASES
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+_items = []
+def process(mysettings, key, logentries, fulltext):
+	global _items
+	_items.append((mysettings["ROOT"], key, logentries))
+
+def finalize():
+	global _items
+	printer = EOutput()
+	for root, key, logentries in _items:
+		print()
+		if root == "/":
+			printer.einfo(_("Messages for package %s:") %
+				colorize("INFORM", key))
+		else:
+			printer.einfo(_("Messages for package %(pkg)s merged to %(root)s:") %
+				{"pkg": colorize("INFORM", key), "root": root})
+		print()
+		for phase in EBUILD_PHASES:
+			if phase not in logentries:
+				continue
+			for msgtype, msgcontent in logentries[phase]:
+				fmap = {"INFO": printer.einfo,
+						"WARN": printer.ewarn,
+						"ERROR": printer.eerror,
+						"LOG": printer.einfo,
+						"QA": printer.ewarn}
+				if isinstance(msgcontent, basestring):
+					msgcontent = [msgcontent]
+				for line in msgcontent:
+					fmap[msgtype](line.strip("\n"))
+	_items = []
+	return

diff --git a/portage_with_autodep/pym/portage/elog/mod_mail.py b/portage_with_autodep/pym/portage/elog/mod_mail.py
new file mode 100644
index 0000000..086c683
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_mail.py
@@ -0,0 +1,43 @@
+# elog/mod_mail.py - elog dispatch module
+# Copyright 2006-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage.mail, socket
+from portage.exception import PortageException
+from portage.localization import _
+from portage.util import writemsg
+
+def process(mysettings, key, logentries, fulltext):
+	if "PORTAGE_ELOG_MAILURI" in mysettings:
+		myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
+	else:
+		myrecipient = "root@localhost"
+	
+	myfrom = mysettings["PORTAGE_ELOG_MAILFROM"]
+	myfrom = myfrom.replace("${HOST}", socket.getfqdn())
+	mysubject = mysettings["PORTAGE_ELOG_MAILSUBJECT"]
+	mysubject = mysubject.replace("${PACKAGE}", key)
+	mysubject = mysubject.replace("${HOST}", socket.getfqdn())
+
+	# look at the phases listed in our logentries to figure out what action was performed
+	action = _("merged")
+	for phase in logentries:
+		# if we found a *rm phase assume that the package was unmerged
+		if phase in ["postrm", "prerm"]:
+			action = _("unmerged")
+	# if we think that the package was unmerged, make sure there was no unexpected
+	# phase recorded to avoid misinformation
+	if action == _("unmerged"):
+		for phase in logentries:
+			if phase not in ["postrm", "prerm", "other"]:
+				action = _("unknown")
+
+	mysubject = mysubject.replace("${ACTION}", action)
+
+	mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject, fulltext)
+	try:
+		portage.mail.send_mail(mysettings, mymessage)
+	except PortageException as e:
+		writemsg("%s\n" % str(e), noiselevel=-1)
+
+	return

diff --git a/portage_with_autodep/pym/portage/elog/mod_mail_summary.py b/portage_with_autodep/pym/portage/elog/mod_mail_summary.py
new file mode 100644
index 0000000..0bd67f2
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_mail_summary.py
@@ -0,0 +1,89 @@
+# elog/mod_mail_summary.py - elog dispatch module
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.exception import AlarmSignal, PortageException
+from portage.localization import _
+from portage.util import writemsg
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+
+import socket
+import time
+
+_config_keys = ('PORTAGE_ELOG_MAILURI', 'PORTAGE_ELOG_MAILFROM',
+	'PORTAGE_ELOG_MAILSUBJECT',)
+_items = {}
+def process(mysettings, key, logentries, fulltext):
+	global _items
+	time_str = _unicode_decode(
+		time.strftime("%Y%m%d-%H%M%S %Z", time.localtime(time.time())),
+		encoding=_encodings['content'], errors='replace')
+	header = _(">>> Messages generated for package %(pkg)s by process %(pid)d on %(time)s:\n\n") % \
+		{"pkg": key, "pid": os.getpid(), "time": time_str}
+	config_root = mysettings["PORTAGE_CONFIGROOT"]
+
+	# Copy needed variables from the config instance,
+	# since we don't need to hold a reference for the
+	# whole thing. This also makes it possible to
+	# rely on per-package variable settings that may
+	# have come from /etc/portage/package.env, since
+	# we'll be isolated from any future mutations of
+	# mysettings.
+	config_dict = {}
+	for k in _config_keys:
+		v = mysettings.get(k)
+		if v is not None:
+			config_dict[k] = v
+
+	config_dict, items = _items.setdefault(config_root, (config_dict, {}))
+	items[key] = header + fulltext
+
+def finalize():
+	global _items
+	for mysettings, items in _items.values():
+		_finalize(mysettings, items)
+	_items.clear()
+
+def _finalize(mysettings, items):
+	if len(items) == 0:
+		return
+	elif len(items) == 1:
+		count = _("one package")
+	else:
+		count = _("multiple packages")
+	if "PORTAGE_ELOG_MAILURI" in mysettings:
+		myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
+	else:
+		myrecipient = "root@localhost"
+	
+	myfrom = mysettings.get("PORTAGE_ELOG_MAILFROM", "")
+	myfrom = myfrom.replace("${HOST}", socket.getfqdn())
+	mysubject = mysettings.get("PORTAGE_ELOG_MAILSUBJECT", "")
+	mysubject = mysubject.replace("${PACKAGE}", count)
+	mysubject = mysubject.replace("${HOST}", socket.getfqdn())
+
+	mybody = _("elog messages for the following packages generated by "
+		"process %(pid)d on host %(host)s:\n") % {"pid": os.getpid(), "host": socket.getfqdn()}
+	for key in items:
+		mybody += "- %s\n" % key
+
+	mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject,
+		mybody, attachments=list(items.values()))
+
+	# Timeout after one minute in case send_mail() blocks indefinitely.
+	try:
+		try:
+			AlarmSignal.register(60)
+			portage.mail.send_mail(mysettings, mymessage)
+		finally:
+			AlarmSignal.unregister()
+	except AlarmSignal:
+		writemsg("Timeout in finalize() for elog system 'mail_summary'\n",
+			noiselevel=-1)
+	except PortageException as e:
+		writemsg("%s\n" % (e,), noiselevel=-1)
+
+	return

diff --git a/portage_with_autodep/pym/portage/elog/mod_save.py b/portage_with_autodep/pym/portage/elog/mod_save.py
new file mode 100644
index 0000000..9350a6e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_save.py
@@ -0,0 +1,51 @@
+# elog/mod_save.py - elog dispatch module
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import time
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import portage_gid, portage_uid
+from portage.package.ebuild.prepare_build_dirs import _ensure_log_subdirs
+from portage.util import ensure_dirs, normalize_path
+
+def process(mysettings, key, logentries, fulltext):
+
+	if mysettings.get("PORT_LOGDIR"):
+		logdir = normalize_path(mysettings["PORT_LOGDIR"])
+	else:
+		logdir = os.path.join(os.sep, "var", "log", "portage")
+
+	if not os.path.isdir(logdir):
+		# Only initialize group/mode if the directory doesn't
+		# exist, so that we don't override permissions if they
+		# were previously set by the administrator.
+		# NOTE: These permissions should be compatible with our
+		# default logrotate config as discussed in bug 374287.
+		ensure_dirs(logdir, uid=portage_uid, gid=portage_gid, mode=0o2770)
+
+	cat = mysettings['CATEGORY']
+	pf = mysettings['PF']
+
+	elogfilename = pf + ":" + _unicode_decode(
+		time.strftime("%Y%m%d-%H%M%S", time.gmtime(time.time())),
+		encoding=_encodings['content'], errors='replace') + ".log"
+
+	if "split-elog" in mysettings.features:
+		log_subdir = os.path.join(logdir, "elog", cat)
+		elogfilename = os.path.join(log_subdir, elogfilename)
+	else:
+		log_subdir = os.path.join(logdir, "elog")
+		elogfilename = os.path.join(log_subdir, cat + ':' + elogfilename)
+	_ensure_log_subdirs(logdir, log_subdir)
+
+	elogfile = io.open(_unicode_encode(elogfilename,
+		encoding=_encodings['fs'], errors='strict'),
+		mode='w', encoding=_encodings['content'], errors='backslashreplace')
+	elogfile.write(_unicode_decode(fulltext))
+	elogfile.close()
+
+	return elogfilename

diff --git a/portage_with_autodep/pym/portage/elog/mod_save_summary.py b/portage_with_autodep/pym/portage/elog/mod_save_summary.py
new file mode 100644
index 0000000..4adc6f3
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_save_summary.py
@@ -0,0 +1,59 @@
+# elog/mod_save_summary.py - elog dispatch module
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import time
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import portage_gid, portage_uid
+from portage.localization import _
+from portage.package.ebuild.prepare_build_dirs import _ensure_log_subdirs
+from portage.util import apply_permissions, ensure_dirs, normalize_path
+
+def process(mysettings, key, logentries, fulltext):
+	if mysettings.get("PORT_LOGDIR"):
+		logdir = normalize_path(mysettings["PORT_LOGDIR"])
+	else:
+		logdir = os.path.join(os.sep, "var", "log", "portage")
+
+	if not os.path.isdir(logdir):
+		# Only initialize group/mode if the directory doesn't
+		# exist, so that we don't override permissions if they
+		# were previously set by the administrator.
+		# NOTE: These permissions should be compatible with our
+		# default logrotate config as discussed in bug 374287.
+		ensure_dirs(logdir, uid=portage_uid, gid=portage_gid, mode=0o2770)
+
+	elogdir = os.path.join(logdir, "elog")
+	_ensure_log_subdirs(logdir, elogdir)
+
+	# TODO: Locking
+	elogfilename = os.path.join(elogdir, "summary.log")
+	elogfile = io.open(_unicode_encode(elogfilename,
+		encoding=_encodings['fs'], errors='strict'),
+		mode='a', encoding=_encodings['content'], errors='backslashreplace')
+
+	# Copy group permission bits from parent directory.
+	elogdir_st = os.stat(elogdir)
+	elogdir_gid = elogdir_st.st_gid
+	elogdir_grp_mode = 0o060 & elogdir_st.st_mode
+	apply_permissions(elogfilename, gid=elogdir_gid,
+		mode=elogdir_grp_mode, mask=0)
+
+	time_str = time.strftime("%Y-%m-%d %H:%M:%S %Z",
+		time.localtime(time.time()))
+	# Avoid potential UnicodeDecodeError later.
+	time_str = _unicode_decode(time_str,
+		encoding=_encodings['content'], errors='replace')
+	elogfile.write(_unicode_decode(
+		_(">>> Messages generated by process " +
+		"%(pid)d on %(time)s for package %(pkg)s:\n\n") %
+		{"pid": os.getpid(), "time": time_str, "pkg": key}))
+	elogfile.write(_unicode_decode(fulltext))
+	elogfile.write(_unicode_decode("\n"))
+	elogfile.close()
+
+	return elogfilename

diff --git a/portage_with_autodep/pym/portage/elog/mod_syslog.py b/portage_with_autodep/pym/portage/elog/mod_syslog.py
new file mode 100644
index 0000000..d71dab4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/elog/mod_syslog.py
@@ -0,0 +1,32 @@
+# elog/mod_syslog.py - elog dispatch module
+# Copyright 2006-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+import syslog
+from portage.const import EBUILD_PHASES
+from portage import _encodings
+
+_pri = {
+	"INFO"   : syslog.LOG_INFO, 
+	"WARN"   : syslog.LOG_WARNING, 
+	"ERROR"  : syslog.LOG_ERR, 
+	"LOG"    : syslog.LOG_NOTICE,
+	"QA"     : syslog.LOG_WARNING
+}
+
+def process(mysettings, key, logentries, fulltext):
+	syslog.openlog("portage", syslog.LOG_ERR | syslog.LOG_WARNING | syslog.LOG_INFO | syslog.LOG_NOTICE, syslog.LOG_LOCAL5)
+	for phase in EBUILD_PHASES:
+		if phase not in logentries:
+			continue
+		for msgtype,msgcontent in logentries[phase]:
+			msgtext = "".join(msgcontent)
+			for line in msgtext.splitlines():
+				line = "%s: %s: %s" % (key, phase, line)
+				if sys.hexversion < 0x3000000 and isinstance(msgtext, unicode):
+					# Avoid TypeError from syslog.syslog()
+					line = line.encode(_encodings['content'], 
+						'backslashreplace')
+				syslog.syslog(_pri[msgtype], line)
+	syslog.closelog()

diff --git a/portage_with_autodep/pym/portage/env/__init__.py b/portage_with_autodep/pym/portage/env/__init__.py
new file mode 100644
index 0000000..17b66d1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/env/__init__.py
@@ -0,0 +1,3 @@
+# Copyright: 2007 Gentoo Foundation
+# License: GPL2
+

diff --git a/portage_with_autodep/pym/portage/env/config.py b/portage_with_autodep/pym/portage/env/config.py
new file mode 100644
index 0000000..865d835
--- /dev/null
+++ b/portage_with_autodep/pym/portage/env/config.py
@@ -0,0 +1,105 @@
+# config.py -- Portage Config
+# Copyright 2007-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["ConfigLoaderKlass", "GenericFile", "PackageKeywordsFile",
+	"PackageUseFile", "PackageMaskFile", "PortageModulesFile"]
+
+from portage.cache.mappings import UserDict
+from portage.env.loaders import KeyListFileLoader, KeyValuePairFileLoader, ItemFileLoader
+
+class ConfigLoaderKlass(UserDict):
+	"""
+	A base class stub for things to inherit from.
+	Users may want a non-file backend.
+	"""
+	
+	def __init__(self, loader):
+		"""
+		@param loader: A class that has a load() that returns two dicts
+			the first being a data dict, the second being a dict of errors.
+		"""
+		UserDict.__init__(self)
+		self._loader = loader
+
+	def load(self):
+		"""
+		Load the data from the loader.
+
+		@throws LoaderError:
+		"""
+
+		self.data, self.errors = self._loader.load()
+
+class GenericFile(UserDict):
+	"""
+	Backed directly by UserDict; attempts to use all known loaders
+	until it gets <something> in data.  This is probably really slow but is
+	helpful when you really have no idea what you are loading (hint hint the file
+	should perhaps declare what type it is? ;)
+	"""
+	
+	loaders = [KeyListFileLoader, KeyValuePairFileLoader, ItemFileLoader]
+	
+	def __init__(self, filename):
+		UserDict.__init__(self)
+		self.filename = filename
+	
+	def load(self):
+		for loader in self.loaders:
+			l = loader(self.filename, None)
+			data, errors = l.load()
+			if len(data) and not len(errors):
+				(self.data, self.errors) = (data, errors)
+				return
+
+
+class PackageKeywordsFile(ConfigLoaderKlass):
+	"""
+	Inherits from ConfigLoaderKlass; implements a file-based backend.
+	"""
+
+	default_loader = KeyListFileLoader
+
+	def __init__(self, filename):
+		super(PackageKeywordsFile, self).__init__(
+			self.default_loader(filename, validator=None))
+	
+class PackageUseFile(ConfigLoaderKlass):
+	"""
+	Inherits from ConfigLoaderKlass; implements a file-based backend.  Doesn't handle recursion yet.
+	"""
+
+	default_loader = KeyListFileLoader
+	def __init__(self, filename):
+		super(PackageUseFile, self).__init__(
+			self.default_loader(filename, validator=None))
+	
+class PackageMaskFile(ConfigLoaderKlass):
+	"""
+	A class that implements a file-based package.mask
+	
+	Entries in package.mask are of the form:
+	atom1
+	atom2
+	or optionally
+	-atom3
+	to revert a previous mask; this only works when masking files are stacked
+	"""
+	
+	default_loader = ItemFileLoader
+
+	def __init__(self, filename):
+		super(PackageMaskFile, self).__init__(
+			self.default_loader(filename, validator=None))
+
+class PortageModulesFile(ConfigLoaderKlass):
+	"""
+	File Class for /etc/portage/modules
+	"""
+	
+	default_loader = KeyValuePairFileLoader
+	
+	def __init__(self, filename):
+		super(PortageModulesFile, self).__init__(
+			 self.default_loader(filename, validator=None))

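A minimal sketch of the file-backed wrappers in use, loading a
package.mask-style file (the path is the usual location but still an
assumption here):

    from portage.env.config import PackageMaskFile

    masks = PackageMaskFile("/etc/portage/package.mask")
    masks.load()              # fills masks.data and masks.errors
    for atom in masks.data:   # ItemFileLoader stores one atom per key
        print(atom)
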
diff --git a/portage_with_autodep/pym/portage/env/loaders.py b/portage_with_autodep/pym/portage/env/loaders.py
new file mode 100644
index 0000000..b540fbb
--- /dev/null
+++ b/portage_with_autodep/pym/portage/env/loaders.py
@@ -0,0 +1,319 @@
+# loaders.py -- Portage config file loaders
+# Copyright 2007-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import stat
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.localization import _
+
+class LoaderError(Exception):
+	
+	def __init__(self, resource, error_msg):
+		"""
+		@param resource: Resource that failed to load (file/sql/etc)
+		@type resource: String
+		@param error_msg: Error from underlying Loader system
+		@type error_msg: String
+		"""
+
+		self.resource = resource
+		self.error_msg = error_msg
+	
+	def __str__(self):
+		return "Failed while loading resource: %s, error was: %s" % (
+			self.resource, self.error_msg)
+
+
+def RecursiveFileLoader(filename):
+	"""
+	If filename is a file, return a generator that yields filename;
+	if filename is a directory, return a generator that yields the
+	files in that directory.
+
+	Ignore files beginning with . or ending in ~.
+	Prune CVS directories.
+
+	@param filename: name of a file/directory to traverse
+	@rtype: generator
+	@returns: the files to process
+	"""
+
+	try:
+		st = os.stat(filename)
+	except OSError:
+		return
+	if stat.S_ISDIR(st.st_mode):
+		for root, dirs, files in os.walk(filename):
+			for d in list(dirs):
+				if d[:1] == '.' or d == 'CVS':
+					dirs.remove(d)
+			for f in files:
+				try:
+					f = _unicode_decode(f,
+						encoding=_encodings['fs'], errors='strict')
+				except UnicodeDecodeError:
+					continue
+				if f[:1] == '.' or f[-1:] == '~':
+					continue
+				yield os.path.join(root, f)
+	else:
+		yield filename
+
+
+class DataLoader(object):
+
+	def __init__(self, validator):
+		f = validator
+		if f is None:
+			# if they pass in no validator, just make a fake one
+			# that always returns true
+			def validate(key):
+				return True
+			f = validate
+		self._validate = f
+
+	def load(self):
+		"""
+		Function to do the actual work of a Loader
+		"""
+		raise NotImplementedError("Please override in a subclass")
+
+class EnvLoader(DataLoader):
+	""" Class to access data in the environment """
+	def __init__(self, validator):
+		DataLoader.__init__(self, validator)
+
+	def load(self):
+		return os.environ
+
+class TestTextLoader(DataLoader):
+	""" You give it some data, it 'loads' it for you, no filesystem access
+	"""
+	def __init__(self, validator):
+		DataLoader.__init__(self, validator)
+		self.data = {}
+		self.errors = {}
+
+	def setData(self, text):
+		"""Explicitly set the data field
+		Args:
+			text - a dict of data typical of Loaders
+		Returns:
+			None
+		"""
+		if isinstance(text, dict):
+			self.data = text
+		else:
+			raise ValueError("setData requires a dict argument")
+
+	def setErrors(self, errors):
+		self.errors = errors
+	
+	def load(self):
+		return (self.data, self.errors)
+
+
+class FileLoader(DataLoader):
+	""" Class to access data in files """
+
+	def __init__(self, filename, validator):
+		"""
+			Args:
+				filename : Name of file or directory to open
+				validator : class with validate() method to validate data.
+		"""
+		DataLoader.__init__(self, validator)
+		self.fname = filename
+
+	def load(self):
+		"""
+		Return the {key: value} pairs parsed from self.fname (from all
+		files under it when self.fname is a directory), together with
+		a {filename: [list of errors]} dict of parse errors.
+
+		@rtype: tuple
+		@returns:
+		Returns (data, errors); both may be empty dicts or populated.
+		"""
+		data = {}
+		errors = {}
+		# I tried to save a nasty lookup on lineparser by doing the lookup
+		# once, which may be expensive due to digging in child classes.
+		func = self.lineParser
+		for fn in RecursiveFileLoader(self.fname):
+			try:
+				f = io.open(_unicode_encode(fn,
+					encoding=_encodings['fs'], errors='strict'), mode='r',
+					encoding=_encodings['content'], errors='replace')
+			except EnvironmentError as e:
+				if e.errno not in (errno.ENOENT, errno.ESTALE):
+					raise
+				del e
+				continue
+			for line_num, line in enumerate(f):
+				func(line, line_num, data, errors)
+			f.close()
+		return (data, errors)
+
+	def lineParser(self, line, line_num, data, errors):
+		""" This function parses 1 line at a time
+			Args:
+				line: a string representing 1 line of a file
+				line_num: an integer representing what line we are processing
+				data: a dict that contains the data we have extracted from the file
+				      already
+				errors: a dict representing parse errors.
+			Returns:
+				Nothing (None).  Writes to data and errors
+		"""
+		raise NotImplementedError("Please over-ride this in a child class")
+
+class ItemFileLoader(FileLoader):
+	"""
+	Class to load data from a file full of items one per line
+	
+	>>> item1
+	>>> item2
+	>>> item3
+	>>> item1
+	
+	becomes { 'item1':None, 'item2':None, 'item3':None }
+	Note that due to the data store being a dict, duplicates
+	are removed.
+	"""
+
+	def __init__(self, filename, validator):
+		FileLoader.__init__(self, filename, validator)
+	
+	def lineParser(self, line, line_num, data, errors):
+		line = line.strip()
+		if line.startswith('#'): # Skip commented lines
+			return
+		if not len(line): # skip empty lines
+			return
+		split = line.split()
+		if not len(split):
+			errors.setdefault(self.fname, []).append(
+				_("Malformed data at line: %s, data: %s")
+				% (line_num + 1, line))
+			return
+		key = split[0]
+		if not self._validate(key):
+			errors.setdefault(self.fname, []).append(
+				_("Validation failed at line: %s, data %s")
+				% (line_num + 1, key))
+			return
+		data[key] = None
+
+class KeyListFileLoader(FileLoader):
+	"""
+	Class to load data from a file full of key [list] tuples
+	
+	>>>>key foo1 foo2 foo3
+	becomes
+	{'key':['foo1','foo2','foo3']}
+	"""
+
+	def __init__(self, filename, validator=None, valuevalidator=None):
+		FileLoader.__init__(self, filename, validator)
+
+		f = valuevalidator
+		if f is None:
+			# if they pass in no validator, just make a fake one
+			# that always returns true
+			def validate(key):
+				return True
+			f = validate
+		self._valueValidate = f
+
+	def lineParser(self, line, line_num, data, errors):
+		line = line.strip()
+		if line.startswith('#'): # Skip commented lines
+			return
+		if not len(line): # skip empty lines
+			return
+		split = line.split()
+		if len(split) < 1:
+			errors.setdefault(self.fname, []).append(
+				_("Malformed data at line: %s, data: %s")
+				% (line_num + 1, line))
+			return
+		key = split[0]
+		value = split[1:]
+		if not self._validate(key):
+			errors.setdefault(self.fname, []).append(
+				_("Key validation failed at line: %s, data %s")
+				% (line_num + 1, key))
+			return
+		if not self._valueValidate(value):
+			errors.setdefault(self.fname, []).append(
+				_("Value validation failed at line: %s, data %s")
+				% (line_num + 1, value))
+			return
+		if key in data:
+			# Extend rather than append so the stored value stays a
+			# flat list when a key appears on more than one line.
+			data[key].extend(value)
+		else:
+			data[key] = value
+
+
+class KeyValuePairFileLoader(FileLoader):
+	"""
+	Class to load data from a file full of key=value pairs
+	
+	>>>>key=value
+	>>>>foo=bar
+	becomes:
+	{'key':'value',
+	 'foo':'bar'}
+	"""
+
+	def __init__(self, filename, validator, valuevalidator=None):
+		FileLoader.__init__(self, filename, validator)
+
+		f = valuevalidator
+		if f is None:
+			# if they pass in no validator, just make a fake one
+			# that always returns true
+			def validate(key):
+				return True
+			f = validate
+		self._valueValidate = f
+
+
+	def lineParser(self, line, line_num, data, errors):
+		line = line.strip()
+		if line.startswith('#'): # skip commented lines
+			return
+		if not len(line): # skip empty lines
+			return
+		split = line.split('=', 1)
+		if len(split) < 2:
+			errors.setdefault(self.fname, []).append(
+				_("Malformed data at line: %s, data %s")
+				% (line_num + 1, line))
+			return
+		key = split[0].strip()
+		value = split[1].strip()
+		if not key:
+			errors.setdefault(self.fname, []).append(
+				_("Malformed key at line: %s, key %s")
+				% (line_num + 1, key))
+			return
+		if not self._validate(key):
+			errors.setdefault(self.fname, []).append(
+				_("Key validation failed at line: %s, data %s")
+				% (line_num + 1, key))
+			return
+		if not self._valueValidate(value):
+			errors.setdefault(self.fname, []).append(
+				_("Value validation failed at line: %s, data %s")
+				% (line_num + 1, value))
+			return
+		data[key] = value

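The loaders collect parse problems per file instead of raising. A
self-contained sketch with a throwaway file, assuming this commit's
pym directory is on sys.path:

    import os, tempfile
    from portage.env.loaders import KeyValuePairFileLoader

    with tempfile.NamedTemporaryFile("w", delete=False) as f:
        f.write("archive-dir=/etc/config-archive\nbroken line\n")
        name = f.name

    data, errors = KeyValuePairFileLoader(name, None).load()
    print(data)    # {'archive-dir': '/etc/config-archive'}
    print(errors)  # {name: ['Malformed data at line: 2, data broken line']}
    os.unlink(name)
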
diff --git a/portage_with_autodep/pym/portage/env/validators.py b/portage_with_autodep/pym/portage/env/validators.py
new file mode 100644
index 0000000..4d11d69
--- /dev/null
+++ b/portage_with_autodep/pym/portage/env/validators.py
@@ -0,0 +1,20 @@
+# validators.py Portage File Loader Code
+# Copyright 2007 Gentoo Foundation
+
+from portage.dep import isvalidatom
+
+ValidAtomValidator = isvalidatom
+
+def PackagesFileValidator(atom):
+	""" This function strips a leading "-" or "*" from an atom,
+	    then checks whether the remainder is a valid atom; if
+	    so it returns True, else it returns False.
+
+	    Args:
+		atom: a string representing an atom such as sys-apps/portage-2.1
+	"""
+	if atom.startswith("*") or atom.startswith("-"):
+		atom = atom[1:]
+	if not isvalidatom(atom):
+		return False
+	return True

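PackagesFileValidator() accepts the "*" and "-" markers used in a
profile's packages file in front of otherwise ordinary atoms:

    from portage.env.validators import PackagesFileValidator

    print(PackagesFileValidator("*>=sys-apps/portage-2.1"))  # True
    print(PackagesFileValidator("not an atom"))              # False
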
diff --git a/portage_with_autodep/pym/portage/exception.py b/portage_with_autodep/pym/portage/exception.py
new file mode 100644
index 0000000..7891120
--- /dev/null
+++ b/portage_with_autodep/pym/portage/exception.py
@@ -0,0 +1,186 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import signal
+import sys
+from portage import _encodings, _unicode_encode, _unicode_decode
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+class PortageException(Exception):
+	"""General superclass for portage exceptions"""
+	def __init__(self,value):
+		self.value = value[:]
+		if isinstance(self.value, basestring):
+			self.value = _unicode_decode(self.value,
+				encoding=_encodings['content'], errors='replace')
+
+	def __str__(self):
+		if isinstance(self.value, basestring):
+			return self.value
+		else:
+			return _unicode_decode(repr(self.value),
+				encoding=_encodings['content'], errors='replace')
+
+	if sys.hexversion < 0x3000000:
+
+		__unicode__ = __str__
+
+		def __str__(self):
+			return _unicode_encode(self.__unicode__(),
+				encoding=_encodings['content'], errors='backslashreplace')
+
+class CorruptionError(PortageException):
+	"""Corruption indication"""
+
+class InvalidDependString(PortageException):
+	"""An invalid depend string has been encountered"""
+	def __init__(self, value, errors=None):
+		PortageException.__init__(self, value)
+		self.errors = errors
+
+class InvalidVersionString(PortageException):
+	"""An invalid version string has been encountered"""
+
+class SecurityViolation(PortageException):
+	"""A security violation was detected"""
+
+class IncorrectParameter(PortageException):
+	"""A parameter of the wrong type was passed"""
+
+class MissingParameter(PortageException):
+	"""A parameter is required for the action requested but was not passed"""
+
+class ParseError(PortageException):
+	"""An error was generated while attempting to parse the request"""
+
+class InvalidData(PortageException):
+	"""An incorrect formatting was passed instead of the expected one"""
+	def __init__(self, value, category=None):
+		PortageException.__init__(self, value)
+		self.category = category
+
+class InvalidDataType(PortageException):
+	"""An incorrect type was passed instead of the expected one"""
+
+class InvalidLocation(PortageException):
+	"""Data was not found when it was expected to exist or was specified incorrectly"""
+
+class FileNotFound(InvalidLocation):
+	"""A file was not found when it was expected to exist"""
+
+class DirectoryNotFound(InvalidLocation):
+	"""A directory was not found when it was expected to exist"""
+
+class OperationNotPermitted(PortageException):
+	"""An operation was not permitted by the operating system"""
+	from errno import EPERM as errno
+
+class PermissionDenied(PortageException):
+	"""Permission denied"""
+	from errno import EACCES as errno
+
+class TryAgain(PortageException):
+	"""Try again"""
+	from errno import EAGAIN as errno
+
+class TimeoutException(PortageException):
+	"""Operation timed out"""
+	# NOTE: ETIME is undefined on FreeBSD (bug #336875)
+	#from errno import ETIME as errno
+
+class AlarmSignal(TimeoutException):
+	def __init__(self, value, signum=None, frame=None):
+		TimeoutException.__init__(self, value)
+		self.signum = signum
+		self.frame = frame
+
+	@classmethod
+	def register(cls, time):
+		signal.signal(signal.SIGALRM, cls._signal_handler)
+		signal.alarm(time)
+
+	@classmethod
+	def unregister(cls):
+		signal.alarm(0)
+		signal.signal(signal.SIGALRM, signal.SIG_DFL)
+
+	@classmethod
+	def _signal_handler(cls, signum, frame):
+		signal.signal(signal.SIGALRM, signal.SIG_DFL)
+		raise AlarmSignal("alarm signal",
+			signum=signum, frame=frame)
+
+class ReadOnlyFileSystem(PortageException):
+	"""Read-only file system"""
+
+class CommandNotFound(PortageException):
+	"""A required binary was not available or executable"""
+
+class AmbiguousPackageName(ValueError, PortageException):
+	"""Raised by portage.cpv_expand() when the package name is ambiguous due
+	to the existence of multiple matches in different categories. This inherits
+	from ValueError, for backward compatibility with calling code that already
+	handles ValueError."""
+	def __str__(self):
+		return ValueError.__str__(self)
+
+class PortagePackageException(PortageException):
+	"""Malformed or missing package data"""
+
+class PackageNotFound(PortagePackageException):
+	"""Missing Ebuild or Binary"""
+
+class PackageSetNotFound(PortagePackageException):
+	"""Missing package set"""
+
+class InvalidPackageName(PortagePackageException):
+	"""Malformed package name"""
+
+class InvalidAtom(PortagePackageException):
+	"""Malformed atom spec"""
+	def __init__(self, value, category=None):
+		PortagePackageException.__init__(self, value)
+		self.category = category
+
+class UnsupportedAPIException(PortagePackageException):
+	"""Unsupported API"""
+	def __init__(self, cpv, eapi):
+		self.cpv, self.eapi = cpv, eapi
+	def __str__(self):
+		eapi = self.eapi
+		if not isinstance(eapi, basestring):
+			eapi = str(eapi)
+		eapi = eapi.lstrip("-")
+		msg = _("Unable to do any operations on '%(cpv)s', since "
+		"its EAPI is higher than this portage version's. Please upgrade"
+		" to a portage version that supports EAPI '%(eapi)s'.") % \
+		{"cpv": self.cpv, "eapi": eapi}
+		return _unicode_decode(msg,
+			encoding=_encodings['content'], errors='replace')
+
+	if sys.hexversion < 0x3000000:
+
+		__unicode__ = __str__
+
+		def __str__(self):
+			return _unicode_encode(self.__unicode__(),
+				encoding=_encodings['content'], errors='backslashreplace')
+
+class SignatureException(PortageException):
+	"""Signature was not present in the checked file"""
+
+class DigestException(SignatureException):
+	"""A problem exists in the digest"""
+
+class MissingSignature(SignatureException):
+	"""Signature was not present in the checked file"""
+
+class InvalidSignature(SignatureException):
+	"""Signature was checked and was not a valid, current, nor trusted signature"""
+
+class UntrustedSignature(SignatureException):
+	"""Signature was not certified to the desired security level"""
+

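The AlarmSignal helper above wraps signal.alarm() in a register/unregister
pair, which gives callers a simple timeout idiom. A minimal sketch, assuming
it runs in the main thread of a Unix process (slow_operation is a
hypothetical placeholder):

    from portage.exception import AlarmSignal

    try:
        AlarmSignal.register(30)  # deliver SIGALRM, raising AlarmSignal, in 30s
        try:
            slow_operation()      # hypothetical long-running call
        finally:
            AlarmSignal.unregister()
    except AlarmSignal:
        print("operation timed out")
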
diff --git a/portage_with_autodep/pym/portage/getbinpkg.py b/portage_with_autodep/pym/portage/getbinpkg.py
new file mode 100644
index 0000000..a511f51
--- /dev/null
+++ b/portage_with_autodep/pym/portage/getbinpkg.py
@@ -0,0 +1,861 @@
+# getbinpkg.py -- Portage binary-package helper functions
+# Copyright 2003-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.output import colorize
+from portage.cache.mappings import slot_dict_class
+from portage.localization import _
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+
+import sys
+import socket
+import time
+import tempfile
+import base64
+
+_all_errors = [NotImplementedError, ValueError, socket.error]
+
+try:
+	from html.parser import HTMLParser as html_parser_HTMLParser
+except ImportError:
+	from HTMLParser import HTMLParser as html_parser_HTMLParser
+
+try:
+	from urllib.parse import unquote as urllib_parse_unquote
+except ImportError:
+	from urllib2 import unquote as urllib_parse_unquote
+
+try:
+	import cPickle as pickle
+except ImportError:
+	import pickle
+
+try:
+	import ftplib
+except ImportError as e:
+	sys.stderr.write(colorize("BAD","!!! CANNOT IMPORT FTPLIB: ")+str(e)+"\n")
+else:
+	_all_errors.extend(ftplib.all_errors)
+
+try:
+	try:
+		from http.client import HTTPConnection as http_client_HTTPConnection
+		from http.client import BadStatusLine as http_client_BadStatusLine
+		from http.client import ResponseNotReady as http_client_ResponseNotReady
+		from http.client import error as http_client_error
+	except ImportError:
+		from httplib import HTTPConnection as http_client_HTTPConnection
+		from httplib import BadStatusLine as http_client_BadStatusLine
+		from httplib import ResponseNotReady as http_client_ResponseNotReady
+		from httplib import error as http_client_error
+except ImportError as e:
+	sys.stderr.write(colorize("BAD","!!! CANNOT IMPORT HTTP.CLIENT: ")+str(e)+"\n")
+else:
+	_all_errors.append(http_client_error)
+
+_all_errors = tuple(_all_errors)
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+def make_metadata_dict(data):
+	myid,myglob = data
+	
+	mydict = {}
+	for x in portage.xpak.getindex_mem(myid):
+		mydict[x] = portage.xpak.getitem(data,x)
+
+	return mydict
+
+class ParseLinks(html_parser_HTMLParser):
+	"""Parser class that overrides HTMLParser to grab all anchors from an html
+	page and provide suffix and prefix limiters"""
+	def __init__(self):
+		self.PL_anchors = []
+		html_parser_HTMLParser.__init__(self)
+
+	def get_anchors(self):
+		return self.PL_anchors
+		
+	def get_anchors_by_prefix(self,prefix):
+		newlist = []
+		for x in self.PL_anchors:
+			if x.startswith(prefix):
+				if x not in newlist:
+					newlist.append(x[:])
+		return newlist
+		
+	def get_anchors_by_suffix(self,suffix):
+		newlist = []
+		for x in self.PL_anchors:
+			if x.endswith(suffix):
+				if x not in newlist:
+					newlist.append(x[:])
+		return newlist
+		
+	def handle_endtag(self,tag):
+		pass
+
+	def handle_starttag(self,tag,attrs):
+		if tag == "a":
+			for x in attrs:
+				if x[0] == 'href':
+					if x[1] not in self.PL_anchors:
+						self.PL_anchors.append(urllib_parse_unquote(x[1]))
+
+
+def create_conn(baseurl,conn=None):
+	"""(baseurl,conn) --- Takes a protocol://site:port/address url and an
+	optional connection. If a connection is already active, it is reused.
+	Returns the tuple (conn,protocol,address,http_params,http_headers)."""
+
+	parts = baseurl.split("://",1)
+	if len(parts) != 2:
+		raise ValueError(_("Provided URI does not "
+			"contain protocol identifier. '%s'") % baseurl)
+	protocol,url_parts = parts
+	del parts
+
+	url_parts = url_parts.split("/")
+	host = url_parts[0]
+	if len(url_parts) < 2:
+		address = "/"
+	else:
+		address = "/"+"/".join(url_parts[1:])
+	del url_parts
+
+	userpass_host = host.split("@",1)
+	if len(userpass_host) == 1:
+		host = userpass_host[0]
+		userpass = ["anonymous"]
+	else:
+		host = userpass_host[1]
+		userpass = userpass_host[0].split(":")
+	del userpass_host
+
+	if len(userpass) > 2:
+		raise ValueError(_("Unable to interpret username/password provided."))
+	elif len(userpass) == 2:
+		username = userpass[0]
+		password = userpass[1]
+	elif len(userpass) == 1:
+		username = userpass[0]
+		password = None
+	del userpass
+
+	http_headers = {}
+	http_params = {}
+	if username and password:
+		http_headers = {
+			"Authorization": "Basic %s" %
+			  base64.encodestring("%s:%s" % (username, password)).replace(
+			    "\012",
+			    ""
+			  ),
+		}
+
+	if not conn:
+		if protocol == "https":
+			# Use local import since https typically isn't needed, and
+			# this way we can usually avoid triggering the global scope
+			# http.client ImportError handler (like during stage1 -> stage2
+			# builds where USE=ssl is disabled for python).
+			try:
+				try:
+					from http.client import HTTPSConnection as http_client_HTTPSConnection
+				except ImportError:
+					from httplib import HTTPSConnection as http_client_HTTPSConnection
+			except ImportError:
+				raise NotImplementedError(
+					_("python must have ssl enabled for https support"))
+			conn = http_client_HTTPSConnection(host)
+		elif protocol == "http":
+			conn = http_client_HTTPConnection(host)
+		elif protocol == "ftp":
+			passive = 1
+			if(host[-1] == "*"):
+				passive = 0
+				host = host[:-1]
+			conn = ftplib.FTP(host)
+			if password:
+				conn.login(username,password)
+			else:
+				sys.stderr.write(colorize("WARN",
+					_(" * No password provided for username"))+" '%s'" % \
+					(username,) + "\n\n")
+				conn.login(username)
+			conn.set_pasv(passive)
+			conn.set_debuglevel(0)
+		elif protocol == "sftp":
+			try:
+				import paramiko
+			except ImportError:
+				raise NotImplementedError(
+					_("paramiko must be installed for sftp support"))
+			t = paramiko.Transport(host)
+			t.connect(username=username, password=password)
+			conn = paramiko.SFTPClient.from_transport(t)
+		else:
+			raise NotImplementedError(_("%s is not a supported protocol.") % protocol)
+
+	return (conn,protocol,address, http_params, http_headers)
+
+def make_ftp_request(conn, address, rest=None, dest=None):
+	"""(conn,address,rest) --- uses the conn object to request the data
+	from address, issuing a REST command if rest is passed."""
+	try:
+	
+		if dest:
+			fstart_pos = dest.tell()
+	
+		conn.voidcmd("TYPE I")
+		fsize = conn.size(address)
+
+		if (rest != None) and (rest < 0):
+			rest = fsize+int(rest)
+		if rest < 0:
+			rest = 0
+
+		if rest != None:
+			mysocket = conn.transfercmd("RETR "+str(address), rest)
+		else:
+			mysocket = conn.transfercmd("RETR "+str(address))
+
+		mydata = ""
+		while 1:
+			somedata = mysocket.recv(8192)
+			if somedata:
+				if dest:
+					dest.write(somedata)
+				else:
+					mydata = mydata + somedata
+			else:
+				break
+
+		if dest:
+			data_size = dest.tell() - fstart_pos
+		else:
+			data_size = len(mydata)
+
+		mysocket.close()
+		conn.voidresp()
+		conn.voidcmd("TYPE A")
+
+		return mydata,not (fsize==data_size),""
+
+	except ValueError as e:
+		return None,int(str(e)[:4]),str(e)
+	
+
+def make_http_request(conn, address, params={}, headers={}, dest=None):
+	"""(conn,address,params,headers) --- uses the conn object to request
+	the data from address, performing Location forwarding and using the
+	optional params and headers."""
+
+	rc = 0
+	response = None
+	while (rc == 0) or (rc == 301) or (rc == 302):
+		try:
+			if (rc != 0):
+				conn,ignore,ignore,ignore,ignore = create_conn(address)
+			conn.request("GET", address, body=None, headers=headers)
+		except SystemExit as e:
+			raise
+		except Exception as e:
+			return None,None,"Server request failed: "+str(e)
+		response = conn.getresponse()
+		rc = response.status
+
+		# 301 (Moved Permanently) or 302 (Found) means the address has changed.
+		if ((rc == 301) or (rc == 302)):
+			ignored_data = response.read()
+			del ignored_data
+			for x in str(response.msg).split("\n"):
+				parts = x.split(": ",1)
+				if parts[0] == "Location":
+					if (rc == 301):
+						sys.stderr.write(colorize("BAD",
+							_("Location has moved: ")) + str(parts[1]) + "\n")
+					if (rc == 302):
+						sys.stderr.write(colorize("BAD",
+							_("Location has temporarily moved: ")) + \
+							str(parts[1]) + "\n")
+					address = parts[1]
+					break
+	
+	if (rc != 200) and (rc != 206):
+		return None,rc,"Server did not respond successfully ("+str(response.status)+": "+str(response.reason)+")"
+
+	if dest:
+		dest.write(response.read())
+		return "",0,""
+
+	return response.read(),0,""
+
+
+def match_in_array(array, prefix="", suffix="", match_both=1, allow_overlap=0):
+	myarray = []
+	
+	if not (prefix and suffix):
+		match_both = 0
+		
+	for x in array:
+		add_p = 0
+		if prefix and (len(x) >= len(prefix)) and (x[:len(prefix)] == prefix):
+			add_p = 1
+
+		if match_both:
+			if prefix and not add_p: # Require both, but don't have first one.
+				continue
+		else:
+			if add_p:     # Only need one, and we have it.
+				myarray.append(x[:])
+				continue
+
+		if not allow_overlap: # Don't allow prefix and suffix to overlap.
+			if len(x) >= (len(prefix)+len(suffix)):
+				pass
+			else:
+				continue          # Too short to match.
+		else:
+			pass                      # Do whatever... We're overlapping.
+		
+		if suffix and (len(x) >= len(suffix)) and (x[-len(suffix):] == suffix):
+			myarray.append(x)   # It matches
+		else:
+			continue            # Doesn't match.
+
+	return myarray
+			
+
+
+def dir_get_list(baseurl,conn=None):
+	"""(baseurl[,connection]) -- Takes a base url to connect to and read from.
+	URI should be in the form <proto>://<site>[:port]<path>
+	Connection is used for persistent connection instances."""
+
+	if not conn:
+		keepconnection = 0
+	else:
+		keepconnection = 1
+
+	conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+	listing = None
+	if protocol in ["http","https"]:
+		if not address.endswith("/"):
+			# http servers can return a 400 error here
+			# if the address doesn't end with a slash.
+			address += "/"
+		page,rc,msg = make_http_request(conn,address,params,headers)
+		
+		if page:
+			parser = ParseLinks()
+			parser.feed(page)
+			del page
+			listing = parser.get_anchors()
+		else:
+			import portage.exception
+			raise portage.exception.PortageException(
+				_("Unable to get listing: %s %s") % (rc,msg))
+	elif protocol in ["ftp"]:
+		if address[-1] == '/':
+			olddir = conn.pwd()
+			conn.cwd(address)
+			listing = conn.nlst()
+			conn.cwd(olddir)
+			del olddir
+		else:
+			listing = conn.nlst(address)
+	elif protocol == "sftp":
+		listing = conn.listdir(address)
+	else:
+		raise TypeError(_("Unknown protocol. '%s'") % protocol)
+
+	if not keepconnection:
+		conn.close()
+
+	return listing
+
+def file_get_metadata(baseurl,conn=None, chunk_size=3000):
+	"""(baseurl[,connection]) -- Takes a base url to connect to and read from.
+	URI should be in the form <proto>://<site>[:port]<path>
+	Connection is used for persistent connection instances."""
+
+	if not conn:
+		keepconnection = 0
+	else:
+		keepconnection = 1
+
+	conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+	if protocol in ["http","https"]:
+		headers["Range"] = "bytes=-"+str(chunk_size)
+		data,rc,msg = make_http_request(conn, address, params, headers)
+	elif protocol in ["ftp"]:
+		data,rc,msg = make_ftp_request(conn, address, -chunk_size)
+	elif protocol == "sftp":
+		f = conn.open(address)
+		try:
+			f.seek(-chunk_size, 2)
+			data = f.read()
+		finally:
+			f.close()
+	else:
+		raise TypeError(_("Unknown protocol. '%s'") % protocol)
+	
+	if data:
+		xpaksize = portage.xpak.decodeint(data[-8:-4])
+		if (xpaksize+8) > chunk_size:
+			myid = file_get_metadata(baseurl, conn, (xpaksize+8))
+			if not keepconnection:
+				conn.close()
+			return myid
+		else:
+			xpak_data = data[len(data)-(xpaksize+8):-8]
+		del data
+
+		myid = portage.xpak.xsplit_mem(xpak_data)
+		if not myid:
+			myid = None,None
+		del xpak_data
+	else:
+		myid = None,None
+
+	if not keepconnection:
+		conn.close()
+
+	return myid
+
+
+def file_get(baseurl,dest,conn=None,fcmd=None,filename=None):
+	"""(baseurl,dest,fcmd=) -- Takes a base url to connect to and read from.
+	URI should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""
+
+	if not fcmd:
+		return file_get_lib(baseurl,dest,conn)
+	if not filename:
+		filename = os.path.basename(baseurl)
+
+	variables = {
+		"DISTDIR": dest,
+		"URI":     baseurl,
+		"FILE":    filename
+	}
+
+	from portage.util import varexpand
+	from portage.process import spawn
+	myfetch = portage.util.shlex_split(fcmd)
+	myfetch = [varexpand(x, mydict=variables) for x in myfetch]
+	fd_pipes= {
+		0:sys.stdin.fileno(),
+		1:sys.stdout.fileno(),
+		2:sys.stdout.fileno()
+	}
+	retval = spawn(myfetch, env=os.environ.copy(), fd_pipes=fd_pipes)
+	if retval != os.EX_OK:
+		sys.stderr.write(_("Fetcher exited with a failure condition.\n"))
+		return 0
+	return 1
+
+def file_get_lib(baseurl,dest,conn=None):
+	"""(baseurl[,connection]) -- Takes a base url to connect to and read from.
+	URI should be in the form <proto>://<site>[:port]<path>
+	Connection is used for persistent connection instances."""
+
+	if not conn:
+		keepconnection = 0
+	else:
+		keepconnection = 1
+
+	conn,protocol,address,params,headers = create_conn(baseurl, conn)
+
+	sys.stderr.write("Fetching '"+str(os.path.basename(address))+"'\n")
+	if protocol in ["http","https"]:
+		data,rc,msg = make_http_request(conn, address, params, headers, dest=dest)
+	elif protocol in ["ftp"]:
+		data,rc,msg = make_ftp_request(conn, address, dest=dest)
+	elif protocol == "sftp":
+		rc = 0
+		try:
+			f = conn.open(address)
+		except SystemExit:
+			raise
+		except Exception:
+			rc = 1
+		else:
+			try:
+				if dest:
+					bufsize = 8192
+					while True:
+						data = f.read(bufsize)
+						if not data:
+							break
+						dest.write(data)
+			finally:
+				f.close()
+	else:
+		raise TypeError(_("Unknown protocol. '%s'") % protocol)
+	
+	if not keepconnection:
+		conn.close()
+
+	return rc
+
+
+def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None):
+	"""(baseurl,conn,chunk_size,verbose) -- Fetches metadata for all binary
+	packages available at baseurl, using a local pickle file as a cache."""
+	if not conn:
+		keepconnection = 0
+	else:
+		keepconnection = 1
+
+	cache_path = "/var/cache/edb"
+	metadatafilename = os.path.join(cache_path, 'remote_metadata.pickle')
+
+	if makepickle is None:
+		makepickle = "/var/cache/edb/metadata.idx.most_recent"
+
+	try:
+		conn, protocol, address, params, headers = create_conn(baseurl, conn)
+	except _all_errors as e:
+		# ftplib.FTP(host) can raise errors like this:
+		#   socket.error: (111, 'Connection refused')
+		sys.stderr.write("!!! %s\n" % (e,))
+		return {}
+
+	out = sys.stdout
+	try:
+		metadatafile = open(_unicode_encode(metadatafilename,
+			encoding=_encodings['fs'], errors='strict'), 'rb')
+		mypickle = pickle.Unpickler(metadatafile)
+		try:
+			mypickle.find_global = None
+		except AttributeError:
+			# TODO: If py3k, override Unpickler.find_class().
+			pass
+		metadata = mypickle.load()
+		out.write(_("Loaded metadata pickle.\n"))
+		out.flush()
+		metadatafile.close()
+	except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError):
+		metadata = {}
+	if baseurl not in metadata:
+		metadata[baseurl]={}
+	if "indexname" not in metadata[baseurl]:
+		metadata[baseurl]["indexname"]=""
+	if "timestamp" not in metadata[baseurl]:
+		metadata[baseurl]["timestamp"]=0
+	if "unmodified" not in metadata[baseurl]:
+		metadata[baseurl]["unmodified"]=0
+	if "data" not in metadata[baseurl]:
+		metadata[baseurl]["data"]={}
+
+	if not os.access(cache_path, os.W_OK):
+		sys.stderr.write(_("!!! Unable to write binary metadata to disk!\n"))
+		sys.stderr.write(_("!!! Permission denied: '%s'\n") % cache_path)
+		return metadata[baseurl]["data"]
+
+	import portage.exception
+	try:
+		filelist = dir_get_list(baseurl, conn)
+	except portage.exception.PortageException as e:
+		sys.stderr.write(_("!!! Error connecting to '%s'.\n") % baseurl)
+		sys.stderr.write("!!! %s\n" % str(e))
+		del e
+		return metadata[baseurl]["data"]
+	tbz2list = match_in_array(filelist, suffix=".tbz2")
+	metalist = match_in_array(filelist, prefix="metadata.idx")
+	del filelist
+	
+	# Determine if our metadata file is current.
+	metalist.sort()
+	metalist.reverse() # makes the order new-to-old.
+	for mfile in metalist:
+		if usingcache and \
+		   ((metadata[baseurl]["indexname"] != mfile) or \
+			  (metadata[baseurl]["timestamp"] < int(time.time()-(60*60*24)))):
+			# Try to download new cache until we succeed on one.
+			data=""
+			for trynum in [1,2,3]:
+				mytempfile = tempfile.TemporaryFile()
+				try:
+					file_get(baseurl+"/"+mfile, mytempfile, conn)
+					if mytempfile.tell() > len(data):
+						mytempfile.seek(0)
+						data = mytempfile.read()
+				except ValueError as e:
+					sys.stderr.write("--- "+str(e)+"\n")
+					if trynum < 3:
+						sys.stderr.write(_("Retrying...\n"))
+					sys.stderr.flush()
+					mytempfile.close()
+					continue
+				if match_in_array([mfile],suffix=".gz"):
+					out.write("gzip'd\n")
+					out.flush()
+					try:
+						import gzip
+						mytempfile.seek(0)
+						gzindex = gzip.GzipFile(mfile[:-3],'rb',9,mytempfile)
+						data = gzindex.read()
+					except SystemExit as e:
+						raise
+					except Exception as e:
+						mytempfile.close()
+						sys.stderr.write(_("!!! Failed to use gzip: ")+str(e)+"\n")
+						sys.stderr.flush()
+					mytempfile.close()
+				try:
+					metadata[baseurl]["data"] = pickle.loads(data)
+					del data
+					metadata[baseurl]["indexname"] = mfile
+					metadata[baseurl]["timestamp"] = int(time.time())
+					metadata[baseurl]["modified"]  = 0 # It's not, right after download.
+					out.write(_("Pickle loaded.\n"))
+					out.flush()
+					break
+				except SystemExit as e:
+					raise
+				except Exception as e:
+					sys.stderr.write(_("!!! Failed to read data from index: ")+str(mfile)+"\n")
+					sys.stderr.write("!!! "+str(e)+"\n")
+					sys.stderr.flush()
+			try:
+				metadatafile = open(_unicode_encode(metadatafilename,
+					encoding=_encodings['fs'], errors='strict'), 'wb')
+				pickle.dump(metadata, metadatafile, protocol=2)
+				metadatafile.close()
+			except SystemExit as e:
+				raise
+			except Exception as e:
+				sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
+				sys.stderr.write("!!! "+str(e)+"\n")
+				sys.stderr.flush()
+			break
+	# We may have metadata... now we run through the tbz2 list and check.
+
+	class CacheStats(object):
+		from time import time
+		def __init__(self, out):
+			self.misses = 0
+			self.hits = 0
+			self.last_update = 0
+			self.out = out
+			self.min_display_latency = 0.2
+		def update(self):
+			cur_time = self.time()
+			if cur_time - self.last_update >= self.min_display_latency:
+				self.last_update = cur_time
+				self.display()
+		def display(self):
+			self.out.write("\r"+colorize("WARN",
+				_("cache miss: '")+str(self.misses)+"'") + \
+				" --- "+colorize("GOOD", _("cache hit: '")+str(self.hits)+"'"))
+			self.out.flush()
+
+	cache_stats = CacheStats(out)
+	have_tty = os.environ.get('TERM') != 'dumb' and out.isatty()
+	if have_tty:
+		cache_stats.display()
+	binpkg_filenames = set()
+	for x in tbz2list:
+		x = os.path.basename(x)
+		binpkg_filenames.add(x)
+		if x not in metadata[baseurl]["data"]:
+			cache_stats.misses += 1
+			if have_tty:
+				cache_stats.update()
+			metadata[baseurl]["modified"] = 1
+			myid = None
+			for retry in range(3):
+				try:
+					myid = file_get_metadata(
+						"/".join((baseurl.rstrip("/"), x.lstrip("/"))),
+						conn, chunk_size)
+					break
+				except http_client_BadStatusLine:
+					# Sometimes this error is thrown from conn.getresponse() in
+					# make_http_request().  The docstring for this error in
+					# httplib.py says "Presumably, the server closed the
+					# connection before sending a valid response".
+					conn, protocol, address, params, headers = create_conn(
+						baseurl)
+				except http_client_ResponseNotReady:
+					# With some http servers this error is known to be thrown
+					# from conn.getresponse() in make_http_request() when the
+					# remote file does not have appropriate read permissions.
+					# Maybe it's possible to recover from this exception in
+					# some cases though, so retry.
+					conn, protocol, address, params, headers = create_conn(
+						baseurl)
+
+			if myid and myid[0]:
+				metadata[baseurl]["data"][x] = make_metadata_dict(myid)
+			elif verbose:
+				sys.stderr.write(colorize("BAD",
+					_("!!! Failed to retrieve metadata on: "))+str(x)+"\n")
+				sys.stderr.flush()
+		else:
+			cache_stats.hits += 1
+			if have_tty:
+				cache_stats.update()
+	cache_stats.display()
+	# Cleanse stale cache for files that don't exist on the server anymore.
+	stale_cache = set(metadata[baseurl]["data"]).difference(binpkg_filenames)
+	if stale_cache:
+		for x in stale_cache:
+			del metadata[baseurl]["data"][x]
+		metadata[baseurl]["modified"] = 1
+	del stale_cache
+	del binpkg_filenames
+	out.write("\n")
+	out.flush()
+
+	try:
+		if "modified" in metadata[baseurl] and metadata[baseurl]["modified"]:
+			metadata[baseurl]["timestamp"] = int(time.time())
+			metadatafile = open(_unicode_encode(metadatafilename,
+				encoding=_encodings['fs'], errors='strict'), 'wb')
+			pickle.dump(metadata, metadatafile, protocol=2)
+			metadatafile.close()
+		if makepickle:
+			metadatafile = open(_unicode_encode(makepickle,
+				encoding=_encodings['fs'], errors='strict'), 'wb')
+			pickle.dump(metadata[baseurl]["data"], metadatafile, protocol=2)
+			metadatafile.close()
+	except SystemExit as e:
+		raise
+	except Exception as e:
+		sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
+		sys.stderr.write("!!! "+str(e)+"\n")
+		sys.stderr.flush()
+
+	if not keepconnection:
+		conn.close()
+	
+	return metadata[baseurl]["data"]
+
+def _cmp_cpv(d1, d2):
+	cpv1 = d1["CPV"]
+	cpv2 = d2["CPV"]
+	if cpv1 > cpv2:
+		return 1
+	elif cpv1 == cpv2:
+		return 0
+	else:
+		return -1
+
+class PackageIndex(object):
+
+	def __init__(self,
+		allowed_pkg_keys=None,
+		default_header_data=None,
+		default_pkg_data=None,
+		inherited_keys=None,
+		translated_keys=None):
+
+		self._pkg_slot_dict = None
+		if allowed_pkg_keys is not None:
+			self._pkg_slot_dict = slot_dict_class(allowed_pkg_keys)
+
+		self._default_header_data = default_header_data
+		self._default_pkg_data = default_pkg_data
+		self._inherited_keys = inherited_keys
+		self._write_translation_map = {}
+		self._read_translation_map = {}
+		if translated_keys:
+			self._write_translation_map.update(translated_keys)
+			self._read_translation_map.update(((y, x) for (x, y) in translated_keys))
+		self.header = {}
+		if self._default_header_data:
+			self.header.update(self._default_header_data)
+		self.packages = []
+		self.modified = True
+
+	def _readpkgindex(self, pkgfile, pkg_entry=True):
+
+		allowed_keys = None
+		if self._pkg_slot_dict is None or not pkg_entry:
+			d = {}
+		else:
+			d = self._pkg_slot_dict()
+			allowed_keys = d.allowed_keys
+
+		for line in pkgfile:
+			line = line.rstrip("\n")
+			if not line:
+				break
+			line = line.split(":", 1)
+			if not len(line) == 2:
+				continue
+			k, v = line
+			if v:
+				v = v[1:]
+			k = self._read_translation_map.get(k, k)
+			if allowed_keys is not None and \
+				k not in allowed_keys:
+				continue
+			d[k] = v
+		return d
+
+	def _writepkgindex(self, pkgfile, items):
+		for k, v in items:
+			pkgfile.write("%s: %s\n" % \
+				(self._write_translation_map.get(k, k), v))
+		pkgfile.write("\n")
+
+	def read(self, pkgfile):
+		self.readHeader(pkgfile)
+		self.readBody(pkgfile)
+
+	def readHeader(self, pkgfile):
+		self.header.update(self._readpkgindex(pkgfile, pkg_entry=False))
+
+	def readBody(self, pkgfile):
+		while True:
+			d = self._readpkgindex(pkgfile)
+			if not d:
+				break
+			mycpv = d.get("CPV")
+			if not mycpv:
+				continue
+			if self._default_pkg_data:
+				for k, v in self._default_pkg_data.items():
+					d.setdefault(k, v)
+			if self._inherited_keys:
+				for k in self._inherited_keys:
+					v = self.header.get(k)
+					if v is not None:
+						d.setdefault(k, v)
+			self.packages.append(d)
+
+	def write(self, pkgfile):
+		if self.modified:
+			self.header["TIMESTAMP"] = str(long(time.time()))
+			self.header["PACKAGES"] = str(len(self.packages))
+		keys = list(self.header)
+		keys.sort()
+		self._writepkgindex(pkgfile, [(k, self.header[k]) \
+			for k in keys if self.header[k]])
+		for metadata in sorted(self.packages,
+			key=portage.util.cmp_sort_key(_cmp_cpv)):
+			metadata = metadata.copy()
+			cpv = metadata["CPV"]
+			if self._inherited_keys:
+				for k in self._inherited_keys:
+					v = self.header.get(k)
+					if v is not None and v == metadata.get(k):
+						del metadata[k]
+			if self._default_pkg_data:
+				for k, v in self._default_pkg_data.items():
+					if metadata.get(k) == v:
+						metadata.pop(k, None)
+			keys = list(metadata)
+			keys.sort()
+			self._writepkgindex(pkgfile,
+				[(k, metadata[k]) for k in keys if metadata[k]])

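The PackageIndex class at the end of getbinpkg.py models the "Packages"
index file that binary package hosts serve. A minimal round-trip sketch in
Python 3 syntax, assuming an index file exists at the hypothetical path
/tmp/Packages:

    import io
    from portage.getbinpkg import PackageIndex

    pkgindex = PackageIndex()
    with io.open("/tmp/Packages", "r") as f:
        pkgindex.read(f)         # fills pkgindex.header and pkgindex.packages

    for metadata in pkgindex.packages:
        print(metadata.get("CPV"))

    out = io.StringIO()
    pkgindex.write(out)          # regenerates the index, entries sorted by CPV
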
diff --git a/portage_with_autodep/pym/portage/glsa.py b/portage_with_autodep/pym/portage/glsa.py
new file mode 100644
index 0000000..a784d14
--- /dev/null
+++ b/portage_with_autodep/pym/portage/glsa.py
@@ -0,0 +1,699 @@
+# Copyright 2003-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import absolute_import
+
+import io
+import sys
+try:
+	from urllib.request import urlopen as urllib_request_urlopen
+except ImportError:
+	from urllib import urlopen as urllib_request_urlopen
+import re
+import xml.dom.minidom
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.versions import pkgsplit, catpkgsplit, pkgcmp, best
+from portage.util import grabfile
+from portage.const import CACHE_PATH
+from portage.localization import _
+from portage.dep import _slot_separator
+
+# Note: the space for rgt and rlt is important !!
+# FIXME: use slot deps instead, requires GLSA format versioning
+opMapping = {"le": "<=", "lt": "<", "eq": "=", "gt": ">", "ge": ">=", 
+			 "rge": ">=~", "rle": "<=~", "rgt": " >~", "rlt": " <~"}
+NEWLINE_ESCAPE = "!;\\n"	# some random string to mark newlines that should be preserved
+SPACE_ESCAPE = "!;_"		# some random string to mark spaces that should be preserved
+
+def get_applied_glsas(settings):
+	"""
+	Return a list of applied or injected GLSA IDs
+	
+	@type	settings: portage.config
+	@param	settings: portage config instance
+	@rtype:		list
+	@return:	list of glsa IDs
+	"""
+	return grabfile(os.path.join(settings["EROOT"], CACHE_PATH, "glsa"))
+
+
+# TODO: use the textwrap module instead
+def wrap(text, width, caption=""):
+	"""
+	Wraps the given text at column I{width}, optionally indenting
+	it so that no text is under I{caption}. It's possible to encode 
+	hard linebreaks in I{text} with L{NEWLINE_ESCAPE}.
+	
+	@type	text: String
+	@param	text: the text to be wrapped
+	@type	width: Integer
+	@param	width: the column at which the text should be wrapped
+	@type	caption: String
+	@param	caption: this string is inserted at the beginning of the 
+					 return value and the paragraph is indented up to
+					 C{len(caption)}.
+	@rtype:		String
+	@return:	the wrapped and indented paragraph
+	"""
+	rValue = ""
+	line = caption
+	text = text.replace(2*NEWLINE_ESCAPE, NEWLINE_ESCAPE+" "+NEWLINE_ESCAPE)
+	words = text.split()
+	indentLevel = len(caption)+1
+	
+	for w in words:
+		if line != "" and line[-1] == "\n":
+			rValue += line
+			line = " "*indentLevel
+		if len(line)+len(w.replace(NEWLINE_ESCAPE, ""))+1 > width:
+			rValue += line+"\n"
+			line = " "*indentLevel+w.replace(NEWLINE_ESCAPE, "\n")
+		elif w.find(NEWLINE_ESCAPE) >= 0:
+			if len(line.strip()) > 0:
+				rValue += line+" "+w.replace(NEWLINE_ESCAPE, "\n")
+			else:
+				rValue += line+w.replace(NEWLINE_ESCAPE, "\n")
+			line = " "*indentLevel
+		else:
+			if len(line.strip()) > 0:
+				line += " "+w
+			else:
+				line += w
+	if len(line) > 0:
+		rValue += line.replace(NEWLINE_ESCAPE, "\n")
+	rValue = rValue.replace(SPACE_ESCAPE, " ")
+	return rValue
+
+def get_glsa_list(myconfig):
+	"""
+	Returns a list of all available GLSAs in the given repository
+	by comparing the filelist there with the pattern described in
+	the config.
+	
+	@type	myconfig: portage.config
+	@param	myconfig: Portage settings instance
+	
+	@rtype:		List of Strings
+	@return:	a list of GLSA IDs in this repository
+	"""
+	rValue = []
+
+	if "GLSA_DIR" in myconfig:
+		repository = myconfig["GLSA_DIR"]
+	else:
+		repository = os.path.join(myconfig["PORTDIR"], "metadata", "glsa")
+
+	if not os.access(repository, os.R_OK):
+		return []
+	dirlist = os.listdir(repository)
+	prefix = "glsa-"
+	suffix = ".xml"
+	
+	for f in dirlist:
+		try:
+			if f[:len(prefix)] == prefix:
+				rValue.append(f[len(prefix):-1*len(suffix)])
+		except IndexError:
+			pass
+	return rValue
+
+def getListElements(listnode):
+	"""
+	Get all <li> elements for a given <ol> or <ul> node.
+	
+	@type	listnode: xml.dom.Node
+	@param	listnode: <ul> or <ol> list to get the elements for
+	@rtype:		List of Strings
+	@return:	a list that contains the value of the <li> elements
+	"""
+	rValue = []
+	if not listnode.nodeName in ["ul", "ol"]:
+		raise GlsaFormatException("Invalid function call: listnode is not <ul> or <ol>")
+	for li in listnode.childNodes:
+		if li.nodeType != xml.dom.Node.ELEMENT_NODE:
+			continue
+		rValue.append(getText(li, format="strip"))
+	return rValue
+
+def getText(node, format):
+	"""
+	This is the main parser function. It takes a node and traverses
+	recursively over the subnodes, getting the text of each (and the
+	I{link} attribute for <uri> and <mail>). Depending on the I{format}
+	parameter the text might be formatted by adding/removing newlines,
+	tabs and spaces. This function is only useful for the GLSA DTD,
+	it's not applicable for other DTDs.
+	
+	@type	node: xml.dom.Node
+	@param	node: the root node to start with the parsing
+	@type	format: String
+	@param	format: this should be either I{strip}, I{keep} or I{xml}
+					I{keep} just gets the text and does no formatting.
+					I{strip} replaces newlines and tabs with spaces and
+					replaces multiple spaces with one space.
+					I{xml} does some more formatting, depending on the
+					type of the encountered nodes.
+	@rtype:		String
+	@return:	the (formatted) content of the node and its subnodes
+	"""
+	rValue = ""
+	if format in ["strip", "keep"]:
+		if node.nodeName in ["uri", "mail"]:
+			rValue += node.childNodes[0].data+": "+node.getAttribute("link")
+		else:
+			for subnode in node.childNodes:
+				if subnode.nodeName == "#text":
+					rValue += subnode.data
+				else:
+					rValue += getText(subnode, format)
+	else:
+		for subnode in node.childNodes:
+			if subnode.nodeName == "p":
+				for p_subnode in subnode.childNodes:
+					if p_subnode.nodeName == "#text":
+						rValue += p_subnode.data.strip()
+					elif p_subnode.nodeName in ["uri", "mail"]:
+						rValue += p_subnode.childNodes[0].data
+						rValue += " ( "+p_subnode.getAttribute("link")+" )"
+				rValue += NEWLINE_ESCAPE
+			elif subnode.nodeName == "ul":
+				for li in getListElements(subnode):
+					rValue += "-"+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" "
+			elif subnode.nodeName == "ol":
+				i = 0
+				for li in getListElements(subnode):
+					i = i+1
+					rValue += str(i)+"."+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" "
+			elif subnode.nodeName == "code":
+				rValue += getText(subnode, format="keep").replace("\n", NEWLINE_ESCAPE)
+				if rValue[-1*len(NEWLINE_ESCAPE):] != NEWLINE_ESCAPE:
+					rValue += NEWLINE_ESCAPE
+			elif subnode.nodeName == "#text":
+				rValue += subnode.data
+			else:
+				raise GlsaFormatException(_("Invalid Tag found: "), subnode.nodeName)
+	if format == "strip":
+		rValue = rValue.strip(" \n\t")
+		rValue = re.sub("[\s]{2,}", " ", rValue)
+	return rValue
+
+def getMultiTagsText(rootnode, tagname, format):
+	"""
+	Returns a list with the text of all subnodes of type I{tagname}
+	under I{rootnode} (which itself is not parsed) using the given I{format}.
+	
+	@type	rootnode: xml.dom.Node
+	@param	rootnode: the node to search for I{tagname}
+	@type	tagname: String
+	@param	tagname: the name of the tags to search for
+	@type	format: String
+	@param	format: see L{getText}
+	@rtype:		List of Strings
+	@return:	a list containing the text of all I{tagname} childnodes
+	"""
+	rValue = []
+	for e in rootnode.getElementsByTagName(tagname):
+		rValue.append(getText(e, format))
+	return rValue
+
+def makeAtom(pkgname, versionNode):
+	"""
+	Creates a (syntactically) valid portage atom from the given package
+	name and the information in the I{versionNode}.
+	
+	@type	pkgname: String
+	@param	pkgname: the name of the package for this atom
+	@type	versionNode: xml.dom.Node
+	@param	versionNode: a <vulnerable> or <unaffected> Node that
+						 contains the version information for this atom
+	@rtype:		String
+	@return:	the portage atom
+	"""
+	rValue = opMapping[versionNode.getAttribute("range")] \
+				+ pkgname \
+				+ "-" + getText(versionNode, format="strip")
+	try:
+		slot = versionNode.getAttribute("slot").strip()
+	except KeyError:
+		pass
+	else:
+		if slot and slot != "*":
+			rValue += _slot_separator + slot
+	return str(rValue)
+
+def makeVersion(versionNode):
+	"""
+	Creates a version string (format <op><version>) from the
+	information in the I{versionNode}.
+	
+	@type	versionNode: xml.dom.Node
+	@param	versionNode: a <vulnerable> or <unaffected> Node that
+						 contains the version information for this atom
+	@rtype:		String
+	@return:	the version string
+	"""
+	rValue = opMapping[versionNode.getAttribute("range")] \
+			+ getText(versionNode, format="strip")
+	try:
+		slot = versionNode.getAttribute("slot").strip()
+	except KeyError:
+		pass
+	else:
+		if slot and slot != "*":
+			rValue += _slot_separator + slot
+	return rValue
+
+def match(atom, dbapi, match_type="default"):
+	"""
+	wrapper that calls revisionMatch() or portage.dbapi.dbapi.match() depending on 
+	the given atom.
+	
+	@type	atom: string
+	@param	atom: a <~ or >~ atom or a normal portage atom that contains the atom to match against
+	@type	dbapi: portage.dbapi.dbapi
+	@param	dbapi: one of the portage databases to use as information source
+	@type	match_type: string
+	@param	match_type: if != "default" passed as first argument to dbapi.xmatch 
+				to apply the wanted visibility filters
+	
+	@rtype:		list of strings
+	@return:	a list with the matching versions
+	"""
+	if atom[2] == "~":
+		return revisionMatch(atom, dbapi, match_type=match_type)
+	elif match_type == "default" or not hasattr(dbapi, "xmatch"):
+		return dbapi.match(atom)
+	else:
+		return dbapi.xmatch(match_type, atom)
+
+def revisionMatch(revisionAtom, dbapi, match_type="default"):
+	"""
+	handler for the special >~, >=~, <=~ and <~ atoms that are supposed to behave
+	as > and < except that they are limited to the same version, the range only
+	applies to the revision part.
+	
+	@type	revisionAtom: string
+	@param	revisionAtom: a <~ or >~ atom that contains the atom to match against
+	@type	dbapi: portage.dbapi.dbapi
+	@param	dbapi: one of the portage databases to use as information source
+	@type	match_type: string
+	@param	match_type: if != "default" passed as first argument to portdb.xmatch 
+				to apply the wanted visibility filters
+	
+	@rtype:		list of strings
+	@return:	a list with the matching versions
+	"""
+	if match_type == "default" or not hasattr(dbapi, "xmatch"):
+		if ":" in revisionAtom:
+			mylist = dbapi.match(re.sub(r'-r[0-9]+(:[^ ]+)?$', r'\1', revisionAtom[2:]))
+		else:
+			mylist = dbapi.match(re.sub("-r[0-9]+$", "", revisionAtom[2:]))
+	else:
+		if ":" in revisionAtom:
+			mylist = dbapi.xmatch(match_type, re.sub(r'-r[0-9]+(:[^ ]+)?$', r'\1', revisionAtom[2:]))
+		else:
+			mylist = dbapi.xmatch(match_type, re.sub("-r[0-9]+$", "", revisionAtom[2:]))
+	rValue = []
+	for v in mylist:
+		r1 = pkgsplit(v)[-1][1:]
+		r2 = pkgsplit(revisionAtom[3:])[-1][1:]
+		if eval(r1+" "+revisionAtom[0:2]+" "+r2):
+			rValue.append(v)
+	return rValue
+		
+
+def getMinUpgrade(vulnerableList, unaffectedList, portdbapi, vardbapi, minimize=True):
+	"""
+	Checks if the system state matches an atom in
+	I{vulnerableList} and returns string describing
+	the lowest version for the package that matches an atom in 
+	I{unaffectedList} and is greater than the currently installed
+	version or None if the system is not affected. Both
+	I{vulnerableList} and I{unaffectedList} should have the
+	same base package.
+	
+	@type	vulnerableList: List of Strings
+	@param	vulnerableList: atoms matching vulnerable package versions
+	@type	unaffectedList: List of Strings
+	@param	unaffectedList: atoms matching unaffected package versions
+	@type	portdbapi:	portage.dbapi.porttree.portdbapi
+	@param	portdbapi:	Ebuild repository
+	@type	vardbapi:	portage.dbapi.vartree.vardbapi
+	@param	vardbapi:	Installed package repository
+	@type	minimize:	Boolean
+	@param	minimize:	True for a least-change upgrade, False for emerge-like algorithm
+	
+	@rtype:		String | None
+	@return:	the lowest unaffected version that is greater than
+				the installed version.
+	"""
+	rValue = None
+	v_installed = []
+	u_installed = []
+	for v in vulnerableList:
+		v_installed += match(v, vardbapi)
+
+	for u in unaffectedList:
+		u_installed += match(u, vardbapi)
+	
+	install_unaffected = True
+	for i in v_installed:
+		if i not in u_installed:
+			install_unaffected = False
+
+	if install_unaffected:
+		return rValue
+	
+	for u in unaffectedList:
+		mylist = match(u, portdbapi, match_type="match-all")
+		for c in mylist:
+			c_pv = catpkgsplit(c)
+			i_pv = catpkgsplit(best(v_installed))
+			if pkgcmp(c_pv[1:], i_pv[1:]) > 0 \
+					and (rValue == None \
+						or not match("="+rValue, portdbapi) \
+						or (minimize ^ (pkgcmp(c_pv[1:], catpkgsplit(rValue)[1:]) > 0)) \
+							and match("="+c, portdbapi)) \
+					and portdbapi.aux_get(c, ["SLOT"]) == vardbapi.aux_get(best(v_installed), ["SLOT"]):
+				rValue = c_pv[0]+"/"+c_pv[1]+"-"+c_pv[2]
+				if c_pv[3] != "r0":		# we don't like -r0 for display
+					rValue += "-"+c_pv[3]
+	return rValue
+
+def format_date(datestr):
+	"""
+	Takes a date (announced, revised) date from a GLSA and formats
+	it as readable text (i.e. "January 1, 2008").
+	
+	@type	datestr: String
+	@param	datestr: the date string to reformat
+	@rtype:		String
+	@return:	a reformatted string, or the original string
+				if it cannot be reformatted.
+	"""
+	splitdate = datestr.split("-", 2)
+	if len(splitdate) != 3:
+		return datestr
+	
+	# This cannot raise an error as we use () instead of []
+	splitdate = (int(x) for x in splitdate)
+	
+	from datetime import date
+	try:
+		d = date(*splitdate)
+	except ValueError:
+		return datestr
+	
+	# TODO We could format to local date format '%x' here?
+	return _unicode_decode(d.strftime("%B %d, %Y"),
+		encoding=_encodings['content'], errors='replace')
+
+# simple Exception classes to catch specific errors
+class GlsaTypeException(Exception):
+	def __init__(self, doctype):
+		Exception.__init__(self, "wrong DOCTYPE: %s" % doctype)
+
+class GlsaFormatException(Exception):
+	pass
+				
+class GlsaArgumentException(Exception):
+	pass
+
+# GLSA xml data wrapper class
+class Glsa:
+	"""
+	This class is a wrapper for the XML data and provides methods to access
+	and display the contained data.
+	"""
+	def __init__(self, myid, myconfig, vardbapi, portdbapi):
+		"""
+		Simple constructor to set the ID, store the config and get the
+		XML data by calling C{self.read()}.
+		
+		@type	myid: String
+		@param	myid: String describing the id for the GLSA object (standard
+					  GLSAs have an ID of the form YYYYMM-nn) or an existing
+					  filename containing a GLSA.
+		@type	myconfig: portage.config
+		@param	myconfig: the config that should be used for this object.
+		@type	vardbapi: portage.dbapi.vartree.vardbapi
+		@param	vardbapi: installed package repository
+		@type	portdbapi: portage.dbapi.porttree.portdbapi
+		@param	portdbapi: ebuild repository
+		"""
+		myid = _unicode_decode(myid,
+			encoding=_encodings['content'], errors='strict')
+		if re.match(r'\d{6}-\d{2}', myid):
+			self.type = "id"
+		elif os.path.exists(myid):
+			self.type = "file"
+		else:
+			raise GlsaArgumentException(_("Given ID %s isn't a valid GLSA ID or filename.") % myid)
+		self.nr = myid
+		self.config = myconfig
+		self.vardbapi = vardbapi
+		self.portdbapi = portdbapi
+		self.read()
+
+	def read(self):
+		"""
+		Here we build the filename from the config and the ID and pass
+		it to urllib to fetch it from the filesystem or a remote server.
+		
+		@rtype:		None
+		@return:	None
+		"""
+		if "GLSA_DIR" in self.config:
+			repository = "file://" + self.config["GLSA_DIR"]+"/"
+		else:
+			repository = "file://" + self.config["PORTDIR"] + "/metadata/glsa/"
+		if self.type == "file":
+			myurl = "file://"+self.nr
+		else:
+			myurl = repository + "glsa-%s.xml" % str(self.nr)
+		self.parse(urllib_request_urlopen(myurl))
+		return None
+
+	def parse(self, myfile):
+		"""
+		This method parses the XML file and sets up the internal data 
+		structures by calling the different helper functions in this
+		module.
+		
+		@type	myfile: File
+		@param	myfile: file object to grab the XML data from
+		@rtype:		None
+		@returns:	None
+		"""
+		self.DOM = xml.dom.minidom.parse(myfile)
+		if not self.DOM.doctype:
+			raise GlsaTypeException(None)
+		elif self.DOM.doctype.systemId == "http://www.gentoo.org/dtd/glsa.dtd":
+			self.dtdversion = 0
+		elif self.DOM.doctype.systemId == "http://www.gentoo.org/dtd/glsa-2.dtd":
+			self.dtdversion = 2
+		else:
+			raise GlsaTypeException(self.DOM.doctype.systemId)
+		myroot = self.DOM.getElementsByTagName("glsa")[0]
+		if self.type == "id" and myroot.getAttribute("id") != self.nr:
+			raise GlsaFormatException(_("filename and internal id don't match:") + myroot.getAttribute("id") + " != " + self.nr)
+
+		# the simple (single, required, top-level, #PCDATA) tags first
+		self.title = getText(myroot.getElementsByTagName("title")[0], format="strip")
+		self.synopsis = getText(myroot.getElementsByTagName("synopsis")[0], format="strip")
+		self.announced = format_date(getText(myroot.getElementsByTagName("announced")[0], format="strip"))
+		
+		count = 1
+		# Support both formats of revised:
+		# <revised>December 30, 2007: 02</revised>
+		# <revised count="2">2007-12-30</revised>
+		revisedEl = myroot.getElementsByTagName("revised")[0]
+		self.revised = getText(revisedEl, format="strip")
+		if ((sys.hexversion >= 0x3000000 and "count" in revisedEl.attributes) or
+			(sys.hexversion < 0x3000000 and revisedEl.attributes.has_key("count"))):
+			count = revisedEl.getAttribute("count")
+		elif (self.revised.find(":") >= 0):
+			(self.revised, count) = self.revised.split(":")
+		
+		self.revised = format_date(self.revised)
+		
+		try:
+			self.count = int(count)
+		except ValueError:
+			# TODO should this raise a GlsaFormatException?
+			self.count = 1
+		
+		# now the optional and 0-n toplevel, #PCDATA tags and references
+		try:
+			self.access = getText(myroot.getElementsByTagName("access")[0], format="strip")
+		except IndexError:
+			self.access = ""
+		self.bugs = getMultiTagsText(myroot, "bug", format="strip")
+		self.references = getMultiTagsText(myroot.getElementsByTagName("references")[0], "uri", format="keep")
+		
+		# and now the formatted text elements
+		self.description = getText(myroot.getElementsByTagName("description")[0], format="xml")
+		self.workaround = getText(myroot.getElementsByTagName("workaround")[0], format="xml")
+		self.resolution = getText(myroot.getElementsByTagName("resolution")[0], format="xml")
+		self.impact_text = getText(myroot.getElementsByTagName("impact")[0], format="xml")
+		self.impact_type = myroot.getElementsByTagName("impact")[0].getAttribute("type")
+		try:
+			self.background = getText(myroot.getElementsByTagName("background")[0], format="xml")
+		except IndexError:
+			self.background = ""					
+
+		# finally the interesting tags (product, affected, package)
+		self.glsatype = myroot.getElementsByTagName("product")[0].getAttribute("type")
+		self.product = getText(myroot.getElementsByTagName("product")[0], format="strip")
+		self.affected = myroot.getElementsByTagName("affected")[0]
+		self.packages = {}
+		for p in self.affected.getElementsByTagName("package"):
+			name = p.getAttribute("name")
+			try:
+				name = portage.dep.Atom(name)
+			except portage.exception.InvalidAtom:
+				raise GlsaFormatException(_("invalid package name: %s") % name)
+			if name != name.cp:
+				raise GlsaFormatException(_("invalid package name: %s") % name)
+			name = name.cp
+			if name not in self.packages:
+				self.packages[name] = []
+			tmp = {}
+			tmp["arch"] = p.getAttribute("arch")
+			tmp["auto"] = (p.getAttribute("auto") == "yes")
+			tmp["vul_vers"] = [makeVersion(v) for v in p.getElementsByTagName("vulnerable")]
+			tmp["unaff_vers"] = [makeVersion(v) for v in p.getElementsByTagName("unaffected")]
+			tmp["vul_atoms"] = [makeAtom(name, v) for v in p.getElementsByTagName("vulnerable")]
+			tmp["unaff_atoms"] = [makeAtom(name, v) for v in p.getElementsByTagName("unaffected")]
+			self.packages[name].append(tmp)
+		# TODO: services aren't really used yet
+		self.services = self.affected.getElementsByTagName("service")
+		return None
+
+	def dump(self, outstream=sys.stdout):
+		"""
+		Dumps a plaintext representation of this GLSA to I{outstream},
+		or B{stdout} if it is omitted.
+		
+		@type	outstream: File
+		@param	outstream: Stream that should be used for writing
+						 (defaults to sys.stdout)
+		"""
+		width = 76
+		outstream.write(("GLSA %s: \n%s" % (self.nr, self.title)).center(width)+"\n")
+		outstream.write((width*"=")+"\n")
+		outstream.write(wrap(self.synopsis, width, caption=_("Synopsis:         "))+"\n")
+		outstream.write(_("Announced on:      %s\n") % self.announced)
+		outstream.write(_("Last revised on:   %s : %02d\n\n") % (self.revised, self.count))
+		if self.glsatype == "ebuild":
+			for k in self.packages:
+				pkg = self.packages[k]
+				for path in pkg:
+					vul_vers = "".join(path["vul_vers"])
+					unaff_vers = "".join(path["unaff_vers"])
+					outstream.write(_("Affected package:  %s\n") % k)
+					outstream.write(_("Affected archs:    "))
+					if path["arch"] == "*":
+						outstream.write(_("All\n"))
+					else:
+						outstream.write("%s\n" % path["arch"])
+					outstream.write(_("Vulnerable:        %s\n") % vul_vers)
+					outstream.write(_("Unaffected:        %s\n\n") % unaff_vers)
+		elif self.glsatype == "infrastructure":
+			pass
+		if len(self.bugs) > 0:
+			outstream.write(_("\nRelated bugs:      "))
+			for i in range(0, len(self.bugs)):
+				outstream.write(self.bugs[i])
+				if i < len(self.bugs)-1:
+					outstream.write(", ")
+				else:
+					outstream.write("\n")				
+		if self.background:
+			outstream.write("\n"+wrap(self.background, width, caption=_("Background:       ")))
+		outstream.write("\n"+wrap(self.description, width, caption=_("Description:      ")))
+		outstream.write("\n"+wrap(self.impact_text, width, caption=_("Impact:           ")))
+		outstream.write("\n"+wrap(self.workaround, width, caption=_("Workaround:       ")))
+		outstream.write("\n"+wrap(self.resolution, width, caption=_("Resolution:       ")))
+		myreferences = ""
+		for r in self.references:
+			myreferences += (r.replace(" ", SPACE_ESCAPE)+NEWLINE_ESCAPE+" ")
+		outstream.write("\n"+wrap(myreferences, width, caption=_("References:       ")))
+		outstream.write("\n")
+	
+	def isVulnerable(self):
+		"""
+		Tests if the system is affected by this GLSA by checking if any
+		vulnerable package versions are installed. Also checks for affected
+		architectures.
+		
+		@rtype:		Boolean
+		@returns:	True if the system is affected, False if not
+		"""
+		rValue = False
+		for k in self.packages:
+			pkg = self.packages[k]
+			for path in pkg:
+				if path["arch"] == "*" or self.config["ARCH"] in path["arch"].split():
+					for v in path["vul_atoms"]:
+						rValue = rValue \
+							or (len(match(v, self.vardbapi)) > 0 \
+								and getMinUpgrade(path["vul_atoms"], path["unaff_atoms"], \
+										self.portdbapi, self.vardbapi))
+		return rValue
+	
+	def isApplied(self):
+		"""
+		Checks whether the GLSA ID is in the GLSA checkfile to determine
+		if this GLSA was already applied.
+		
+		@rtype:		Boolean
+		@returns:	True if the GLSA was applied, False if not
+		"""
+		return (self.nr in get_applied_glsas(self.config))
+
+	def inject(self):
+		"""
+		Puts the ID of this GLSA into the GLSA checkfile, so it won't
+		show up on future checks. Should be called after a GLSA is 
+		applied or on explicit user request.
+
+		@rtype:		None
+		@returns:	None
+		"""
+		if not self.isApplied():
+			checkfile = io.open(
+				_unicode_encode(os.path.join(self.config["EROOT"],
+				CACHE_PATH, "glsa"),
+				encoding=_encodings['fs'], errors='strict'), 
+				mode='a+', encoding=_encodings['content'], errors='strict')
+			checkfile.write(_unicode_decode(self.nr + "\n"))
+			checkfile.close()
+		return None
+	
+	def getMergeList(self, least_change=True):
+		"""
+		Returns the list of package-versions that have to be merged to
+		apply this GLSA properly. The versions are as low as possible 
+		while avoiding downgrades (see L{getMinUpgrade}).
+		
+		@type	least_change: Boolean
+		@param	least_change: True if the smallest possible upgrade should be selected,
+					False for an emerge-like algorithm
+		@rtype:		List of Strings
+		@return:	list of package-versions that have to be merged
+		"""
+		rValue = []
+		for pkg in self.packages:
+			for path in self.packages[pkg]:
+				update = getMinUpgrade(path["vul_atoms"], path["unaff_atoms"], \
+					self.portdbapi, self.vardbapi, minimize=least_change)
+				if update:
+					rValue.append(update)
+		return rValue

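The Glsa class ties the helpers in glsa.py together. A minimal sketch of a
glsa-check style workflow, assuming a configured portage tree and that the
(illustrative) GLSA id below exists under PORTDIR/metadata/glsa:

    import portage
    from portage.glsa import Glsa

    settings = portage.settings
    vardb = portage.db[portage.root]["vartree"].dbapi
    portdb = portage.db[portage.root]["porttree"].dbapi

    myglsa = Glsa("201101-01", settings, vardb, portdb)
    myglsa.dump()                         # human-readable report on stdout
    if myglsa.isVulnerable():
        print(myglsa.getMergeList())      # least-change upgrades that fix it
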
diff --git a/portage_with_autodep/pym/portage/localization.py b/portage_with_autodep/pym/portage/localization.py
new file mode 100644
index 0000000..d16c4b1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/localization.py
@@ -0,0 +1,20 @@
+# localization.py -- Code to manage/help portage localization.
+# Copyright 2004 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+# We define this to make the transition easier for us.
+def _(mystr):
+	return mystr
+
+
+def localization_example():
+	# Dict references allow translators to rearrange word order.
+	print(_("You can use this string for translating."))
+	print(_("Strings can be formatted with %(mystr)s like this.") % {"mystr": "VALUES"})
+
+	a_value = "value.of.a"
+	b_value = 123
+	c_value = [1,2,3,4]
+	print(_("A: %(a)s -- B: %(b)s -- C: %(c)s") % {"a":a_value,"b":b_value,"c":c_value})
+

diff --git a/portage_with_autodep/pym/portage/locks.py b/portage_with_autodep/pym/portage/locks.py
new file mode 100644
index 0000000..9ed1d6a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/locks.py
@@ -0,0 +1,395 @@
+# portage: Lock management code
+# Copyright 2004-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["lockdir", "unlockdir", "lockfile", "unlockfile", \
+	"hardlock_name", "hardlink_is_mine", "hardlink_lockfile", \
+	"unhardlink_lockfile", "hardlock_cleanup"]
+
+import errno
+import fcntl
+import stat
+import sys
+import time
+
+import portage
+from portage import os
+from portage.const import PORTAGE_BIN_PATH
+from portage.exception import DirectoryNotFound, FileNotFound, \
+	InvalidData, TryAgain, OperationNotPermitted, PermissionDenied
+from portage.data import portage_gid
+from portage.util import writemsg
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+HARDLINK_FD = -2
+_default_lock_fn = fcntl.lockf
+
+# Used by emerge in order to disable the "waiting for lock" message
+# so that it doesn't interfere with the status display.
+_quiet = False
+
+def lockdir(mydir, flags=0):
+	return lockfile(mydir, wantnewlockfile=1, flags=flags)
+def unlockdir(mylock):
+	return unlockfile(mylock)
+
+def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
+	waiting_msg=None, flags=0):
+	"""
+	If wantnewlockfile is True then this creates a lockfile in the parent
+	directory as the file: '.' + basename + '.portage_lockfile'.
+	"""
+
+	if not mypath:
+		raise InvalidData(_("Empty path given"))
+
+	if isinstance(mypath, basestring) and mypath[-1] == '/':
+		mypath = mypath[:-1]
+
+	if hasattr(mypath, 'fileno'):
+		mypath = mypath.fileno()
+	if isinstance(mypath, int):
+		lockfilename    = mypath
+		wantnewlockfile = 0
+		unlinkfile      = 0
+	elif wantnewlockfile:
+		base, tail = os.path.split(mypath)
+		lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
+		del base, tail
+		unlinkfile   = 1
+	else:
+		lockfilename = mypath
+
+	if isinstance(mypath, basestring):
+		if not os.path.exists(os.path.dirname(mypath)):
+			raise DirectoryNotFound(os.path.dirname(mypath))
+		preexisting = os.path.exists(lockfilename)
+		old_mask = os.umask(000)
+		try:
+			try:
+				myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR, 0o660)
+			except OSError as e:
+				func_call = "open('%s')" % lockfilename
+				if e.errno == OperationNotPermitted.errno:
+					raise OperationNotPermitted(func_call)
+				elif e.errno == PermissionDenied.errno:
+					raise PermissionDenied(func_call)
+				else:
+					raise
+
+			if not preexisting:
+				try:
+					if os.stat(lockfilename).st_gid != portage_gid:
+						os.chown(lockfilename, -1, portage_gid)
+				except OSError as e:
+					if e.errno in (errno.ENOENT, errno.ESTALE):
+						return lockfile(mypath,
+							wantnewlockfile=wantnewlockfile,
+							unlinkfile=unlinkfile, waiting_msg=waiting_msg,
+							flags=flags)
+					else:
+						writemsg("%s: chown('%s', -1, %d)\n" % \
+							(e, lockfilename, portage_gid), noiselevel=-1)
+						writemsg(_("Cannot chown a lockfile: '%s'\n") % \
+							lockfilename, noiselevel=-1)
+						writemsg(_("Group IDs of current user: %s\n") % \
+							" ".join(str(n) for n in os.getgroups()),
+							noiselevel=-1)
+		finally:
+			os.umask(old_mask)
+
+	elif isinstance(mypath, int):
+		myfd = mypath
+
+	else:
+		raise ValueError(_("Unknown type passed in '%s': '%s'") % \
+			(type(mypath), mypath))
+
+	# Try a non-blocking lock first; if it is already held, print a
+	# "waiting for lock" message and fall back to a blocking attempt.
+	locking_method = _default_lock_fn
+	try:
+		locking_method(myfd, fcntl.LOCK_EX|fcntl.LOCK_NB)
+	except IOError as e:
+		if not hasattr(e, "errno"):
+			raise
+		if e.errno in (errno.EACCES, errno.EAGAIN):
+			# Resource temporarily unavailable; e.g., someone beat us to the lock.
+			if flags & os.O_NONBLOCK:
+				os.close(myfd)
+				raise TryAgain(mypath)
+
+			global _quiet
+			if _quiet:
+				out = None
+			else:
+				out = portage.output.EOutput()
+			if waiting_msg is None:
+				if isinstance(mypath, int):
+					waiting_msg = _("waiting for lock on fd %i") % myfd
+				else:
+					waiting_msg = _("waiting for lock on %s\n") % lockfilename
+			if out is not None:
+				out.ebegin(waiting_msg)
+			# try for the exclusive lock now.
+			try:
+				locking_method(myfd, fcntl.LOCK_EX)
+			except EnvironmentError as e:
+				if out is not None:
+					out.eend(1, str(e))
+				raise
+			if out is not None:
+				out.eend(os.EX_OK)
+		elif e.errno == errno.ENOLCK:
+			# We're not allowed to lock on this FS.
+			os.close(myfd)
+			link_success = False
+			if isinstance(lockfilename, basestring):
+				if wantnewlockfile:
+					try:
+						if os.stat(lockfilename)[stat.ST_NLINK] == 1:
+							os.unlink(lockfilename)
+					except OSError:
+						pass
+					link_success = hardlink_lockfile(lockfilename)
+			if not link_success:
+				raise
+			locking_method = None
+			myfd = HARDLINK_FD
+		else:
+			raise
+
+		
+	if isinstance(lockfilename, basestring) and \
+		myfd != HARDLINK_FD and _fstat_nlink(myfd) == 0:
+		# The file was deleted on us... Keep trying to make one...
+		os.close(myfd)
+		writemsg(_("lockfile recurse\n"), 1)
+		lockfilename, myfd, unlinkfile, locking_method = lockfile(
+			mypath, wantnewlockfile=wantnewlockfile, unlinkfile=unlinkfile,
+			waiting_msg=waiting_msg, flags=flags)
+
+	writemsg(str((lockfilename,myfd,unlinkfile))+"\n",1)
+	return (lockfilename,myfd,unlinkfile,locking_method)
+
+def _fstat_nlink(fd):
+	"""
+	@param fd: an open file descriptor
+	@type fd: Integer
+	@rtype: Integer
+	@return: the current number of hardlinks to the file
+	"""
+	try:
+		return os.fstat(fd).st_nlink
+	except EnvironmentError as e:
+		if e.errno in (errno.ENOENT, errno.ESTALE):
+			# Some filesystems such as CIFS return
+			# ENOENT which means st_nlink == 0.
+			return 0
+		raise
+
+def unlockfile(mytuple):
+
+	# XXX: Compatibility hack.
+	if len(mytuple) == 3:
+		lockfilename,myfd,unlinkfile = mytuple
+		locking_method = fcntl.flock
+	elif len(mytuple) == 4:
+		lockfilename,myfd,unlinkfile,locking_method = mytuple
+	else:
+		raise InvalidData
+
+	if myfd == HARDLINK_FD:
+		unhardlink_lockfile(lockfilename)
+		return True
+	
+	# myfd may be None here due to myfd = mypath in lockfile()
+	if isinstance(lockfilename, basestring) and \
+		not os.path.exists(lockfilename):
+		writemsg(_("lockfile does not exist '%s'\n") % lockfilename,1)
+		if myfd is not None:
+			os.close(myfd)
+		return False
+
+	try:
+		if myfd is None:
+			myfd = os.open(lockfilename, os.O_WRONLY,0o660)
+			unlinkfile = 1
+		locking_method(myfd,fcntl.LOCK_UN)
+	except OSError:
+		if isinstance(lockfilename, basestring):
+			os.close(myfd)
+		raise IOError(_("Failed to unlock file '%s'\n") % lockfilename)
+
+	try:
+		# This sleep call was added to allow other processes that are
+		# waiting for a lock to be able to grab it before it is deleted.
+		# lockfile() already accounts for this situation, however, and
+		# the sleep here adds more time than is saved overall, so it is
+		# commented out until it is proven necessary.
+		#time.sleep(0.0001)
+		if unlinkfile:
+			locking_method(myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
+			# We won the lock, so there isn't competition for it.
+			# We can safely delete the file.
+			writemsg(_("Got the lockfile...\n"), 1)
+			if _fstat_nlink(myfd) == 1:
+				os.unlink(lockfilename)
+				writemsg(_("Unlinked lockfile...\n"), 1)
+				locking_method(myfd,fcntl.LOCK_UN)
+			else:
+				writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
+				os.close(myfd)
+				return False
+	except SystemExit:
+		raise
+	except Exception as e:
+		writemsg(_("Failed to get lock... someone took it.\n"), 1)
+		writemsg(str(e)+"\n",1)
+
+	# why test lockfilename?  because we may have been handed an
+	# fd originally, and the caller might not like having their
+	# open fd closed automatically on them.
+	if isinstance(lockfilename, basestring):
+		os.close(myfd)
+
+	return True
+
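+# A minimal usage sketch (illustrative path, not part of this module):
+#
+#	mylock = lockfile("/var/tmp/portage/.demo.lock", wantnewlockfile=1)
+#	try:
+#		pass  # critical section
+#	finally:
+#		unlockfile(mylock)
+#
+# lockfile() returns the (lockfilename, myfd, unlinkfile, locking_method)
+# tuple that unlockfile() expects back.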
+
+
+
+def hardlock_name(path):
+	return path+".hardlock-"+os.uname()[1]+"-"+str(os.getpid())
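+# For example (hostname and pid are illustrative):
+#	hardlock_name("/var/db/.lock") == "/var/db/.lock.hardlock-myhost-12345"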
+
+def hardlink_is_mine(link,lock):
+	try:
+		return os.stat(link).st_nlink == 2
+	except OSError:
+		return False
+
+def hardlink_lockfile(lockfilename, max_wait=14400):
+	"""Does the NFS, hardlink shuffle to ensure locking on the disk.
+	We create a PRIVATE lockfile, that is just a placeholder on the disk.
+	Then we HARDLINK the real lockfile to that private file.
+	If our file has 2 references, then we have the lock. :)
+	Otherwise we lather, rinse, and repeat.
+	We default to a 4 hour timeout.
+	"""
+
+	start_time = time.time()
+	myhardlock = hardlock_name(lockfilename)
+	reported_waiting = False
+	
+	while(time.time() < (start_time + max_wait)):
+		# We only need it to exist.
+		myfd = os.open(myhardlock, os.O_CREAT|os.O_RDWR,0o660)
+		os.close(myfd)
+	
+		if not os.path.exists(myhardlock):
+			raise FileNotFound(
+				_("Created lockfile is missing: %(filename)s") % \
+				{"filename" : myhardlock})
+
+		try:
+			res = os.link(myhardlock, lockfilename)
+		except OSError:
+			pass
+
+		if hardlink_is_mine(myhardlock, lockfilename):
+			# We have the lock.
+			if reported_waiting:
+				writemsg("\n", noiselevel=-1)
+			return True
+
+		if reported_waiting:
+			writemsg(".", noiselevel=-1)
+		else:
+			reported_waiting = True
+			msg = _("\nWaiting on (hardlink) lockfile: (one '.' per 3 seconds)\n"
+				"%(bin_path)s/clean_locks can fix stuck locks.\n"
+				"Lockfile: %(lockfilename)s\n") % \
+				{"bin_path": PORTAGE_BIN_PATH, "lockfilename": lockfilename}
+			writemsg(msg, noiselevel=-1)
+		time.sleep(3)
+	
+	os.unlink(myhardlock)
+	return False
+
+def unhardlink_lockfile(lockfilename):
+	myhardlock = hardlock_name(lockfilename)
+	if hardlink_is_mine(myhardlock, lockfilename):
+		# Make sure not to touch lockfilename unless we really have a lock.
+		try:
+			os.unlink(lockfilename)
+		except OSError:
+			pass
+	try:
+		os.unlink(myhardlock)
+	except OSError:
+		pass
+
+def hardlock_cleanup(path, remove_all_locks=False):
+	mypid  = str(os.getpid())
+	myhost = os.uname()[1]
+	mydl = os.listdir(path)
+
+	results = []
+	mycount = 0
+
+	mylist = {}
+	for x in mydl:
+		if os.path.isfile(path+"/"+x):
+			parts = x.split(".hardlock-")
+			if len(parts) == 2:
+				filename = parts[0]
+				hostpid  = parts[1].split("-")
+				host  = "-".join(hostpid[:-1])
+				pid   = hostpid[-1]
+				
+				if filename not in mylist:
+					mylist[filename] = {}
+				if host not in mylist[filename]:
+					mylist[filename][host] = []
+				mylist[filename][host].append(pid)
+
+				mycount += 1
+
+
+	results.append(_("Found %(count)s locks") % {"count":mycount})
+	
+	for x in mylist:
+		if myhost in mylist[x] or remove_all_locks:
+			mylockname = hardlock_name(path+"/"+x)
+			if hardlink_is_mine(mylockname, path+"/"+x) or \
+			   not os.path.exists(path+"/"+x) or \
+				 remove_all_locks:
+				for y in mylist[x]:
+					for z in mylist[x][y]:
+						filename = path+"/"+x+".hardlock-"+y+"-"+z
+						if filename == mylockname:
+							continue
+						try:
+							# We're sweeping through, unlinking everyone's locks.
+							os.unlink(filename)
+							results.append(_("Unlinked: ") + filename)
+						except OSError:
+							pass
+				try:
+					os.unlink(path+"/"+x)
+					results.append(_("Unlinked: ") + path+"/"+x)
+					os.unlink(mylockname)
+					results.append(_("Unlinked: ") + mylockname)
+				except OSError:
+					pass
+			else:
+				try:
+					os.unlink(mylockname)
+					results.append(_("Unlinked: ") + mylockname)
+				except OSError:
+					pass
+
+	return results
+

diff --git a/portage_with_autodep/pym/portage/mail.py b/portage_with_autodep/pym/portage/mail.py
new file mode 100644
index 0000000..17dfcaf
--- /dev/null
+++ b/portage_with_autodep/pym/portage/mail.py
@@ -0,0 +1,177 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Since python ebuilds remove the 'email' module when USE=build
+# is enabled, use a local import so that
+# portage.proxy.lazyimport._preload_portage_submodules()
+# can load this module even though the 'email' module is missing.
+# The elog mail modules won't work, but at least an ImportError
+# won't cause portage to crash during stage builds. Since the
+# 'smtplib' module imports the 'email' module, that one is imported
+# locally as well.
+
+import socket
+import sys
+import time
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+from portage.localization import _
+import portage
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+	def _force_ascii_if_necessary(s):
+		# Force ascii encoding in order to avoid UnicodeEncodeError
+		# from smtplib.sendmail with python3 (bug #291331).
+		s = _unicode_encode(s,
+			encoding='ascii', errors='backslashreplace')
+		s = _unicode_decode(s,
+			encoding='ascii', errors='replace')
+		return s
+
+else:
+
+	def _force_ascii_if_necessary(s):
+		return s
+
+def TextMessage(_text):
+	from email.mime.text import MIMEText
+	mimetext = MIMEText(_text)
+	if sys.hexversion >= 0x3000000:
+		mimetext.set_charset("UTF-8")
+	return mimetext
+
+def create_message(sender, recipient, subject, body, attachments=None):
+
+	from email.header import Header
+	from email.mime.base import MIMEBase as BaseMessage
+	from email.mime.multipart import MIMEMultipart as MultipartMessage
+
+	if sys.hexversion < 0x3000000:
+		sender = _unicode_encode(sender,
+			encoding=_encodings['content'], errors='strict')
+		recipient = _unicode_encode(recipient,
+			encoding=_encodings['content'], errors='strict')
+		subject = _unicode_encode(subject,
+			encoding=_encodings['content'], errors='backslashreplace')
+		body = _unicode_encode(body,
+			encoding=_encodings['content'], errors='backslashreplace')
+
+	if attachments is None:
+		mymessage = TextMessage(body)
+	else:
+		mymessage = MultipartMessage()
+		mymessage.attach(TextMessage(body))
+		for x in attachments:
+			if isinstance(x, BaseMessage):
+				mymessage.attach(x)
+			elif isinstance(x, basestring):
+				if sys.hexversion < 0x3000000:
+					x = _unicode_encode(x,
+						encoding=_encodings['content'],
+						errors='backslashreplace')
+				mymessage.attach(TextMessage(x))
+			else:
+				raise portage.exception.PortageException(_("Can't handle type of attachment: %s") % type(x))
+
+	mymessage.set_unixfrom(sender)
+	mymessage["To"] = recipient
+	mymessage["From"] = sender
+
+	# Use Header as a workaround so that long subject lines are wrapped
+	# correctly by <=python-2.6 (gentoo bug #263370, python issue #1974).
+	# Also, need to force ascii for python3, in order to avoid
+	# UnicodeEncodeError with non-ascii characters:
+	#  File "/usr/lib/python3.1/email/header.py", line 189, in __init__
+	#    self.append(s, charset, errors)
+	#  File "/usr/lib/python3.1/email/header.py", line 262, in append
+	#    input_bytes = s.encode(input_charset, errors)
+	#UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-9: ordinal not in range(128)
+	mymessage["Subject"] = Header(_force_ascii_if_necessary(subject))
+	mymessage["Date"] = time.strftime("%a, %d %b %Y %H:%M:%S %z")
+	
+	return mymessage
+
+def send_mail(mysettings, message):
+
+	import smtplib
+
+	mymailhost = "localhost"
+	mymailport = 25
+	mymailuser = ""
+	mymailpasswd = ""
+	myrecipient = "root@localhost"
+	
+	# Syntax for PORTAGE_ELOG_MAILURI (if defined):
+	# address [[user:passwd@]mailserver[:port]]
+	# where address:    recipient address
+	#       user:       username for smtp auth (defaults to none)
+	#       passwd:     password for smtp auth (defaults to none)
+	#       mailserver: smtp server that should be used to deliver the mail (defaults to localhost)
+	#					alternatively this can also be the absolute path to a sendmail binary if you don't want to use smtp
+	#       port:       port to use on the given smtp server (defaults to 25, values > 100000 indicate that starttls should be used on (port-100000))
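+	# Illustrative examples (hypothetical addresses and hosts):
+	#   PORTAGE_ELOG_MAILURI="root@localhost"
+	#   PORTAGE_ELOG_MAILURI="logs@example.org user:secret@mail.example.org:587"
+	#   (use port 100587 instead of 587 to request starttls on port 587)
+	#   PORTAGE_ELOG_MAILURI="logs@example.org /usr/sbin/sendmail"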
+	if " " in mysettings.get("PORTAGE_ELOG_MAILURI", ""):
+		myrecipient, mymailuri = mysettings["PORTAGE_ELOG_MAILURI"].split()
+		if "@" in mymailuri:
+			myauthdata, myconndata = mymailuri.rsplit("@", 1)
+			try:
+				mymailuser,mymailpasswd = myauthdata.split(":")
+			except ValueError:
+				print(_("!!! invalid SMTP AUTH configuration, trying unauthenticated ..."))
+		else:
+			myconndata = mymailuri
+		if ":" in myconndata:
+			mymailhost,mymailport = myconndata.split(":")
+		else:
+			mymailhost = myconndata
+	else:
+		myrecipient = mysettings.get("PORTAGE_ELOG_MAILURI", "")
+	
+	myfrom = message.get("From")
+
+	if sys.hexversion < 0x3000000:
+		myrecipient = _unicode_encode(myrecipient,
+			encoding=_encodings['content'], errors='strict')
+		mymailhost = _unicode_encode(mymailhost,
+			encoding=_encodings['content'], errors='strict')
+		mymailport = _unicode_encode(mymailport,
+			encoding=_encodings['content'], errors='strict')
+		myfrom = _unicode_encode(myfrom,
+			encoding=_encodings['content'], errors='strict')
+		mymailuser = _unicode_encode(mymailuser,
+			encoding=_encodings['content'], errors='strict')
+		mymailpasswd = _unicode_encode(mymailpasswd,
+			encoding=_encodings['content'], errors='strict')
+
+	# user wants to use a sendmail binary instead of smtp
+	if mymailhost[0] == os.sep and os.path.exists(mymailhost):
+		fd = os.popen(mymailhost+" -f "+myfrom+" "+myrecipient, "w")
+		fd.write(_force_ascii_if_necessary(message.as_string()))
+		if fd.close() is not None:
+			sys.stderr.write(_("!!! %s returned with a non-zero exit code. This generally indicates an error.\n") % mymailhost)
+	else:
+		try:
+			if int(mymailport) > 100000:
+				myconn = smtplib.SMTP(mymailhost, int(mymailport) - 100000)
+				myconn.ehlo()
+				if not myconn.has_extn("STARTTLS"):
+					raise portage.exception.PortageException(_("!!! TLS support requested for logmail but not supported by server"))
+				myconn.starttls()
+				myconn.ehlo()
+			else:
+				myconn = smtplib.SMTP(mymailhost, mymailport)
+			if mymailuser != "" and mymailpasswd != "":
+				myconn.login(mymailuser, mymailpasswd)
+
+			message_str = _force_ascii_if_necessary(message.as_string())
+			myconn.sendmail(myfrom, myrecipient, message_str)
+			myconn.quit()
+		except smtplib.SMTPException as e:
+			raise portage.exception.PortageException(_("!!! An error occurred while trying to send logmail:\n")+str(e))
+		except socket.error as e:
+			raise portage.exception.PortageException(_("!!! A network error occurred while trying to send logmail:\n%s\nSure you configured PORTAGE_ELOG_MAILURI correctly?") % str(e))
+	return
+	
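+# A minimal usage sketch (illustrative values; mysettings is a portage
+# config object that provides PORTAGE_ELOG_MAILURI):
+#
+#	message = create_message("portage@localhost", "root@localhost",
+#		"[portage] elog summary", "elog body text")
+#	send_mail(mysettings, message)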

diff --git a/portage_with_autodep/pym/portage/manifest.py b/portage_with_autodep/pym/portage/manifest.py
new file mode 100644
index 0000000..13efab7
--- /dev/null
+++ b/portage_with_autodep/pym/portage/manifest.py
@@ -0,0 +1,538 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.checksum:hashfunc_map,perform_multiple_checksums,verify_all',
+	'portage.util:write_atomic',
+)
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.exception import DigestException, FileNotFound, \
+	InvalidDataType, MissingParameter, PermissionDenied, \
+	PortageException, PortagePackageException
+from portage.localization import _
+
+class FileNotInManifestException(PortageException):
+	pass
+
+def manifest2AuxfileFilter(filename):
+	filename = filename.strip(os.sep)
+	mysplit = filename.split(os.path.sep)
+	if "CVS" in mysplit:
+		return False
+	for x in mysplit:
+		if x[:1] == '.':
+			return False
+	return not filename[:7] == 'digest-'
+
+def manifest2MiscfileFilter(filename):
+	filename = filename.strip(os.sep)
+	return not (filename in ["CVS", ".svn", "files", "Manifest"] or filename.endswith(".ebuild"))
+
+def guessManifestFileType(filename):
+	""" Perform a best effort guess of which type the given filename is, avoid using this if possible """
+	if filename.startswith("files" + os.sep + "digest-"):
+		return None
+	if filename.startswith("files" + os.sep):
+		return "AUX"
+	elif filename.endswith(".ebuild"):
+		return "EBUILD"
+	elif filename in ["ChangeLog", "metadata.xml"]:
+		return "MISC"
+	else:
+		return "DIST"
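+# For example: guessManifestFileType("files/foo.patch") returns "AUX",
+# "foo-1.0.ebuild" returns "EBUILD", "metadata.xml" returns "MISC", and
+# anything unrecognized falls through to "DIST".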
+
+def parseManifest2(mysplit):
+	myentry = None
+	if len(mysplit) > 4 and mysplit[0] in portage.const.MANIFEST2_IDENTIFIERS:
+		mytype = mysplit[0]
+		myname = mysplit[1]
+		try:
+			mysize = int(mysplit[2])
+		except ValueError:
+			return None
+		myhashes = dict(zip(mysplit[3::2], mysplit[4::2]))
+		myhashes["size"] = mysize
+		myentry = Manifest2Entry(type=mytype, name=myname, hashes=myhashes)
+	return myentry
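+# For example, a manifest line split into tokens such as (hash values
+# shortened for illustration):
+#	DIST foo-1.0.tar.gz 294 RMD160 aaaa SHA1 bbbb
+# yields a Manifest2Entry with type "DIST", name "foo-1.0.tar.gz" and
+# hashes {"size": 294, "RMD160": "aaaa", "SHA1": "bbbb"}.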
+
+class ManifestEntry(object):
+	__slots__ = ("type", "name", "hashes")
+	def __init__(self, **kwargs):
+		for k, v in kwargs.items():
+			setattr(self, k, v)
+
+class Manifest2Entry(ManifestEntry):
+	def __str__(self):
+		myline = " ".join([self.type, self.name, str(self.hashes["size"])])
+		myhashkeys = list(self.hashes)
+		myhashkeys.remove("size")
+		myhashkeys.sort()
+		for h in myhashkeys:
+			myline += " " + h + " " + str(self.hashes[h])
+		return myline
+
+	def __eq__(self, other):
+		if not isinstance(other, Manifest2Entry) or \
+			self.type != other.type or \
+			self.name != other.name or \
+			self.hashes != other.hashes:
+			return False
+		return True
+
+	def __ne__(self, other):
+		return not self.__eq__(other)
+
+class Manifest(object):
+	parsers = (parseManifest2,)
+	def __init__(self, pkgdir, distdir, fetchlist_dict=None,
+		manifest1_compat=False, from_scratch=False):
+		""" create new Manifest instance for package in pkgdir
+		    and add compatibility entries for old portage versions if manifest1_compat == True.
+		    Do not parse Manifest file if from_scratch == True (only for internal use)
+			The fetchlist_dict parameter is required only for generation of
+			a Manifest (not needed for parsing and checking sums)."""
+		self.pkgdir = _unicode_decode(pkgdir).rstrip(os.sep) + os.sep
+		self.fhashdict = {}
+		self.hashes = set()
+		self.hashes.update(portage.const.MANIFEST2_HASH_FUNCTIONS)
+		if manifest1_compat:
+			raise NotImplementedError("manifest1 support has been removed")
+		self.hashes.difference_update(hashname for hashname in \
+			list(self.hashes) if hashname not in hashfunc_map)
+		self.hashes.add("size")
+		self.hashes.add(portage.const.MANIFEST2_REQUIRED_HASH)
+		for t in portage.const.MANIFEST2_IDENTIFIERS:
+			self.fhashdict[t] = {}
+		if not from_scratch:
+			self._read()
+		if fetchlist_dict is not None:
+			self.fetchlist_dict = fetchlist_dict
+		else:
+			self.fetchlist_dict = {}
+		self.distdir = distdir
+		self.guessType = guessManifestFileType
+
+	def getFullname(self):
+		""" Returns the absolute path to the Manifest file for this instance """
+		return os.path.join(self.pkgdir, "Manifest")
+	
+	def getDigests(self):
+		""" Compatibility function for old digest/manifest code, returns dict of filename:{hashfunction:hashvalue} """
+		rval = {}
+		for t in portage.const.MANIFEST2_IDENTIFIERS:
+			rval.update(self.fhashdict[t])
+		return rval
+	
+	def getTypeDigests(self, ftype):
+		""" Similar to getDigests(), but restricted to files of the given type. """
+		return self.fhashdict[ftype]
+
+	def _readManifest(self, file_path, myhashdict=None, **kwargs):
+		"""Parse a manifest.  If myhashdict is given then data will be added to it.
+		   Otherwise, a new dict will be created and returned."""
+		try:
+			fd = io.open(_unicode_encode(file_path,
+				encoding=_encodings['fs'], errors='strict'), mode='r',
+				encoding=_encodings['repo.content'], errors='replace')
+			if myhashdict is None:
+				myhashdict = {}
+			self._parseDigests(fd, myhashdict=myhashdict, **kwargs)
+			fd.close()
+			return myhashdict
+		except (OSError, IOError) as e:
+			if e.errno == errno.ENOENT:
+				raise FileNotFound(file_path)
+			else:
+				raise
+
+	def _read(self):
+		""" Parse Manifest file for this instance """
+		try:
+			self._readManifest(self.getFullname(), myhashdict=self.fhashdict)
+		except FileNotFound:
+			pass
+
+	def _parseManifestLines(self, mylines):
+		"""Parse manifest lines and return a list of manifest entries."""
+		for myline in mylines:
+			myentry = None
+			mysplit = myline.split()
+			for parser in self.parsers:
+				myentry = parser(mysplit)
+				if myentry is not None:
+					yield myentry
+					break # go to the next line
+
+	def _parseDigests(self, mylines, myhashdict=None, mytype=None):
+		"""Parse manifest entries and store the data in myhashdict.  If mytype
+		is specified, it will override the type for all parsed entries."""
+		if myhashdict is None:
+			myhashdict = {}
+		for myentry in self._parseManifestLines(mylines):
+			if mytype is None:
+				myentry_type = myentry.type
+			else:
+				myentry_type = mytype
+			myhashdict.setdefault(myentry_type, {})
+			myhashdict[myentry_type].setdefault(myentry.name, {})
+			myhashdict[myentry_type][myentry.name].update(myentry.hashes)
+		return myhashdict
+
+	def _getDigestData(self, distlist):
+		"""create a hash dict for a specific list of files"""
+		myhashdict = {}
+		for myname in distlist:
+			for mytype in self.fhashdict:
+				if myname in self.fhashdict[mytype]:
+					myhashdict.setdefault(mytype, {})
+					myhashdict[mytype].setdefault(myname, {})
+					myhashdict[mytype][myname].update(self.fhashdict[mytype][myname])
+		return myhashdict
+
+	def _createManifestEntries(self):
+		valid_hashes = set(portage.const.MANIFEST2_HASH_FUNCTIONS)
+		valid_hashes.add('size')
+		mytypes = list(self.fhashdict)
+		mytypes.sort()
+		for t in mytypes:
+			myfiles = list(self.fhashdict[t])
+			myfiles.sort()
+			for f in myfiles:
+				myentry = Manifest2Entry(
+					type=t, name=f, hashes=self.fhashdict[t][f].copy())
+				for h in list(myentry.hashes):
+					if h not in valid_hashes:
+						del myentry.hashes[h]
+				yield myentry
+
+	def checkIntegrity(self):
+		for t in self.fhashdict:
+			for f in self.fhashdict[t]:
+				if portage.const.MANIFEST2_REQUIRED_HASH not in self.fhashdict[t][f]:
+					raise MissingParameter(_("Missing %s checksum: %s %s") % (portage.const.MANIFEST2_REQUIRED_HASH, t, f))
+
+	def write(self, sign=False, force=False):
+		""" Write Manifest instance to disk, optionally signing it """
+		self.checkIntegrity()
+		try:
+			myentries = list(self._createManifestEntries())
+			update_manifest = True
+			if not force:
+				try:
+					f = io.open(_unicode_encode(self.getFullname(),
+						encoding=_encodings['fs'], errors='strict'),
+						mode='r', encoding=_encodings['repo.content'],
+						errors='replace')
+					oldentries = list(self._parseManifestLines(f))
+					f.close()
+					if len(oldentries) == len(myentries):
+						update_manifest = False
+						for i in range(len(oldentries)):
+							if oldentries[i] != myentries[i]:
+								update_manifest = True
+								break
+				except (IOError, OSError) as e:
+					if e.errno == errno.ENOENT:
+						pass
+					else:
+						raise
+			if update_manifest:
+				write_atomic(self.getFullname(),
+					"".join("%s\n" % str(myentry) for myentry in myentries))
+			if sign:
+				self.sign()
+		except (IOError, OSError) as e:
+			if e.errno == errno.EACCES:
+				raise PermissionDenied(str(e))
+			raise
+
+	def sign(self):
+		""" Sign the Manifest """
+		raise NotImplementedError()
+	
+	def validateSignature(self):
+		""" Validate signature on Manifest """
+		raise NotImplementedError()
+	
+	def addFile(self, ftype, fname, hashdict=None, ignoreMissing=False):
+		""" Add entry to Manifest optionally using hashdict to avoid recalculation of hashes """
+		if ftype == "AUX" and not fname.startswith("files/"):
+			fname = os.path.join("files", fname)
+		if not os.path.exists(self.pkgdir+fname) and not ignoreMissing:
+			raise FileNotFound(fname)
+		if ftype not in portage.const.MANIFEST2_IDENTIFIERS:
+			raise InvalidDataType(ftype)
+		if ftype == "AUX" and fname.startswith("files"):
+			fname = fname[6:]
+		self.fhashdict[ftype][fname] = {}
+		if hashdict is not None:
+			self.fhashdict[ftype][fname].update(hashdict)
+		if portage.const.MANIFEST2_REQUIRED_HASH not in self.fhashdict[ftype][fname]:
+			self.updateFileHashes(ftype, fname, checkExisting=False, ignoreMissing=ignoreMissing)
+	
+	def removeFile(self, ftype, fname):
+		""" Remove given entry from Manifest """
+		del self.fhashdict[ftype][fname]
+	
+	def hasFile(self, ftype, fname):
+		""" Return whether the Manifest contains an entry for the given type,filename pair """
+		return (fname in self.fhashdict[ftype])
+	
+	def findFile(self, fname):
+		""" Return entrytype of the given file if present in Manifest or None if not present """
+		for t in portage.const.MANIFEST2_IDENTIFIERS:
+			if fname in self.fhashdict[t]:
+				return t
+		return None
+	
+	def create(self, checkExisting=False, assumeDistHashesSometimes=False,
+		assumeDistHashesAlways=False, requiredDistfiles=[]):
+		""" Recreate this Manifest from scratch.  This will not use any
+		existing checksums unless assumeDistHashesSometimes or
+		assumeDistHashesAlways is true (assumeDistHashesSometimes will only
+		cause DIST checksums to be reused if the file doesn't exist in
+		DISTDIR).  The requiredDistfiles parameter specifies a list of
+		distfiles to raise a FileNotFound exception for (if no file or existing
+		checksums are available), and defaults to all distfiles when not
+		specified."""
+		if checkExisting:
+			self.checkAllHashes()
+		if assumeDistHashesSometimes or assumeDistHashesAlways:
+			distfilehashes = self.fhashdict["DIST"]
+		else:
+			distfilehashes = {}
+		self.__init__(self.pkgdir, self.distdir,
+			fetchlist_dict=self.fetchlist_dict, from_scratch=True,
+			manifest1_compat=False)
+		cpvlist = []
+		pn = os.path.basename(self.pkgdir.rstrip(os.path.sep))
+		cat = self._pkgdir_category()
+
+		pkgdir = self.pkgdir
+
+		for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(pkgdir):
+			break
+		for f in pkgdir_files:
+			try:
+				f = _unicode_decode(f,
+					encoding=_encodings['fs'], errors='strict')
+			except UnicodeDecodeError:
+				continue
+			if f[:1] == ".":
+				continue
+			pf = None
+			if f[-7:] == '.ebuild':
+				pf = f[:-7]
+			if pf is not None:
+				mytype = "EBUILD"
+				ps = portage.versions._pkgsplit(pf)
+				cpv = "%s/%s" % (cat, pf)
+				if not ps:
+					raise PortagePackageException(
+						_("Invalid package name: '%s'") % cpv)
+				if ps[0] != pn:
+					raise PortagePackageException(
+						_("Package name does not "
+						"match directory name: '%s'") % cpv)
+				cpvlist.append(cpv)
+			elif manifest2MiscfileFilter(f):
+				mytype = "MISC"
+			else:
+				continue
+			self.fhashdict[mytype][f] = perform_multiple_checksums(self.pkgdir+f, self.hashes)
+		recursive_files = []
+
+		pkgdir = self.pkgdir
+		cut_len = len(os.path.join(pkgdir, "files") + os.sep)
+		for parentdir, dirs, files in os.walk(os.path.join(pkgdir, "files")):
+			for f in files:
+				try:
+					f = _unicode_decode(f,
+						encoding=_encodings['fs'], errors='strict')
+				except UnicodeDecodeError:
+					continue
+				full_path = os.path.join(parentdir, f)
+				recursive_files.append(full_path[cut_len:])
+		for f in recursive_files:
+			if not manifest2AuxfileFilter(f):
+				continue
+			self.fhashdict["AUX"][f] = perform_multiple_checksums(
+				os.path.join(self.pkgdir, "files", f.lstrip(os.sep)), self.hashes)
+		distlist = set()
+		for cpv in cpvlist:
+			distlist.update(self._getCpvDistfiles(cpv))
+		if requiredDistfiles is None:
+			# This allows us to force removal of stale digests for the
+			# ebuild --force digest option (no distfiles are required).
+			requiredDistfiles = set()
+		elif len(requiredDistfiles) == 0:
+			# repoman passes in an empty list, which implies that all distfiles
+			# are required.
+			requiredDistfiles = distlist.copy()
+		required_hash_types = set()
+		required_hash_types.add("size")
+		required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
+		for f in distlist:
+			fname = os.path.join(self.distdir, f)
+			mystat = None
+			try:
+				mystat = os.stat(fname)
+			except OSError:
+				pass
+			if f in distfilehashes and \
+				not required_hash_types.difference(distfilehashes[f]) and \
+				((assumeDistHashesSometimes and mystat is None) or \
+				(assumeDistHashesAlways and mystat is None) or \
+				(assumeDistHashesAlways and mystat is not None and \
+				len(distfilehashes[f]) == len(self.hashes) and \
+				distfilehashes[f]["size"] == mystat.st_size)):
+				self.fhashdict["DIST"][f] = distfilehashes[f]
+			else:
+				try:
+					self.fhashdict["DIST"][f] = perform_multiple_checksums(fname, self.hashes)
+				except FileNotFound:
+					if f in requiredDistfiles:
+						raise
+
+	def _pkgdir_category(self):
+		return self.pkgdir.rstrip(os.sep).split(os.sep)[-2]
+
+	def _getAbsname(self, ftype, fname):
+		if ftype == "DIST":
+			absname = os.path.join(self.distdir, fname)
+		elif ftype == "AUX":
+			absname = os.path.join(self.pkgdir, "files", fname)
+		else:
+			absname = os.path.join(self.pkgdir, fname)
+		return absname	
+	
+	def checkAllHashes(self, ignoreMissingFiles=False):
+		for t in portage.const.MANIFEST2_IDENTIFIERS:
+			self.checkTypeHashes(t, ignoreMissingFiles=ignoreMissingFiles)
+	
+	def checkTypeHashes(self, idtype, ignoreMissingFiles=False):
+		for f in self.fhashdict[idtype]:
+			self.checkFileHashes(idtype, f, ignoreMissing=ignoreMissingFiles)
+	
+	def checkFileHashes(self, ftype, fname, ignoreMissing=False):
+		myhashes = self.fhashdict[ftype][fname]
+		try:
+			ok,reason = verify_all(self._getAbsname(ftype, fname), self.fhashdict[ftype][fname])
+			if not ok:
+				raise DigestException(tuple([self._getAbsname(ftype, fname)]+list(reason)))
+			return ok, reason
+		except FileNotFound as e:
+			if not ignoreMissing:
+				raise
+			return False, _("File Not Found: '%s'") % str(e)
+
+	def checkCpvHashes(self, cpv, checkDistfiles=True, onlyDistfiles=False, checkMiscfiles=False):
+		""" check the hashes for all files associated to the given cpv, include all
+		AUX files and optionally all MISC files. """
+		if not onlyDistfiles:
+			self.checkTypeHashes("AUX", ignoreMissingFiles=False)
+			if checkMiscfiles:
+				self.checkTypeHashes("MISC", ignoreMissingFiles=False)
+			ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
+			self.checkFileHashes("EBUILD", ebuildname, ignoreMissing=False)
+		if checkDistfiles or onlyDistfiles:
+			for f in self._getCpvDistfiles(cpv):
+				self.checkFileHashes("DIST", f, ignoreMissing=False)
+	
+	def _getCpvDistfiles(self, cpv):
+		""" Get a list of all DIST files associated to the given cpv """
+		return self.fetchlist_dict[cpv]
+
+	def getDistfilesSize(self, fetchlist):
+		total_bytes = 0
+		for f in fetchlist:
+			total_bytes += int(self.fhashdict["DIST"][f]["size"])
+		return total_bytes
+
+	def updateFileHashes(self, ftype, fname, checkExisting=True, ignoreMissing=True, reuseExisting=False):
+		""" Regenerate hashes for the given file """
+		if checkExisting:
+			self.checkFileHashes(ftype, fname, ignoreMissing=ignoreMissing)
+		if not ignoreMissing and fname not in self.fhashdict[ftype]:
+			raise FileNotInManifestException(fname)
+		if fname not in self.fhashdict[ftype]:
+			self.fhashdict[ftype][fname] = {}
+		myhashkeys = list(self.hashes)
+		if reuseExisting:
+			for k in [h for h in self.fhashdict[ftype][fname] if h in myhashkeys]:
+				myhashkeys.remove(k)
+		myhashes = perform_multiple_checksums(self._getAbsname(ftype, fname), myhashkeys)
+		self.fhashdict[ftype][fname].update(myhashes)
+	
+	def updateTypeHashes(self, idtype, checkExisting=False, ignoreMissingFiles=True):
+		""" Regenerate all hashes for all files of the given type """
+		for fname in self.fhashdict[idtype]:
+			self.updateFileHashes(idtype, fname, checkExisting,
+				ignoreMissing=ignoreMissingFiles)
+	
+	def updateAllHashes(self, checkExisting=False, ignoreMissingFiles=True):
+		""" Regenerate all hashes for all files in this Manifest. """
+		for idtype in portage.const.MANIFEST2_IDENTIFIERS:
+			self.updateTypeHashes(idtype, checkExisting=checkExisting,
+				ignoreMissingFiles=ignoreMissingFiles)
+
+	def updateCpvHashes(self, cpv, ignoreMissingFiles=True):
+		""" Regenerate all hashes associated to the given cpv (includes all AUX and MISC
+		files)."""
+		self.updateTypeHashes("AUX", ignoreMissingFiles=ignoreMissingFiles)
+		self.updateTypeHashes("MISC", ignoreMissingFiles=ignoreMissingFiles)
+		ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
+		self.updateFileHashes("EBUILD", ebuildname, ignoreMissing=ignoreMissingFiles)
+		for f in self._getCpvDistfiles(cpv):
+			self.updateFileHashes("DIST", f, ignoreMissing=ignoreMissingFiles)
+
+	def updateHashesGuessType(self, fname, *args, **kwargs):
+		""" Regenerate hashes for the given file (guesses the type and then
+		calls updateFileHashes)."""
+		mytype = self.guessType(fname)
+		if mytype == "AUX":
+			fname = fname[len("files" + os.sep):]
+		elif mytype is None:
+			return
+		myrealtype = self.findFile(fname)
+		if myrealtype is not None:
+			mytype = myrealtype
+		return self.updateFileHashes(mytype, fname, *args, **kwargs)
+
+	def getFileData(self, ftype, fname, key):
+		""" Return the value of a specific (type,filename,key) triple, mainly useful
+		to get the size for distfiles."""
+		return self.fhashdict[ftype][fname][key]
+
+	def getVersions(self):
+		""" Returns a list of manifest versions present in the manifest file. """
+		rVal = []
+		mfname = self.getFullname()
+		if not os.path.exists(mfname):
+			return rVal
+		myfile = io.open(_unicode_encode(mfname,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['repo.content'], errors='replace')
+		lines = myfile.readlines()
+		myfile.close()
+		for l in lines:
+			mysplit = l.split()
+			if len(mysplit) == 4 and mysplit[0] in portage.const.MANIFEST1_HASH_FUNCTIONS and 1 not in rVal:
+				rVal.append(1)
+			elif len(mysplit) > 4 and mysplit[0] in portage.const.MANIFEST2_IDENTIFIERS and ((len(mysplit) - 3) % 2) == 0 and 2 not in rVal:
+				rVal.append(2)
+		return rVal
+
+	def _catsplit(self, pkg_key):
+		"""Split a category and package, returning a list of [cat, pkg].
+		This is compatible with portage.catsplit()"""
+		return pkg_key.split("/", 1)
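+# A minimal usage sketch (illustrative paths; fetchlist_dict is only
+# needed when generating a Manifest, not for verification):
+#
+#	m = Manifest("/usr/portage/app-misc/foo", "/usr/portage/distfiles")
+#	m.checkTypeHashes("EBUILD")	# verify recorded ebuild checksums
+#	versions = m.getVersions()	# e.g. [2] for a manifest2-only file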

diff --git a/portage_with_autodep/pym/portage/news.py b/portage_with_autodep/pym/portage/news.py
new file mode 100644
index 0000000..866e5b0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/news.py
@@ -0,0 +1,351 @@
+# portage: news management code
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["NewsManager", "NewsItem", "DisplayRestriction",
+	"DisplayProfileRestriction", "DisplayKeywordRestriction",
+	"DisplayInstalledRestriction"]
+
+import io
+import logging
+import os as _os
+import re
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.util import apply_secpass_permissions, ensure_dirs, \
+	grabfile, normalize_path, write_atomic, writemsg_level
+from portage.data import portage_gid
+from portage.dep import isvalidatom
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.exception import InvalidLocation, OperationNotPermitted, \
+	PermissionDenied
+
+class NewsManager(object):
+	"""
+	This object manages GLEP 42 style news items.  It will cache news items
+	that have previously shown up and notify users when there are relevant,
+	unread news items that apply to their installed packages.
+	
+	Creating a news manager requires:
+	root - typically ${ROOT} see man make.conf and man emerge for details
+	news_path - path to news items; usually $REPODIR/metadata/news
+	unread_path - path to the news.repoid.unread file; this helps us track news items
+	
+	"""
+
+	def __init__(self, portdb, vardb, news_path, unread_path, language_id='en'):
+		self.news_path = news_path
+		self.unread_path = unread_path
+		self.target_root = vardb.root
+		self.language_id = language_id
+		self.config = vardb.settings
+		self.vdb = vardb
+		self.portdb = portdb
+
+		# GLEP 42 says:
+		#   All news item related files should be root owned and in the
+		#   portage group with the group write (and, for directories,
+		#   execute) bits set. News files should be world readable.
+		self._uid = int(self.config["PORTAGE_INST_UID"])
+		self._gid = portage_gid
+		self._file_mode = 0o0064
+		self._dir_mode  = 0o0074
+		self._mode_mask = 0o0000
+
+		portdir = portdb.porttree_root
+		profiles_base = os.path.join(portdir, 'profiles') + os.path.sep
+		profile_path = None
+		if portdb.settings.profile_path:
+			profile_path = normalize_path(
+				os.path.realpath(portdb.settings.profile_path))
+			if profile_path.startswith(profiles_base):
+				profile_path = profile_path[len(profiles_base):]
+		self._profile_path = profile_path
+
+	def _unread_filename(self, repoid):
+		return os.path.join(self.unread_path, 'news-%s.unread' % repoid)
+
+	def _skip_filename(self, repoid):
+		return os.path.join(self.unread_path, 'news-%s.skip' % repoid)
+
+	def _news_dir(self, repoid):
+		repo_path = self.portdb.getRepositoryPath(repoid)
+		if repo_path is None:
+			raise AssertionError(_("Invalid repoID: %s") % repoid)
+		return os.path.join(repo_path, self.news_path)
+
+	def updateItems(self, repoid):
+		"""
+		Figure out which news items from NEWS_PATH are both unread and relevant to
+		the user (according to the GLEP 42 standards of relevancy).  Then add these
+		items into the news.repoid.unread file.
+		"""
+
+		# Ensure that the unread path exists and is writable.
+
+		try:
+			ensure_dirs(self.unread_path, uid=self._uid, gid=self._gid,
+				mode=self._dir_mode, mask=self._mode_mask)
+		except (OperationNotPermitted, PermissionDenied):
+			return
+
+		if not os.access(self.unread_path, os.W_OK):
+			return
+
+		news_dir = self._news_dir(repoid)
+		try:
+			news = _os.listdir(_unicode_encode(news_dir,
+				encoding=_encodings['fs'], errors='strict'))
+		except OSError:
+			return
+
+		skip_filename = self._skip_filename(repoid)
+		unread_filename = self._unread_filename(repoid)
+		unread_lock = lockfile(unread_filename, wantnewlockfile=1)
+		try:
+			try:
+				unread = set(grabfile(unread_filename))
+				unread_orig = unread.copy()
+				skip = set(grabfile(skip_filename))
+				skip_orig = skip.copy()
+			except PermissionDenied:
+				return
+
+			updates = []
+			for itemid in news:
+				try:
+					itemid = _unicode_decode(itemid,
+						encoding=_encodings['fs'], errors='strict')
+				except UnicodeDecodeError:
+					itemid = _unicode_decode(itemid,
+						encoding=_encodings['fs'], errors='replace')
+					writemsg_level(
+						_("!!! Invalid encoding in news item name: '%s'\n") % \
+						itemid, level=logging.ERROR, noiselevel=-1)
+					continue
+
+				if itemid in skip:
+					continue
+				filename = os.path.join(news_dir, itemid,
+					itemid + "." + self.language_id + ".txt")
+				if not os.path.isfile(filename):
+					continue
+				item = NewsItem(filename, itemid)
+				if not item.isValid():
+					continue
+				if item.isRelevant(profile=self._profile_path,
+					config=self.config, vardb=self.vdb):
+					unread.add(item.name)
+					skip.add(item.name)
+
+			if unread != unread_orig:
+				write_atomic(unread_filename,
+					"".join("%s\n" % x for x in sorted(unread)))
+				apply_secpass_permissions(unread_filename,
+					uid=self._uid, gid=self._gid,
+					mode=self._file_mode, mask=self._mode_mask)
+
+			if skip != skip_orig:
+				write_atomic(skip_filename,
+					"".join("%s\n" % x for x in sorted(skip)))
+				apply_secpass_permissions(skip_filename,
+					uid=self._uid, gid=self._gid,
+					mode=self._file_mode, mask=self._mode_mask)
+
+		finally:
+			unlockfile(unread_lock)
+
+	def getUnreadItems(self, repoid, update=False):
+		"""
+		Determine if there are unread relevant items in news.repoid.unread.
+		If there are unread items return their number.
+		If update is specified, updateNewsItems( repoid ) will be called to
+		check for new items.
+		"""
+
+		if update:
+			self.updateItems(repoid)
+
+		unread_filename = self._unread_filename(repoid)
+		unread_lock = None
+		try:
+			unread_lock = lockfile(unread_filename, wantnewlockfile=1)
+		except (InvalidLocation, OperationNotPermitted, PermissionDenied):
+			pass
+		try:
+			try:
+				return len(grabfile(unread_filename))
+			except PermissionDenied:
+				return 0
+		finally:
+			if unread_lock:
+				unlockfile(unread_lock)
+
+_formatRE = re.compile("News-Item-Format:\s*([^\s]*)\s*$")
+_installedRE = re.compile("Display-If-Installed:(.*)\n")
+_profileRE = re.compile("Display-If-Profile:(.*)\n")
+_keywordRE = re.compile("Display-If-Keyword:(.*)\n")
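+# These regexes match GLEP 42 news item headers such as (illustrative
+# values):
+#   News-Item-Format: 1.0
+#   Display-If-Installed: <sys-apps/portage-2.2
+#   Display-If-Profile: default/linux/x86/10.0
+#   Display-If-Keyword: amd64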
+
+class NewsItem(object):
+	"""
+	This class encapsulates a GLEP 42 style news item.
+	Its purpose is to wrap parsing of these news items such that portage can determine
+	whether a particular item is 'relevant' or not.  This requires parsing the item
+	and determining 'relevancy restrictions'; these include "Display if Installed" or
+	"display if arch: x86" and so forth.
+
+	Creation of a news item involves passing in the path to the particular news item.
+	"""
+
+	def __init__(self, path, name):
+		"""
+		For a given news item, we only want it if its path is a file.
+		"""
+		self.path = path
+		self.name = name
+		self._parsed = False
+		self._valid = True
+
+	def isRelevant(self, vardb, config, profile):
+		"""
+		This function takes a dict of keyword arguments; one should pass in any
+		objects needed to do lookups (like what keywords we are on, what profile,
+		and a vardb so we can look at installed packages).
+		Each restriction will pluck out the items that are required for it to match
+		or raise a ValueError exception if the required object is not present.
+
+		Restrictions of the form Display-X are OR'd with like-restrictions;
+		otherwise restrictions are AND'd.  any_match is the ORing and
+		all_match is the ANDing.
+		"""
+
+		if not self._parsed:
+			self.parse()
+
+		if not len(self.restrictions):
+			return True
+
+		kwargs = \
+			{ 'vardb' : vardb,
+				'config' : config,
+				'profile' : profile }
+
+		all_match = True
+		for values in self.restrictions.values():
+			any_match = False
+			for restriction in values:
+				if restriction.checkRestriction(**kwargs):
+					any_match = True
+			if not any_match:
+				all_match = False
+
+		return all_match
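+	# For example (illustrative): two Display-If-Keyword restrictions
+	# ("amd64", "x86") form a single OR group, so either keyword makes
+	# that group match; a Display-If-Installed restriction forms a
+	# second group that must match as well, since groups are AND'd.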
+
+	def isValid(self):
+		if not self._parsed:
+			self.parse()
+		return self._valid
+
+	def parse(self):
+		lines = io.open(_unicode_encode(self.path,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['content'], errors='replace'
+			).readlines()
+		self.restrictions = {}
+		invalids = []
+		for i, line in enumerate(lines):
+			# Optimization to avoid regex matching on lines that
+			# will never match
+			format_match = _formatRE.match(line)
+			if format_match is not None and format_match.group(1) != '1.0':
+				invalids.append((i + 1, line.rstrip('\n')))
+				break
+			if not line.startswith('D'):
+				continue
+			restricts = {  _installedRE : DisplayInstalledRestriction,
+					_profileRE : DisplayProfileRestriction,
+					_keywordRE : DisplayKeywordRestriction }
+			for regex, restriction in restricts.items():
+				match = regex.match(line)
+				if match:
+					restrict = restriction(match.groups()[0].strip())
+					if not restrict.isValid():
+						invalids.append((i + 1, line.rstrip("\n")))
+					else:
+						self.restrictions.setdefault(
+							id(restriction), []).append(restrict)
+					continue
+		if invalids:
+			self._valid = False
+			msg = []
+			msg.append(_("Invalid news item: %s") % (self.path,))
+			for lineno, line in invalids:
+				msg.append(_("  line %d: %s") % (lineno, line))
+			writemsg_level("".join("!!! %s\n" % x for x in msg),
+				level=logging.ERROR, noiselevel=-1)
+
+		self._parsed = True
+
+class DisplayRestriction(object):
+	"""
+	A base restriction object representing a restriction of display.
+	News items may have 'relevancy restrictions' preventing them from
+	being relevant.  In this case we need a manner of figuring out if
+	a particular item is relevant or not.  If any of its restrictions
+	are met, then it is displayed.
+	"""
+
+	def isValid(self):
+		return True
+
+	def checkRestriction(self, **kwargs):
+		raise NotImplementedError('Derived class should override this method')
+
+class DisplayProfileRestriction(DisplayRestriction):
+	"""
+	A profile restriction where a particular item shall only be displayed
+	if the user is running a specific profile.
+	"""
+
+	def __init__(self, profile):
+		self.profile = profile
+
+	def checkRestriction(self, **kwargs):
+		if self.profile == kwargs['profile']:
+			return True
+		return False
+
+class DisplayKeywordRestriction(DisplayRestriction):
+	"""
+	A keyword restriction where a particular item shall only be displayed
+	if the user is running a specific keyword.
+	"""
+
+	def __init__(self, keyword):
+		self.keyword = keyword
+
+	def checkRestriction(self, **kwargs):
+		if kwargs['config']['ARCH'] == self.keyword:
+			return True
+		return False
+
+class DisplayInstalledRestriction(DisplayRestriction):
+	"""
+	An Installation restriction where a particular item shall only be displayed
+	if the user has that item installed.
+	"""
+
+	def __init__(self, atom):
+		self.atom = atom
+
+	def isValid(self):
+		return isvalidatom(self.atom)
+
+	def checkRestriction(self, **kwargs):
+		vdb = kwargs['vardb']
+		if vdb.match(self.atom):
+			return True
+		return False

diff --git a/portage_with_autodep/pym/portage/output.py b/portage_with_autodep/pym/portage/output.py
new file mode 100644
index 0000000..0e8245f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/output.py
@@ -0,0 +1,794 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__docformat__ = "epytext"
+
+import errno
+import io
+import formatter
+import re
+import sys
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.util:writemsg',
+)
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import _unicode_decode
+from portage.const import COLOR_MAP_FILE
+from portage.exception import CommandNotFound, FileNotFound, \
+	ParseError, PermissionDenied, PortageException
+from portage.localization import _
+
+havecolor=1
+dotitles=1
+
+_styles = {}
+"""Maps style class to tuple of attribute names."""
+
+codes = {}
+"""Maps attribute name to ansi code."""
+
+esc_seq = "\x1b["
+
+codes["normal"]       =  esc_seq + "0m"
+codes["reset"]        =  esc_seq + "39;49;00m"
+
+codes["bold"]         =  esc_seq + "01m"
+codes["faint"]        =  esc_seq + "02m"
+codes["standout"]     =  esc_seq + "03m"
+codes["underline"]    =  esc_seq + "04m"
+codes["blink"]        =  esc_seq + "05m"
+codes["overline"]     =  esc_seq + "06m"
+codes["reverse"]      =  esc_seq + "07m"
+codes["invisible"]    =  esc_seq + "08m"
+
+codes["no-attr"]      = esc_seq + "22m"
+codes["no-standout"]  = esc_seq + "23m"
+codes["no-underline"] = esc_seq + "24m"
+codes["no-blink"]     = esc_seq + "25m"
+codes["no-overline"]  = esc_seq + "26m"
+codes["no-reverse"]   = esc_seq + "27m"
+
+codes["bg_black"]      = esc_seq + "40m"
+codes["bg_darkred"]    = esc_seq + "41m"
+codes["bg_darkgreen"]  = esc_seq + "42m"
+codes["bg_brown"]      = esc_seq + "43m"
+codes["bg_darkblue"]   = esc_seq + "44m"
+codes["bg_purple"]     = esc_seq + "45m"
+codes["bg_teal"]       = esc_seq + "46m"
+codes["bg_lightgray"]  = esc_seq + "47m"
+codes["bg_default"]    = esc_seq + "49m"
+codes["bg_darkyellow"] = codes["bg_brown"]
+
+def color(fg, bg="default", attr=["normal"]):
+	mystr = codes[fg]
+	for x in [bg]+attr:
+		mystr += codes[x]
+	return mystr
+
+
+ansi_codes = []
+for x in range(30, 38):
+	ansi_codes.append("%im" % x)
+	ansi_codes.append("%i;01m" % x)
+
+rgb_ansi_colors = ['0x000000', '0x555555', '0xAA0000', '0xFF5555', '0x00AA00',
+	'0x55FF55', '0xAA5500', '0xFFFF55', '0x0000AA', '0x5555FF', '0xAA00AA',
+	'0xFF55FF', '0x00AAAA', '0x55FFFF', '0xAAAAAA', '0xFFFFFF']
+
+for x in range(len(rgb_ansi_colors)):
+	codes[rgb_ansi_colors[x]] = esc_seq + ansi_codes[x]
+
+del x
+
+codes["black"]     = codes["0x000000"]
+codes["darkgray"]  = codes["0x555555"]
+
+codes["red"]       = codes["0xFF5555"]
+codes["darkred"]   = codes["0xAA0000"]
+
+codes["green"]     = codes["0x55FF55"]
+codes["darkgreen"] = codes["0x00AA00"]
+
+codes["yellow"]    = codes["0xFFFF55"]
+codes["brown"]     = codes["0xAA5500"]
+
+codes["blue"]      = codes["0x5555FF"]
+codes["darkblue"]  = codes["0x0000AA"]
+
+codes["fuchsia"]   = codes["0xFF55FF"]
+codes["purple"]    = codes["0xAA00AA"]
+
+codes["turquoise"] = codes["0x55FFFF"]
+codes["teal"]      = codes["0x00AAAA"]
+
+codes["white"]     = codes["0xFFFFFF"]
+codes["lightgray"] = codes["0xAAAAAA"]
+
+codes["darkteal"]   = codes["turquoise"]
+# Some terminals have darkyellow instead of brown.
+codes["0xAAAA00"]   = codes["brown"]
+codes["darkyellow"] = codes["0xAAAA00"]
+
+
+
+# Colors from /etc/init.d/functions.sh
+_styles["NORMAL"]     = ( "normal", )
+_styles["GOOD"]       = ( "green", )
+_styles["WARN"]       = ( "yellow", )
+_styles["BAD"]        = ( "red", )
+_styles["HILITE"]     = ( "teal", )
+_styles["BRACKET"]    = ( "blue", )
+
+# Portage functions
+_styles["INFORM"]                  = ( "darkgreen", )
+_styles["UNMERGE_WARN"]            = ( "red", )
+_styles["SECURITY_WARN"]           = ( "red", )
+_styles["MERGE_LIST_PROGRESS"]     = ( "yellow", )
+_styles["PKG_BLOCKER"]             = ( "red", )
+_styles["PKG_BLOCKER_SATISFIED"]   = ( "darkblue", )
+_styles["PKG_MERGE"]               = ( "darkgreen", )
+_styles["PKG_MERGE_SYSTEM"]        = ( "darkgreen", )
+_styles["PKG_MERGE_WORLD"]         = ( "green", )
+_styles["PKG_BINARY_MERGE"]        = ( "purple", )
+_styles["PKG_BINARY_MERGE_SYSTEM"] = ( "purple", )
+_styles["PKG_BINARY_MERGE_WORLD"]  = ( "fuchsia", )
+_styles["PKG_UNINSTALL"]           = ( "red", )
+_styles["PKG_NOMERGE"]             = ( "darkblue", )
+_styles["PKG_NOMERGE_SYSTEM"]      = ( "darkblue", )
+_styles["PKG_NOMERGE_WORLD"]       = ( "blue", )
+_styles["PROMPT_CHOICE_DEFAULT"]   = ( "green", )
+_styles["PROMPT_CHOICE_OTHER"]     = ( "red", )
+
+def _parse_color_map(config_root='/', onerror=None):
+	"""
+	Parse /etc/portage/color.map and return a dict of error codes.
+
+	@param onerror: an optional callback to handle any ParseError that would
+		otherwise be raised
+	@type onerror: callable
+	@rtype: dict
+	@return: a dictionary mapping color classes to color codes
+	"""
+	global codes, _styles
+	myfile = os.path.join(config_root, COLOR_MAP_FILE)
+	ansi_code_pattern = re.compile("^[0-9;]*m$") 
+	quotes = '\'"'
+	def strip_quotes(token):
+		if token[0] in quotes and token[0] == token[-1]:
+			token = token[1:-1]
+		return token
+	try:
+		lineno=0
+		for line in io.open(_unicode_encode(myfile,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['content'], errors='replace'):
+			lineno += 1
+
+			commenter_pos = line.find("#")
+			if commenter_pos != -1:
+				line = line[:commenter_pos]
+			line = line.strip()
+			
+			if len(line) == 0:
+				continue
+			
+			split_line = line.split("=")
+			if len(split_line) != 2:
+				e = ParseError(_("'%s', line %s: expected exactly one occurrence of '=' operator") % \
+					(myfile, lineno))
+				if onerror:
+					onerror(e)
+				else:
+					raise e
+				continue
+
+			k = strip_quotes(split_line[0].strip())
+			v = strip_quotes(split_line[1].strip())
+			if not k in _styles and not k in codes:
+				e = ParseError(_("'%s', line %s: Unknown variable: '%s'") % \
+					(myfile, lineno, k))
+				if onerror:
+					onerror(e)
+				else:
+					raise e
+				continue
+			if ansi_code_pattern.match(v):
+				if k in _styles:
+					_styles[k] = ( esc_seq + v, )
+				elif k in codes:
+					codes[k] = esc_seq + v
+			else:
+				code_list = []
+				for x in v.split():
+					if x in codes:
+						if k in _styles:
+							code_list.append(x)
+						elif k in codes:
+							code_list.append(codes[x])
+					else:
+						e = ParseError(_("'%s', line %s: Undefined: '%s'") % \
+							(myfile, lineno, x))
+						if onerror:
+							onerror(e)
+						else:
+							raise e
+				if k in _styles:
+					_styles[k] = tuple(code_list)
+				elif k in codes:
+					codes[k] = "".join(code_list)
+	except (IOError, OSError) as e:
+		if e.errno == errno.ENOENT:
+			raise FileNotFound(myfile)
+		elif e.errno == errno.EACCES:
+			raise PermissionDenied(myfile)
+		raise
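+# Sample color.map lines this parser accepts (illustrative values):
+#	GOOD = "purple"		# map a style class to a color name
+#	PKG_MERGE = "0xFF5555"	# or to one of the RGB codes defined above
+#	HILITE = "36;01m"	# or to a raw ansi code fragment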
+
+def nc_len(mystr):
+	tmp = re.sub(esc_seq + "^m]+m", "", mystr)
+	return len(tmp)
+
+_legal_terms_re = re.compile(r'^(xterm|xterm-color|Eterm|aterm|rxvt|screen|kterm|rxvt-unicode|gnome|interix)')
+_disable_xtermTitle = None
+_max_xtermTitle_len = 253
+
+def xtermTitle(mystr, raw=False):
+	global _disable_xtermTitle
+	if _disable_xtermTitle is None:
+		_disable_xtermTitle = not (sys.stderr.isatty() and \
+		'TERM' in os.environ and \
+		_legal_terms_re.match(os.environ['TERM']) is not None)
+
+	if dotitles and not _disable_xtermTitle:
+		# If the title string is too big then the terminal can
+		# misbehave. Therefore, truncate it if it's too big.
+		if len(mystr) > _max_xtermTitle_len:
+			mystr = mystr[:_max_xtermTitle_len]
+		if not raw:
+			mystr = '\x1b]0;%s\x07' % mystr
+
+		# avoid potential UnicodeEncodeError
+		mystr = _unicode_encode(mystr,
+			encoding=_encodings['stdio'], errors='backslashreplace')
+		f = sys.stderr
+		if sys.hexversion >= 0x3000000:
+			f = f.buffer
+		f.write(mystr)
+		f.flush()
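+# For example, xtermTitle("emerge foo") writes the escape sequence
+# "\x1b]0;emerge foo\x07" to stderr on supported terminals.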
+
+default_xterm_title = None
+
+def xtermTitleReset():
+	global default_xterm_title
+	if default_xterm_title is None:
+		prompt_command = os.environ.get('PROMPT_COMMAND')
+		if prompt_command == "":
+			default_xterm_title = ""
+		elif prompt_command is not None:
+			if dotitles and \
+				'TERM' in os.environ and \
+				_legal_terms_re.match(os.environ['TERM']) is not None and \
+				sys.stderr.isatty():
+				from portage.process import find_binary, spawn
+				shell = os.environ.get("SHELL")
+				if not shell or not os.access(shell, os.EX_OK):
+					shell = find_binary("sh")
+				if shell:
+					spawn([shell, "-c", prompt_command], env=os.environ,
+						fd_pipes={0:sys.stdin.fileno(),1:sys.stderr.fileno(),
+						2:sys.stderr.fileno()})
+				else:
+					os.system(prompt_command)
+			return
+		else:
+			pwd = os.environ.get('PWD','')
+			home = os.environ.get('HOME', '')
+			if home != '' and pwd.startswith(home):
+				pwd = '~' + pwd[len(home):]
+			default_xterm_title = '\x1b]0;%s@%s:%s\x07' % (
+				os.environ.get('LOGNAME', ''),
+				os.environ.get('HOSTNAME', '').split('.', 1)[0], pwd)
+	xtermTitle(default_xterm_title, raw=True)
+
+def notitles():
+	"turn off title setting"
+	global dotitles
+	dotitles=0
+
+def nocolor():
+	"turn off colorization"
+	global havecolor
+	havecolor=0
+
+def resetColor():
+	return codes["reset"]
+
+def style_to_ansi_code(style):
+	"""
+	@param style: A style name
+	@type style: String
+	@rtype: String
+	@return: A string containing one or more ansi escape codes that are
+		used to render the given style.
+	"""
+	ret = ""
+	for attr_name in _styles[style]:
+		# allow stuff that has found its way through ansi_code_pattern
+		ret += codes.get(attr_name, attr_name)
+	return ret
+
+def colorize(color_key, text):
+	global havecolor
+	if havecolor:
+		if color_key in codes:
+			return codes[color_key] + text + codes["reset"]
+		elif color_key in _styles:
+			return style_to_ansi_code(color_key) + text + codes["reset"]
+		else:
+			return text
+	else:
+		return text
+
+compat_functions_colors = ["bold","white","teal","turquoise","darkteal",
+	"fuchsia","purple","blue","darkblue","green","darkgreen","yellow",
+	"brown","darkyellow","red","darkred"]
+
+def create_color_func(color_key):
+	def derived_func(*args):
+		newargs = list(args)
+		newargs.insert(0, color_key)
+		return colorize(*newargs)
+	return derived_func
+
+for c in compat_functions_colors:
+	globals()[c] = create_color_func(c)
+
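+# A hedged usage sketch (assumes this module is importable as
+# portage.output; green() and red() are among the helpers generated by the
+# loop above):
+#
+#     from portage.output import green, red
+#     sys.stdout.write(green("ok") + " " + red("failed") + "\n")
+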
+class ConsoleStyleFile(object):
+	"""
+	A file-like object that behaves something like
+	the colorize() function. Style identifiers
+	passed in via the new_styles() method will be used to
+	apply console codes to output.
+	"""
+	def __init__(self, f):
+		self._file = f
+		self._styles = None
+		self.write_listener = None
+
+	def new_styles(self, styles):
+		self._styles = styles
+
+	def write(self, s):
+		# In python-2.6, DumbWriter.send_line_break() can write
+		# non-unicode '\n' which fails with TypeError if self._file
+		# is a text stream such as io.StringIO. Therefore, make sure
+		# input is converted to unicode when necessary.
+		s = _unicode_decode(s)
+		global havecolor
+		if havecolor and self._styles:
+			styled_s = []
+			for style in self._styles:
+				styled_s.append(style_to_ansi_code(style))
+			styled_s.append(s)
+			styled_s.append(codes["reset"])
+			self._write(self._file, "".join(styled_s))
+		else:
+			self._write(self._file, s)
+		if self.write_listener:
+			self._write(self.write_listener, s)
+
+	def _write(self, f, s):
+		# avoid potential UnicodeEncodeError
+		if f in (sys.stdout, sys.stderr):
+			s = _unicode_encode(s,
+				encoding=_encodings['stdio'], errors='backslashreplace')
+			if sys.hexversion >= 0x3000000:
+				f = f.buffer
+		f.write(s)
+
+	def writelines(self, lines):
+		for s in lines:
+			self.write(s)
+
+	def flush(self):
+		self._file.flush()
+
+	def close(self):
+		self._file.close()
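+
+# Usage sketch (hedged; "GOOD" is one of the predefined style names):
+#
+#     styled = ConsoleStyleFile(sys.stdout)
+#     styled.new_styles(("GOOD",))
+#     styled.write("done\n")  # styled on the tty; a write_listener, if
+#                             # set, would receive the plain text as well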
+
+class StyleWriter(formatter.DumbWriter):
+	"""
+	This is just a DumbWriter with a hook in the new_styles() method
+	that passes a styles tuple as a single argument to a callable
+	style_listener attribute.
+	"""
+	def __init__(self, **kwargs):
+		formatter.DumbWriter.__init__(self, **kwargs)
+		self.style_listener = None
+
+	def new_styles(self, styles):
+		formatter.DumbWriter.new_styles(self, styles)
+		if self.style_listener:
+			self.style_listener(styles)
+
+def get_term_size():
+	"""
+	Get the number of lines and columns of the tty that is connected to
+	stdout.  Returns a tuple of (lines, columns) or (-1, -1) if an error
+	occurs. The curses module is used if available, otherwise the output of
+	`stty size` is parsed.
+	"""
+	if not sys.stdout.isatty():
+		return -1, -1
+	try:
+		import curses
+		try:
+			curses.setupterm()
+			return curses.tigetnum('lines'), curses.tigetnum('cols')
+		except curses.error:
+			pass
+	except ImportError:
+		pass
+	st, out = portage.subprocess_getstatusoutput('stty size')
+	if st == os.EX_OK:
+		out = out.split()
+		if len(out) == 2:
+			try:
+				return int(out[0]), int(out[1])
+			except ValueError:
+				pass
+	return -1, -1
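+
+# Sketch of the contract: callers treat (-1, -1) as "size unknown" and
+# fall back to their own default, as EOutput does below:
+#
+#     lines, columns = get_term_size()
+#     if columns <= 0:
+#         columns = 80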
+
+def set_term_size(lines, columns, fd):
+	"""
+	Set the number of lines and columns for the tty that is connected to fd.
+	For portability, this simply calls `stty rows $lines columns $columns`.
+	"""
+	from portage.process import spawn
+	cmd = ["stty", "rows", str(lines), "columns", str(columns)]
+	try:
+		spawn(cmd, env=os.environ, fd_pipes={0:fd})
+	except CommandNotFound:
+		writemsg(_("portage: stty: command not found\n"), noiselevel=-1)
+
+class EOutput(object):
+	"""
+	Performs fancy terminal formatting for status and informational messages.
+
+	The provided methods produce identical terminal output to the eponymous
+	functions in the shell script C{/sbin/functions.sh} and also accept
+	identical parameters.
+
+	This is not currently a drop-in replacement however, as the output-related
+	functions in C{/sbin/functions.sh} are oriented for use mainly by system
+	init scripts and ebuilds and their output can be customized via certain
+	C{RC_*} environment variables (see C{/etc/conf.d/rc}). B{EOutput} is not
+	customizable in this manner since it's intended for more general uses.
+	Likewise, no logging is provided.
+
+	@ivar quiet: Specifies if output should be silenced.
+	@type quiet: BooleanType
+	@ivar term_columns: Width of terminal in characters. Defaults to the value
+		specified by the shell's C{COLUMNS} variable, else to the queried tty
+		size, else to C{80}.
+	@type term_columns: IntType
+	"""
+
+	def __init__(self, quiet=False):
+		self.__last_e_cmd = ""
+		self.__last_e_len = 0
+		self.quiet = quiet
+		lines, columns = get_term_size()
+		if columns <= 0:
+			columns = 80
+		self.term_columns = columns
+		sys.stdout.flush()
+		sys.stderr.flush()
+
+	def _write(self, f, s):
+		# avoid potential UnicodeEncodeError
+		writemsg(s, noiselevel=-1, fd=f)
+
+	def __eend(self, caller, errno, msg):
+		if errno == 0:
+			status_brackets = colorize("BRACKET", "[ ") + colorize("GOOD", "ok") + colorize("BRACKET", " ]")
+		else:
+			status_brackets = colorize("BRACKET", "[ ") + colorize("BAD", "!!") + colorize("BRACKET", " ]")
+			if msg:
+				if caller == "eend":
+					self.eerror(msg[0])
+				elif caller == "ewend":
+					self.ewarn(msg[0])
+		if self.__last_e_cmd != "ebegin":
+			self.__last_e_len = 0
+		if not self.quiet:
+			out = sys.stdout
+			self._write(out,
+				"%*s%s\n" % ((self.term_columns - self.__last_e_len - 7),
+				"", status_brackets))
+
+	def ebegin(self, msg):
+		"""
+		Shows a message indicating the start of a process.
+
+		@param msg: A very brief (shorter than one line) description of the
+			starting process.
+		@type msg: StringType
+		"""
+		msg += " ..."
+		if not self.quiet:
+			self.einfon(msg)
+		self.__last_e_len = len(msg) + 3
+		self.__last_e_cmd = "ebegin"
+
+	def eend(self, errno, *msg):
+		"""
+		Indicates the completion of a process, optionally displaying a message
+		via L{eerror} if the process's exit status isn't C{0}.
+
+		@param errno: A standard UNIX C{errno} code returned by processes upon
+			exit.
+		@type errno: IntType
+		@param msg: I{(optional)} An error message, typically a standard UNIX
+			error string corresponding to C{errno}.
+		@type msg: StringType
+		"""
+		if not self.quiet:
+			self.__eend("eend", errno, msg)
+		self.__last_e_cmd = "eend"
+
+	def eerror(self, msg):
+		"""
+		Shows an error message.
+
+		@param msg: A very brief (shorter than one line) error message.
+		@type msg: StringType
+		"""
+		out = sys.stderr
+		if not self.quiet:
+			if self.__last_e_cmd == "ebegin":
+				self._write(out, "\n")
+			self._write(out, colorize("BAD", " * ") + msg + "\n")
+		self.__last_e_cmd = "eerror"
+
+	def einfo(self, msg):
+		"""
+		Shows an informative message terminated with a newline.
+
+		@param msg: A very brief (shorter than one line) informative message.
+		@type msg: StringType
+		"""
+		out = sys.stdout
+		if not self.quiet:
+			if self.__last_e_cmd == "ebegin":
+				self._write(out, "\n")
+			self._write(out, colorize("GOOD", " * ") + msg + "\n")
+		self.__last_e_cmd = "einfo"
+
+	def einfon(self, msg):
+		"""
+		Shows an informative message terminated without a newline.
+
+		@param msg: A very brief (shorter than one line) informative message.
+		@type msg: StringType
+		"""
+		out = sys.stdout
+		if not self.quiet:
+			if self.__last_e_cmd == "ebegin":
+				self._write(out, "\n")
+			self._write(out, colorize("GOOD", " * ") + msg)
+		self.__last_e_cmd = "einfon"
+
+	def ewarn(self, msg):
+		"""
+		Shows a warning message.
+
+		@param msg: A very brief (shorter than one line) warning message.
+		@type msg: StringType
+		"""
+		out = sys.stderr
+		if not self.quiet:
+			if self.__last_e_cmd == "ebegin":
+				self._write(out, "\n")
+			self._write(out, colorize("WARN", " * ") + msg + "\n")
+		self.__last_e_cmd = "ewarn"
+
+	def ewend(self, errno, *msg):
+		"""
+		Indicates the completion of a process, optionally displaying a message
+		via L{ewarn} if the process's exit status isn't C{0}.
+
+		@param errno: A standard UNIX C{errno} code returned by processes upon
+			exit.
+		@type errno: IntType
+		@param msg: I{(optional)} A warning message, typically a standard UNIX
+			error string corresponding to C{errno}.
+		@type msg: StringType
+		"""
+		if not self.quiet:
+			self.__eend("ewend", errno, msg)
+		self.__last_e_cmd = "ewend"
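+
+# A short usage sketch (output is approximate and depends on color codes):
+#
+#     out = EOutput()
+#     out.ebegin("Checking license groups")
+#     out.eend(0)             # right-aligns "[ ok ]" on the ebegin line
+#     out.ewarn("QA notice")  # " * QA notice" on stderr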
+
+class ProgressBar(object):
+	"""The interface is copied from the ProgressBar class in the EasyDialogs
+	module (which is Mac only)."""
+	def __init__(self, title=None, maxval=0, label=None):
+		self._title = title
+		self._maxval = maxval
+		self._label = label
+		self._curval = 0
+
+	@property
+	def curval(self):
+		"""
+		The current value (of type integer or long integer) of the progress
+		bar. The normal access methods coerce curval between 0 and maxval. This
+		attribute should not be altered directly.
+		"""
+		return self._curval
+
+	@property
+	def maxval(self):
+		"""
+		The maximum value (of type integer or long integer) of the progress
+		bar; the progress bar (thermometer style) is full when curval equals
+		maxval. If maxval is 0, the bar will be indeterminate (barber-pole).
+		This attribute should not be altered directly.
+		"""
+		return self._maxval
+
+	def title(self, newstr):
+		"""Sets the text in the title bar of the progress dialog to newstr."""
+		self._title = newstr
+
+	def label(self, newstr):
+		"""Sets the text in the progress box of the progress dialog to newstr."""
+		self._label = newstr
+
+	def set(self, value, maxval=None):
+		"""
+		Sets the progress bar's curval to value, and also maxval if the
+		latter is provided. value is first coerced between 0 and maxval. The
+		thermometer bar is updated to reflect the changes, including a change
+		from indeterminate to determinate or vice versa.
+		"""
+		if maxval is not None:
+			self._maxval = maxval
+		if value < 0:
+			value = 0
+		elif value > self._maxval:
+			value = self._maxval
+		self._curval = value
+
+	def inc(self, n=1):
+		"""Increments the progress bar's curval by n, or by 1 if n is not
+		provided. (Note that n may be negative, in which case the effect is a
+		decrement.) The progress bar is updated to reflect the change. If the
+		bar is indeterminate, this causes one ``spin'' of the barber pole. The
+		resulting curval is coerced between 0 and maxval if incrementing causes
+		it to fall outside this range.
+		"""
+		self.set(self._curval+n)
+
+class TermProgressBar(ProgressBar):
+	"""A tty progress bar similar to wget's."""
+	def __init__(self, **kwargs):
+		ProgressBar.__init__(self, **kwargs)
+		lines, self.term_columns = get_term_size()
+		self.file = sys.stdout
+		self._min_columns = 11
+		self._max_columns = 80
+		# for indeterminate mode, ranges from 0.0 to 1.0
+		self._position = 0.0
+
+	def set(self, value, maxval=None):
+		ProgressBar.set(self, value, maxval=maxval)
+		self._display_image(self._create_image())
+
+	def _display_image(self, image):
+		self.file.write('\r')
+		self.file.write(image)
+		self.file.flush()
+
+	def _create_image(self):
+		cols = self.term_columns
+		if cols > self._max_columns:
+			cols = self._max_columns
+		min_columns = self._min_columns
+		curval = self._curval
+		maxval = self._maxval
+		position = self._position
+		percentage_str_width = 4
+		square_brackets_width = 2
+		if cols < percentage_str_width:
+			return ""
+		bar_space = cols - percentage_str_width - square_brackets_width
+		if maxval == 0:
+			max_bar_width = bar_space-3
+			image = "    "
+			if cols < min_columns:
+				return image
+			if position <= 0.5:
+				offset = 2 * position
+			else:
+				offset = 2 * (1 - position)
+			delta = 0.5 / max_bar_width
+			position += delta
+			if position >= 1.0:
+				position = 0.0
+			# make sure it touches the ends
+			if 1.0 - position < delta:
+				position = 1.0
+			if position < 0.5 and 0.5 - position < delta:
+				position = 0.5
+			self._position = position
+			bar_width = int(offset * max_bar_width)
+			image = image + "[" + (bar_width * " ") + \
+				"<=>" + ((max_bar_width - bar_width) * " ") + "]"
+			return image
+		else:
+			percentage = int(100 * float(curval) / maxval)
+			if percentage == 100:
+				percentage_str_width += 1
+				bar_space -= 1
+			max_bar_width = bar_space - 1
+			image = ("%d%% " % percentage).rjust(percentage_str_width)
+			if cols < min_columns:
+				return image
+			offset = float(curval) / maxval
+			bar_width = int(offset * max_bar_width)
+			image = image + "[" + (bar_width * "=") + \
+				">" + ((max_bar_width - bar_width) * " ") + "]"
+			return image
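+
+# Illustrative use (assumes stdout is a tty; set() redraws in place via "\r"):
+#
+#     bar = TermProgressBar(maxval=100)
+#     for i in range(101):
+#         bar.set(i)  # draws e.g. "42% [=========>                ]"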
+
+_color_map_loaded = False
+
+def _init(config_root='/'):
+	"""
+	Load color.map from the given config_root. This is called automatically
+	on first access of the codes or _styles attributes (unless it has already
+	been called for some other reason).
+	"""
+
+	global _color_map_loaded, codes, _styles
+	if _color_map_loaded:
+		return
+
+	_color_map_loaded = True
+	codes = object.__getattribute__(codes, '_attr')
+	_styles = object.__getattribute__(_styles, '_attr')
+
+	for k, v in codes.items():
+		codes[k] = _unicode_decode(v)
+
+	for k, v in _styles.items():
+		_styles[k] = _unicode_decode(v)
+
+	try:
+		_parse_color_map(config_root=config_root,
+			onerror=lambda e: writemsg("%s\n" % str(e), noiselevel=-1))
+	except FileNotFound:
+		pass
+	except PermissionDenied as e:
+		writemsg(_("Permission denied: '%s'\n") % str(e), noiselevel=-1)
+		del e
+	except PortageException as e:
+		writemsg("%s\n" % str(e), noiselevel=-1)
+		del e
+
+class _LazyInitColorMap(portage.proxy.objectproxy.ObjectProxy):
+
+	__slots__ = ('_attr',)
+
+	def __init__(self, attr):
+		portage.proxy.objectproxy.ObjectProxy.__init__(self)
+		object.__setattr__(self, '_attr', attr)
+
+	def _get_target(self):
+		_init()
+		return object.__getattribute__(self, '_attr')
+
+codes = _LazyInitColorMap(codes)
+_styles = _LazyInitColorMap(_styles)
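+
+# Lazy-initialization sketch: codes and _styles start out wrapped in the
+# proxies above, and the first attribute access calls _init(), which parses
+# the user's color.map (located relative to config_root) exactly once:
+#
+#     codes["reset"]  # first access triggers _init(), then acts as a dict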

diff --git a/portage_with_autodep/pym/portage/package/__init__.py b/portage_with_autodep/pym/portage/package/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/package/ebuild/__init__.py b/portage_with_autodep/pym/portage/package/ebuild/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py
new file mode 100644
index 0000000..cd22554
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py
@@ -0,0 +1,284 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+	'KeywordsManager',
+)
+
+from _emerge.Package import Package
+from portage import os
+from portage.dep import ExtendedAtomDict, _repo_separator, _slot_separator
+from portage.localization import _
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+from portage.util import grabdict_package, stack_lists, writemsg
+from portage.versions import cpv_getkey
+
+class KeywordsManager(object):
+	"""Manager class to handle keywords processing and validation"""
+
+	def __init__(self, profiles, abs_user_config, user_config=True,
+				global_accept_keywords=""):
+		self._pkeywords_list = []
+		rawpkeywords = [grabdict_package(
+			os.path.join(x, "package.keywords"), recursive=1,
+			verify_eapi=True) \
+			for x in profiles]
+		for pkeyworddict in rawpkeywords:
+			if not pkeyworddict:
+				# Omit non-existent files from the stack.
+				continue
+			cpdict = {}
+			for k, v in pkeyworddict.items():
+				cpdict.setdefault(k.cp, {})[k] = v
+			self._pkeywords_list.append(cpdict)
+		self._pkeywords_list = tuple(self._pkeywords_list)
+
+		self._p_accept_keywords = []
+		raw_p_accept_keywords = [grabdict_package(
+			os.path.join(x, "package.accept_keywords"), recursive=1,
+			verify_eapi=True) \
+			for x in profiles]
+		for d in raw_p_accept_keywords:
+			if not d:
+				# Omit non-existent files from the stack.
+				continue
+			cpdict = {}
+			for k, v in d.items():
+				cpdict.setdefault(k.cp, {})[k] = tuple(v)
+			self._p_accept_keywords.append(cpdict)
+		self._p_accept_keywords = tuple(self._p_accept_keywords)
+
+		self.pkeywordsdict = ExtendedAtomDict(dict)
+
+		if user_config:
+			pkgdict = grabdict_package(
+				os.path.join(abs_user_config, "package.keywords"),
+				recursive=1, allow_wildcard=True, allow_repo=True,
+				verify_eapi=False)
+
+			for k, v in grabdict_package(
+				os.path.join(abs_user_config, "package.accept_keywords"),
+				recursive=1, allow_wildcard=True, allow_repo=True,
+				verify_eapi=False).items():
+				pkgdict.setdefault(k, []).extend(v)
+
+			accept_keywords_defaults = global_accept_keywords.split()
+			accept_keywords_defaults = tuple('~' + keyword for keyword in \
+				accept_keywords_defaults if keyword[:1] not in "~-")
+			for k, v in pkgdict.items():
+				# default to ~arch if no specific keyword is given
+				if not v:
+					v = accept_keywords_defaults
+				else:
+					v = tuple(v)
+				self.pkeywordsdict.setdefault(k.cp, {})[k] = v
+
+
+	def getKeywords(self, cpv, slot, keywords, repo):
+		cp = cpv_getkey(cpv)
+		pkg = "".join((cpv, _slot_separator, slot))
+		if repo and repo != Package.UNKNOWN_REPO:
+			pkg = "".join((pkg, _repo_separator, repo))
+		keywords = [[x for x in keywords.split() if x != "-*"]]
+		for pkeywords_dict in self._pkeywords_list:
+			cpdict = pkeywords_dict.get(cp)
+			if cpdict:
+				pkg_keywords = ordered_by_atom_specificity(cpdict, pkg)
+				if pkg_keywords:
+					keywords.extend(pkg_keywords)
+		return stack_lists(keywords, incremental=True)
+
+
+	def getMissingKeywords(self,
+							cpv,
+							slot,
+							keywords,
+							repo,
+							global_accept_keywords,
+							backuped_accept_keywords):
+		"""
+		Take a package and return a list of any KEYWORDS that the user may
+		need to accept for the given package. If the KEYWORDS are empty
+		and the the ** keyword has not been accepted, the returned list will
+		and the ** keyword has not been accepted, the returned list will
+		missing").
+
+		@param cpv: The package name (for package.keywords support)
+		@type cpv: String
+		@param slot: The 'SLOT' key from the raw package metadata
+		@type slot: String
+		@param keywords: The 'KEYWORDS' key from the raw package metadata
+		@type keywords: String
+		@param repo: The package's repository
+		@type repo: String
+		@param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+		@type global_accept_keywords: String
+		@param backuped_accept_keywords: ACCEPT_KEYWORDS from the backup env
+		@type backuped_accept_keywords: String
+		@rtype: List
+		@return: A list of KEYWORDS that have not been accepted.
+		"""
+
+		mygroups = self.getKeywords(cpv, slot, keywords, repo)
+		# Repoman may modify this attribute as necessary.
+		pgroups = global_accept_keywords.split()
+
+		unmaskgroups = self.getPKeywords(cpv, slot, repo,
+				global_accept_keywords)
+		pgroups.extend(unmaskgroups)
+
+		# Hack: Need to check the env directly here as otherwise stacking
+		# doesn't work properly as negative values are lost in the config
+		# object (bug #139600)
+		egroups = backuped_accept_keywords.split()
+
+		if unmaskgroups or egroups:
+			pgroups = self._getEgroups(egroups, pgroups)
+		else:
+			pgroups = set(pgroups)
+
+		return self._getMissingKeywords(cpv, pgroups, mygroups)
+
+
+	def getRawMissingKeywords(self,
+							cpv,
+							slot,
+							keywords,
+							repo,
+							global_accept_keywords):
+		"""
+		Take a package and return a list of any KEYWORDS that the user may
+		need to accept for the given package. If the KEYWORDS are empty,
+		the returned list will contain ** alone (in order to distinguish
+		from the case of "none missing").  This DOES NOT apply any user config
+		package.accept_keywords acceptance.
+
+		@param cpv: The package name (for package.keywords support)
+		@type cpv: String
+		@param slot: The 'SLOT' key from the raw package metadata
+		@type slot: String
+		@param keywords: The 'KEYWORDS' key from the raw package metadata
+		@type keywords: String
+		@param repo: The package's repository
+		@type repo: String
+		@param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+		@type global_accept_keywords: String
+		@rtype: List
+		@return: A list of KEYWORDS that have not been accepted.
+		"""
+
+		mygroups = self.getKeywords(cpv, slot, keywords, repo)
+		pgroups = global_accept_keywords.split()
+		pgroups = set(pgroups)
+		return self._getMissingKeywords(cpv, pgroups, mygroups)
+
+
+	@staticmethod
+	def _getEgroups(egroups, mygroups):
+		"""Gets any keywords defined in the environment.
+
+		@param egroups: ACCEPT_KEYWORDS tokens from the backup environment
+		@type egroups: List
+		@param mygroups: the keyword groups accepted so far
+		@type mygroups: List
+		@rtype: Set
+		@return: the set of KEYWORDS that have been accepted
+		"""
+		mygroups = list(mygroups)
+		mygroups.extend(egroups)
+		inc_pgroups = set()
+		for x in mygroups:
+			if x[:1] == "-":
+				if x == "-*":
+					inc_pgroups.clear()
+				else:
+					inc_pgroups.discard(x[1:])
+			else:
+				inc_pgroups.add(x)
+		return inc_pgroups
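+
+	# A worked example (hypothetical inputs):
+	#     _getEgroups(["-*", "amd64"], ["x86", "~x86"]) -> set(["amd64"])
+	# since "-*" clears everything accepted so far before "amd64" is re-added.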
+
+
+	@staticmethod
+	def _getMissingKeywords(cpv, pgroups, mygroups):
+		"""Determines the missing keywords
+
+		@param pgroups: The pkg keywords accepted
+		@type pgroups: list
+		@param mygroups: The ebuild keywords
+		@type mygroups: list
+		"""
+		match = False
+		hasstable = False
+		hastesting = False
+		for gp in mygroups:
+			if gp == "*" or (gp == "-*" and len(mygroups) == 1):
+				writemsg(_("--- WARNING: Package '%(cpv)s' uses"
+					" '%(keyword)s' keyword.\n") % {"cpv": cpv, "keyword": gp},
+					 noiselevel=-1)
+				if gp == "*":
+					match = True
+					break
+			elif gp in pgroups:
+				match = True
+				break
+			elif gp.startswith("~"):
+				hastesting = True
+			elif not gp.startswith("-"):
+				hasstable = True
+		if not match and \
+			((hastesting and "~*" in pgroups) or \
+			(hasstable and "*" in pgroups) or "**" in pgroups):
+			match = True
+		if match:
+			missing = []
+		else:
+			if not mygroups:
+				# If KEYWORDS is empty then we still have to return something
+				# in order to distinguish from the case of "none missing".
+				mygroups.append("**")
+			missing = mygroups
+		return missing
+
+
+	def getPKeywords(self, cpv, slot, repo, global_accept_keywords):
+		"""Gets any package.keywords settings for cp for the given
+		cpv, slot and repo
+
+		@param cpv: The package name (for package.keywords support)
+		@type cpv: String
+		@param slot: The 'SLOT' key from the raw package metadata
+		@type slot: String
+		@param repo: The package's repository
+		@type repo: String
+		@param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+		@type global_accept_keywords: String
+		@rtype: List
+		@return: list of KEYWORDS that have been accepted
+		"""
+
+		pgroups = global_accept_keywords.split()
+		cp = cpv_getkey(cpv)
+
+		unmaskgroups = []
+		if self._p_accept_keywords:
+			cpv_slot = "%s:%s" % (cpv, slot)
+			accept_keywords_defaults = tuple('~' + keyword for keyword in \
+				pgroups if keyword[:1] not in "~-")
+			for d in self._p_accept_keywords:
+				cpdict = d.get(cp)
+				if cpdict:
+					pkg_accept_keywords = \
+						ordered_by_atom_specificity(cpdict, cpv_slot)
+					if pkg_accept_keywords:
+						for x in pkg_accept_keywords:
+							if not x:
+								x = accept_keywords_defaults
+							unmaskgroups.extend(x)
+
+		pkgdict = self.pkeywordsdict.get(cp)
+		if pkgdict:
+			cpv_slot = "%s:%s" % (cpv, slot)
+			pkg_accept_keywords = \
+				ordered_by_atom_specificity(pkgdict, cpv_slot, repo=repo)
+			if pkg_accept_keywords:
+				for x in pkg_accept_keywords:
+					unmaskgroups.extend(x)
+		return unmaskgroups
+

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py
new file mode 100644
index 0000000..effd55b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py
@@ -0,0 +1,236 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+	'LicenseManager',
+)
+
+from portage import os
+from portage.dep import ExtendedAtomDict, use_reduce
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util import grabdict, grabdict_package, writemsg
+from portage.versions import cpv_getkey
+
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+
+
+class LicenseManager(object):
+
+	def __init__(self, license_group_locations, abs_user_config, user_config=True):
+
+		self._accept_license_str = None
+		self._accept_license = None
+		self._license_groups = {}
+		self._plicensedict = ExtendedAtomDict(dict)
+		self._undef_lic_groups = set()
+
+		if user_config:
+			license_group_locations = list(license_group_locations) + [abs_user_config]
+
+		self._read_license_groups(license_group_locations)
+
+		if user_config:
+			self._read_user_config(abs_user_config)
+
+	def _read_user_config(self, abs_user_config):
+		licdict = grabdict_package(os.path.join(
+			abs_user_config, "package.license"), recursive=1, allow_wildcard=True, allow_repo=True, verify_eapi=False)
+		for k, v in licdict.items():
+			self._plicensedict.setdefault(k.cp, {})[k] = \
+				self.expandLicenseTokens(v)
+
+	def _read_license_groups(self, locations):
+		for loc in locations:
+			for k, v in grabdict(
+				os.path.join(loc, "license_groups")).items():
+				self._license_groups.setdefault(k, []).extend(v)
+
+		for k, v in self._license_groups.items():
+			self._license_groups[k] = frozenset(v)
+
+	def extract_global_changes(self, old=""):
+		ret = old
+		atom_license_map = self._plicensedict.get("*/*")
+		if atom_license_map is not None:
+			v = atom_license_map.pop("*/*", None)
+			if v is not None:
+				ret = " ".join(v)
+				if old:
+					ret = old + " " + ret
+				if not atom_license_map:
+					#No tokens left in atom_license_map, remove it.
+					del self._plicensedict["*/*"]
+		return ret
+	
+	def expandLicenseTokens(self, tokens):
+		""" Take a token from ACCEPT_LICENSE or package.license and expand it
+		if it's a group token (indicated by @) or just return it if it's not a
+		group.  If a group is negated then negate all group elements."""
+		expanded_tokens = []
+		for x in tokens:
+			expanded_tokens.extend(self._expandLicenseToken(x, None))
+		return expanded_tokens
+
+	def _expandLicenseToken(self, token, traversed_groups):
+		negate = False
+		rValue = []
+		if token.startswith("-"):
+			negate = True
+			license_name = token[1:]
+		else:
+			license_name = token
+		if not license_name.startswith("@"):
+			rValue.append(token)
+			return rValue
+		group_name = license_name[1:]
+		if traversed_groups is None:
+			traversed_groups = set()
+		license_group = self._license_groups.get(group_name)
+		if group_name in traversed_groups:
+			writemsg(_("Circular license group reference"
+				" detected in '%s'\n") % group_name, noiselevel=-1)
+			rValue.append("@"+group_name)
+		elif license_group:
+			traversed_groups.add(group_name)
+			for l in license_group:
+				if l.startswith("-"):
+					writemsg(_("Skipping invalid element %s"
+						" in license group '%s'\n") % (l, group_name),
+						noiselevel=-1)
+				else:
+					rValue.extend(self._expandLicenseToken(l, traversed_groups))
+		else:
+			if self._license_groups and \
+				group_name not in self._undef_lic_groups:
+				self._undef_lic_groups.add(group_name)
+				writemsg(_("Undefined license group '%s'\n") % group_name,
+					noiselevel=-1)
+			rValue.append("@"+group_name)
+		if negate:
+			rValue = ["-" + token for token in rValue]
+		return rValue
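+
+	# For illustration (hypothetical group): if license_groups defines
+	#     MYGROUP	GPL-2 BSD
+	# then expandLicenseTokens(["-@MYGROUP"]) negates every member and
+	# yields "-GPL-2" and "-BSD" (unordered, since groups are frozensets).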
+
+	def _getPkgAcceptLicense(self, cpv, slot, repo):
+		"""
+		Get an ACCEPT_LICENSE list, accounting for package.license.
+		"""
+		accept_license = self._accept_license
+		cp = cpv_getkey(cpv)
+		cpdict = self._plicensedict.get(cp)
+		if cpdict:
+			cpv_slot = "%s:%s" % (cpv, slot)
+			plicence_list = ordered_by_atom_specificity(cpdict, cpv_slot, repo)
+			if plicence_list:
+				accept_license = list(self._accept_license)
+				for x in plicence_list:
+					accept_license.extend(x)
+		return accept_license
+
+	def get_prunned_accept_license(self, cpv, use, lic, slot, repo):
+		"""
+		Generate a pruned version of ACCEPT_LICENSE, by intersection with
+		LICENSE. This is required since otherwise ACCEPT_LICENSE might be
+		too big (bigger than ARG_MAX), causing execve() calls to fail with
+		E2BIG errors as in bug #262647.
+		"""
+		try:
+			licenses = set(use_reduce(lic, uselist=use, flat=True))
+		except InvalidDependString:
+			licenses = set()
+		licenses.discard('||')
+
+		accept_license = self._getPkgAcceptLicense(cpv, slot, repo)
+
+		if accept_license:
+			acceptable_licenses = set()
+			for x in accept_license:
+				if x == '*':
+					acceptable_licenses.update(licenses)
+				elif x == '-*':
+					acceptable_licenses.clear()
+				elif x[:1] == '-':
+					acceptable_licenses.discard(x[1:])
+				elif x in licenses:
+					acceptable_licenses.add(x)
+
+			licenses = acceptable_licenses
+		return ' '.join(sorted(licenses))
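+
+	# Sketch with hypothetical values: LICENSE="|| ( GPL-2 BSD )" combined
+	# with ACCEPT_LICENSE="* -BSD" prunes to "GPL-2", keeping the exported
+	# variable well under ARG_MAX (see bug #262647 above).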
+
+	def getMissingLicenses(self, cpv, use, lic, slot, repo):
+		"""
+		Take a LICENSE string and return a list of any licenses that the user
+		may need to accept for the given package.  The returned list will not
+		contain any licenses that have already been accepted.  This method
+		can throw an InvalidDependString exception.
+
+		@param cpv: The package name (for package.license support)
+		@type cpv: String
+		@param use: "USE" from the cpv's metadata
+		@type use: String
+		@param lic: "LICENSE" from the cpv's metadata
+		@type lic: String
+		@param slot: "SLOT" from the cpv's metadata
+		@type slot: String
+		@param repo: The package's repository
+		@type repo: String
+		@rtype: List
+		@return: A list of licenses that have not been accepted.
+		"""
+
+		licenses = set(use_reduce(lic, matchall=1, flat=True))
+		licenses.discard('||')
+
+		acceptable_licenses = set()
+		for x in self._getPkgAcceptLicense(cpv, slot, repo):
+			if x == '*':
+				acceptable_licenses.update(licenses)
+			elif x == '-*':
+				acceptable_licenses.clear()
+			elif x[:1] == '-':
+				acceptable_licenses.discard(x[1:])
+			else:
+				acceptable_licenses.add(x)
+
+		license_str = lic
+		if "?" in license_str:
+			use = use.split()
+		else:
+			use = []
+
+		license_struct = use_reduce(license_str, uselist=use, opconvert=True)
+		return self._getMaskedLicenses(license_struct, acceptable_licenses)
+
+	def _getMaskedLicenses(self, license_struct, acceptable_licenses):
+		if not license_struct:
+			return []
+		if license_struct[0] == "||":
+			ret = []
+			for element in license_struct[1:]:
+				if isinstance(element, list):
+					if element:
+						tmp = self._getMaskedLicenses(element, acceptable_licenses)
+						if not tmp:
+							return []
+						ret.extend(tmp)
+				else:
+					if element in acceptable_licenses:
+						return []
+					ret.append(element)
+			# Return all masked licenses, since we don't know which combination
+			# (if any) the user will decide to unmask.
+			return ret
+
+		ret = []
+		for element in license_struct:
+			if isinstance(element, list):
+				if element:
+					ret.extend(self._getMaskedLicenses(element,
+						acceptable_licenses))
+			else:
+				if element not in acceptable_licenses:
+					ret.append(element)
+		return ret
+
+	def set_accept_license_str(self, accept_license_str):
+		if accept_license_str != self._accept_license_str:
+			self._accept_license_str = accept_license_str
+			self._accept_license = tuple(self.expandLicenseTokens(accept_license_str.split()))

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py
new file mode 100644
index 0000000..c2b115b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py
@@ -0,0 +1,182 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+	'LocationsManager',
+)
+
+import io
+from portage import os, eapi_is_supported, _encodings, _unicode_encode
+from portage.const import CUSTOM_PROFILE_PATH, GLOBAL_CONFIG_PATH, \
+	PROFILE_PATH, USER_CONFIG_PATH
+from portage.exception import DirectoryNotFound, ParseError
+from portage.localization import _
+from portage.util import ensure_dirs, grabfile, \
+	normalize_path, shlex_split, writemsg
+
+
+class LocationsManager(object):
+
+	def __init__(self, config_root=None, eprefix=None, config_profile_path=None, local_config=True, \
+		target_root=None):
+		self.user_profile_dir = None
+		self._local_repo_conf_path = None
+		self.eprefix = eprefix
+		self.config_root = config_root
+		self.target_root = target_root
+		self._user_config = local_config
+		
+		if self.eprefix is None:
+			self.eprefix = ""
+
+		if self.config_root is None:
+			self.config_root = self.eprefix + os.sep
+
+		self.config_root = normalize_path(os.path.abspath(
+			self.config_root)).rstrip(os.path.sep) + os.path.sep
+
+		self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root)
+		self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH)
+
+		if config_profile_path is None:
+			config_profile_path = \
+				os.path.join(self.config_root, PROFILE_PATH)
+			if os.path.isdir(config_profile_path):
+				self.profile_path = config_profile_path
+			else:
+				config_profile_path = \
+					os.path.join(self.abs_user_config, 'make.profile')
+				if os.path.isdir(config_profile_path):
+					self.profile_path = config_profile_path
+				else:
+					self.profile_path = None
+		else:
+			# NOTE: repoman may pass in an empty string
+			# here, in order to create an empty profile
+			# for checking dependencies of packages with
+			# empty KEYWORDS.
+			self.profile_path = config_profile_path
+
+
+		# The symlink might not exist or might not be a symlink.
+		self.profiles = []
+		if self.profile_path:
+			try:
+				self._addProfile(os.path.realpath(self.profile_path))
+			except ParseError as e:
+				writemsg(_("!!! Unable to parse profile: '%s'\n") % \
+					self.profile_path, noiselevel=-1)
+				writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
+				self.profiles = []
+
+		if self._user_config and self.profiles:
+			custom_prof = os.path.join(
+				self.config_root, CUSTOM_PROFILE_PATH)
+			if os.path.exists(custom_prof):
+				self.user_profile_dir = custom_prof
+				self.profiles.append(custom_prof)
+			del custom_prof
+
+		self.profiles = tuple(self.profiles)
+
+	def _check_var_directory(self, varname, var):
+		if not os.path.isdir(var):
+			writemsg(_("!!! Error: %s='%s' is not a directory. "
+				"Please correct this.\n") % (varname, var),
+				noiselevel=-1)
+			raise DirectoryNotFound(var)
+
+	def _addProfile(self, currentPath):
+		parentsFile = os.path.join(currentPath, "parent")
+		eapi_file = os.path.join(currentPath, "eapi")
+		try:
+			eapi = io.open(_unicode_encode(eapi_file,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['content'], errors='replace'
+				).readline().strip()
+		except IOError:
+			pass
+		else:
+			if not eapi_is_supported(eapi):
+				raise ParseError(_(
+					"Profile contains unsupported "
+					"EAPI '%s': '%s'") % \
+					(eapi, os.path.realpath(eapi_file),))
+		if os.path.exists(parentsFile):
+			parents = grabfile(parentsFile)
+			if not parents:
+				raise ParseError(
+					_("Empty parent file: '%s'") % parentsFile)
+			for parentPath in parents:
+				parentPath = normalize_path(os.path.join(
+					currentPath, parentPath))
+				if os.path.exists(parentPath):
+					self._addProfile(parentPath)
+				else:
+					raise ParseError(
+						_("Parent '%s' not found: '%s'") %  \
+						(parentPath, parentsFile))
+		self.profiles.append(currentPath)
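+
+	# Illustration (hypothetical layout): if a profile's "parent" file reads
+	#     ..
+	#     ../../releases/10.0
+	# then _addProfile() recurses into each parent first, so self.profiles
+	# ends up ordered from the most generic ancestor to the profile itself.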
+
+	def set_root_override(self, root_overwrite=None):
+		# Allow ROOT setting to come from make.conf if it's not overridden
+		# by the constructor argument (from the calling environment).
+		if self.target_root is None and root_overwrite is not None:
+			self.target_root = root_overwrite
+			if not self.target_root.strip():
+				self.target_root = None
+		if self.target_root is None:
+			self.target_root = "/"
+
+		self.target_root = normalize_path(os.path.abspath(
+			self.target_root)).rstrip(os.path.sep) + os.path.sep
+
+		ensure_dirs(self.target_root)
+		self._check_var_directory("ROOT", self.target_root)
+
+		self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep
+
+		# make.globals should not be relative to config_root
+		# because it only contains constants. However, if EPREFIX
+		# is set then there are two possible scenarios:
+		# 1) If $ROOT == "/" then make.globals should be
+		#    relative to EPREFIX.
+		# 2) If $ROOT != "/" then the correct location of
+		#    make.globals needs to be specified in the constructor
+		#    parameters, since it's a property of the host system
+		#    (and the current config represents the target system).
+		self.global_config_path = GLOBAL_CONFIG_PATH
+		if self.eprefix:
+			if self.target_root == "/":
+				# case (1) above
+				self.global_config_path = os.path.join(self.eprefix,
+					GLOBAL_CONFIG_PATH.lstrip(os.sep))
+			else:
+				# case (2) above
+				# For now, just assume make.globals is relative
+				# to EPREFIX.
+				# TODO: Pass in more info to the constructor,
+				# so we know the host system configuration.
+				self.global_config_path = os.path.join(self.eprefix,
+					GLOBAL_CONFIG_PATH.lstrip(os.sep))
+
+	def set_port_dirs(self, portdir, portdir_overlay):
+		self.portdir = portdir
+		self.portdir_overlay = portdir_overlay
+		if self.portdir_overlay is None:
+			self.portdir_overlay = ""
+
+		self.overlay_profiles = []
+		for ov in shlex_split(self.portdir_overlay):
+			ov = normalize_path(ov)
+			profiles_dir = os.path.join(ov, "profiles")
+			if os.path.isdir(profiles_dir):
+				self.overlay_profiles.append(profiles_dir)
+
+		self.profile_locations = [os.path.join(portdir, "profiles")] + self.overlay_profiles
+		self.profile_and_user_locations = self.profile_locations[:]
+		if self._user_config:
+			self.profile_and_user_locations.append(self.abs_user_config)
+
+		self.profile_locations = tuple(self.profile_locations)
+		self.profile_and_user_locations = tuple(self.profile_and_user_locations)

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py
new file mode 100644
index 0000000..df93e10
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py
@@ -0,0 +1,189 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+	'MaskManager',
+)
+
+from portage import os
+from portage.dep import ExtendedAtomDict, match_from_list, _repo_separator, _slot_separator
+from portage.util import append_repo, grabfile_package, stack_lists
+from portage.versions import cpv_getkey
+from _emerge.Package import Package
+
+class MaskManager(object):
+
+	def __init__(self, repositories, profiles, abs_user_config,
+		user_config=True, strict_umatched_removal=False):
+		self._punmaskdict = ExtendedAtomDict(list)
+		self._pmaskdict = ExtendedAtomDict(list)
+		# Preserves atoms that are eliminated by negative
+		# incrementals in user_pkgmasklines.
+		self._pmaskdict_raw = ExtendedAtomDict(list)
+
+		#Read profile/package.mask from every repo.
+		#Repositories inherit masks from their parent profiles and
+		#are able to remove masks from them with -atoms.
+		#Such a removal affects only the current repo, but not the parent.
+		#Add ::repo specs to every atom to make sure atoms only affect
+		#packages from the current repo.
+
+		# Cache the repository-wide package.mask files as a particular
+		# repo may be often referenced by others as the master.
+		pmask_cache = {}
+
+		def grab_pmask(loc):
+			if loc not in pmask_cache:
+				pmask_cache[loc] = grabfile_package(
+						os.path.join(loc, "profiles", "package.mask"),
+						recursive=1, remember_source_file=True, verify_eapi=True)
+			return pmask_cache[loc]
+
+		repo_pkgmasklines = []
+		for repo in repositories.repos_with_profiles():
+			lines = []
+			repo_lines = grab_pmask(repo.location)
+			for master in repo.masters:
+				master_lines = grab_pmask(master.location)
+				lines.append(stack_lists([master_lines, repo_lines], incremental=1,
+					remember_source_file=True, warn_for_unmatched_removal=True,
+					strict_warn_for_unmatched_removal=strict_umatched_removal))
+			if not repo.masters:
+				lines.append(stack_lists([repo_lines], incremental=1,
+					remember_source_file=True, warn_for_unmatched_removal=True,
+					strict_warn_for_unmatched_removal=strict_umatched_removal))
+			repo_pkgmasklines.extend(append_repo(stack_lists(lines), repo.name, remember_source_file=True))
+
+		repo_pkgunmasklines = []
+		for repo in repositories.repos_with_profiles():
+			repo_lines = grabfile_package(os.path.join(repo.location, "profiles", "package.unmask"), \
+				recursive=1, remember_source_file=True, verify_eapi=True)
+			lines = stack_lists([repo_lines], incremental=1, \
+				remember_source_file=True, warn_for_unmatched_removal=True,
+				strict_warn_for_unmatched_removal=strict_umatched_removal)
+			repo_pkgunmasklines.extend(append_repo(lines, repo.name, remember_source_file=True))
+
+		#Read package.mask from the user's profile. Stack them in the end
+		#to allow profiles to override masks from their parent profiles.
+		profile_pkgmasklines = []
+		profile_pkgunmasklines = []
+		for x in profiles:
+			profile_pkgmasklines.append(grabfile_package(
+				os.path.join(x, "package.mask"), recursive=1, remember_source_file=True, verify_eapi=True))
+			profile_pkgunmasklines.append(grabfile_package(
+				os.path.join(x, "package.unmask"), recursive=1, remember_source_file=True, verify_eapi=True))
+		profile_pkgmasklines = stack_lists(profile_pkgmasklines, incremental=1, \
+			remember_source_file=True, warn_for_unmatched_removal=True,
+			strict_warn_for_unmatched_removal=strict_umatched_removal)
+		profile_pkgunmasklines = stack_lists(profile_pkgunmasklines, incremental=1, \
+			remember_source_file=True, warn_for_unmatched_removal=True,
+			strict_warn_for_unmatched_removal=strict_umatched_removal)
+
+		#Read /etc/portage/package.mask. Don't stack it to allow the user to
+		#remove mask atoms from everywhere with -atoms.
+		user_pkgmasklines = []
+		user_pkgunmasklines = []
+		if user_config:
+			user_pkgmasklines = grabfile_package(
+				os.path.join(abs_user_config, "package.mask"), recursive=1, \
+				allow_wildcard=True, allow_repo=True, remember_source_file=True, verify_eapi=False)
+			user_pkgunmasklines = grabfile_package(
+				os.path.join(abs_user_config, "package.unmask"), recursive=1, \
+				allow_wildcard=True, allow_repo=True, remember_source_file=True, verify_eapi=False)
+
+		#Stack everything together. At this point, only user_pkgmasklines may contain -atoms.
+		#Don't warn for unmatched -atoms here, since we don't do it for any other user config file.
+		raw_pkgmasklines = stack_lists([repo_pkgmasklines, profile_pkgmasklines], \
+			incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
+		pkgmasklines = stack_lists([repo_pkgmasklines, profile_pkgmasklines, user_pkgmasklines], \
+			incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
+		pkgunmasklines = stack_lists([repo_pkgunmasklines, profile_pkgunmasklines, user_pkgunmasklines], \
+			incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
+
+		for x, source_file in raw_pkgmasklines:
+			self._pmaskdict_raw.setdefault(x.cp, []).append(x)
+
+		for x, source_file in pkgmasklines:
+			self._pmaskdict.setdefault(x.cp, []).append(x)
+
+		for x, source_file in pkgunmasklines:
+			self._punmaskdict.setdefault(x.cp, []).append(x)
+
+		for d in (self._pmaskdict_raw, self._pmaskdict, self._punmaskdict):
+			for k, v in d.items():
+				d[k] = tuple(v)
+
+	def _getMaskAtom(self, cpv, slot, repo, unmask_atoms=None):
+		"""
+		Take a package and return a matching package.mask atom, or None if no
+		such atom exists or it has been cancelled by package.unmask. PROVIDE
+		is not checked, so atoms will not be found for old-style virtuals.
+
+		@param cpv: The package name
+		@type cpv: String
+		@param slot: The package's slot
+		@type slot: String
+		@param repo: The package's repository [optional]
+		@type repo: String
+		@param unmask_atoms: if desired pass in self._punmaskdict.get(cp)
+		@type unmask_atoms: list
+		@rtype: String
+		@return: A matching atom string or None if one is not found.
+		"""
+
+		cp = cpv_getkey(cpv)
+		mask_atoms = self._pmaskdict.get(cp)
+		if mask_atoms:
+			pkg = "".join((cpv, _slot_separator, slot))
+			if repo and repo != Package.UNKNOWN_REPO:
+				pkg = "".join((pkg, _repo_separator, repo))
+			pkg_list = [pkg]
+			for x in mask_atoms:
+				if not match_from_list(x, pkg_list):
+					continue
+				if unmask_atoms:
+					for y in unmask_atoms:
+						if match_from_list(y, pkg_list):
+							return None
+				return x
+		return None
+
+
+	def getMaskAtom(self, cpv, slot, repo):
+		"""
+		Take a package and return a matching package.mask atom, or None if no
+		such atom exists or it has been cancelled by package.unmask. PROVIDE
+		is not checked, so atoms will not be found for old-style virtuals.
+
+		@param cpv: The package name
+		@type cpv: String
+		@param slot: The package's slot
+		@type slot: String
+		@param repo: The package's repository [optional]
+		@type repo: String
+		@rtype: String
+		@return: A matching atom string or None if one is not found.
+		"""
+
+		cp = cpv_getkey(cpv)
+		return self._getMaskAtom(cpv, slot, repo, self._punmaskdict.get(cp))
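+
+	# Hedged example: given a hypothetical ">=sys-apps/foo-1.0" entry in
+	# package.mask and no matching package.unmask atom,
+	# getMaskAtom("sys-apps/foo-1.0", "0", None) returns that atom string;
+	# with a matching unmask atom it returns None instead.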
+
+
+	def getRawMaskAtom(self, cpv, slot, repo):
+		"""
+		Take a package and return a matching package.mask atom, or None if no
+		such atom exists. Unlike getMaskAtom(), package.unmask is deliberately
+		ignored here. PROVIDE is not checked, so atoms will not be found
+		for old-style virtuals.
+
+		@param cpv: The package name
+		@type cpv: String
+		@param slot: The package's slot
+		@type slot: String
+		@param repo: The package's repository [optional]
+		@type repo: String
+		@rtype: String
+		@return: A matching atom string or None if one is not found.
+		"""
+
+		return self._getMaskAtom(cpv, slot, repo)

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py
new file mode 100644
index 0000000..d7ef0f6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py
@@ -0,0 +1,235 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+	'UseManager',
+)
+
+from _emerge.Package import Package
+from portage import os
+from portage.dep import ExtendedAtomDict, remove_slot, _get_useflag_re
+from portage.localization import _
+from portage.util import grabfile, grabdict_package, read_corresponding_eapi_file, stack_lists, writemsg
+from portage.versions import cpv_getkey
+
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+
+class UseManager(object):
+
+	def __init__(self, repositories, profiles, abs_user_config, user_config=True):
+		#	file				variable
+		#--------------------------------
+		#	repositories
+		#--------------------------------
+		#	use.mask			_repo_usemask_dict
+		#	use.force			_repo_useforce_dict
+		#	package.use.mask		_repo_pusemask_dict
+		#	package.use.force		_repo_puseforce_dict
+		#--------------------------------
+		#	profiles
+		#--------------------------------
+		#	use.mask			_usemask_list
+		#	use.force			_useforce_list
+		#	package.use.mask		_pusemask_list
+		#	package.use			_pkgprofileuse
+		#	package.use.force		_puseforce_list
+		#--------------------------------
+		#	user config
+		#--------------------------------
+		#	package.use			_pusedict	
+
+		# Dynamic variables tracked by the config class
+		#--------------------------------
+		#	profiles
+		#--------------------------------
+		#	usemask
+		#	useforce
+		#--------------------------------
+		#	user config
+		#--------------------------------
+		#	puse
+
+		self._repo_usemask_dict = self._parse_repository_files_to_dict_of_tuples("use.mask", repositories)
+		self._repo_useforce_dict = self._parse_repository_files_to_dict_of_tuples("use.force", repositories)
+		self._repo_pusemask_dict = self._parse_repository_files_to_dict_of_dicts("package.use.mask", repositories)
+		self._repo_puseforce_dict = self._parse_repository_files_to_dict_of_dicts("package.use.force", repositories)
+		self._repo_puse_dict = self._parse_repository_files_to_dict_of_dicts("package.use", repositories)
+
+		self._usemask_list = self._parse_profile_files_to_tuple_of_tuples("use.mask", profiles)
+		self._useforce_list = self._parse_profile_files_to_tuple_of_tuples("use.force", profiles)
+		self._pusemask_list = self._parse_profile_files_to_tuple_of_dicts("package.use.mask", profiles)
+		self._pkgprofileuse = self._parse_profile_files_to_tuple_of_dicts("package.use", profiles, juststrings=True)
+		self._puseforce_list = self._parse_profile_files_to_tuple_of_dicts("package.use.force", profiles)
+
+		self._pusedict = self._parse_user_files_to_extatomdict("package.use", abs_user_config, user_config)
+
+		self.repositories = repositories
+	
+	def _parse_file_to_tuple(self, file_name):
+		ret = []
+		lines = grabfile(file_name, recursive=1)
+		eapi = read_corresponding_eapi_file(file_name)
+		useflag_re = _get_useflag_re(eapi)
+		for prefixed_useflag in lines:
+			if prefixed_useflag[:1] == "-":
+				useflag = prefixed_useflag[1:]
+			else:
+				useflag = prefixed_useflag
+			if useflag_re.match(useflag) is None:
+				writemsg(_("--- Invalid USE flag in '%s': '%s'\n") %
+					(file_name, prefixed_useflag), noiselevel=-1)
+			else:
+				ret.append(prefixed_useflag)
+		return tuple(ret)
+
+	def _parse_file_to_dict(self, file_name, juststrings=False):
+		ret = {}
+		location_dict = {}
+		file_dict = grabdict_package(file_name, recursive=1, verify_eapi=True)
+		eapi = read_corresponding_eapi_file(file_name)
+		useflag_re = _get_useflag_re(eapi)
+		for k, v in file_dict.items():
+			useflags = []
+			for prefixed_useflag in v:
+				if prefixed_useflag[:1] == "-":
+					useflag = prefixed_useflag[1:]
+				else:
+					useflag = prefixed_useflag
+				if useflag_re.match(useflag) is None:
+					writemsg(_("--- Invalid USE flag for '%s' in '%s': '%s'\n") %
+						(k, file_name, prefixed_useflag), noiselevel=-1)
+				else:
+					useflags.append(prefixed_useflag)
+			location_dict.setdefault(k, []).extend(useflags)
+		for k, v in location_dict.items():
+			if juststrings:
+				v = " ".join(v)
+			else:
+				v = tuple(v)
+			ret.setdefault(k.cp, {})[k] = v
+		return ret
+
+	def _parse_user_files_to_extatomdict(self, file_name, location, user_config):
+		ret = ExtendedAtomDict(dict)
+		if user_config:
+			pusedict = grabdict_package(
+				os.path.join(location, file_name), recursive=1, allow_wildcard=True, allow_repo=True, verify_eapi=False)
+			for k, v in pusedict.items():
+				ret.setdefault(k.cp, {})[k] = tuple(v)
+
+		return ret
+
+	def _parse_repository_files_to_dict_of_tuples(self, file_name, repositories):
+		ret = {}
+		for repo in repositories.repos_with_profiles():
+			ret[repo.name] = self._parse_file_to_tuple(os.path.join(repo.location, "profiles", file_name))
+		return ret
+
+	def _parse_repository_files_to_dict_of_dicts(self, file_name, repositories):
+		ret = {}
+		for repo in repositories.repos_with_profiles():
+			ret[repo.name] = self._parse_file_to_dict(os.path.join(repo.location, "profiles", file_name))
+		return ret
+
+	def _parse_profile_files_to_tuple_of_tuples(self, file_name, locations):
+		return tuple(self._parse_file_to_tuple(os.path.join(profile, file_name)) for profile in locations)
+
+	def _parse_profile_files_to_tuple_of_dicts(self, file_name, locations, juststrings=False):
+		return tuple(self._parse_file_to_dict(os.path.join(profile, file_name), juststrings) for profile in locations)
+
+	def getUseMask(self, pkg=None):
+		if pkg is None:
+			return frozenset(stack_lists(
+				self._usemask_list, incremental=True))
+
+		cp = getattr(pkg, "cp", None)
+		if cp is None:
+			cp = cpv_getkey(remove_slot(pkg))
+		usemask = []
+		if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
+			repos = []
+			try:
+				repos.extend(repo.name for repo in
+					self.repositories[pkg.repo].masters)
+			except KeyError:
+				pass
+			repos.append(pkg.repo)
+			for repo in repos:
+				usemask.append(self._repo_usemask_dict.get(repo, {}))
+				cpdict = self._repo_pusemask_dict.get(repo, {}).get(cp)
+				if cpdict:
+					pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+					if pkg_usemask:
+						usemask.extend(pkg_usemask)
+		for i, pusemask_dict in enumerate(self._pusemask_list):
+			if self._usemask_list[i]:
+				usemask.append(self._usemask_list[i])
+			cpdict = pusemask_dict.get(cp)
+			if cpdict:
+				pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+				if pkg_usemask:
+					usemask.extend(pkg_usemask)
+		return frozenset(stack_lists(usemask, incremental=True))
+
+	def getUseForce(self, pkg=None):
+		if pkg is None:
+			return frozenset(stack_lists(
+				self._useforce_list, incremental=True))
+
+		cp = getattr(pkg, "cp", None)
+		if cp is None:
+			cp = cpv_getkey(remove_slot(pkg))
+		useforce = []
+		if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
+			repos = []
+			try:
+				repos.extend(repo.name for repo in
+					self.repositories[pkg.repo].masters)
+			except KeyError:
+				pass
+			repos.append(pkg.repo)
+			for repo in repos:
+				useforce.append(self._repo_useforce_dict.get(repo, {}))
+				cpdict = self._repo_puseforce_dict.get(repo, {}).get(cp)
+				if cpdict:
+					pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+					if pkg_useforce:
+						useforce.extend(pkg_useforce)
+		for i, puseforce_dict in enumerate(self._puseforce_list):
+			if self._useforce_list[i]:
+				useforce.append(self._useforce_list[i])
+			cpdict = puseforce_dict.get(cp)
+			if cpdict:
+				pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+				if pkg_useforce:
+					useforce.extend(pkg_useforce)
+		return frozenset(stack_lists(useforce, incremental=True))
+
+	def getPUSE(self, pkg):
+		cp = getattr(pkg, "cp", None)
+		if cp is None:
+			cp = cpv_getkey(remove_slot(pkg))
+		ret = ""
+		cpdict = self._pusedict.get(cp)
+		if cpdict:
+			puse_matches = ordered_by_atom_specificity(cpdict, pkg)
+			if puse_matches:
+				puse_list = []
+				for x in puse_matches:
+					puse_list.extend(x)
+				ret = " ".join(puse_list)
+		return ret
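+
+	# Sketch (hypothetical user config): a package.use line such as
+	#     app-editors/vim python -gtk
+	# makes getPUSE(pkg) return "python -gtk" for a matching vim package.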
+
+	def extract_global_USE_changes(self, old=""):
+		ret = old
+		cpdict = self._pusedict.get("*/*")
+		if cpdict is not None:
+			v = cpdict.pop("*/*", None)
+			if v is not None:
+				ret = " ".join(v)
+				if old:
+					ret = old + " " + ret
+				if not cpdict:
+					#No tokens left in cpdict, remove it.
+					del self._pusedict["*/*"]
+		return ret

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.py
new file mode 100644
index 0000000..c4d1e36
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.py
@@ -0,0 +1,233 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+	'VirtualsManager',
+)
+
+from copy import deepcopy
+
+from portage import os
+from portage.dep import Atom
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage.util import grabdict, stack_dictlist, writemsg
+from portage.versions import cpv_getkey
+
+class VirtualsManager(object):
+
+	def __init__(self, *args, **kwargs):
+		if kwargs.get("_copy"):
+			return
+
+		assert len(args) == 1, "VirtualsManager.__init__ takes one positional argument"
+		assert not kwargs, "unknown keyword argument(s) '%s' passed to VirtualsManager.__init__" % \
+			", ".join(kwargs)
+
+		profiles = args[0]
+		self._virtuals = None
+		self._dirVirtuals = None
+		self._virts_p = None
+
+		# Virtuals obtained from the vartree
+		self._treeVirtuals = None
+		# Virtuals added by the depgraph via self.add_depgraph_virtuals().
+		self._depgraphVirtuals = {}
+
+		#Initialise _dirVirtuals.
+		self._read_dirVirtuals(profiles)
+
+		#We could initialise _treeVirtuals here, but some consumers want to
+		#pass their own vartree.
+
+	def _read_dirVirtuals(self, profiles):
+		"""
+		Read the 'virtuals' file in all profiles.
+		"""
+		virtuals_list = []
+		for x in profiles:
+			virtuals_file = os.path.join(x, "virtuals")
+			virtuals_dict = grabdict(virtuals_file)
+			atoms_dict = {}
+			for k, v in virtuals_dict.items():
+				try:
+					virt_atom = Atom(k)
+				except InvalidAtom:
+					virt_atom = None
+				else:
+					if virt_atom.blocker or \
+						str(virt_atom) != str(virt_atom.cp):
+						virt_atom = None
+				if virt_atom is None:
+					writemsg(_("--- Invalid virtuals atom in %s: %s\n") % \
+						(virtuals_file, k), noiselevel=-1)
+					continue
+				providers = []
+				for atom in v:
+					atom_orig = atom
+					if atom[:1] == '-':
+						# allow incrementals
+						atom = atom[1:]
+					try:
+						atom = Atom(atom)
+					except InvalidAtom:
+						atom = None
+					else:
+						if atom.blocker:
+							atom = None
+					if atom is None:
+						writemsg(_("--- Invalid atom in %s: %s\n") % \
+							(virtuals_file, atom_orig), noiselevel=-1)
+					else:
+						if atom_orig == str(atom):
+							# normal atom, so return as Atom instance
+							providers.append(atom)
+						else:
+							# atom has special prefix, so return as string
+							providers.append(atom_orig)
+				if providers:
+					atoms_dict[virt_atom] = providers
+			if atoms_dict:
+				virtuals_list.append(atoms_dict)
+
+		self._dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
+
+		for virt in self._dirVirtuals:
+			# Preference for virtuals decreases from left to right.
+			self._dirVirtuals[virt].reverse()
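+	# Illustrative sketch (hypothetical profile content): a 'virtuals'
+	# file line such as
+	#   virtual/editor  app-editors/nano
+	# yields _dirVirtuals[Atom('virtual/editor')] == [Atom('app-editors/nano')];
+	# after the reverse() above, the most-preferred provider (from the
+	# most specific profile) comes first in each list.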
+
+	def __deepcopy__(self, memo=None):
+		if memo is None:
+			memo = {}
+		result = VirtualsManager(_copy=True)
+		memo[id(self)] = result
+
+		# immutable attributes (internal policy ensures lack of mutation)
+		# _treeVirtuals is initialised by _populate_treeVirtuals().
+		# Before that it's 'None'.
+		result._treeVirtuals = self._treeVirtuals
+		memo[id(self._treeVirtuals)] = self._treeVirtuals
+		# _dirVirtuals is initialised by __init__.
+		result._dirVirtuals = self._dirVirtuals
+		memo[id(self._dirVirtuals)] = self._dirVirtuals
+
+		# mutable attributes (change when add_depgraph_virtuals() is called)
+		result._virtuals = deepcopy(self._virtuals, memo)
+		result._depgraphVirtuals = deepcopy(self._depgraphVirtuals, memo)
+		result._virts_p = deepcopy(self._virts_p, memo)
+
+		return result
+
+	def _compile_virtuals(self):
+		"""Stack installed and profile virtuals.  Preference for virtuals
+		decreases from left to right.
+		Order of preference:
+		1. installed and in profile
+		2. installed only
+		3. profile only
+		"""
+
+		assert self._treeVirtuals is not None, "_populate_treeVirtuals() must be called before " + \
+			"any query about virtuals"
+
+		# Virtuals by profile+tree preferences.
+		ptVirtuals   = {}
+
+		for virt, installed_list in self._treeVirtuals.items():
+			profile_list = self._dirVirtuals.get(virt, None)
+			if not profile_list:
+				continue
+			for cp in installed_list:
+				if cp in profile_list:
+					ptVirtuals.setdefault(virt, [])
+					ptVirtuals[virt].append(cp)
+
+		virtuals = stack_dictlist([ptVirtuals, self._treeVirtuals,
+			self._dirVirtuals, self._depgraphVirtuals])
+		self._virtuals = virtuals
+		self._virts_p = None
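+	# Illustrative sketch (hypothetical data): if app-editors/vim is both
+	# installed (_treeVirtuals) and listed in a profile (_dirVirtuals)
+	# for virtual/editor, it lands in ptVirtuals and therefore ends up
+	# ahead of profile-only or installed-only providers in the stacked
+	# result.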
+
+	def getvirtuals(self):
+		"""
+		Computes self._virtuals if necessary and returns it.
+		self._virtuals is only computed on the first call.
+		"""
+		if self._virtuals is None:
+			self._compile_virtuals()
+
+		return self._virtuals
+
+	def _populate_treeVirtuals(self, vartree):
+		"""
+		Initialize _treeVirtuals from the given vartree.
+		It must not have been initialized already, otherwise
+		our assumptions about immutability don't hold.
+		"""
+		assert self._treeVirtuals is None, "treeVirtuals must not be reinitialized"
+
+		self._treeVirtuals = {}
+
+		for provide, cpv_list in vartree.get_all_provides().items():
+			try:
+				provide = Atom(provide)
+			except InvalidAtom:
+				continue
+			self._treeVirtuals[provide.cp] = \
+				[Atom(cpv_getkey(cpv)) for cpv in cpv_list]
+
+	def populate_treeVirtuals_if_needed(self, vartree):
+		"""
+		Initialize _treeVirtuals if it hasn't been done already.
+		This is a hack for consumers that already have a populated vartree.
+		"""
+		if self._treeVirtuals is not None:
+			return
+
+		self._populate_treeVirtuals(vartree)
+
+	def add_depgraph_virtuals(self, mycpv, virts):
+		"""This updates the preferences for old-style virtuals,
+		affecting the behavior of dep_expand() and dep_check()
+		calls. It can change dbapi.match() behavior since that
+		calls dep_expand(). However, dbapi instances have
+		internal match caches that are not invalidated when
+		preferences are updated here. This can potentially
+		lead to some inconsistency (relevant to bug #1343)."""
+
+		#Ensure that self._virtuals is populated.
+		if self._virtuals is None:
+			self.getvirtuals()
+
+		modified = False
+		cp = Atom(cpv_getkey(mycpv))
+		for virt in virts:
+			try:
+				virt = Atom(virt).cp
+			except InvalidAtom:
+				continue
+			providers = self._virtuals.get(virt)
+			if providers and cp in providers:
+				continue
+			providers = self._depgraphVirtuals.get(virt)
+			if providers is None:
+				providers = []
+				self._depgraphVirtuals[virt] = providers
+			if cp not in providers:
+				providers.append(cp)
+				modified = True
+
+		if modified:
+			self._compile_virtuals()
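+	# Illustrative usage (hypothetical depgraph call):
+	#   vm.add_depgraph_virtuals('app-editors/vim-7.3', ['virtual/editor'])
+	# registers app-editors/vim as a provider of virtual/editor and
+	# recompiles the stacked virtuals if anything changed.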
+
+	def get_virts_p(self):
+		if self._virts_p is not None:
+			return self._virts_p
+
+		virts = self.getvirtuals()
+		virts_p = {}
+		for x in virts:
+			vkeysplit = x.split("/")
+			if vkeysplit[1] not in virts_p:
+				virts_p[vkeysplit[1]] = virts[x]
+		self._virts_p = virts_p
+		return virts_p
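+	# Illustrative sketch: if getvirtuals() returns
+	# {'virtual/editor': [Atom('app-editors/vim')]}, get_virts_p() maps
+	# the short key instead: {'editor': [Atom('app-editors/vim')]}.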

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/__init__.py b/portage_with_autodep/pym/portage/package/ebuild/_config/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.py b/portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.py
new file mode 100644
index 0000000..d3db545
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.py
@@ -0,0 +1,23 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.process import find_binary
+from portage.util import shlex_split
+
+def validate_cmd_var(v):
+	"""
+	Validate an environment variable value to see if it
+	contains an executable command as the first token.
+	Returns (valid, token_list) where 'valid' is boolean and 'token_list'
+	is the (possibly empty) list of tokens split by shlex.
+	"""
+	invalid = False
+	v_split = shlex_split(v)
+	if not v_split:
+		invalid = True
+	elif os.path.isabs(v_split[0]):
+		# os.X_OK verifies that the absolute path is executable;
+		# checking mere existence is not enough here.
+		invalid = not os.access(v_split[0], os.X_OK)
+	elif find_binary(v_split[0]) is None:
+		invalid = True
+	return (not invalid, v_split)
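+# Illustrative usage (assuming 'bzip2' is in PATH):
+#   validate_cmd_var('bzip2 -9')     -> (True, ['bzip2', '-9'])
+#   validate_cmd_var('')             -> (False, [])
+#   validate_cmd_var('/no/such/bin') -> (False, ['/no/such/bin'])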

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/features_set.py b/portage_with_autodep/pym/portage/package/ebuild/_config/features_set.py
new file mode 100644
index 0000000..62236fd
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/features_set.py
@@ -0,0 +1,128 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+	'features_set',
+)
+
+import logging
+
+from portage.const import SUPPORTED_FEATURES
+from portage.localization import _
+from portage.output import colorize
+from portage.util import writemsg_level
+
+class features_set(object):
+	"""
+	Provides relevant set operations needed for access and modification of
+	config.features. The FEATURES variable is automatically synchronized
+	upon modification.
+
+	Modifications result in a permanent override that will cause the change
+	to propagate to the incremental stacking mechanism in config.regenerate().
+	This eliminates the need to call config.backup_changes() when FEATURES
+	is modified, since any overrides are guaranteed to persist despite calls
+	to config.reset().
+	"""
+
+	def __init__(self, settings):
+		self._settings = settings
+		self._features = set()
+
+	def __contains__(self, k):
+		return k in self._features
+
+	def __iter__(self):
+		return iter(self._features)
+
+	def _sync_env_var(self):
+		self._settings['FEATURES'] = ' '.join(sorted(self._features))
+
+	def add(self, k):
+		self._settings.modifying()
+		self._settings._features_overrides.append(k)
+		if k not in self._features:
+			self._features.add(k)
+			self._sync_env_var()
+
+	def update(self, values):
+		self._settings.modifying()
+		values = list(values)
+		self._settings._features_overrides.extend(values)
+		need_sync = False
+		for k in values:
+			if k in self._features:
+				continue
+			self._features.add(k)
+			need_sync = True
+		if need_sync:
+			self._sync_env_var()
+
+	def difference_update(self, values):
+		self._settings.modifying()
+		values = list(values)
+		self._settings._features_overrides.extend('-' + k for k in values)
+		remove_us = self._features.intersection(values)
+		if remove_us:
+			self._features.difference_update(values)
+			self._sync_env_var()
+
+	def remove(self, k):
+		"""
+		This never raises KeyError, since it records a permanent override
+		that will prevent the given flag from ever being added again by
+		incremental stacking in config.regenerate().
+		"""
+		self.discard(k)
+
+	def discard(self, k):
+		self._settings.modifying()
+		self._settings._features_overrides.append('-' + k)
+		if k in self._features:
+			self._features.remove(k)
+			self._sync_env_var()
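+	# Illustrative usage (hypothetical config instance): with
+	# FEATURES="sandbox", settings.features.add('test') records 'test'
+	# in _features_overrides and re-syncs settings['FEATURES'] to
+	# "sandbox test"; discard('sandbox') records '-sandbox' so the flag
+	# stays removed across config.regenerate() calls.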
+
+	def _validate(self):
+		"""
+		Implements unknown-features-warn and unknown-features-filter.
+		"""
+		if 'unknown-features-warn' in self._features:
+			unknown_features = \
+				self._features.difference(SUPPORTED_FEATURES)
+			if unknown_features:
+				unknown_features = unknown_features.difference(
+					self._settings._unknown_features)
+				if unknown_features:
+					self._settings._unknown_features.update(unknown_features)
+					writemsg_level(colorize("BAD",
+						_("FEATURES variable contains unknown value(s): %s") % \
+						", ".join(sorted(unknown_features))) \
+						+ "\n", level=logging.WARNING, noiselevel=-1)
+
+		if 'unknown-features-filter' in self._features:
+			unknown_features = \
+				self._features.difference(SUPPORTED_FEATURES)
+			if unknown_features:
+				self.difference_update(unknown_features)
+				self._prune_overrides()
+
+	def _prune_overrides(self):
+		"""
+		If there are lots of invalid package.env FEATURES settings
+		then unknown-features-filter can make _features_overrides
+		grow larger and larger, so prune it. This performs incremental
+		stacking with preservation of negative values since they need
+		to persist for future config.regenerate() calls.
+		"""
+		positive = set()
+		negative = set()
+		for x in self._settings._features_overrides:
+			if x[:1] == '-':
+				positive.discard(x[1:])
+				negative.add(x[1:])
+			else:
+				positive.add(x)
+				negative.discard(x)
+		self._settings._features_overrides[:] = \
+			list(positive) + list('-' + x for x in negative)
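+	# Illustrative sketch: overrides ['test', '-test', 'doc', '-doc', 'doc']
+	# prune to positive == {'doc'} and negative == {'test'}, so the list
+	# becomes ['doc', '-test'] (ordering within each group is arbitrary,
+	# since sets are used).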

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py b/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py
new file mode 100644
index 0000000..4f46781
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py
@@ -0,0 +1,64 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+	'ordered_by_atom_specificity', 'prune_incremental',
+)
+
+from _emerge.Package import Package
+from portage.dep import best_match_to_list, _repo_separator
+
+def ordered_by_atom_specificity(cpdict, pkg, repo=None):
+	"""
+	Return a list of matched values from the given cpdict,
+	in ascending order by atom specificity. The rationale
+	for this order is that package.* config files are
+	typically written in ChangeLog-like fashion, so it's
+	most friendly if the order that the atoms are written
+	does not matter. Therefore, settings from more specific
+	atoms override those of less specific atoms. Without
+	this behavior, settings from relatively unspecific atoms
+	would (somewhat confusingly) override the settings of
+	more specific atoms, requiring people to make adjustments
+	to the order that atoms are listed in the config file in
+	order to achieve desired results (and thus corrupting
+	the ChangeLog-like ordering of the file).
+	"""
+	if repo and repo != Package.UNKNOWN_REPO:
+		pkg = pkg + _repo_separator + repo
+
+	results = []
+	keys = list(cpdict)
+
+	while keys:
+		bestmatch = best_match_to_list(pkg, keys)
+		if bestmatch:
+			keys.remove(bestmatch)
+			results.append(cpdict[bestmatch])
+		else:
+			break
+
+	if results:
+		# reverse, so the most specific atoms come last
+		results.reverse()
+
+	return results
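+# Illustrative sketch (hypothetical package.use entries): with
+#   cpdict = {Atom('dev-lang/python'): ['doc'],
+#             Atom('=dev-lang/python-2.7.3'): ['-doc']}
+# ordered_by_atom_specificity(cpdict, 'dev-lang/python-2.7.3') returns
+# [['doc'], ['-doc']]: the more specific atom's settings come last, so
+# they win when the results are stacked incrementally.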
+
+def prune_incremental(split):
+	"""
+	Prune off any parts of an incremental variable that are
+	made irrelevant by the latest occurring * or -*. This
+	could be more aggressive but that might be confusing
+	and the point is just to reduce noise a bit.
+	"""
+	for i, x in enumerate(reversed(split)):
+		if x == '*':
+			split = split[-i-1:]
+			break
+		elif x == '-*':
+			if i == 0:
+				split = []
+			else:
+				split = split[-i:]
+			break
+	return split
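+# Illustrative sketch:
+#   prune_incremental(['a', '-*', 'b']) -> ['b']
+#   prune_incremental(['a', '*', 'b'])  -> ['*', 'b']
+#   prune_incremental(['a', '-*'])      -> []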

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py b/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py
new file mode 100644
index 0000000..6d42809
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py
@@ -0,0 +1,185 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+	'case_insensitive_vars', 'default_globals', 'env_blacklist', \
+	'environ_filter', 'environ_whitelist', 'environ_whitelist_re',
+)
+
+import re
+
+# Blacklisted variables are internal variables that are never allowed
+# to enter the config instance from the external environment or
+# configuration files.
+env_blacklist = frozenset((
+	"A", "AA", "CATEGORY", "DEPEND", "DESCRIPTION", "EAPI",
+	"EBUILD_FORCE_TEST", "EBUILD_PHASE", "EBUILD_SKIP_MANIFEST",
+	"ED", "EMERGE_FROM", "EPREFIX", "EROOT",
+	"HOMEPAGE", "INHERITED", "IUSE",
+	"KEYWORDS", "LICENSE", "MERGE_TYPE",
+	"PDEPEND", "PF", "PKGUSE", "PORTAGE_BACKGROUND",
+	"PORTAGE_BACKGROUND_UNMERGE", "PORTAGE_BUILDIR_LOCKED",
+	"PORTAGE_BUILT_USE", "PORTAGE_CONFIGROOT", "PORTAGE_IUSE",
+	"PORTAGE_NONFATAL", "PORTAGE_REPO_NAME", "PORTAGE_SANDBOX_COMPAT_LEVEL",
+	"PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "RESTRICT",
+	"ROOT", "SLOT", "SRC_URI"
+))
+
+environ_whitelist = []
+
+# Whitelisted variables are always allowed to enter the ebuild
+# environment. Generally, this only includes special portage
+# variables. Ebuilds can unset variables that are not whitelisted
+# and rely on them remaining unset for future phases, without them
+# leaking back in from various locations (bug #189417). It's very
+# important to set our special BASH_ENV variable in the ebuild
+# environment in order to prevent sandbox from sourcing /etc/profile
+# in its bashrc (causing major leakage).
+environ_whitelist += [
+	"ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "D",
+	"DISTDIR", "DOC_SYMLINKS_DIR", "EAPI", "EBUILD",
+	"EBUILD_FORCE_TEST",
+	"EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "ED",
+	"EMERGE_FROM", "EPREFIX", "EROOT",
+	"FEATURES", "FILESDIR", "HOME", "MERGE_TYPE", "NOCOLOR", "PATH",
+	"PKGDIR",
+	"PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
+	"PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST",
+	"PORTAGE_BASHRC", "PM_EBUILD_HOOK_DIR",
+	"PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
+	"PORTAGE_BINPKG_TMPFILE",
+	"PORTAGE_BIN_PATH",
+	"PORTAGE_BUILDDIR", "PORTAGE_BUNZIP2_COMMAND", "PORTAGE_BZIP2_COMMAND",
+	"PORTAGE_COLORMAP",
+	"PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
+	"PORTAGE_EBUILD_EXIT_FILE", "PORTAGE_FEATURES",
+	"PORTAGE_GID", "PORTAGE_GRPNAME",
+	"PORTAGE_INST_GID", "PORTAGE_INST_UID",
+	"PORTAGE_IPC_DAEMON", "PORTAGE_IUSE",
+	"PORTAGE_LOG_FILE",
+	"PORTAGE_PYM_PATH", "PORTAGE_PYTHON", "PORTAGE_QUIET",
+	"PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
+	"PORTAGE_SANDBOX_COMPAT_LEVEL", "PORTAGE_SIGPIPE_STATUS",
+	"PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV", "PORTAGE_USERNAME",
+	"PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
+	"PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
+	"REPLACING_VERSIONS", "REPLACED_BY_VERSION",
+	"ROOT", "ROOTPATH", "T", "TMP", "TMPDIR",
+	"USE_EXPAND", "USE_ORDER", "WORKDIR",
+	"XARGS",
+]
+
+# user config variables
+environ_whitelist += [
+	"DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
+]
+
+environ_whitelist += [
+	"A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
+]
+
+# misc variables inherited from the calling environment
+environ_whitelist += [
+	"COLORTERM", "DISPLAY", "EDITOR", "LESS",
+	"LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
+	"TERM", "TERMCAP", "USER",
+	'ftp_proxy', 'http_proxy', 'no_proxy',
+]
+
+# tempdir settings
+environ_whitelist += [
+	"TMPDIR", "TEMP", "TMP",
+]
+
+# localization settings
+environ_whitelist += [
+	"LANG", "LC_COLLATE", "LC_CTYPE", "LC_MESSAGES",
+	"LC_MONETARY", "LC_NUMERIC", "LC_TIME", "LC_PAPER",
+	"LC_ALL",
+]
+
+# other variables inherited from the calling environment
+environ_whitelist += [
+	"CVS_RSH", "ECHANGELOG_USER",
+	"GPG_AGENT_INFO", "LOG_SOCKET",
+	"SSH_AGENT_PID", "SSH_AUTH_SOCK",
+	"STY", "WINDOW", "XAUTHORITY",
+]
+
+environ_whitelist = frozenset(environ_whitelist)
+
+environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
+
+# Filter selected variables in the config.environ() method so that
+# they don't needlessly propagate down into the ebuild environment.
+environ_filter = []
+
+# Exclude anything that could be extremely long here (like SRC_URI)
+# since that could cause execve() calls to fail with E2BIG errors. For
+# example, see bug #262647.
+environ_filter += [
+	'DEPEND', 'RDEPEND', 'PDEPEND', 'SRC_URI',
+]
+
+# misc variables inherited from the calling environment
+environ_filter += [
+	"INFOPATH", "MANPATH", "USER",
+]
+
+# variables that break bash
+environ_filter += [
+	"HISTFILE", "POSIXLY_CORRECT",
+]
+
+# portage config variables and variables set directly by portage
+environ_filter += [
+	"ACCEPT_CHOSTS", "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES", "AUTOCLEAN",
+	"CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
+	"CONFIG_PROTECT_MASK", "EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
+	"EMERGE_LOG_DIR",
+	"EMERGE_WARNING_DELAY",
+	"FETCHCOMMAND", "FETCHCOMMAND_FTP",
+	"FETCHCOMMAND_HTTP", "FETCHCOMMAND_HTTPS",
+	"FETCHCOMMAND_RSYNC", "FETCHCOMMAND_SFTP",
+	"GENTOO_MIRRORS", "NOCONFMEM", "O",
+	"PORTAGE_BACKGROUND", "PORTAGE_BACKGROUND_UNMERGE",
+	"PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_BUILDIR_LOCKED", "PORTAGE_CALLER",
+	"PORTAGE_ELOG_CLASSES",
+	"PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
+	"PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
+	"PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
+	"PORTAGE_GPG_DIR",
+	"PORTAGE_GPG_KEY", "PORTAGE_GPG_SIGNING_COMMAND",
+	"PORTAGE_IONICE_COMMAND",
+	"PORTAGE_PACKAGE_EMPTY_ABORT",
+	"PORTAGE_REPO_DUPLICATE_WARN",
+	"PORTAGE_RO_DISTDIRS",
+	"PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
+	"PORTAGE_RSYNC_RETRIES", "PORTAGE_SYNC_STALE",
+	"PORTAGE_USE", "PORT_LOGDIR",
+	"QUICKPKG_DEFAULT_OPTS",
+	"RESUMECOMMAND", "RESUMECOMMAND_FTP",
+	"RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTPS",
+	"RESUMECOMMAND_RSYNC", "RESUMECOMMAND_SFTP",
+	"SYNC", "USE_EXPAND_HIDDEN", "USE_ORDER",
+]
+
+environ_filter = frozenset(environ_filter)
+
+# Variables that are not allowed to have per-repo or per-package
+# settings.
+global_only_vars = frozenset([
+	"CONFIG_PROTECT",
+])
+
+default_globals = {
+	'ACCEPT_LICENSE':           '* -@EULA',
+	'ACCEPT_PROPERTIES':        '*',
+	'PORTAGE_BZIP2_COMMAND':    'bzip2',
+}
+
+validate_commands = ('PORTAGE_BZIP2_COMMAND', 'PORTAGE_BUNZIP2_COMMAND',)
+
+# To enhance usability, make some vars case insensitive
+# by forcing them to lower case.
+case_insensitive_vars = ('AUTOCLEAN', 'NOCOLOR',)

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.py b/portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.py
new file mode 100644
index 0000000..f14050b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.py
@@ -0,0 +1,27 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.package.ebuild._ipc.IpcCommand import IpcCommand
+
+class ExitCommand(IpcCommand):
+
+	__slots__ = ('exitcode', 'reply_hook',)
+
+	def __init__(self):
+		IpcCommand.__init__(self)
+		self.reply_hook = None
+		self.exitcode = None
+
+	def __call__(self, argv):
+
+		if self.exitcode is not None:
+			# Ignore all but the first call, since if die is called
+			# then we certainly want to honor that exitcode, even if
+			# the ebuild process manages to send a second exit
+			# command.
+			self.reply_hook = None
+		else:
+			self.exitcode = int(argv[1])
+
+		# (stdout, stderr, returncode)
+		return ('', '', 0)

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.py b/portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.py
new file mode 100644
index 0000000..efb27f0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.py
@@ -0,0 +1,9 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class IpcCommand(object):
+
+	__slots__ = ()
+
+	def __call__(self, argv):
+		raise NotImplementedError(self)

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py b/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py
new file mode 100644
index 0000000..fb6e61e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py
@@ -0,0 +1,98 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+
+import portage
+from portage import os
+from portage import _unicode_decode
+from portage.dep import Atom
+from portage.elog import messages as elog_messages
+from portage.exception import InvalidAtom
+from portage.package.ebuild._ipc.IpcCommand import IpcCommand
+from portage.util import normalize_path
+from portage.versions import best
+
+class QueryCommand(IpcCommand):
+
+	__slots__ = ('phase', 'settings',)
+
+	_db = None
+
+	def __init__(self, settings, phase):
+		IpcCommand.__init__(self)
+		self.settings = settings
+		self.phase = phase
+
+	def __call__(self, argv):
+		"""
+		@returns: tuple of (stdout, stderr, returncode)
+		"""
+
+		cmd, root, atom_str = argv
+
+		try:
+			atom = Atom(atom_str)
+		except InvalidAtom:
+			return ('', 'invalid atom: %s\n' % atom_str, 2)
+
+		warnings = []
+		try:
+			atom = Atom(atom_str, eapi=self.settings.get('EAPI'))
+		except InvalidAtom as e:
+			warnings.append(_unicode_decode("QA Notice: %s: %s") % (cmd, e))
+
+		use = self.settings.get('PORTAGE_BUILT_USE')
+		if use is None:
+			use = self.settings['PORTAGE_USE']
+
+		use = frozenset(use.split())
+		atom = atom.evaluate_conditionals(use)
+
+		db = self._db
+		if db is None:
+			db = portage.db
+
+		warnings_str = ''
+		if warnings:
+			warnings_str = self._elog('eqawarn', warnings)
+
+		root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
+		if root not in db:
+			return ('', 'invalid ROOT: %s\n' % root, 2)
+
+		vardb = db[root]["vartree"].dbapi
+
+		if cmd == 'has_version':
+			if vardb.match(atom):
+				returncode = 0
+			else:
+				returncode = 1
+			return ('', warnings_str, returncode)
+		elif cmd == 'best_version':
+			m = best(vardb.match(atom))
+			return ('%s\n' % m, warnings_str, 0)
+		else:
+			return ('', 'invalid command: %s\n' % cmd, 2)
+
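+	# Illustrative argv values (hypothetical, as received from the
+	# has_version/best_version helpers over the IPC pipe):
+	#   ['has_version', '/', 'dev-lang/python']  -> rc 0 if installed, 1 if not
+	#   ['best_version', '/', 'dev-lang/python'] -> best installed cpv on stdout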
+	def _elog(self, elog_funcname, lines):
+		"""
+		This returns a string, to be returned via ipc and displayed at the
+		appropriate place in the build output. We wouldn't want to open the
+		log here since it is already opened by AbstractEbuildProcess and we
+		don't want to corrupt it, especially if it is being written with
+		compression.
+		"""
+		out = io.StringIO()
+		phase = self.phase
+		elog_func = getattr(elog_messages, elog_funcname)
+		global_havecolor = portage.output.havecolor
+		try:
+			portage.output.havecolor = \
+				self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+			for line in lines:
+				elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+		finally:
+			portage.output.havecolor = global_havecolor
+		msg = out.getvalue()
+		return msg

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.py b/portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py b/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py
new file mode 100644
index 0000000..befdc89
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py
@@ -0,0 +1,82 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import tempfile
+
+from portage import os
+from portage.const import EBUILD_PHASES
+from portage.elog import elog_process
+from portage.package.ebuild.config import config
+from portage.package.ebuild.doebuild import doebuild_environment
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.PollScheduler import PollScheduler
+
+def spawn_nofetch(portdb, ebuild_path, settings=None):
+	"""
+	This spawns pkg_nofetch if appropriate. The settings parameter
+	is useful only if setcpv has already been called in order
+	to cache metadata. It will be cloned internally, in order to
+	prevent any changes from interfering with the calling code.
+	If settings is None then a suitable config instance will be
+	acquired from the given portdbapi instance.
+
+	A private PORTAGE_BUILDDIR will be created and cleaned up, in
+	order to avoid any interference with any other processes.
+	If PORTAGE_TMPDIR is writable, that will be used, otherwise
+	the default directory for the tempfile module will be used.
+
+	We only call the pkg_nofetch phase if either RESTRICT=fetch
+	is set or the package has explicitly overridden the default
+	pkg_nofetch implementation. This allows specialized messages
+	to be displayed for problematic packages even though they do
+	not set RESTRICT=fetch (bug #336499).
+
+	This function does nothing if the PORTAGE_PARALLEL_FETCHONLY
+	variable is set in the config instance.
+	"""
+
+	if settings is None:
+		settings = config(clone=portdb.settings)
+	else:
+		settings = config(clone=settings)
+
+	if 'PORTAGE_PARALLEL_FETCHONLY' in settings:
+		return
+
+	# We must create our private PORTAGE_TMPDIR before calling
+	# doebuild_environment(), since lots of variables such
+	# as PORTAGE_BUILDDIR refer to paths inside PORTAGE_TMPDIR.
+	portage_tmpdir = settings.get('PORTAGE_TMPDIR')
+	if not portage_tmpdir or not os.access(portage_tmpdir, os.W_OK):
+		portage_tmpdir = None
+	private_tmpdir = tempfile.mkdtemp(dir=portage_tmpdir)
+	settings['PORTAGE_TMPDIR'] = private_tmpdir
+	settings.backup_changes('PORTAGE_TMPDIR')
+	# private temp dir was just created, so it's not locked yet
+	settings.pop('PORTAGE_BUILDIR_LOCKED', None)
+
+	try:
+		doebuild_environment(ebuild_path, 'nofetch',
+			settings=settings, db=portdb)
+		restrict = settings['PORTAGE_RESTRICT'].split()
+		defined_phases = settings['DEFINED_PHASES'].split()
+		if not defined_phases:
+			# When DEFINED_PHASES is undefined, assume all
+			# phases are defined.
+			defined_phases = EBUILD_PHASES
+
+		if 'fetch' not in restrict and \
+			'nofetch' not in defined_phases:
+			return
+
+		prepare_build_dirs(settings=settings)
+		ebuild_phase = EbuildPhase(background=False,
+			phase='nofetch', scheduler=PollScheduler().sched_iface,
+			settings=settings)
+		ebuild_phase.start()
+		ebuild_phase.wait()
+		elog_process(settings.mycpv, settings)
+	finally:
+		shutil.rmtree(private_tmpdir)
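+# Illustrative usage (hypothetical paths): spawn_nofetch(portage.portdb,
+# '/usr/portage/app-misc/foo/foo-1.ebuild') runs pkg_nofetch in a private
+# temporary build directory, but only if the ebuild defines pkg_nofetch
+# or sets RESTRICT=fetch.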

diff --git a/portage_with_autodep/pym/portage/package/ebuild/config.py b/portage_with_autodep/pym/portage/package/ebuild/config.py
new file mode 100644
index 0000000..a8c6ad6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/config.py
@@ -0,0 +1,2224 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = [
+	'autouse', 'best_from_dict', 'check_config_instance', 'config',
+]
+
+import copy
+from itertools import chain
+import logging
+import re
+import sys
+import warnings
+
+from _emerge.Package import Package
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.data:portage_gid',
+)
+from portage import bsd_chflags, \
+	load_mod, os, selinux, _unicode_decode
+from portage.const import CACHE_PATH, \
+	DEPCACHE_PATH, INCREMENTALS, MAKE_CONF_FILE, \
+	MODULES_FILE_PATH, PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, \
+	PRIVATE_PATH, PROFILE_PATH, USER_CONFIG_PATH, \
+	USER_VIRTUALS_FILE
+from portage.const import _SANDBOX_COMPAT_LEVEL
+from portage.dbapi import dbapi
+from portage.dbapi.porttree import portdbapi
+from portage.dbapi.vartree import vartree
+from portage.dep import Atom, isvalidatom, match_from_list, use_reduce, _repo_separator, _slot_separator
+from portage.eapi import eapi_exports_AA, eapi_exports_merge_type, \
+	eapi_supports_prefix, eapi_exports_replace_vars
+from portage.env.loaders import KeyValuePairFileLoader
+from portage.exception import InvalidDependString, PortageException
+from portage.localization import _
+from portage.output import colorize
+from portage.process import fakeroot_capable, sandbox_capable
+from portage.repository.config import load_repository_config
+from portage.util import ensure_dirs, getconfig, grabdict, \
+	grabdict_package, grabfile, grabfile_package, LazyItemsDict, \
+	normalize_path, shlex_split, stack_dictlist, stack_dicts, stack_lists, \
+	writemsg, writemsg_level
+from portage.versions import catpkgsplit, catsplit, cpv_getkey
+
+from portage.package.ebuild._config import special_env_vars
+from portage.package.ebuild._config.env_var_validation import validate_cmd_var
+from portage.package.ebuild._config.features_set import features_set
+from portage.package.ebuild._config.KeywordsManager import KeywordsManager
+from portage.package.ebuild._config.LicenseManager import LicenseManager
+from portage.package.ebuild._config.UseManager import UseManager
+from portage.package.ebuild._config.LocationsManager import LocationsManager
+from portage.package.ebuild._config.MaskManager import MaskManager
+from portage.package.ebuild._config.VirtualsManager import VirtualsManager
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity, prune_incremental
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+def autouse(myvartree, use_cache=1, mysettings=None):
+	warnings.warn("portage.autouse() is deprecated",
+		DeprecationWarning, stacklevel=2)
+	return ""
+
+def check_config_instance(test):
+	if not isinstance(test, config):
+		raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
+
+def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
+	for x in key_order:
+		if x in top_dict and key in top_dict[x]:
+			if FullCopy:
+				return copy.deepcopy(top_dict[x][key])
+			else:
+				return top_dict[x][key]
+	if EmptyOnError:
+		return ""
+	else:
+		raise KeyError("Key not found in list; '%s'" % key)
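+# Illustrative sketch (hypothetical dicts):
+#   best_from_dict('USE', {'env': {'USE': 'doc'}, 'conf': {'USE': 'gtk'}},
+#       ('env', 'conf'))
+# returns 'doc' (a deep copy, since FullCopy defaults to 1).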
+
+def _lazy_iuse_regex(iuse_implicit):
+	"""
+	The PORTAGE_IUSE value is lazily evaluated since re.escape() is slow
+	and the value is only used when an ebuild phase needs to be executed
+	(it's used only to generate QA notices).
+	"""
+	# Escape anything except ".*" which is supposed to pass through from
+	# _get_implicit_iuse().
+	regex = sorted(re.escape(x) for x in iuse_implicit)
+	regex = "^(%s)$" % "|".join(regex)
+	regex = regex.replace("\\.\\*", ".*")
+	return regex
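+# Illustrative sketch: _lazy_iuse_regex(['x86', 'elibc_.*']) returns
+# '^(elibc_.*|x86)$'; the '.*' in wildcard tokens from
+# _get_implicit_iuse() passes through unescaped.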
+
+class _iuse_implicit_match_cache(object):
+
+	def __init__(self, settings):
+		self._iuse_implicit_re = re.compile("^(%s)$" % \
+			"|".join(settings._get_implicit_iuse()))
+		self._cache = {}
+
+	def __call__(self, flag):
+		"""
+		Returns True if the flag is matched, False otherwise.
+		"""
+		try:
+			return self._cache[flag]
+		except KeyError:
+			m = self._iuse_implicit_re.match(flag) is not None
+			self._cache[flag] = m
+			return m
+
+class config(object):
+	"""
+	This class encompasses the main portage configuration.  Data is pulled from
+	ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
+	ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through
+	all parent profiles, as well as from ROOT/PORTAGE_CONFIGROOT/* for
+	user-specified overrides.
+
+	Generally, if you need data like USE flags, FEATURES, environment
+	variables, or virtuals, look in here.
+
+	_setcpv_aux_keys = ('DEFINED_PHASES', 'DEPEND', 'EAPI',
+		'INHERITED', 'IUSE', 'REQUIRED_USE', 'KEYWORDS', 'LICENSE', 'PDEPEND',
+		'PROPERTIES', 'PROVIDE', 'RDEPEND', 'SLOT',
+		'repository', 'RESTRICT',)
+
+	_case_insensitive_vars = special_env_vars.case_insensitive_vars
+	_default_globals = special_env_vars.default_globals
+	_env_blacklist = special_env_vars.env_blacklist
+	_environ_filter = special_env_vars.environ_filter
+	_environ_whitelist = special_env_vars.environ_whitelist
+	_environ_whitelist_re = special_env_vars.environ_whitelist_re
+	_global_only_vars = special_env_vars.global_only_vars
+
+	def __init__(self, clone=None, mycpv=None, config_profile_path=None,
+		config_incrementals=None, config_root=None, target_root=None,
+		_eprefix=None, local_config=True, env=None, _unmatched_removal=False):
+		"""
+		@param clone: If provided, init will use deepcopy to copy by value the instance.
+		@type clone: Instance of config class.
+		@param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
+		and then calling instance.setcpv(mycpv).
+		@type mycpv: String
+		@param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
+		@type config_profile_path: String
+		@param config_incrementals: List of incremental variables
+			(defaults to portage.const.INCREMENTALS)
+		@type config_incrementals: List
+		@param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
+		@type config_root: String
+		@param target_root: __init__ override of $ROOT env variable.
+		@type target_root: String
+		@param _eprefix: set the EPREFIX variable (private, used by internal tests)
+		@type _eprefix: String
+		@param local_config: Enables loading of local config (/etc/portage); used most by repoman to
+		ignore local config (keywording and unmasking)
+		@type local_config: Boolean
+		@param env: The calling environment which is used to override settings.
+			Defaults to os.environ if unspecified.
+		@type env: dict
+		@param _unmatched_removal: Enabled by repoman when the
+			--unmatched-removal option is given.
+		@type _unmatched_removal: Boolean
+		"""
+
+		# rename local _eprefix variable for convenience
+		eprefix = _eprefix
+		del _eprefix
+
+		# When initializing the global portage.settings instance, avoid
+		# raising exceptions whenever possible since exceptions thrown
+		# from 'import portage' or 'import portage.exceptions' statements
+		# can practically render the API unusable for API consumers.
+		tolerant = hasattr(portage, '_initializing_globals')
+		self._tolerant = tolerant
+
+		self.locked   = 0
+		self.mycpv    = None
+		self._setcpv_args_hash = None
+		self.puse     = ""
+		self._penv    = []
+		self.modifiedkeys = []
+		self.uvlist = []
+		self._accept_chost_re = None
+		self._accept_properties = None
+		self._features_overrides = []
+		self._make_defaults = None
+
+		# _unknown_features records unknown features that
+		# have triggered warning messages, and ensures that
+		# the same warning isn't shown twice.
+		self._unknown_features = set()
+
+		self.local_config = local_config
+
+		if clone:
+			# For immutable attributes, use shallow copy for
+			# speed and memory conservation.
+			self._tolerant = clone._tolerant
+			self.categories = clone.categories
+			self.depcachedir = clone.depcachedir
+			self.incrementals = clone.incrementals
+			self.module_priority = clone.module_priority
+			self.profile_path = clone.profile_path
+			self.profiles = clone.profiles
+			self.packages = clone.packages
+			self.repositories = clone.repositories
+			self._iuse_implicit_match = clone._iuse_implicit_match
+			self._non_user_variables = clone._non_user_variables
+			self._repo_make_defaults = clone._repo_make_defaults
+			self.usemask = clone.usemask
+			self.useforce = clone.useforce
+			self.puse = clone.puse
+			self.user_profile_dir = clone.user_profile_dir
+			self.local_config = clone.local_config
+			self.make_defaults_use = clone.make_defaults_use
+			self.mycpv = clone.mycpv
+			self._setcpv_args_hash = clone._setcpv_args_hash
+
+			# immutable attributes (internal policy ensures lack of mutation)
+			self._keywords_manager = clone._keywords_manager
+			self._use_manager = clone._use_manager
+			self._mask_manager = clone._mask_manager
+
+			# shared mutable attributes
+			self._unknown_features = clone._unknown_features
+
+			self.modules         = copy.deepcopy(clone.modules)
+			self._penv = copy.deepcopy(clone._penv)
+
+			self.configdict = copy.deepcopy(clone.configdict)
+			self.configlist = [
+				self.configdict['env.d'],
+				self.configdict['repo'],
+				self.configdict['pkginternal'],
+				self.configdict['globals'],
+				self.configdict['defaults'],
+				self.configdict['conf'],
+				self.configdict['pkg'],
+				self.configdict['env'],
+			]
+			self.lookuplist = self.configlist[:]
+			self.lookuplist.reverse()
+			self._use_expand_dict = copy.deepcopy(clone._use_expand_dict)
+			self.backupenv  = self.configdict["backupenv"]
+			self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
+			self.pprovideddict = copy.deepcopy(clone.pprovideddict)
+			self.features = features_set(self)
+			self.features._features = copy.deepcopy(clone.features._features)
+			self._features_overrides = copy.deepcopy(clone._features_overrides)
+
+			#Strictly speaking _license_manager is not immutable. Users need to ensure that
+			#extract_global_changes() is called right after __init__ (if at all).
+			#It also has the mutable member _undef_lic_groups. It is used to track
+			#undefined license groups, to not display an error message for the same
+			#group again and again. Because of this, it's useful to share it between
+			#all LicenseManager instances.
+			self._license_manager = clone._license_manager
+
+			self._virtuals_manager = copy.deepcopy(clone._virtuals_manager)
+
+			self._accept_properties = copy.deepcopy(clone._accept_properties)
+			self._ppropertiesdict = copy.deepcopy(clone._ppropertiesdict)
+			self._penvdict = copy.deepcopy(clone._penvdict)
+			self._expand_map = copy.deepcopy(clone._expand_map)
+
+		else:
+			locations_manager = LocationsManager(config_root=config_root,
+				config_profile_path=config_profile_path, eprefix=eprefix,
+				local_config=local_config, target_root=target_root)
+
+			eprefix = locations_manager.eprefix
+			config_root = locations_manager.config_root
+			self.profiles = locations_manager.profiles
+			self.profile_path = locations_manager.profile_path
+			self.user_profile_dir = locations_manager.user_profile_dir
+			abs_user_config = locations_manager.abs_user_config
+
+			make_conf = getconfig(
+				os.path.join(config_root, MAKE_CONF_FILE),
+				tolerant=tolerant, allow_sourcing=True) or {}
+
+			make_conf.update(getconfig(
+				os.path.join(abs_user_config, 'make.conf'),
+				tolerant=tolerant, allow_sourcing=True,
+				expand=make_conf) or {})
+
+			# Allow ROOT setting to come from make.conf if it's not overridden
+			# by the constructor argument (from the calling environment).
+			locations_manager.set_root_override(make_conf.get("ROOT"))
+			target_root = locations_manager.target_root
+			eroot = locations_manager.eroot
+			self.global_config_path = locations_manager.global_config_path
+
+			if config_incrementals is None:
+				self.incrementals = INCREMENTALS
+			else:
+				self.incrementals = config_incrementals
+			if not isinstance(self.incrementals, frozenset):
+				self.incrementals = frozenset(self.incrementals)
+
+			self.module_priority    = ("user", "default")
+			self.modules            = {}
+			modules_loader = KeyValuePairFileLoader(
+				os.path.join(config_root, MODULES_FILE_PATH), None, None)
+			modules_dict, modules_errors = modules_loader.load()
+			self.modules["user"] = modules_dict
+			if self.modules["user"] is None:
+				self.modules["user"] = {}
+			self.modules["default"] = {
+				"portdbapi.metadbmodule": "portage.cache.metadata.database",
+				"portdbapi.auxdbmodule":  "portage.cache.flat_hash.database",
+			}
+
+			self.configlist=[]
+
+			# back up our incremental variables:
+			self.configdict={}
+			self._use_expand_dict = {}
+			# configlist will contain: [ env.d, globals, defaults, conf, pkg, backupenv, env ]
+			self.configlist.append({})
+			self.configdict["env.d"] = self.configlist[-1]
+
+			self.configlist.append({})
+			self.configdict["repo"] = self.configlist[-1]
+
+			self.configlist.append({})
+			self.configdict["pkginternal"] = self.configlist[-1]
+
+			self.packages_list = [grabfile_package(os.path.join(x, "packages"), verify_eapi=True) for x in self.profiles]
+			self.packages      = tuple(stack_lists(self.packages_list, incremental=1))
+			del self.packages_list
+			#self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
+
+			# prevmaskdict
+			self.prevmaskdict={}
+			for x in self.packages:
+				# Negative atoms are filtered by the above stack_lists() call.
+				if not isinstance(x, Atom):
+					x = Atom(x.lstrip('*'))
+				self.prevmaskdict.setdefault(x.cp, []).append(x)
+
+			# The expand_map is used for variable substitution
+			# in getconfig() calls, and the getconfig() calls
+			# update expand_map with the value of each variable
+			# assignment that occurs. Variable substitution occurs
+			# in the following order, which corresponds to the
+			# order of appearance in self.lookuplist:
+			#
+			#   * env.d
+			#   * make.globals
+			#   * make.defaults
+			#   * make.conf
+			#
+			# Notably absent is "env", since we want to avoid any
+			# interaction with the calling environment that might
+			# lead to unexpected results.
+			expand_map = {}
+			self._expand_map = expand_map
+
+			env_d = getconfig(os.path.join(eroot, "etc", "profile.env"),
+				expand=expand_map)
+			# env_d will be None if profile.env doesn't exist.
+			if env_d:
+				self.configdict["env.d"].update(env_d)
+				expand_map.update(env_d)
+
+			# backupenv is used for calculating incremental variables.
+			if env is None:
+				env = os.environ
+
+			# Avoid potential UnicodeDecodeError exceptions later.
+			env_unicode = dict((_unicode_decode(k), _unicode_decode(v))
+				for k, v in env.items())
+
+			self.backupenv = env_unicode
+
+			if env_d:
+				# Remove duplicate values so they don't override updated
+				# profile.env values later (profile.env is reloaded in each
+				# call to self.regenerate).
+				for k, v in env_d.items():
+					try:
+						if self.backupenv[k] == v:
+							del self.backupenv[k]
+					except KeyError:
+						pass
+				del k, v
+
+			self.configdict["env"] = LazyItemsDict(self.backupenv)
+
+			for x in (self.global_config_path,):
+				self.mygcfg = getconfig(os.path.join(x, "make.globals"),
+					expand=expand_map)
+				if self.mygcfg:
+					break
+
+			if self.mygcfg is None:
+				self.mygcfg = {}
+
+			for k, v in self._default_globals.items():
+				self.mygcfg.setdefault(k, v)
+
+			self.configlist.append(self.mygcfg)
+			self.configdict["globals"]=self.configlist[-1]
+
+			self.make_defaults_use = []
+			self.mygcfg = {}
+			if self.profiles:
+				mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
+					expand=expand_map) for x in self.profiles]
+				self._make_defaults = mygcfg_dlists
+				self.mygcfg = stack_dicts(mygcfg_dlists,
+					incrementals=self.incrementals)
+				if self.mygcfg is None:
+					self.mygcfg = {}
+			self.configlist.append(self.mygcfg)
+			self.configdict["defaults"]=self.configlist[-1]
+
+			self.mygcfg = getconfig(
+				os.path.join(config_root, MAKE_CONF_FILE),
+				tolerant=tolerant, allow_sourcing=True,
+				expand=expand_map) or {}
+
+			self.mygcfg.update(getconfig(
+				os.path.join(abs_user_config, 'make.conf'),
+				tolerant=tolerant, allow_sourcing=True,
+				expand=expand_map) or {})
+
+			# Don't allow the user to override certain variables in make.conf
+			profile_only_variables = self.configdict["defaults"].get(
+				"PROFILE_ONLY_VARIABLES", "").split()
+			profile_only_variables = stack_lists([profile_only_variables])
+			non_user_variables = set()
+			non_user_variables.update(profile_only_variables)
+			non_user_variables.update(self._env_blacklist)
+			non_user_variables.update(self._global_only_vars)
+			non_user_variables = frozenset(non_user_variables)
+			self._non_user_variables = non_user_variables
+
+			for k in profile_only_variables:
+				self.mygcfg.pop(k, None)
+
+			self.configlist.append(self.mygcfg)
+			self.configdict["conf"]=self.configlist[-1]
+
+			self.configlist.append(LazyItemsDict())
+			self.configdict["pkg"]=self.configlist[-1]
+
+			self.configdict["backupenv"] = self.backupenv
+
+			# Don't allow the user to override certain variables in the env
+			for k in profile_only_variables:
+				self.backupenv.pop(k, None)
+
+			self.configlist.append(self.configdict["env"])
+
+			# make lookuplist for loading package.*
+			self.lookuplist=self.configlist[:]
+			self.lookuplist.reverse()
+
+			# Blacklist vars that could interfere with portage internals.
+			for blacklisted in self._env_blacklist:
+				for cfg in self.lookuplist:
+					cfg.pop(blacklisted, None)
+				self.backupenv.pop(blacklisted, None)
+			del blacklisted, cfg
+
+			self["PORTAGE_CONFIGROOT"] = config_root
+			self.backup_changes("PORTAGE_CONFIGROOT")
+			self["ROOT"] = target_root
+			self.backup_changes("ROOT")
+
+			self["EPREFIX"] = eprefix
+			self.backup_changes("EPREFIX")
+			self["EROOT"] = eroot
+			self.backup_changes("EROOT")
+
+			self["PORTAGE_SANDBOX_COMPAT_LEVEL"] = _SANDBOX_COMPAT_LEVEL
+			self.backup_changes("PORTAGE_SANDBOX_COMPAT_LEVEL")
+
+			self._ppropertiesdict = portage.dep.ExtendedAtomDict(dict)
+			self._penvdict = portage.dep.ExtendedAtomDict(dict)
+
+			#Loading Repositories
+			self.repositories = load_repository_config(self)
+
+			#filling PORTDIR and PORTDIR_OVERLAY variable for compatibility
+			main_repo = self.repositories.mainRepo()
+			if main_repo is not None:
+				main_repo = main_repo.user_location
+				self["PORTDIR"] = main_repo
+				self.backup_changes("PORTDIR")
+
+			# repoman controls PORTDIR_OVERLAY via the environment, so no
+			# special cases are needed here.
+			portdir_overlay = list(self.repositories.repoUserLocationList())
+			if portdir_overlay and portdir_overlay[0] == self["PORTDIR"]:
+				portdir_overlay = portdir_overlay[1:]
+
+			new_ov = []
+			if portdir_overlay:
+				whitespace_re = re.compile(r"\s")
+				for ov in portdir_overlay:
+					ov = normalize_path(ov)
+					if os.path.isdir(ov):
+						if whitespace_re.search(ov) is not None:
+							ov = portage._shell_quote(ov)
+						new_ov.append(ov)
+					else:
+						writemsg(_("!!! Invalid PORTDIR_OVERLAY"
+							" (not a dir): '%s'\n") % ov, noiselevel=-1)
+
+			self["PORTDIR_OVERLAY"] = " ".join(new_ov)
+			self.backup_changes("PORTDIR_OVERLAY")
+
+			locations_manager.set_port_dirs(self["PORTDIR"], self["PORTDIR_OVERLAY"])
+
+			self._repo_make_defaults = {}
+			for repo in self.repositories.repos_with_profiles():
+				d = getconfig(os.path.join(repo.location, "profiles", "make.defaults"),
+					expand=self.configdict["globals"].copy()) or {}
+				if d:
+					for k in chain(self._env_blacklist,
+						profile_only_variables, self._global_only_vars):
+						d.pop(k, None)
+				self._repo_make_defaults[repo.name] = d
+
+			#Read package.keywords and package.accept_keywords.
+			self._keywords_manager = KeywordsManager(self.profiles, abs_user_config, \
+				local_config, global_accept_keywords=self.configdict["defaults"].get("ACCEPT_KEYWORDS", ""))
+
+			#Read all USE related files from profiles and optionally from user config.
+			self._use_manager = UseManager(self.repositories, self.profiles, abs_user_config, user_config=local_config)
+			#Initialize all USE related variables we track ourselves.
+			self.usemask = self._use_manager.getUseMask()
+			self.useforce = self._use_manager.getUseForce()
+			self.configdict["conf"]["USE"] = \
+				self._use_manager.extract_global_USE_changes( \
+					self.configdict["conf"].get("USE", ""))
+
+			#Read license_groups from the profiles and optionally license_groups and package.license from user config
+			self._license_manager = LicenseManager(locations_manager.profile_locations, \
+				abs_user_config, user_config=local_config)
+			#Extract '*/*' entries from package.license
+			self.configdict["conf"]["ACCEPT_LICENSE"] = \
+				self._license_manager.extract_global_changes( \
+					self.configdict["conf"].get("ACCEPT_LICENSE", ""))
+
+			#Read package.mask and package.unmask from profiles and optionally from user config
+			self._mask_manager = MaskManager(self.repositories, self.profiles,
+				abs_user_config, user_config=local_config,
+				strict_umatched_removal=_unmatched_removal)
+
+			self._virtuals_manager = VirtualsManager(self.profiles)
+
+			if local_config:
+				#package.properties
+				propdict = grabdict_package(os.path.join(
+					abs_user_config, "package.properties"), recursive=1, allow_wildcard=True, \
+					allow_repo=True, verify_eapi=False)
+				v = propdict.pop("*/*", None)
+				if v is not None:
+					if "ACCEPT_PROPERTIES" in self.configdict["conf"]:
+						self.configdict["conf"]["ACCEPT_PROPERTIES"] += " " + " ".join(v)
+					else:
+						self.configdict["conf"]["ACCEPT_PROPERTIES"] = " ".join(v)
+				for k, v in propdict.items():
+					self._ppropertiesdict.setdefault(k.cp, {})[k] = v
+
+				#package.env
+				penvdict = grabdict_package(os.path.join(
+					abs_user_config, "package.env"), recursive=1, allow_wildcard=True, \
+					allow_repo=True, verify_eapi=False)
+				v = penvdict.pop("*/*", None)
+				if v is not None:
+					global_wildcard_conf = {}
+					self._grab_pkg_env(v, global_wildcard_conf)
+					incrementals = self.incrementals
+					conf_configdict = self.configdict["conf"]
+					for k, v in global_wildcard_conf.items():
+						if k in incrementals:
+							if k in conf_configdict:
+								conf_configdict[k] = \
+									conf_configdict[k] + " " + v
+							else:
+								conf_configdict[k] = v
+						else:
+							conf_configdict[k] = v
+						expand_map[k] = v
+
+				for k, v in penvdict.items():
+					self._penvdict.setdefault(k.cp, {})[k] = v
+
+			#getting categories from an external file now
+			self.categories = [grabfile(os.path.join(x, "categories")) \
+				for x in locations_manager.profile_and_user_locations]
+			category_re = dbapi._category_re
+			self.categories = tuple(sorted(
+				x for x in stack_lists(self.categories, incremental=1)
+				if category_re.match(x) is not None))
+
+			archlist = [grabfile(os.path.join(x, "arch.list")) \
+				for x in locations_manager.profile_and_user_locations]
+			archlist = stack_lists(archlist, incremental=1)
+			self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
+
+			pkgprovidedlines = [grabfile(os.path.join(x, "package.provided"), recursive=1) for x in self.profiles]
+			pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
+			has_invalid_data = False
+			for x in range(len(pkgprovidedlines)-1, -1, -1):
+				myline = pkgprovidedlines[x]
+				if not isvalidatom("=" + myline):
+					writemsg(_("Invalid package name in package.provided: %s\n") % \
+						myline, noiselevel=-1)
+					has_invalid_data = True
+					del pkgprovidedlines[x]
+					continue
+				cpvr = catpkgsplit(pkgprovidedlines[x])
+				if not cpvr or cpvr[0] == "null":
+					writemsg(_("Invalid package name in package.provided: ")+pkgprovidedlines[x]+"\n",
+						noiselevel=-1)
+					has_invalid_data = True
+					del pkgprovidedlines[x]
+					continue
+				if cpvr[0] == "virtual":
+					writemsg(_("Virtual package in package.provided: %s\n") % \
+						myline, noiselevel=-1)
+					has_invalid_data = True
+					del pkgprovidedlines[x]
+					continue
+			if has_invalid_data:
+				writemsg(_("See portage(5) for correct package.provided usage.\n"),
+					noiselevel=-1)
+			self.pprovideddict = {}
+			for x in pkgprovidedlines:
+				x_split = catpkgsplit(x)
+				if x_split is None:
+					continue
+				mycatpkg = cpv_getkey(x)
+				if mycatpkg in self.pprovideddict:
+					self.pprovideddict[mycatpkg].append(x)
+				else:
+					self.pprovideddict[mycatpkg]=[x]
+
+			# reasonable defaults; this is important as without USE_ORDER,
+			# USE will always be "" (nothing set)!
+			if "USE_ORDER" not in self:
+				self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:repo:env.d"
+
+			self["PORTAGE_GID"] = str(portage_gid)
+			self.backup_changes("PORTAGE_GID")
+
+			self.depcachedir = DEPCACHE_PATH
+			if eprefix:
+				# Like make.globals, DEPCACHE_PATH is relative
+				# to EPREFIX when one is set.
+				if target_root == "/":
+					# the host system is also the target
+					self.depcachedir = os.path.join(eprefix,
+						DEPCACHE_PATH.lstrip(os.sep))
+				else:
+					# installing to a separate ROOT; for now, just
+					# assume DEPCACHE_PATH is relative to EPREFIX
+					# TODO: Pass in more info to the constructor,
+					# so we know the host system configuration.
+					self.depcachedir = os.path.join(eprefix,
+						DEPCACHE_PATH.lstrip(os.sep))
+
+			if self.get("PORTAGE_DEPCACHEDIR", None):
+				self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
+			self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
+			self.backup_changes("PORTAGE_DEPCACHEDIR")
+
+			if "CBUILD" not in self and "CHOST" in self:
+				self["CBUILD"] = self["CHOST"]
+				self.backup_changes("CBUILD")
+
+			self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
+			self.backup_changes("PORTAGE_BIN_PATH")
+			self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
+			self.backup_changes("PORTAGE_PYM_PATH")
+
+			for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
+				try:
+					self[var] = str(int(self.get(var, "0")))
+				except ValueError:
+					writemsg(_("!!! %s='%s' is not a valid integer.  "
+						"Falling back to '0'.\n") % (var, self[var]),
+						noiselevel=-1)
+					self[var] = "0"
+				self.backup_changes(var)
+
+			# initialize self.features
+			self.regenerate()
+
+			if bsd_chflags:
+				self.features.add('chflags')
+
+			if 'parse-eapi-ebuild-head' in self.features:
+				portage._validate_cache_for_unsupported_eapis = False
+
+			self._iuse_implicit_match = _iuse_implicit_match_cache(self)
+
+			self._validate_commands()
+
+		for k in self._case_insensitive_vars:
+			if k in self:
+				self[k] = self[k].lower()
+				self.backup_changes(k)
+
+		if mycpv:
+			self.setcpv(mycpv)
+
+	def _validate_commands(self):
+		for k in special_env_vars.validate_commands:
+			v = self.get(k)
+			if v is not None:
+				valid, v_split = validate_cmd_var(v)
+
+				if not valid:
+					if v_split:
+						writemsg_level(_("%s setting is invalid: '%s'\n") % \
+							(k, v), level=logging.ERROR, noiselevel=-1)
+
+					# before deleting the invalid setting, backup
+					# the default value if available
+					v = self.configdict['globals'].get(k)
+					if v is not None:
+						default_valid, v_split = validate_cmd_var(v)
+						if not default_valid:
+							if v_split:
+								writemsg_level(
+									_("%s setting from make.globals" + \
+									" is invalid: '%s'\n") % \
+									(k, v), level=logging.ERROR, noiselevel=-1)
+							# make.globals seems corrupt, so try for
+							# a hardcoded default instead
+							v = self._default_globals.get(k)
+
+					# delete all settings for this key,
+					# including the invalid one
+					del self[k]
+					self.backupenv.pop(k, None)
+					if v:
+						# restore validated default
+						self.configdict['globals'][k] = v
+
+	def _init_dirs(self):
+		"""
+		Create a few directories that are critical to portage operation
+		"""
+		if not os.access(self["EROOT"], os.W_OK):
+			return
+
+		#                                gid, mode, mask, preserve_perms
+		dir_mode_map = {
+			"tmp"             : (         -1, 0o1777,  0,  True),
+			"var/tmp"         : (         -1, 0o1777,  0,  True),
+			PRIVATE_PATH      : (portage_gid, 0o2750, 0o2, False),
+			CACHE_PATH        : (portage_gid,  0o755, 0o2, False)
+		}
+
+		for mypath, (gid, mode, modemask, preserve_perms) \
+			in dir_mode_map.items():
+			mydir = os.path.join(self["EROOT"], mypath)
+			if preserve_perms and os.path.isdir(mydir):
+				# Only adjust permissions on some directories if
+				# they don't exist yet. This gives freedom to the
+				# user to adjust permissions to suit their taste.
+				continue
+			try:
+				ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
+			except PortageException as e:
+				writemsg(_("!!! Directory initialization failed: '%s'\n") % mydir,
+					noiselevel=-1)
+				writemsg("!!! %s\n" % str(e),
+					noiselevel=-1)
+
+	@property
+	def pkeywordsdict(self):
+		result = self._keywords_manager.pkeywordsdict.copy()
+		for k, v in result.items():
+			result[k] = v.copy()
+		return result
+
+	@property
+	def pmaskdict(self):
+		return self._mask_manager._pmaskdict.copy()
+
+	@property
+	def punmaskdict(self):
+		return self._mask_manager._punmaskdict.copy()
+
+	def expandLicenseTokens(self, tokens):
+		""" Take a token from ACCEPT_LICENSE or package.license and expand it
+		if it's a group token (indicated by @) or just return it if it's not a
+		group.  If a group is negated then negate all group elements."""
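+		# e.g. (hypothetical group): ["@FSF-APPROVED"] expands to that
+		# group's member licenses, and ["-@FSF-APPROVED"] to each member
+		# negated.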
+		return self._license_manager.expandLicenseTokens(tokens)
+
+	def validate(self):
+		"""Validate miscellaneous settings and display warnings if necessary.
+		(This code was previously in the global scope of portage.py)"""
+
+		groups = self["ACCEPT_KEYWORDS"].split()
+		archlist = self.archlist()
+		if not archlist:
+			writemsg(_("--- 'profiles/arch.list' is empty or "
+				"not available. Empty portage tree?\n"), noiselevel=1)
+		else:
+			for group in groups:
+				if group not in archlist and \
+					not (group.startswith("-") and group[1:] in archlist) and \
+					group not in ("*", "~*", "**"):
+					writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
+						noiselevel=-1)
+
+		abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
+			PROFILE_PATH)
+		if (not self.profile_path or \
+			not os.path.exists(os.path.join(self.profile_path, "parent"))) and \
+			os.path.exists(os.path.join(self["PORTDIR"], "profiles")):
+			writemsg(_("\n\n!!! %s is not a symlink and will probably prevent most merges.\n") % abs_profile_path,
+				noiselevel=-1)
+			writemsg(_("!!! It should point into a profile within %s/profiles/\n") % self["PORTDIR"])
+			writemsg(_("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
+
+		abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
+			USER_VIRTUALS_FILE)
+		if os.path.exists(abs_user_virtuals):
+			writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
+			writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
+			writemsg("!!! this new location.\n\n")
+
+		if not sandbox_capable and \
+			("sandbox" in self.features or "usersandbox" in self.features):
+			if self.profile_path is not None and \
+				os.path.realpath(self.profile_path) == \
+				os.path.realpath(os.path.join(
+				self["PORTAGE_CONFIGROOT"], PROFILE_PATH)):
+				# Don't show this warning when running repoman and the
+				# sandbox feature came from a profile that doesn't belong
+				# to the user.
+				writemsg(colorize("BAD", _("!!! Problem with sandbox"
+					" binary. Disabling...\n\n")), noiselevel=-1)
+
+		if "fakeroot" in self.features and \
+			not fakeroot_capable:
+			writemsg(_("!!! FEATURES=fakeroot is enabled, but the "
+				"fakeroot binary is not installed.\n"), noiselevel=-1)
+
+	def load_best_module(self,property_string):
+		best_mod = best_from_dict(property_string,self.modules,self.module_priority)
+		mod = None
+		try:
+			mod = load_mod(best_mod)
+		except ImportError:
+			if not best_mod.startswith("cache."):
+				raise
+			else:
+				best_mod = "portage." + best_mod
+				try:
+					mod = load_mod(best_mod)
+				except ImportError:
+					raise
+		return mod
+
+	def lock(self):
+		self.locked = 1
+
+	def unlock(self):
+		self.locked = 0
+
+	def modifying(self):
+		if self.locked:
+			raise Exception(_("Configuration is locked."))
+
+	def backup_changes(self,key=None):
+		self.modifying()
+		if key and key in self.configdict["env"]:
+			self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
+		else:
+			raise KeyError(_("No such key defined in environment: %s") % key)
+
+	def reset(self, keeping_pkg=0, use_cache=None):
+		"""
+		Restore environment from self.backupenv, call self.regenerate()
+		@param keeping_pkg: Should we keep the setcpv() data or delete it.
+		@type keeping_pkg: Boolean
+		@rtype: None
+		"""
+
+		if use_cache is not None:
+			warnings.warn("The use_cache parameter for config.reset() is deprecated and without effect.",
+				DeprecationWarning, stacklevel=2)
+
+		self.modifying()
+		self.configdict["env"].clear()
+		self.configdict["env"].update(self.backupenv)
+
+		self.modifiedkeys = []
+		if not keeping_pkg:
+			self.mycpv = None
+			self._setcpv_args_hash = None
+			self.puse = ""
+			del self._penv[:]
+			self.configdict["pkg"].clear()
+			self.configdict["pkginternal"].clear()
+			self.configdict["repo"].clear()
+			self.configdict["defaults"]["USE"] = \
+				" ".join(self.make_defaults_use)
+			self.usemask = self._use_manager.getUseMask()
+			self.useforce = self._use_manager.getUseForce()
+		self.regenerate()
+
+	class _lazy_vars(object):
+
+		__slots__ = ('built_use', 'settings', 'values')
+
+		def __init__(self, built_use, settings):
+			self.built_use = built_use
+			self.settings = settings
+			self.values = None
+
+		def __getitem__(self, k):
+			if self.values is None:
+				self.values = self._init_values()
+			return self.values[k]
+
+		def _init_values(self):
+			values = {}
+			settings = self.settings
+			use = self.built_use
+			if use is None:
+				use = frozenset(settings['PORTAGE_USE'].split())
+
+			values['ACCEPT_LICENSE'] = settings._license_manager.get_prunned_accept_license( \
+				settings.mycpv, use, settings['LICENSE'], settings['SLOT'], settings.get('PORTAGE_REPO_NAME'))
+			values['PORTAGE_RESTRICT'] = self._restrict(use, settings)
+			return values
+
+		def _restrict(self, use, settings):
+			try:
+				restrict = set(use_reduce(settings['RESTRICT'], uselist=use, flat=True))
+			except InvalidDependString:
+				restrict = set()
+			return ' '.join(sorted(restrict))
+
+	class _lazy_use_expand(object):
+		"""
+		Lazily evaluate USE_EXPAND variables since they are only needed when
+		an ebuild shell is spawned. Variable values are made consistent with
+		the previously calculated USE settings.
+		"""
+
+		def __init__(self, use, usemask, iuse_implicit,
+			use_expand_split, use_expand_dict):
+			self._use = use
+			self._usemask = usemask
+			self._iuse_implicit = iuse_implicit
+			self._use_expand_split = use_expand_split
+			self._use_expand_dict = use_expand_dict
+
+		def __getitem__(self, key):
+			prefix = key.lower() + '_'
+			prefix_len = len(prefix)
+			expand_flags = set( x[prefix_len:] for x in self._use \
+				if x[:prefix_len] == prefix )
+			var_split = self._use_expand_dict.get(key, '').split()
+			# Preserve the order of var_split because it can matter for things
+			# like LINGUAS.
+			var_split = [ x for x in var_split if x in expand_flags ]
+			var_split.extend(expand_flags.difference(var_split))
+			has_wildcard = '*' in expand_flags
+			if has_wildcard:
+				var_split = [ x for x in var_split if x != "*" ]
+			has_iuse = set()
+			for x in self._iuse_implicit:
+				if x[:prefix_len] == prefix:
+					has_iuse.add(x[prefix_len:])
+			if has_wildcard:
+				# * means to enable everything in IUSE that's not masked
+				if has_iuse:
+					usemask = self._usemask
+					for suffix in has_iuse:
+						x = prefix + suffix
+						if x not in usemask:
+							if suffix not in expand_flags:
+								var_split.append(suffix)
+				else:
+					# If there is a wildcard and no matching flags in IUSE then
+					# LINGUAS should be unset so that all .mo files are
+					# installed.
+					var_split = []
+			# Make the flags unique and filter them according to IUSE.
+			# Also, continue to preserve order for things like LINGUAS
+			# and filter any duplicates that the variable may contain.
+			filtered_var_split = []
+			remaining = has_iuse.intersection(var_split)
+			for x in var_split:
+				if x in remaining:
+					remaining.remove(x)
+					filtered_var_split.append(x)
+			var_split = filtered_var_split
+
+			if var_split:
+				value = ' '.join(var_split)
+			else:
+				# Don't export empty USE_EXPAND vars unless the user config
+				# exports them as empty.  This is required for vars such as
+				# LINGUAS, where unset and empty have different meanings.
+				if has_wildcard:
+					# ebuild.sh will see this and unset the variable so
+					# that things like LINGUAS work properly
+					value = '*'
+				else:
+					if has_iuse:
+						value = ''
+					else:
+						# It's not in IUSE, so just allow the variable content
+						# to pass through if it is defined somewhere.  This
+						# allows packages that support LINGUAS but don't
+						# declare it in IUSE to use the variable outside of the
+						# USE_EXPAND context.
+						value = None
+
+			return value
+
+	def setcpv(self, mycpv, use_cache=None, mydb=None):
+		"""
+		Load a particular CPV into the config, this lets us see the
+		Default USE flags for a particular ebuild as well as the USE
+		flags from package.use.
+
+		@param mycpv: A cpv to load
+		@type mycpv: string
+		@param mydb: a dbapi instance that supports aux_get with the IUSE key.
+		@type mydb: dbapi or derivative.
+		@rtype: None
+		"""
+
+		if use_cache is not None:
+			warnings.warn("The use_cache parameter for config.setcpv() is deprecated and without effect.",
+				DeprecationWarning, stacklevel=2)
+
+		self.modifying()
+
+		pkg = None
+		built_use = None
+		explicit_iuse = None
+		if not isinstance(mycpv, basestring):
+			pkg = mycpv
+			mycpv = pkg.cpv
+			mydb = pkg.metadata
+			explicit_iuse = pkg.iuse.all
+			args_hash = (mycpv, id(pkg))
+			if pkg.built:
+				built_use = pkg.use.enabled
+		else:
+			args_hash = (mycpv, id(mydb))
+
+		if args_hash == self._setcpv_args_hash:
+			return
+		self._setcpv_args_hash = args_hash
+
+		has_changed = False
+		self.mycpv = mycpv
+		cat, pf = catsplit(mycpv)
+		cp = cpv_getkey(mycpv)
+		cpv_slot = self.mycpv
+		pkginternaluse = ""
+		iuse = ""
+		pkg_configdict = self.configdict["pkg"]
+		previous_iuse = pkg_configdict.get("IUSE")
+		previous_features = pkg_configdict.get("FEATURES")
+
+		aux_keys = self._setcpv_aux_keys
+
+		# Discard any existing metadata and package.env settings from
+		# the previous package instance.
+		pkg_configdict.clear()
+
+		pkg_configdict["CATEGORY"] = cat
+		pkg_configdict["PF"] = pf
+		repository = None
+		if mydb:
+			if not hasattr(mydb, "aux_get"):
+				for k in aux_keys:
+					if k in mydb:
+						# Make these lazy, since __getitem__ triggers
+						# evaluation of USE conditionals which can't
+						# occur until PORTAGE_USE is calculated below.
+						pkg_configdict.addLazySingleton(k,
+							mydb.__getitem__, k)
+			else:
+				# When calling dbapi.aux_get(), grab USE for built/installed
+				# packages, since we want to save it in PORTAGE_BUILT_USE for
+				# evaluating conditional USE deps in atoms passed via IPC to
+				# helpers like has_version and best_version.
+				aux_keys = list(aux_keys)
+				aux_keys.append('USE')
+				for k, v in zip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
+					pkg_configdict[k] = v
+				built_use = frozenset(pkg_configdict.pop('USE').split())
+				if not built_use:
+					# Empty USE means this dbapi instance does not contain
+					# built packages.
+					built_use = None
+
+			repository = pkg_configdict.pop("repository", None)
+			if repository is not None:
+				pkg_configdict["PORTAGE_REPO_NAME"] = repository
+			slot = pkg_configdict["SLOT"]
+			iuse = pkg_configdict["IUSE"]
+			if pkg is None:
+				cpv_slot = "%s:%s" % (self.mycpv, slot)
+			else:
+				cpv_slot = pkg
+			pkginternaluse = []
+			for x in iuse.split():
+				if x.startswith("+"):
+					pkginternaluse.append(x[1:])
+				elif x.startswith("-"):
+					pkginternaluse.append(x)
+			pkginternaluse = " ".join(pkginternaluse)
+		if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
+			self.configdict["pkginternal"]["USE"] = pkginternaluse
+			has_changed = True
+
+		repo_env = []
+		if repository and repository != Package.UNKNOWN_REPO:
+			repos = []
+			try:
+				repos.extend(repo.name for repo in
+					self.repositories[repository].masters)
+			except KeyError:
+				pass
+			repos.append(repository)
+			for repo in repos:
+				d = self._repo_make_defaults.get(repo)
+				if d is None:
+					d = {}
+				else:
+					# make a copy, since we might modify it with
+					# package.use settings
+					d = d.copy()
+				cpdict = self._use_manager._repo_puse_dict.get(repo, {}).get(cp)
+				if cpdict:
+					repo_puse = ordered_by_atom_specificity(cpdict, cpv_slot)
+					if repo_puse:
+						for x in repo_puse:
+							d["USE"] = d.get("USE", "") + " " + " ".join(x)
+				if d:
+					repo_env.append(d)
+
+		if repo_env or self.configdict["repo"]:
+			self.configdict["repo"].clear()
+			self.configdict["repo"].update(stack_dicts(repo_env,
+				incrementals=self.incrementals))
+			has_changed = True
+
+		defaults = []
+		for i, pkgprofileuse_dict in enumerate(self._use_manager._pkgprofileuse):
+			if self.make_defaults_use[i]:
+				defaults.append(self.make_defaults_use[i])
+			cpdict = pkgprofileuse_dict.get(cp)
+			if cpdict:
+				pkg_defaults = ordered_by_atom_specificity(cpdict, cpv_slot)
+				if pkg_defaults:
+					defaults.extend(pkg_defaults)
+		defaults = " ".join(defaults)
+		if defaults != self.configdict["defaults"].get("USE",""):
+			self.configdict["defaults"]["USE"] = defaults
+			has_changed = True
+
+		useforce = self._use_manager.getUseForce(cpv_slot)
+		if useforce != self.useforce:
+			self.useforce = useforce
+			has_changed = True
+
+		usemask = self._use_manager.getUseMask(cpv_slot)
+		if usemask != self.usemask:
+			self.usemask = usemask
+			has_changed = True
+
+		oldpuse = self.puse
+		self.puse = self._use_manager.getPUSE(cpv_slot)
+		if oldpuse != self.puse:
+			has_changed = True
+		self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
+		self.configdict["pkg"]["USE"]    = self.puse[:] # this gets appended to USE
+
+		if previous_features:
+			# The package from the previous setcpv call had package.env
+			# settings which modified FEATURES. Therefore, trigger a
+			# regenerate() call in order to ensure that self.features
+			# is accurate.
+			has_changed = True
+
+		self._penv = []
+		cpdict = self._penvdict.get(cp)
+		if cpdict:
+			penv_matches = ordered_by_atom_specificity(cpdict, cpv_slot)
+			if penv_matches:
+				for x in penv_matches:
+					self._penv.extend(x)
+
+		protected_pkg_keys = set(pkg_configdict)
+		protected_pkg_keys.discard('USE')
+
+		# If there are _any_ package.env settings for this package
+		# then it automatically triggers config.reset(), in order
+		# to account for possible incremental interaction between
+		# package.use, package.env, and overrides from the calling
+		# environment (configdict['env']).
+		if self._penv:
+			has_changed = True
+			# USE is special because package.use settings override
+			# it. Discard any package.use settings here and they'll
+			# be added back later.
+			pkg_configdict.pop('USE', None)
+			self._grab_pkg_env(self._penv, pkg_configdict,
+				protected_keys=protected_pkg_keys)
+
+			# Now add package.use settings, which override USE from
+			# package.env
+			if self.puse:
+				if 'USE' in pkg_configdict:
+					pkg_configdict['USE'] = \
+						pkg_configdict['USE'] + " " + self.puse
+				else:
+					pkg_configdict['USE'] = self.puse
+
+		if has_changed:
+			self.reset(keeping_pkg=1)
+
+		env_configdict = self.configdict['env']
+
+		# Ensure that "pkg" values are always preferred over "env" values.
+		# This must occur _after_ the above reset() call, since reset()
+		# copies values from self.backupenv.
+		for k in protected_pkg_keys:
+			env_configdict.pop(k, None)
+
+		lazy_vars = self._lazy_vars(built_use, self)
+		env_configdict.addLazySingleton('ACCEPT_LICENSE',
+			lazy_vars.__getitem__, 'ACCEPT_LICENSE')
+		env_configdict.addLazySingleton('PORTAGE_RESTRICT',
+			lazy_vars.__getitem__, 'PORTAGE_RESTRICT')
+
+		if built_use is not None:
+			pkg_configdict['PORTAGE_BUILT_USE'] = ' '.join(built_use)
+
+		# If reset() has not been called, it's safe to return
+		# early if IUSE has not changed.
+		if not has_changed and previous_iuse == iuse:
+			return
+
+		# Filter out USE flags that aren't part of IUSE. This has to
+		# be done for every setcpv() call since practically every
+		# package has different IUSE.
+		use = set(self["USE"].split())
+		if explicit_iuse is None:
+			explicit_iuse = frozenset(x.lstrip("+-") for x in iuse.split())
+		iuse_implicit_match = self._iuse_implicit_match
+		portage_iuse = self._get_implicit_iuse()
+		portage_iuse.update(explicit_iuse)
+
+		# PORTAGE_IUSE is not always needed so it's lazily evaluated.
+		self.configdict["env"].addLazySingleton(
+			"PORTAGE_IUSE", _lazy_iuse_regex, portage_iuse)
+
+		ebuild_force_test = self.get("EBUILD_FORCE_TEST") == "1"
+		if ebuild_force_test and \
+			not hasattr(self, "_ebuild_force_test_msg_shown"):
+				self._ebuild_force_test_msg_shown = True
+				writemsg(_("Forcing test.\n"), noiselevel=-1)
+		if "test" in self.features:
+			if "test" in self.usemask and not ebuild_force_test:
+				# "test" is in IUSE and USE=test is masked, so execution
+				# of src_test() probably is not reliable. Therefore,
+				# temporarily disable FEATURES=test just for this package.
+				self["FEATURES"] = " ".join(x for x in self.features \
+					if x != "test")
+				use.discard("test")
+			else:
+				use.add("test")
+				if ebuild_force_test and "test" in self.usemask:
+					self.usemask = \
+						frozenset(x for x in self.usemask if x != "test")
+
+		# Allow _* flags from USE_EXPAND wildcards to pass through here.
+		use.difference_update([x for x in use \
+			if (x not in explicit_iuse and \
+			not iuse_implicit_match(x)) and x[-2:] != '_*'])
+
+		# Use the calculated USE flags to regenerate the USE_EXPAND flags so
+		# that they are consistent. For optimal performance, use slice
+		# comparison instead of startswith().
+		use_expand_split = set(x.lower() for \
+			x in self.get('USE_EXPAND', '').split())
+		lazy_use_expand = self._lazy_use_expand(use, self.usemask,
+			portage_iuse, use_expand_split, self._use_expand_dict)
+
+		use_expand_iuses = {}
+		for x in portage_iuse:
+			x_split = x.split('_')
+			if len(x_split) == 1:
+				continue
+			for i in range(len(x_split) - 1):
+				k = '_'.join(x_split[:i+1])
+				if k in use_expand_split:
+					v = use_expand_iuses.get(k)
+					if v is None:
+						v = set()
+						use_expand_iuses[k] = v
+					v.add(x)
+					break
+
+		# If it's not in IUSE, variable content is allowed
+		# to pass through if it is defined somewhere.  This
+		# allows packages that support LINGUAS but don't
+		# declare it in IUSE to use the variable outside of the
+		# USE_EXPAND context.
+		for k, use_expand_iuse in use_expand_iuses.items():
+			if k + '_*' in use:
+				use.update( x for x in use_expand_iuse if x not in usemask )
+			k = k.upper()
+			self.configdict['env'].addLazySingleton(k,
+				lazy_use_expand.__getitem__, k)
+
+		# Filtered for the ebuild environment. Store this in a separate
+		# attribute since we still want to be able to see global USE
+		# settings for things like emerge --info.
+
+		self.configdict["env"]["PORTAGE_USE"] = \
+			" ".join(sorted(x for x in use if x[-2:] != '_*'))
+
+	def _grab_pkg_env(self, penv, container, protected_keys=None):
+		if protected_keys is None:
+			protected_keys = ()
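+		# Each name in penv refers to a file under
+		# ${PORTAGE_CONFIGROOT}etc/portage/env/, e.g. a hypothetical
+		# "no-tests" file containing FEATURES="-test".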
+		abs_user_config = os.path.join(
+			self['PORTAGE_CONFIGROOT'], USER_CONFIG_PATH)
+		non_user_variables = self._non_user_variables
+		# Make a copy since we don't want per-package settings
+		# to pollute the global expand_map.
+		expand_map = self._expand_map.copy()
+		incrementals = self.incrementals
+		for envname in penv:
+			penvfile = os.path.join(abs_user_config, "env", envname)
+			penvconfig = getconfig(penvfile, tolerant=self._tolerant,
+				allow_sourcing=True, expand=expand_map)
+			if penvconfig is None:
+				writemsg("!!! %s references non-existent file: %s\n" % \
+					(os.path.join(abs_user_config, 'package.env'), penvfile),
+					noiselevel=-1)
+			else:
+				for k, v in penvconfig.items():
+					if k in protected_keys or \
+						k in non_user_variables:
+						writemsg("!!! Illegal variable " + \
+							"'%s' assigned in '%s'\n" % \
+							(k, penvfile), noiselevel=-1)
+					elif k in incrementals:
+						if k in container:
+							container[k] = container[k] + " " + v
+						else:
+							container[k] = v
+					else:
+						container[k] = v
+
+	def _get_implicit_iuse(self):
+		"""
+		Some flags are considered to be implicit members of IUSE:
+		  * Flags derived from ARCH
+		  * Flags derived from USE_EXPAND_HIDDEN variables
+		  * Masked flags, such as those from {,package}use.mask
+		  * Forced flags, such as those from {,package}use.force
+		  * build and bootstrap flags used by bootstrap.sh
+		"""
+		iuse_implicit = set()
+		# Flags derived from ARCH.
+		arch = self.configdict["defaults"].get("ARCH")
+		if arch:
+			iuse_implicit.add(arch)
+		iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
+
+		# Flags derived from USE_EXPAND_HIDDEN variables
+		# such as ELIBC, KERNEL, and USERLAND.
+		use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
+		for x in use_expand_hidden:
+			iuse_implicit.add(x.lower() + "_.*")
+
+		# Flags that have been masked or forced.
+		iuse_implicit.update(self.usemask)
+		iuse_implicit.update(self.useforce)
+
+		# build and bootstrap flags used by bootstrap.sh
+		iuse_implicit.add("build")
+		iuse_implicit.add("bootstrap")
+
+		# Controlled by FEATURES=test. Make this implicit, so handling
+		# of FEATURES=test is consistent regardless of explicit IUSE.
+		# Users may use use.mask/package.use.mask to control
+		# FEATURES=test for all ebuilds, regardless of explicit IUSE.
+		iuse_implicit.add("test")
+
+		return iuse_implicit
+
+	def _getUseMask(self, pkg):
+		return self._use_manager.getUseMask(pkg)
+
+	def _getUseForce(self, pkg):
+		return self._use_manager.getUseForce(pkg)
+
+	def _getMaskAtom(self, cpv, metadata):
+		"""
+		Take a package and return a matching package.mask atom, or None if no
+		such atom exists or it has been cancelled by package.unmask. PROVIDE
+		is not checked, so atoms will not be found for old-style virtuals.
+
+		@param cpv: The package name
+		@type cpv: String
+		@param metadata: A dictionary of raw package metadata
+		@type metadata: dict
+		@rtype: String
+		@return: A matching atom string or None if one is not found.
+		"""
+		return self._mask_manager.getMaskAtom(cpv, metadata["SLOT"], metadata.get('repository'))
+
+	def _getRawMaskAtom(self, cpv, metadata):
+		"""
+		Take a package and return a matching package.mask atom, or None if no
+		such atom exists or it has been cancelled by package.unmask. PROVIDE
+		is not checked, so atoms will not be found for old-style virtuals.
+
+		@param cpv: The package name
+		@type cpv: String
+		@param metadata: A dictionary of raw package metadata
+		@type metadata: dict
+		@rtype: String
+		@return: A matching atom string or None if one is not found.
+		"""
+		return self._mask_manager.getRawMaskAtom(cpv, metadata["SLOT"], metadata.get('repository'))
+
+
+	def _getProfileMaskAtom(self, cpv, metadata):
+		"""
+		Take a package and return a matching profile atom, or None if no
+		such atom exists. Note that a profile atom may or may not have a "*"
+		prefix. PROVIDE is not checked, so atoms will not be found for
+		old-style virtuals.
+
+		@param cpv: The package name
+		@type cpv: String
+		@param metadata: A dictionary of raw package metadata
+		@type metadata: dict
+		@rtype: String
+		@return: A matching profile atom string or None if one is not found.
+		"""
+
+		cp = cpv_getkey(cpv)
+		profile_atoms = self.prevmaskdict.get(cp)
+		if profile_atoms:
+			pkg = "".join((cpv, _slot_separator, metadata["SLOT"]))
+			repo = metadata.get("repository")
+			if repo and repo != Package.UNKNOWN_REPO:
+				pkg = "".join((pkg, _repo_separator, repo))
+			pkg_list = [pkg]
+			for x in profile_atoms:
+				if match_from_list(x, pkg_list):
+					continue
+				return x
+		return None
+
+	def _getKeywords(self, cpv, metadata):
+		return self._keywords_manager.getKeywords(cpv, metadata["SLOT"], \
+			metadata.get("KEYWORDS", ""), metadata.get("repository"))
+
+	def _getMissingKeywords(self, cpv, metadata):
+		"""
+		Take a package and return a list of any KEYWORDS that the user may
+		need to accept for the given package. If the KEYWORDS are empty
+		and the ** keyword has not been accepted, the returned list will
+		contain ** alone (in order to distinguish from the case of "none
+		missing").
+
+		@param cpv: The package name (for package.keywords support)
+		@type cpv: String
+		@param metadata: A dictionary of raw package metadata
+		@type metadata: dict
+		@rtype: List
+		@return: A list of KEYWORDS that have not been accepted.
+		"""
+
+		# Hack: Need to check the env directly here, since otherwise
+		# stacking doesn't work properly: negative values are lost in
+		# the config object (bug #139600)
+		backuped_accept_keywords = self.configdict["backupenv"].get("ACCEPT_KEYWORDS", "")
+		global_accept_keywords = self["ACCEPT_KEYWORDS"]
+
+		return self._keywords_manager.getMissingKeywords(cpv, metadata["SLOT"], \
+			metadata.get("KEYWORDS", ""), metadata.get('repository'), \
+			global_accept_keywords, backuped_accept_keywords)
+
+	def _getRawMissingKeywords(self, cpv, metadata):
+		"""
+		Take a package and return a list of any KEYWORDS that the user may
+		need to accept for the given package. If the KEYWORDS are empty,
+		the returned list will contain ** alone (in order to distinguish
+		from the case of "none missing").  This DOES NOT apply any user config
+		package.accept_keywords acceptance.
+
+		@param cpv: The package name (for package.keywords support)
+		@type cpv: String
+		@param metadata: A dictionary of raw package metadata
+		@type metadata: dict
+		@rtype: List
+		@return: lists of KEYWORDS that have not been accepted
+		and the keywords it looked for.
+		"""
+		return self._keywords_manager.getRawMissingKeywords(cpv, metadata["SLOT"], \
+			metadata.get("KEYWORDS", ""), metadata.get('repository'), \
+			self.get("ACCEPT_KEYWORDS", ""))
+
+	def _getPKeywords(self, cpv, metadata):
+		global_accept_keywords = self.get("ACCEPT_KEYWORDS", "")
+
+		return self._keywords_manager.getPKeywords(cpv, metadata["SLOT"], \
+			metadata.get('repository'), global_accept_keywords)
+
+	def _getMissingLicenses(self, cpv, metadata):
+		"""
+		Take a LICENSE string and return a list of any licenses that the user
+		may need to accept for the given package.  The returned list will not
+		contain any licenses that have already been accepted.  This method
+		can throw an InvalidDependString exception.
+
+		@param cpv: The package name (for package.license support)
+		@type cpv: String
+		@param metadata: A dictionary of raw package metadata
+		@type metadata: dict
+		@rtype: List
+		@return: A list of licenses that have not been accepted.
+		"""
+		return self._license_manager.getMissingLicenses( \
+			cpv, metadata["USE"], metadata["LICENSE"], metadata["SLOT"], metadata.get('repository'))
+
+	def _getMissingProperties(self, cpv, metadata):
+		"""
+		Take a PROPERTIES string and return a list of any properties the user
+		may need to accept for the given package.  The returned list will not
+		contain any properties that have already been accepted.  This method
+		can throw an InvalidDependString exception.
+
+		@param cpv: The package name (for package.properties support)
+		@type cpv: String
+		@param metadata: A dictionary of raw package metadata
+		@type metadata: dict
+		@rtype: List
+		@return: A list of properties that have not been accepted.
+		"""
+		accept_properties = self._accept_properties
+		cp = cpv_getkey(cpv)
+		cpdict = self._ppropertiesdict.get(cp)
+		if cpdict:
+			cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
+			pproperties_list = ordered_by_atom_specificity(cpdict, cpv_slot, repo=metadata.get('repository'))
+			if pproperties_list:
+				accept_properties = list(self._accept_properties)
+				for x in pproperties_list:
+					accept_properties.extend(x)
+
+		properties_str = metadata.get("PROPERTIES", "")
+		properties = set(use_reduce(properties_str, matchall=1, flat=True))
+		properties.discard('||')
+
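+		# e.g.: ACCEPT_PROPERTIES="* -interactive" accepts every property
+		# except "interactive", so a package with PROPERTIES="interactive"
+		# is still reported below.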
+		acceptable_properties = set()
+		for x in accept_properties:
+			if x == '*':
+				acceptable_properties.update(properties)
+			elif x == '-*':
+				acceptable_properties.clear()
+			elif x[:1] == '-':
+				acceptable_properties.discard(x[1:])
+			else:
+				acceptable_properties.add(x)
+
+		if "?" in properties_str:
+			use = metadata["USE"].split()
+		else:
+			use = []
+
+		properties_struct = use_reduce(properties_str, uselist=use, opconvert=True)
+		return self._getMaskedProperties(properties_struct, acceptable_properties)
+
+	def _getMaskedProperties(self, properties_struct, acceptable_properties):
+		if not properties_struct:
+			return []
+		if properties_struct[0] == "||":
+			ret = []
+			for element in properties_struct[1:]:
+				if isinstance(element, list):
+					if element:
+						tmp = self._getMaskedProperties(
+							element, acceptable_properties)
+						if not tmp:
+							return []
+						ret.extend(tmp)
+				else:
+					if element in acceptable_properties:
+						return []
+					ret.append(element)
+			# Return all masked properties, since we don't know which combination
+			# (if any) the user will decide to unmask
+			return ret
+
+		ret = []
+		for element in properties_struct:
+			if isinstance(element, list):
+				if element:
+					ret.extend(self._getMaskedProperties(element,
+						acceptable_properties))
+			else:
+				if element not in acceptable_properties:
+					ret.append(element)
+		return ret
+
+	def _accept_chost(self, cpv, metadata):
+		"""
+		@return True if pkg CHOST is accepted, False otherwise.
+		"""
+		if self._accept_chost_re is None:
+			accept_chost = self.get("ACCEPT_CHOSTS", "").split()
+			if not accept_chost:
+				chost = self.get("CHOST")
+				if chost:
+					accept_chost.append(chost)
+			if not accept_chost:
+				self._accept_chost_re = re.compile(".*")
+			elif len(accept_chost) == 1:
+				try:
+					self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
+				except re.error as e:
+					writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
+						(accept_chost[0], e), noiselevel=-1)
+					self._accept_chost_re = re.compile("^$")
+			else:
+				try:
+					self._accept_chost_re = re.compile(
+						r'^(%s)$' % "|".join(accept_chost))
+				except re.error as e:
+					writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
+						(" ".join(accept_chost), e), noiselevel=-1)
+					self._accept_chost_re = re.compile("^$")
+
+		pkg_chost = metadata.get('CHOST', '')
+		return not pkg_chost or \
+			self._accept_chost_re.match(pkg_chost) is not None
+
+	def setinst(self, mycpv, mydbapi):
+		"""This updates the preferences for old-style virtuals,
+		affecting the behavior of dep_expand() and dep_check()
+		calls. It can change dbapi.match() behavior since that
+		calls dep_expand(). However, dbapi instances have
+		internal match caches that are not invalidated when
+		preferences are updated here. This can potentially
+		lead to some inconsistency (relevant to bug #1343)."""
+		self.modifying()
+
+		# Grab the virtuals this package provides and add them into the tree virtuals.
+		if not hasattr(mydbapi, "aux_get"):
+			provides = mydbapi["PROVIDE"]
+		else:
+			provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
+		if not provides:
+			return
+		if isinstance(mydbapi, portdbapi):
+			self.setcpv(mycpv, mydb=mydbapi)
+			myuse = self["PORTAGE_USE"]
+		elif not hasattr(mydbapi, "aux_get"):
+			myuse = mydbapi["USE"]
+		else:
+			myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
+		virts = use_reduce(provides, uselist=myuse.split(), flat=True)
+
+		# Ensure that we don't trigger the _treeVirtuals
+		# assertion in VirtualsManager._compile_virtuals().
+		self.getvirtuals()
+		self._virtuals_manager.add_depgraph_virtuals(mycpv, virts)
+
+	def reload(self):
+		"""Reload things like /etc/profile.env that can change during runtime."""
+		env_d_filename = os.path.join(self["EROOT"], "etc", "profile.env")
+		self.configdict["env.d"].clear()
+		env_d = getconfig(env_d_filename, expand=False)
+		if env_d:
+			# env_d will be None if profile.env doesn't exist.
+			self.configdict["env.d"].update(env_d)
+
+	def regenerate(self, useonly=0, use_cache=None):
+		"""
+		Regenerate settings
+		This involves regenerating valid USE flags, re-expanding USE_EXPAND flags
+		re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
+		variables.  This also updates the env.d configdict; useful in case an ebuild
+		changes the environment.
+
+		If FEATURES has already stacked, it is not stacked twice.
+
+		@param useonly: Only regenerate USE flags (not any other incrementals)
+		@type useonly: Boolean
+		@rtype: None
+		"""
+
+		if use_cache is not None:
+			warnings.warn("The use_cache parameter for config.regenerate() is deprecated and without effect.",
+				DeprecationWarning, stacklevel=2)
+
+		self.modifying()
+
+		if useonly:
+			myincrementals=["USE"]
+		else:
+			myincrementals = self.incrementals
+		myincrementals = set(myincrementals)
+
+		# Process USE last because it depends on USE_EXPAND which is also
+		# an incremental!
+		myincrementals.discard("USE")
+
+		mydbs = self.configlist[:-1]
+		mydbs.append(self.backupenv)
+
+		# ACCEPT_LICENSE is a lazily evaluated incremental, so that * can be
+		# used to match all licenses without ever having to explicitly expand
+		# it to all licenses.
+		if self.local_config:
+			mysplit = []
+			for curdb in mydbs:
+				mysplit.extend(curdb.get('ACCEPT_LICENSE', '').split())
+			mysplit = prune_incremental(mysplit)
+			accept_license_str = ' '.join(mysplit)
+			self.configlist[-1]['ACCEPT_LICENSE'] = accept_license_str
+			self._license_manager.set_accept_license_str(accept_license_str)
+		else:
+			# repoman will accept any license
+			self._license_manager.set_accept_license_str("*")
+
+		# ACCEPT_PROPERTIES works like ACCEPT_LICENSE, without groups
+		if self.local_config:
+			mysplit = []
+			for curdb in mydbs:
+				mysplit.extend(curdb.get('ACCEPT_PROPERTIES', '').split())
+			mysplit = prune_incremental(mysplit)
+			self.configlist[-1]['ACCEPT_PROPERTIES'] = ' '.join(mysplit)
+			if tuple(mysplit) != self._accept_properties:
+				self._accept_properties = tuple(mysplit)
+		else:
+			# repoman will accept any property
+			self._accept_properties = ('*',)
+
+		increment_lists = {}
+		for k in myincrementals:
+			incremental_list = []
+			increment_lists[k] = incremental_list
+			for curdb in mydbs:
+				v = curdb.get(k)
+				if v is not None:
+					incremental_list.append(v.split())
+
+		if 'FEATURES' in increment_lists:
+			increment_lists['FEATURES'].append(self._features_overrides)
+
+		myflags = set()
+		for mykey, incremental_list in increment_lists.items():
+
+			myflags.clear()
+			for mysplit in incremental_list:
+
+				for x in mysplit:
+					if x=="-*":
+						# "-*" is a special "minus" var that means "unset all settings".
+						# so USE="-* gnome" will have *just* gnome enabled.
+						myflags.clear()
+						continue
+
+					if x[0]=="+":
+						# Not legal. People assume too much. Complain.
+						writemsg(colorize("BAD",
+							_("%s values should not start with a '+': %s") % (mykey,x)) \
+							+ "\n", noiselevel=-1)
+						x=x[1:]
+						if not x:
+							continue
+
+					if (x[0]=="-"):
+						myflags.discard(x[1:])
+						continue
+
+					# We got here, so add it now.
+					myflags.add(x)
+
+			# store setting in last element of configlist, the original environment:
+			if myflags or mykey in self:
+				self.configlist[-1][mykey] = " ".join(sorted(myflags))
+
+		# Do the USE calculation last because it depends on USE_EXPAND.
+		use_expand = self.get("USE_EXPAND", "").split()
+		use_expand_dict = self._use_expand_dict
+		use_expand_dict.clear()
+		for k in use_expand:
+			v = self.get(k)
+			if v is not None:
+				use_expand_dict[k] = v
+
+		# In order to best accommodate the long-standing practice of
+		# setting default USE_EXPAND variables in the profile's
+		# make.defaults, we translate these variables into their
+		# equivalent USE flags so that useful incremental behavior
+		# is enabled (for sub-profiles).
+		configdict_defaults = self.configdict['defaults']
+		if self._make_defaults is not None:
+			for i, cfg in enumerate(self._make_defaults):
+				if not cfg:
+					self.make_defaults_use.append("")
+					continue
+				use = cfg.get("USE", "")
+				expand_use = []
+				for k in use_expand_dict:
+					v = cfg.get(k)
+					if v is None:
+						continue
+					prefix = k.lower() + '_'
+					if k in myincrementals:
+						for x in v.split():
+							if x[:1] == '-':
+								expand_use.append('-' + prefix + x[1:])
+							else:
+								expand_use.append(prefix + x)
+					else:
+						for x in v.split():
+							expand_use.append(prefix + x)
+				if expand_use:
+					expand_use.append(use)
+					use = ' '.join(expand_use)
+				self.make_defaults_use.append(use)
+			self.make_defaults_use = tuple(self.make_defaults_use)
+			configdict_defaults['USE'] = ' '.join(
+				stack_lists([x.split() for x in self.make_defaults_use]))
+			# Set to None so this code only runs once.
+			self._make_defaults = None
+
+		if not self.uvlist:
+			for x in self["USE_ORDER"].split(":"):
+				if x in self.configdict:
+					self.uvlist.append(self.configdict[x])
+			self.uvlist.reverse()
+
+		# For optimal performance, use slice
+		# comparison instead of startswith().
+		iuse = self.configdict["pkg"].get("IUSE")
+		if iuse is not None:
+			iuse = [x.lstrip("+-") for x in iuse.split()]
+		myflags = set()
+		for curdb in self.uvlist:
+			cur_use_expand = [x for x in use_expand if x in curdb]
+			mysplit = curdb.get("USE", "").split()
+			if not mysplit and not cur_use_expand:
+				continue
+			for x in mysplit:
+				if x == "-*":
+					myflags.clear()
+					continue
+
+				if x[0] == "+":
+					writemsg(colorize("BAD", _("USE flags should not start "
+						"with a '+': %s\n") % x), noiselevel=-1)
+					x = x[1:]
+					if not x:
+						continue
+
+				if x[0] == "-":
+					if x[-2:] == '_*':
+						prefix = x[1:-1]
+						prefix_len = len(prefix)
+						myflags.difference_update(
+							[y for y in myflags if \
+							y[:prefix_len] == prefix])
+					myflags.discard(x[1:])
+					continue
+
+				if iuse is not None and x[-2:] == '_*':
+					# Expand wildcards here, so that cases like
+					# USE="linguas_* -linguas_en_US" work correctly.
+					prefix = x[:-1]
+					prefix_len = len(prefix)
+					has_iuse = False
+					for y in iuse:
+						if y[:prefix_len] == prefix:
+							has_iuse = True
+							myflags.add(y)
+					if not has_iuse:
+						# There are no matching IUSE, so allow the
+						# wildcard to pass through. This allows
+						# linguas_* to trigger unset LINGUAS in
+						# cases when no linguas_ flags are in IUSE.
+						myflags.add(x)
+				else:
+					myflags.add(x)
+
+			if curdb is configdict_defaults:
+				# USE_EXPAND flags from make.defaults are handled
+				# earlier, in order to provide useful incremental
+				# behavior (for sub-profiles).
+				continue
+
+			for var in cur_use_expand:
+				var_lower = var.lower()
+				is_not_incremental = var not in myincrementals
+				if is_not_incremental:
+					prefix = var_lower + "_"
+					prefix_len = len(prefix)
+					for x in list(myflags):
+						if x[:prefix_len] == prefix:
+							myflags.remove(x)
+				for x in curdb[var].split():
+					if x[0] == "+":
+						if is_not_incremental:
+							writemsg(colorize("BAD", _("Invalid '+' "
+								"operator in non-incremental variable "
+								 "'%s': '%s'\n") % (var, x)), noiselevel=-1)
+							continue
+						else:
+							writemsg(colorize("BAD", _("Invalid '+' "
+								"operator in incremental variable "
+								 "'%s': '%s'\n") % (var, x)), noiselevel=-1)
+						x = x[1:]
+					if x[0] == "-":
+						if is_not_incremental:
+							writemsg(colorize("BAD", _("Invalid '-' "
+								"operator in non-incremental variable "
+								 "'%s': '%s'\n") % (var, x)), noiselevel=-1)
+							continue
+						myflags.discard(var_lower + "_" + x[1:])
+						continue
+					myflags.add(var_lower + "_" + x)
+
+		if hasattr(self, "features"):
+			self.features._features.clear()
+		else:
+			self.features = features_set(self)
+		self.features._features.update(self.get('FEATURES', '').split())
+		self.features._sync_env_var()
+		self.features._validate()
+
+		myflags.update(self.useforce)
+		arch = self.configdict["defaults"].get("ARCH")
+		if arch:
+			myflags.add(arch)
+
+		myflags.difference_update(self.usemask)
+		self.configlist[-1]["USE"]= " ".join(sorted(myflags))
+
+		if self.mycpv is None:
+			# Generate global USE_EXPAND variables settings that are
+			# consistent with USE, for display by emerge --info. For
+			# package instances, these are instead generated via
+			# setcpv().
+			for k in use_expand:
+				prefix = k.lower() + '_'
+				prefix_len = len(prefix)
+				expand_flags = set( x[prefix_len:] for x in myflags \
+					if x[:prefix_len] == prefix )
+				var_split = use_expand_dict.get(k, '').split()
+				var_split = [ x for x in var_split if x in expand_flags ]
+				var_split.extend(sorted(expand_flags.difference(var_split)))
+				if var_split:
+					self.configlist[-1][k] = ' '.join(var_split)
+				elif k in self:
+					self.configlist[-1][k] = ''
+
+	@property
+	def virts_p(self):
+		warnings.warn("portage config.virts_p attribute " + \
+			"is deprecated, use config.get_virts_p()",
+			DeprecationWarning, stacklevel=2)
+		return self.get_virts_p()
+
+	@property
+	def virtuals(self):
+		warnings.warn("portage config.virtuals attribute " + \
+			"is deprecated, use config.getvirtuals()",
+			DeprecationWarning, stacklevel=2)
+		return self.getvirtuals()
+
+	def get_virts_p(self):
+		# Ensure that we don't trigger the _treeVirtuals
+		# assertion in VirtualsManager._compile_virtuals().
+		self.getvirtuals()
+		return self._virtuals_manager.get_virts_p()
+
+	def getvirtuals(self):
+		if self._virtuals_manager._treeVirtuals is None:
+			# Hack around the fact that VirtualsManager needs a vartree
+			# and vartree needs a config instance.
+			# This code should be part of VirtualsManager.getvirtuals().
+			if self.local_config:
+				temp_vartree = vartree(settings=self)
+				self._virtuals_manager._populate_treeVirtuals(temp_vartree)
+			else:
+				self._virtuals_manager._treeVirtuals = {}
+
+		return self._virtuals_manager.getvirtuals()
+
+	def _populate_treeVirtuals_if_needed(self, vartree):
+		"""Reduce the provides into a list by CP."""
+		if self._virtuals_manager._treeVirtuals is None:
+			if self.local_config:
+				self._virtuals_manager._populate_treeVirtuals(vartree)
+			else:
+				self._virtuals_manager._treeVirtuals = {}
+
+	def __delitem__(self,mykey):
+		self.modifying()
+		for x in self.lookuplist:
+			if x is not None:
+				if mykey in x:
+					del x[mykey]
+
+	def __getitem__(self,mykey):
+		for d in self.lookuplist:
+			if mykey in d:
+				return d[mykey]
+		return '' # for backward compat, don't raise KeyError
+
+	def get(self, k, x=None):
+		for d in self.lookuplist:
+			if k in d:
+				return d[k]
+		return x
+
+	def pop(self, key, *args):
+		if len(args) > 1:
+			raise TypeError(
+				"pop expected at most 2 arguments, got " + \
+				repr(1 + len(args)))
+		v = self
+		for d in reversed(self.lookuplist):
+			v = d.pop(key, v)
+		if v is self:
+			if args:
+				return args[0]
+			raise KeyError(key)
+		return v
+
+	def __contains__(self, mykey):
+		"""Called to implement membership test operators (in and not in)."""
+		for d in self.lookuplist:
+			if mykey in d:
+				return True
+		return False
+
+	def setdefault(self, k, x=None):
+		v = self.get(k)
+		if v is not None:
+			return v
+		else:
+			self[k] = x
+			return x
+
+	def keys(self):
+		return list(self)
+
+	def __iter__(self):
+		keys = set()
+		for d in self.lookuplist:
+			keys.update(d)
+		return iter(keys)
+
+	def iterkeys(self):
+		return iter(self)
+
+	def iteritems(self):
+		for k in self:
+			yield (k, self[k])
+
+	def items(self):
+		return list(self.iteritems())
+
+	def __setitem__(self,mykey,myvalue):
+		"set a value; will be thrown away at reset() time"
+		if not isinstance(myvalue, basestring):
+			raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
+
+		# Avoid potential UnicodeDecodeError exceptions later.
+		mykey = _unicode_decode(mykey)
+		myvalue = _unicode_decode(myvalue)
+
+		self.modifying()
+		self.modifiedkeys.append(mykey)
+		self.configdict["env"][mykey]=myvalue
+
+	def environ(self):
+		"return our locally-maintained environment"
+		mydict={}
+		environ_filter = self._environ_filter
+
+		eapi = self.get('EAPI')
+		phase = self.get('EBUILD_PHASE')
+		filter_calling_env = False
+		if self.mycpv is not None and \
+			phase not in ('clean', 'cleanrm', 'depend', 'fetch'):
+			temp_dir = self.get('T')
+			if temp_dir is not None and \
+				os.path.exists(os.path.join(temp_dir, 'environment')):
+				filter_calling_env = True
+
+		environ_whitelist = self._environ_whitelist
+		for x in self:
+			if x in environ_filter:
+				continue
+			myvalue = self[x]
+			if not isinstance(myvalue, basestring):
+				writemsg(_("!!! Non-string value in config: %s=%s\n") % \
+					(x, myvalue), noiselevel=-1)
+				continue
+			if filter_calling_env and \
+				x not in environ_whitelist and \
+				not self._environ_whitelist_re.match(x):
+				# Do not allow anything to leak into the ebuild
+				# environment unless it is explicitly whitelisted.
+				# This ensures that variables unset by the ebuild
+				# remain unset (bug #189417).
+				continue
+			mydict[x] = myvalue
+		if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
+			writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
+			mydict["HOME"]=mydict["BUILD_PREFIX"][:]
+
+		if filter_calling_env:
+			if phase:
+				whitelist = []
+				if "rpm" == phase:
+					whitelist.append("RPMDIR")
+				for k in whitelist:
+					v = self.get(k)
+					if v is not None:
+						mydict[k] = v
+
+		# At some point we may want to stop exporting FEATURES to the ebuild
+		# environment, in order to prevent ebuilds from abusing it. In
+		# preparation for that, export it as PORTAGE_FEATURES so that bashrc
+		# users will be able to migrate any FEATURES conditional code to
+		# use this alternative variable.
+		mydict["PORTAGE_FEATURES"] = self["FEATURES"]
+
+		# Filtered by IUSE and implicit IUSE.
+		mydict["USE"] = self.get("PORTAGE_USE", "")
+
+		# Don't export AA to the ebuild environment in EAPIs that forbid it
+		if not eapi_exports_AA(eapi):
+			mydict.pop("AA", None)
+
+		if not eapi_exports_merge_type(eapi):
+			mydict.pop("MERGE_TYPE", None)
+
+		# Prefix variables are supported starting with EAPI 3.
+		if phase == 'depend' or eapi is None or not eapi_supports_prefix(eapi):
+			mydict.pop("ED", None)
+			mydict.pop("EPREFIX", None)
+			mydict.pop("EROOT", None)
+
+		if phase == 'depend':
+			mydict.pop('FILESDIR', None)
+
+		if phase not in ("pretend", "setup", "preinst", "postinst") or \
+			not eapi_exports_replace_vars(eapi):
+			mydict.pop("REPLACING_VERSIONS", None)
+
+		if phase not in ("prerm", "postrm") or \
+			not eapi_exports_replace_vars(eapi):
+			mydict.pop("REPLACED_BY_VERSION", None)
+
+		return mydict
+
+	def thirdpartymirrors(self):
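+		# e.g.: thirdpartymirrors().get("sourceforge") would return the
+		# mirror list stacked from each profile root's "thirdpartymirrors"
+		# file (the key name here is illustrative).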
+		if getattr(self, "_thirdpartymirrors", None) is None:
+			profileroots = [os.path.join(self["PORTDIR"], "profiles")]
+			for x in shlex_split(self.get("PORTDIR_OVERLAY", "")):
+				profileroots.insert(0, os.path.join(x, "profiles"))
+			thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
+			self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
+		return self._thirdpartymirrors
+
+	def archlist(self):
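+		# e.g.: PORTAGE_ARCHLIST="amd64 x86" yields
+		# ["amd64", "~amd64", "x86", "~x86"].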
+		_archlist = []
+		for myarch in self["PORTAGE_ARCHLIST"].split():
+			_archlist.append(myarch)
+			_archlist.append("~" + myarch)
+		return _archlist
+
+	def selinux_enabled(self):
+		if getattr(self, "_selinux_enabled", None) is None:
+			self._selinux_enabled = 0
+			if "selinux" in self["USE"].split():
+				if selinux:
+					if selinux.is_selinux_enabled() == 1:
+						self._selinux_enabled = 1
+					else:
+						self._selinux_enabled = 0
+				else:
+					writemsg(_("!!! SELinux module not found. Please verify that it was installed.\n"),
+						noiselevel=-1)
+					self._selinux_enabled = 0
+
+		return self._selinux_enabled
+
+	if sys.hexversion >= 0x3000000:
+		keys = __iter__
+		items = iteritems

diff --git a/portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.py b/portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.py
new file mode 100644
index 0000000..3fab4da
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.py
@@ -0,0 +1,42 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['deprecated_profile_check']
+
+import io
+
+from portage import os, _encodings, _unicode_encode
+from portage.const import DEPRECATED_PROFILE_FILE
+from portage.localization import _
+from portage.output import colorize
+from portage.util import writemsg
+
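+# The deprecation file's first line names the replacement profile; any
+# remaining lines are printed verbatim as upgrade instructions. A
+# hypothetical example:
+#   default/linux/amd64/10.0
+#   Switch with `eselect profile set <number>`.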
+def deprecated_profile_check(settings=None):
+	config_root = "/"
+	if settings is not None:
+		config_root = settings["PORTAGE_CONFIGROOT"]
+	deprecated_profile_file = os.path.join(config_root,
+		DEPRECATED_PROFILE_FILE)
+	if not os.access(deprecated_profile_file, os.R_OK):
+		return False
+	dcontent = io.open(_unicode_encode(deprecated_profile_file,
+		encoding=_encodings['fs'], errors='strict'), 
+		mode='r', encoding=_encodings['content'], errors='replace').readlines()
+	writemsg(colorize("BAD", _("\n!!! Your current profile is "
+		"deprecated and not supported anymore.")) + "\n", noiselevel=-1)
+	writemsg(colorize("BAD", _("!!! Use eselect profile to update your "
+		"profile.")) + "\n", noiselevel=-1)
+	if not dcontent:
+		writemsg(colorize("BAD", _("!!! Please refer to the "
+			"Gentoo Upgrading Guide.")) + "\n", noiselevel=-1)
+		return True
+	newprofile = dcontent[0]
+	writemsg(colorize("BAD", _("!!! Please upgrade to the "
+		"following profile if possible:")) + "\n", noiselevel=-1)
+	writemsg(8*" " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
+	if len(dcontent) > 1:
+		writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
+		for myline in dcontent[1:]:
+			writemsg(myline, noiselevel=-1)
+		writemsg("\n\n", noiselevel=-1)
+	return True

diff --git a/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py b/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py
new file mode 100644
index 0000000..1e34b14
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py
@@ -0,0 +1,167 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['digestcheck']
+
+import warnings
+
+from portage import os, _encodings, _unicode_decode
+from portage.exception import DigestException, FileNotFound
+from portage.localization import _
+from portage.manifest import Manifest
+from portage.output import EOutput
+from portage.util import writemsg
+
+def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
+	"""
+	Verifies checksums. Assumes all files have been downloaded.
+	@rtype: int
+	@returns: 1 on success and 0 on failure
+	"""
+
+	if justmanifest is not None:
+		warnings.warn("The justmanifest parameter of the " + \
+			"portage.package.ebuild.digestcheck.digestcheck()" + \
+			" function is now unused.",
+			DeprecationWarning, stacklevel=2)
+		justmanifest = None
+
+	if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
+		return 1
+	allow_missing = "allow-missing-manifests" in mysettings.features
+	pkgdir = mysettings["O"]
+	manifest_path = os.path.join(pkgdir, "Manifest")
+	if not os.path.exists(manifest_path):
+		if allow_missing:
+			return 1
+		writemsg(_("!!! Manifest file not found: '%s'\n") % manifest_path,
+			noiselevel=-1)
+		if strict:
+			return 0
+		else:
+			return 1
+	if mf is None:
+		mf = Manifest(pkgdir, mysettings["DISTDIR"])
+	manifest_empty = True
+	for d in mf.fhashdict.values():
+		if d:
+			manifest_empty = False
+			break
+	if manifest_empty:
+		writemsg(_("!!! Manifest is empty: '%s'\n") % manifest_path,
+			noiselevel=-1)
+		if strict:
+			return 0
+		else:
+			return 1
+	eout = EOutput()
+	eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
+	try:
+		if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
+			eout.ebegin(_("checking ebuild checksums ;-)"))
+			mf.checkTypeHashes("EBUILD")
+			eout.eend(0)
+			eout.ebegin(_("checking auxfile checksums ;-)"))
+			mf.checkTypeHashes("AUX")
+			eout.eend(0)
+			eout.ebegin(_("checking miscfile checksums ;-)"))
+			mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
+			eout.eend(0)
+		for f in myfiles:
+			eout.ebegin(_("checking %s ;-)") % f)
+			ftype = mf.findFile(f)
+			if ftype is None:
+				eout.eend(1)
+				writemsg(_("\n!!! Missing digest for '%s'\n") % (f,),
+					noiselevel=-1)
+				return 0
+			mf.checkFileHashes(ftype, f)
+			eout.eend(0)
+	except FileNotFound as e:
+		eout.eend(1)
+		writemsg(_("\n!!! A file listed in the Manifest could not be found: %s\n") % str(e),
+			noiselevel=-1)
+		return 0
+	except DigestException as e:
+		eout.eend(1)
+		writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
+		writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
+		writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
+		writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
+		writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
+		return 0
+	if allow_missing:
+		# In this case we ignore any missing digests that
+		# would otherwise be detected below.
+		return 1
+	# Make sure that all of the ebuilds are actually listed in the Manifest.
+	for f in os.listdir(pkgdir):
+		pf = None
+		if f[-7:] == '.ebuild':
+			pf = f[:-7]
+		if pf is not None and not mf.hasFile("EBUILD", f):
+			writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
+				os.path.join(pkgdir, f), noiselevel=-1)
+			if strict:
+				return 0
+	# epatch will just grab all the patches out of a directory, so we have to
+	# make sure there aren't any foreign files that it might grab.
+	filesdir = os.path.join(pkgdir, "files")
+
+	for parent, dirs, files in os.walk(filesdir):
+		try:
+			parent = _unicode_decode(parent,
+				encoding=_encodings['fs'], errors='strict')
+		except UnicodeDecodeError:
+			parent = _unicode_decode(parent,
+				encoding=_encodings['fs'], errors='replace')
+			writemsg(_("!!! Path contains invalid "
+				"character(s) for encoding '%s': '%s'") \
+				% (_encodings['fs'], parent), noiselevel=-1)
+			if strict:
+				return 0
+			continue
+		for d in dirs:
+			d_bytes = d
+			try:
+				d = _unicode_decode(d,
+					encoding=_encodings['fs'], errors='strict')
+			except UnicodeDecodeError:
+				d = _unicode_decode(d,
+					encoding=_encodings['fs'], errors='replace')
+				writemsg(_("!!! Path contains invalid "
+					"character(s) for encoding '%s': '%s'") \
+					% (_encodings['fs'], os.path.join(parent, d)),
+					noiselevel=-1)
+				if strict:
+					return 0
+				dirs.remove(d_bytes)
+				continue
+			if d.startswith(".") or d == "CVS":
+				dirs.remove(d_bytes)
+		for f in files:
+			try:
+				f = _unicode_decode(f,
+					encoding=_encodings['fs'], errors='strict')
+			except UnicodeDecodeError:
+				f = _unicode_decode(f,
+					encoding=_encodings['fs'], errors='replace')
+				if f.startswith("."):
+					continue
+				f = os.path.join(parent, f)[len(filesdir) + 1:]
+				writemsg(_("!!! File name contains invalid "
+					"character(s) for encoding '%s': '%s'") \
+					% (_encodings['fs'], f), noiselevel=-1)
+				if strict:
+					return 0
+				continue
+			if f.startswith("."):
+				continue
+			f = os.path.join(parent, f)[len(filesdir) + 1:]
+			file_type = mf.findFile(f)
+			if file_type != "AUX" and not f.startswith("digest-"):
+				writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
+					os.path.join(filesdir, f), noiselevel=-1)
+				if strict:
+					return 0
+	return 1
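+
+# A minimal usage sketch for digestcheck() (illustrative only; the paths
+# below are hypothetical):
+#
+#   import portage
+#   from portage.package.ebuild.digestcheck import digestcheck
+#
+#   settings = portage.config(clone=portage.settings)
+#   settings["O"] = "/usr/portage/app-misc/foo"  # the ebuild's directory
+#   # With strict=True and an empty myfiles list, the Manifest's EBUILD,
+#   # AUX and MISC entries are still verified; returns 1 on success.
+#   ok = digestcheck([], settings, strict=True)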

diff --git a/portage_with_autodep/pym/portage/package/ebuild/digestgen.py b/portage_with_autodep/pym/portage/package/ebuild/digestgen.py
new file mode 100644
index 0000000..eb7210e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/digestgen.py
@@ -0,0 +1,202 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['digestgen']
+
+import errno
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.package.ebuild._spawn_nofetch:spawn_nofetch',
+)
+
+from portage import os
+from portage.const import MANIFEST2_REQUIRED_HASH
+from portage.dbapi.porttree import FetchlistDict
+from portage.dep import use_reduce
+from portage.exception import InvalidDependString, FileNotFound, \
+	PermissionDenied, PortagePackageException
+from portage.localization import _
+from portage.manifest import Manifest
+from portage.output import colorize
+from portage.package.ebuild.fetch import fetch
+from portage.util import writemsg, writemsg_stdout
+from portage.versions import catsplit
+
+def digestgen(myarchives=None, mysettings=None, myportdb=None):
+	"""
+	Generates a digest file if missing. Fetches files if necessary.
+	NOTE: myarchives and mysettings used to be positional arguments,
+		so their order must be preserved for backward compatibility.
+	@param mysettings: the ebuild config (mysettings["O"] must correspond
+		to the ebuild's parent directory)
+	@type mysettings: config
+	@param myportdb: a portdbapi instance
+	@type myportdb: portdbapi
+	@rtype: int
+	@returns: 1 on success and 0 on failure
+	"""
+	if mysettings is None or myportdb is None:
+		raise TypeError("portage.digestgen(): 'mysettings' and 'myportdb' parameter are required.")
+
+	try:
+		portage._doebuild_manifest_exempt_depend += 1
+		distfiles_map = {}
+		fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
+		for cpv in fetchlist_dict:
+			try:
+				for myfile in fetchlist_dict[cpv]:
+					distfiles_map.setdefault(myfile, []).append(cpv)
+			except InvalidDependString as e:
+				writemsg("!!! %s\n" % str(e), noiselevel=-1)
+				del e
+				return 0
+		mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
+		manifest1_compat = False
+		mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
+			fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
+		# Don't require all hashes since that can trigger excessive
+		# fetches when sufficient digests already exist.  To ease transition
+		# while Manifest 1 is being removed, only require hashes that will
+		# exist before and after the transition.
+		required_hash_types = set()
+		required_hash_types.add("size")
+		required_hash_types.add(MANIFEST2_REQUIRED_HASH)
+		dist_hashes = mf.fhashdict.get("DIST", {})
+
+		# To avoid accidental regeneration of digests with the incorrect
+		# files (such as partially downloaded files), trigger the fetch
+		code if the file exists and its size doesn't match the current
+		# manifest entry. If there really is a legitimate reason for the
+		# digest to change, `ebuild --force digest` can be used to avoid
+		# triggering this code (or else the old digests can be manually
+		# removed from the Manifest).
+		missing_files = []
+		for myfile in distfiles_map:
+			myhashes = dist_hashes.get(myfile)
+			if not myhashes:
+				try:
+					st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
+				except OSError:
+					st = None
+				if st is None or st.st_size == 0:
+					missing_files.append(myfile)
+				continue
+			size = myhashes.get("size")
+
+			try:
+				st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
+			except OSError as e:
+				if e.errno != errno.ENOENT:
+					raise
+				del e
+				if size == 0:
+					missing_files.append(myfile)
+					continue
+				if required_hash_types.difference(myhashes):
+					missing_files.append(myfile)
+					continue
+			else:
+				if st.st_size == 0 or size is not None and size != st.st_size:
+					missing_files.append(myfile)
+					continue
+
+		if missing_files:
+				mytree = os.path.realpath(os.path.dirname(
+					os.path.dirname(mysettings["O"])))
+				for myfile in missing_files:
+					uris = set()
+					all_restrict = set()
+					for cpv in distfiles_map[myfile]:
+						uris.update(myportdb.getFetchMap(
+							cpv, mytree=mytree)[myfile])
+						restrict = myportdb.aux_get(cpv, ['RESTRICT'],
+							mytree=mytree)[0]
+						# Here we ignore conditional parts of RESTRICT since
+						# they don't apply unconditionally. Assume such
+						# conditionals only apply on the client side where
+						# digestgen() does not need to be called.
+						all_restrict.update(use_reduce(restrict,
+							flat=True, matchnone=True))
+
+						# fetch() uses CATEGORY and PF to display a message
+						# when fetch restriction is triggered.
+						cat, pf = catsplit(cpv)
+						mysettings["CATEGORY"] = cat
+						mysettings["PF"] = pf
+
+					# fetch() uses PORTAGE_RESTRICT to control fetch
+					# restriction, which is only applied to files that
+					# are not fetchable via a mirror:// URI.
+					mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)
+
+					try:
+						st = os.stat(os.path.join(
+							mysettings["DISTDIR"],myfile))
+					except OSError:
+						st = None
+
+					if not fetch({myfile : uris}, mysettings):
+						myebuild = os.path.join(mysettings["O"],
+							catsplit(cpv)[1] + ".ebuild")
+						spawn_nofetch(myportdb, myebuild,
+							settings=mysettings)
+						writemsg(_("!!! Fetch failed for %s, can't update "
+							"Manifest\n") % myfile, noiselevel=-1)
+						if myfile in dist_hashes and \
+							st is not None and st.st_size > 0:
+							# stat result is obtained before calling fetch(),
+							# since fetch may rename the existing file if the
+							# digest does not match.
+							writemsg(_("!!! If you would like to "
+								"forcefully replace the existing "
+								"Manifest entry\n!!! for %s, use "
+								"the following command:\n") % myfile + \
+								"!!!    " + colorize("INFORM",
+								"ebuild --force %s manifest" % \
+								os.path.basename(myebuild)) + "\n",
+								noiselevel=-1)
+						return 0
+		writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
+		try:
+			mf.create(assumeDistHashesSometimes=True,
+				assumeDistHashesAlways=(
+				"assume-digests" in mysettings.features))
+		except FileNotFound as e:
+			writemsg(_("!!! File %s doesn't exist, can't update "
+				"Manifest\n") % e, noiselevel=-1)
+			return 0
+		except PortagePackageException as e:
+			writemsg(("!!! %s\n") % (e,), noiselevel=-1)
+			return 0
+		try:
+			mf.write(sign=False)
+		except PermissionDenied as e:
+			writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
+			return 0
+		if "assume-digests" not in mysettings.features:
+			distlist = list(mf.fhashdict.get("DIST", {}))
+			distlist.sort()
+			auto_assumed = []
+			for filename in distlist:
+				if not os.path.exists(
+					os.path.join(mysettings["DISTDIR"], filename)):
+					auto_assumed.append(filename)
+			if auto_assumed:
+				mytree = os.path.realpath(
+					os.path.dirname(os.path.dirname(mysettings["O"])))
+				cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
+				pkgs = myportdb.cp_list(cp, mytree=mytree)
+				pkgs.sort()
+				writemsg_stdout("  digest.assumed" + colorize("WARN",
+					str(len(auto_assumed)).rjust(18)) + "\n")
+				for pkg_key in pkgs:
+					fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
+					pv = pkg_key.split("/")[1]
+					for filename in auto_assumed:
+						if filename in fetchlist:
+							writemsg_stdout(
+								"   %s::%s\n" % (pv, filename))
+		return 1
+	finally:
+		portage._doebuild_manifest_exempt_depend -= 1
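+
+# A usage sketch for digestgen() (illustrative only; assumes a configured
+# portdbapi such as portage.portdb, and a hypothetical package directory):
+#
+#   import portage
+#   from portage.package.ebuild.digestgen import digestgen
+#
+#   settings = portage.config(clone=portage.settings)
+#   settings["O"] = "/usr/portage/app-misc/foo"
+#   # Fetches any distfiles missing from DISTDIR, then writes a fresh
+#   # Manifest; returns 1 on success and 0 on failure.
+#   ok = digestgen(mysettings=settings, myportdb=portage.portdb)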

diff --git a/portage_with_autodep/pym/portage/package/ebuild/doebuild.py b/portage_with_autodep/pym/portage/package/ebuild/doebuild.py
new file mode 100644
index 0000000..c76c1ed
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/doebuild.py
@@ -0,0 +1,1791 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['doebuild', 'doebuild_environment', 'spawn', 'spawnebuild']
+
+import gzip
+import errno
+import io
+from itertools import chain
+import logging
+import os as _os
+import re
+import shutil
+import signal
+import stat
+import sys
+import tempfile
+from textwrap import wrap
+import time
+import warnings
+import zlib
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.package.ebuild.config:check_config_instance',
+	'portage.package.ebuild.digestcheck:digestcheck',
+	'portage.package.ebuild.digestgen:digestgen',
+	'portage.package.ebuild.fetch:fetch',
+	'portage.package.ebuild._spawn_nofetch:spawn_nofetch',
+	'portage.util.ExtractKernelVersion:ExtractKernelVersion'
+)
+
+from portage import auxdbkeys, bsd_chflags, \
+	eapi_is_supported, merge, os, selinux, \
+	unmerge, _encodings, _parse_eapi_ebuild_head, _os_merge, \
+	_shell_quote, _unicode_decode, _unicode_encode
+from portage.const import EBUILD_SH_ENV_FILE, EBUILD_SH_ENV_DIR, \
+	EBUILD_SH_BINARY, INVALID_ENV_FILE, MISC_SH_BINARY
+from portage.data import portage_gid, portage_uid, secpass, \
+	uid, userpriv_groups
+from portage.dbapi.porttree import _parse_uri_map
+from portage.dep import Atom, check_required_use, \
+	human_readable_required_use, paren_enclose, use_reduce
+from portage.eapi import eapi_exports_KV, eapi_exports_merge_type, \
+	eapi_exports_replace_vars, eapi_has_required_use, \
+	eapi_has_src_prepare_and_src_configure, eapi_has_pkg_pretend
+from portage.elog import elog_process
+from portage.elog.messages import eerror, eqawarn
+from portage.exception import DigestException, FileNotFound, \
+	IncorrectParameter, InvalidDependString, PermissionDenied, \
+	UnsupportedAPIException
+from portage.localization import _
+from portage.manifest import Manifest
+from portage.output import style_to_ansi_code
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+from portage.util import apply_recursive_permissions, \
+	apply_secpass_permissions, noiselimit, normalize_path, \
+	writemsg, writemsg_stdout, write_atomic
+from portage.util.lafilefixer import rewrite_lafile	
+from portage.versions import _pkgsplit
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.EbuildSpawnProcess import EbuildSpawnProcess
+from _emerge.Package import Package
+from _emerge.PollScheduler import PollScheduler
+from _emerge.RootConfig import RootConfig
+
+_unsandboxed_phases = frozenset([
+	"clean", "cleanrm", "config",
+	"help", "info", "postinst",
+	"preinst", "pretend", "postrm",
+	"prerm", "setup"
+])
+
+def _doebuild_spawn(phase, settings, actionmap=None, **kwargs):
+	"""
+	All proper ebuild phases which execute ebuild.sh are spawned
+	via this function. No exceptions.
+	"""
+
+	if phase in _unsandboxed_phases:
+		kwargs['free'] = True
+
+	if phase == 'depend':
+		kwargs['droppriv'] = 'userpriv' in settings.features
+
+	if actionmap is not None and phase in actionmap:
+		kwargs.update(actionmap[phase]["args"])
+		cmd = actionmap[phase]["cmd"] % phase
+	else:
+		if phase == 'cleanrm':
+			ebuild_sh_arg = 'clean'
+		else:
+			ebuild_sh_arg = phase
+
+		cmd = "%s %s" % (_shell_quote(
+			os.path.join(settings["PORTAGE_BIN_PATH"],
+			os.path.basename(EBUILD_SH_BINARY))),
+			ebuild_sh_arg)
+
+	settings['EBUILD_PHASE'] = phase
+	try:
+		return spawn(cmd, settings, **kwargs)
+	finally:
+		settings.pop('EBUILD_PHASE', None)
+
+def _spawn_phase(phase, settings, actionmap=None, **kwargs):
+	if kwargs.get('returnpid'):
+		return _doebuild_spawn(phase, settings, actionmap=actionmap, **kwargs)
+
+	ebuild_phase = EbuildPhase(actionmap=actionmap, background=False,
+		phase=phase, scheduler=PollScheduler().sched_iface,
+		settings=settings)
+	ebuild_phase.start()
+	ebuild_phase.wait()
+	return ebuild_phase.returncode
+
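+# How the two helpers above relate (illustrative sketch):
+#
+#   rc = _spawn_phase("compile", settings)
+#   # wraps the phase in an EbuildPhase task and waits for its returncode
+#
+#   pids = _spawn_phase("depend", settings, returnpid=True)
+#   # returnpid short-circuits straight to _doebuild_spawn(); the caller
+#   # must then reap the returned PIDs itself
+#
+# Phases listed in _unsandboxed_phases automatically get free=True added
+# to their spawn keywords.
+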
+def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
+	debug=False, use_cache=None, db=None):
+	"""
+	Create and store environment variables in the config instance
+	that's passed in as the "settings" parameter. This will raise
+	UnsupportedAPIException if the given ebuild has an unsupported
+	EAPI. All EAPI dependent code comes last, so that essential
+	variables like PORTAGE_BUILDDIR are still initialized even in
+	cases when UnsupportedAPIException needs to be raised, which
+	can be useful when uninstalling a package that has corrupt
+	EAPI metadata.
+	The myroot and use_cache parameters are unused.
+	"""
+	myroot = None
+	use_cache = None
+
+	if settings is None:
+		raise TypeError("settings argument is required")
+
+	if db is None:
+		raise TypeError("db argument is required")
+
+	mysettings = settings
+	mydbapi = db
+	ebuild_path = os.path.abspath(myebuild)
+	pkg_dir     = os.path.dirname(ebuild_path)
+	mytree = os.path.dirname(os.path.dirname(pkg_dir))
+
+	if "CATEGORY" in mysettings.configdict["pkg"]:
+		cat = mysettings.configdict["pkg"]["CATEGORY"]
+	else:
+		cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
+
+	mypv = os.path.basename(ebuild_path)[:-7]
+
+	mycpv = cat+"/"+mypv
+	mysplit = _pkgsplit(mypv)
+	if mysplit is None:
+		raise IncorrectParameter(
+			_("Invalid ebuild path: '%s'") % myebuild)
+
+	# Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
+	# so that the caller can override it.
+	tmpdir = mysettings["PORTAGE_TMPDIR"]
+
+	if mydo == 'depend':
+		if mycpv != mysettings.mycpv:
+			# Don't pass in mydbapi here since the resulting aux_get
+			# call would lead to infinite 'depend' phase recursion.
+			mysettings.setcpv(mycpv)
+	else:
+		# If EAPI isn't in configdict["pkg"], it means that setcpv()
+		# hasn't been called with the mydb argument, so we have to
+		# call it here (portage code always calls setcpv properly,
+		# but api consumers might not).
+		if mycpv != mysettings.mycpv or \
+			"EAPI" not in mysettings.configdict["pkg"]:
+			# Reload env.d variables and reset any previous settings.
+			mysettings.reload()
+			mysettings.reset()
+			mysettings.setcpv(mycpv, mydb=mydbapi)
+
+	# config.reset() might have reverted a change made by the caller,
+	# so restore it to its original value. Sandbox needs canonical
+	# paths, so realpath it.
+	mysettings["PORTAGE_TMPDIR"] = os.path.realpath(tmpdir)
+
+	mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
+	mysettings["EBUILD_PHASE"] = mydo
+
+	# Set requested Python interpreter for Portage helpers.
+	mysettings['PORTAGE_PYTHON'] = portage._python_interpreter
+
+	# This is used by assert_sigpipe_ok(), which is used by the ebuild
+	# unpack() helper. SIGPIPE is typically 13, but it's better not
+	# to assume that.
+	mysettings['PORTAGE_SIGPIPE_STATUS'] = str(128 + signal.SIGPIPE)
+
+	# We are disabling user-specific bashrc files.
+	mysettings["BASH_ENV"] = INVALID_ENV_FILE
+
+	if debug: # Otherwise it overrides emerge's settings.
+		# We have no other way to set debug; it can't be passed in
+		# due to how the config is coded. Don't overwrite this so we can use it.
+		mysettings["PORTAGE_DEBUG"] = "1"
+
+	mysettings["EBUILD"]   = ebuild_path
+	mysettings["O"]        = pkg_dir
+	mysettings.configdict["pkg"]["CATEGORY"] = cat
+	mysettings["FILESDIR"] = pkg_dir+"/files"
+	mysettings["PF"]       = mypv
+
+	if hasattr(mydbapi, '_repo_info'):
+		repo_info = mydbapi._repo_info[mytree]
+		mysettings['PORTDIR'] = repo_info.portdir
+		mysettings['PORTDIR_OVERLAY'] = repo_info.portdir_overlay
+		mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"] = repo_info.name
+
+	mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
+	mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
+	mysettings["RPMDIR"]  = os.path.realpath(mysettings["RPMDIR"])
+
+	mysettings["ECLASSDIR"]   = mysettings["PORTDIR"]+"/eclass"
+	mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
+
+	mysettings["PROFILE_PATHS"] = "\n".join(mysettings.profiles)
+	mysettings["P"]  = mysplit[0]+"-"+mysplit[1]
+	mysettings["PN"] = mysplit[0]
+	mysettings["PV"] = mysplit[1]
+	mysettings["PR"] = mysplit[2]
+
+	if noiselimit < 0:
+		mysettings["PORTAGE_QUIET"] = "1"
+
+	if mysplit[2] == "r0":
+		mysettings["PVR"]=mysplit[1]
+	else:
+		mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
+
+	if "PATH" in mysettings:
+		mysplit=mysettings["PATH"].split(":")
+	else:
+		mysplit=[]
+	# Note: PORTAGE_BIN_PATH may differ from the global constant
+	# when portage is reinstalling itself.
+	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
+	if portage_bin_path not in mysplit:
+		mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"]
+
+	# All temporary directories should be subdirectories of
+	# $PORTAGE_TMPDIR/portage, since it's common for /tmp and /var/tmp
+	# to be mounted with the "noexec" option (see bug #346899).
+	mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
+	mysettings["PKG_TMPDIR"]   = mysettings["BUILD_PREFIX"]+"/._unmerge_"
+
+	# Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
+	# locations in order to prevent interference.
+	if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
+		mysettings["PORTAGE_BUILDDIR"] = os.path.join(
+			mysettings["PKG_TMPDIR"],
+			mysettings["CATEGORY"], mysettings["PF"])
+	else:
+		mysettings["PORTAGE_BUILDDIR"] = os.path.join(
+			mysettings["BUILD_PREFIX"],
+			mysettings["CATEGORY"], mysettings["PF"])
+
+	mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
+	mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
+	mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
+	mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
+
+	# Prefix forward compatibility
+	mysettings["ED"] = mysettings["D"]
+
+	mysettings["PORTAGE_BASHRC"] = os.path.join(
+		mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE)
+	mysettings["PM_EBUILD_HOOK_DIR"] = os.path.join(
+		mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_DIR)
+
+	# Allow color.map to control colors associated with einfo, ewarn, etc...
+	mycolors = []
+	for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
+		mycolors.append("%s=$'%s'" % \
+			(c, style_to_ansi_code(c)))
+	mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
+
+	# All EAPI dependent code comes last, so that essential variables
+	# like PORTAGE_BUILDDIR are still initialized even in cases when
+	# UnsupportedAPIException needs to be raised, which can be useful
+	# when uninstalling a package that has corrupt EAPI metadata.
+	eapi = None
+	if mydo == 'depend' and 'EAPI' not in mysettings.configdict['pkg']:
+		if eapi is None and 'parse-eapi-ebuild-head' in mysettings.features:
+			eapi = _parse_eapi_ebuild_head(
+				io.open(_unicode_encode(ebuild_path,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['content'], errors='replace'))
+
+		if eapi is not None:
+			if not eapi_is_supported(eapi):
+				raise UnsupportedAPIException(mycpv, eapi)
+			mysettings.configdict['pkg']['EAPI'] = eapi
+
+	if mydo != "depend":
+		# Metadata vars such as EAPI and RESTRICT are
+		# set by the above config.setcpv() call.
+		eapi = mysettings["EAPI"]
+		if not eapi_is_supported(eapi):
+			# can't do anything with this.
+			raise UnsupportedAPIException(mycpv, eapi)
+
+		if hasattr(mydbapi, "getFetchMap") and \
+			("A" not in mysettings.configdict["pkg"] or \
+			"AA" not in mysettings.configdict["pkg"]):
+			src_uri, = mydbapi.aux_get(mysettings.mycpv,
+				["SRC_URI"], mytree=mytree)
+			metadata = {
+				"EAPI"    : eapi,
+				"SRC_URI" : src_uri,
+			}
+			use = frozenset(mysettings["PORTAGE_USE"].split())
+			try:
+				uri_map = _parse_uri_map(mysettings.mycpv, metadata, use=use)
+			except InvalidDependString:
+				mysettings.configdict["pkg"]["A"] = ""
+			else:
+				mysettings.configdict["pkg"]["A"] = " ".join(uri_map)
+
+			try:
+				uri_map = _parse_uri_map(mysettings.mycpv, metadata)
+			except InvalidDependString:
+				mysettings.configdict["pkg"]["AA"] = ""
+			else:
+				mysettings.configdict["pkg"]["AA"] = " ".join(uri_map)
+
+	if not eapi_exports_KV(eapi):
+		# Discard KV for EAPIs that don't support it. Cache KV is restored
+		# from the backupenv whenever config.reset() is called.
+		mysettings.pop('KV', None)
+	elif mydo != 'depend' and 'KV' not in mysettings and \
+		mydo in ('compile', 'config', 'configure', 'info',
+		'install', 'nofetch', 'postinst', 'postrm', 'preinst',
+		'prepare', 'prerm', 'setup', 'test', 'unpack'):
+		mykv, err1 = ExtractKernelVersion(
+			os.path.join(mysettings['EROOT'], "usr/src/linux"))
+		if mykv:
+			# Regular source tree
+			mysettings["KV"] = mykv
+		else:
+			mysettings["KV"] = ""
+		mysettings.backup_changes("KV")
+
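+# A call sketch for doebuild_environment() (illustrative only; the ebuild
+# path is hypothetical):
+#
+#   import portage
+#   settings = portage.config(clone=portage.settings)
+#   doebuild_environment(
+#       "/usr/portage/app-misc/foo/foo-1.0.ebuild", "setup",
+#       settings=settings, db=portage.portdb)
+#   # settings now carries P, PN, PV, PF, WORKDIR, D, T and the other
+#   # variables required by the requested phase.
+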
+_doebuild_manifest_cache = None
+_doebuild_broken_ebuilds = set()
+_doebuild_broken_manifests = set()
+_doebuild_commands_without_builddir = (
+	'clean', 'cleanrm', 'depend', 'digest',
+	'fetch', 'fetchall', 'help', 'manifest'
+)
+
+def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
+	fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
+	mydbapi=None, vartree=None, prev_mtimes=None,
+	fd_pipes=None, returnpid=False):
+	"""
+	Wrapper function that invokes specific ebuild phases through the spawning
+	of ebuild.sh
+	
+	@param myebuild: name of the ebuild to invoke the phase on (CPV)
+	@type myebuild: String
+	@param mydo: Phase to run
+	@type mydo: String
+	@param myroot: $ROOT (usually '/', see man make.conf)
+	@type myroot: String
+	@param mysettings: Portage Configuration
+	@type mysettings: instance of portage.config
+	@param debug: Turns on various debug information (e.g., debug for spawn)
+	@type debug: Boolean
+	@param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
+	@type listonly: Boolean
+	@param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
+	@type fetchonly: Boolean
+	@param cleanup: Passed to prepare_build_dirs to request cleanup of any existing build directory
+	@type cleanup: Boolean
+	@param dbkey: A file path where metadata generated by the 'depend' phase
+		will be written.
+	@type dbkey: String
+	@param use_cache: Enables the cache
+	@type use_cache: Boolean
+	@param fetchall: Used to wrap fetch(), fetches all URIs (even ones invalid due to USE conditionals)
+	@type fetchall: Boolean
+	@param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
+	@type tree: String
+	@param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
+	@type mydbapi: portdbapi instance
+	@param vartree: An instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
+	@type vartree: vartree instance
+	@param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
+	@type prev_mtimes: dictionary
+	@param fd_pipes: A dict mapping file descriptor numbers to pipes,
+		e.g. { 0: stdin, 1: stdout }.
+	@type fd_pipes: Dictionary
+	@param returnpid: Return a list of process IDs for a successful spawn, or
+		an integer value if spawn is unsuccessful. NOTE: This requires the
+		caller to clean up all returned PIDs.
+	@type returnpid: Boolean
+	@rtype: Boolean
+	@returns:
+	1. 0 for success
+	2. 1 for error
+	
+	Most errors have an accompanying error message.
+	
+	listonly and fetchonly are only really necessary for operations involving 'fetch'
+	prev_mtimes are only necessary for merge operations.
+	Other variables may not be strictly required, many have defaults that are set inside of doebuild.
+	
+	"""
+	
+	if not tree:
+		writemsg("Warning: tree not specified to doebuild\n")
+		tree = "porttree"
+	
+	# chunked out deps for each phase, so that the ebuild binary can use
+	# them to collapse targets down.
+	actionmap_deps={
+	"pretend"  : [],
+	"setup"    : ["pretend"],
+	"unpack"   : ["setup"],
+	"prepare"  : ["unpack"],
+	"configure": ["prepare"],
+	"compile"  : ["configure"],
+	"test"     : ["compile"],
+	"install"  : ["test"],
+	"rpm"      : ["install"],
+	"package"  : ["install"],
+	}
+	
+	if mydbapi is None:
+		mydbapi = portage.db[myroot][tree].dbapi
+
+	if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
+		vartree = portage.db[myroot]["vartree"]
+
+	features = mysettings.features
+
+	clean_phases = ("clean", "cleanrm")
+	validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
+	                "config", "info", "setup", "depend", "pretend",
+	                "fetch", "fetchall", "digest",
+	                "unpack", "prepare", "configure", "compile", "test",
+	                "install", "rpm", "qmerge", "merge",
+	                "package","unmerge", "manifest"]
+
+	if mydo not in validcommands:
+		validcommands.sort()
+		writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
+			noiselevel=-1)
+		for vcount in range(len(validcommands)):
+			if vcount%6 == 0:
+				writemsg("\n!!! ", noiselevel=-1)
+			writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
+		writemsg("\n", noiselevel=-1)
+		return 1
+
+	if returnpid and mydo != 'depend':
+		warnings.warn("portage.doebuild() called " + \
+			"with returnpid parameter enabled. This usage will " + \
+			"not be supported in the future.",
+			DeprecationWarning, stacklevel=2)
+
+	if mydo == "fetchall":
+		fetchall = 1
+		mydo = "fetch"
+
+	parallel_fetchonly = mydo in ("fetch", "fetchall") and \
+		"PORTAGE_PARALLEL_FETCHONLY" in mysettings
+
+	if mydo not in clean_phases and not os.path.exists(myebuild):
+		writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
+			noiselevel=-1)
+		return 1
+
+	global _doebuild_manifest_cache
+	mf = None
+	if "strict" in features and \
+		"digest" not in features and \
+		tree == "porttree" and \
+		mydo not in ("digest", "manifest", "help") and \
+		not portage._doebuild_manifest_exempt_depend:
+		# Always verify the ebuild checksums before executing it.
+		global _doebuild_broken_ebuilds
+
+		if myebuild in _doebuild_broken_ebuilds:
+			return 1
+
+		pkgdir = os.path.dirname(myebuild)
+		manifest_path = os.path.join(pkgdir, "Manifest")
+
+		# Avoid checking the same Manifest several times in a row during a
+		# regen with an empty cache.
+		if _doebuild_manifest_cache is None or \
+			_doebuild_manifest_cache.getFullname() != manifest_path:
+			_doebuild_manifest_cache = None
+			if not os.path.exists(manifest_path):
+				out = portage.output.EOutput()
+				out.eerror(_("Manifest not found for '%s'") % (myebuild,))
+				_doebuild_broken_ebuilds.add(myebuild)
+				return 1
+			mf = Manifest(pkgdir, mysettings["DISTDIR"])
+
+		else:
+			mf = _doebuild_manifest_cache
+
+		try:
+			mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
+		except KeyError:
+			out = portage.output.EOutput()
+			out.eerror(_("Missing digest for '%s'") % (myebuild,))
+			_doebuild_broken_ebuilds.add(myebuild)
+			return 1
+		except FileNotFound:
+			out = portage.output.EOutput()
+			out.eerror(_("A file listed in the Manifest "
+				"could not be found: '%s'") % (myebuild,))
+			_doebuild_broken_ebuilds.add(myebuild)
+			return 1
+		except DigestException as e:
+			out = portage.output.EOutput()
+			out.eerror(_("Digest verification failed:"))
+			out.eerror("%s" % e.value[0])
+			out.eerror(_("Reason: %s") % e.value[1])
+			out.eerror(_("Got: %s") % e.value[2])
+			out.eerror(_("Expected: %s") % e.value[3])
+			_doebuild_broken_ebuilds.add(myebuild)
+			return 1
+
+		if mf.getFullname() in _doebuild_broken_manifests:
+			return 1
+
+		if mf is not _doebuild_manifest_cache:
+
+			# Make sure that all of the ebuilds are
+			# actually listed in the Manifest.
+			for f in os.listdir(pkgdir):
+				pf = None
+				if f[-7:] == '.ebuild':
+					pf = f[:-7]
+				if pf is not None and not mf.hasFile("EBUILD", f):
+					f = os.path.join(pkgdir, f)
+					if f not in _doebuild_broken_ebuilds:
+						out = portage.output.EOutput()
+						out.eerror(_("A file is not listed in the "
+							"Manifest: '%s'") % (f,))
+					_doebuild_broken_manifests.add(manifest_path)
+					return 1
+
+			# Only cache it if the above stray files test succeeds.
+			_doebuild_manifest_cache = mf
+
+	logfile=None
+	builddir_lock = None
+	tmpdir = None
+	tmpdir_orig = None
+
+	try:
+		if mydo in ("digest", "manifest", "help"):
+			# Temporarily exempt the depend phase from manifest checks, in case
+			# aux_get calls trigger cache generation.
+			portage._doebuild_manifest_exempt_depend += 1
+
+		# If we don't need much space and we don't need a constant location,
+		# we can temporarily override PORTAGE_TMPDIR with a random temp dir
+		# so that there's no need for locking and it can be used even if the
+		# user isn't in the portage group.
+		if mydo in ("info",):
+			tmpdir = tempfile.mkdtemp()
+			tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
+			mysettings["PORTAGE_TMPDIR"] = tmpdir
+
+		doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
+			use_cache, mydbapi)
+
+		if mydo in clean_phases:
+			builddir_lock = None
+			if not returnpid and \
+				'PORTAGE_BUILDIR_LOCKED' not in mysettings:
+				builddir_lock = EbuildBuildDir(
+					scheduler=PollScheduler().sched_iface,
+					settings=mysettings)
+				builddir_lock.lock()
+			try:
+				return _spawn_phase(mydo, mysettings,
+					fd_pipes=fd_pipes, returnpid=returnpid)
+			finally:
+				if builddir_lock is not None:
+					builddir_lock.unlock()
+
+		restrict = set(mysettings.get('PORTAGE_RESTRICT', '').split())
+		# get possible slot information from the deps file
+		if mydo == "depend":
+			writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
+			if returnpid:
+				return _spawn_phase(mydo, mysettings,
+					fd_pipes=fd_pipes, returnpid=returnpid)
+			elif isinstance(dbkey, dict):
+				warnings.warn("portage.doebuild() called " + \
+					"with dict dbkey argument. This usage will " + \
+					"not be supported in the future.",
+					DeprecationWarning, stacklevel=2)
+				mysettings["dbkey"] = ""
+				pr, pw = os.pipe()
+				fd_pipes = {
+					0:sys.stdin.fileno(),
+					1:sys.stdout.fileno(),
+					2:sys.stderr.fileno(),
+					9:pw}
+				mypids = _spawn_phase(mydo, mysettings, returnpid=True,
+					fd_pipes=fd_pipes)
+				os.close(pw) # belongs exclusively to the child process now
+				f = os.fdopen(pr, 'rb', 0)
+				for k, v in zip(auxdbkeys,
+					(_unicode_decode(line).rstrip('\n') for line in f)):
+					dbkey[k] = v
+				f.close()
+				retval = os.waitpid(mypids[0], 0)[1]
+				portage.process.spawned_pids.remove(mypids[0])
+				# If it got a signal, return the signal that was sent, but
+				# shift in order to distinguish it from a return value. (just
+				# like portage.process.spawn() would do).
+				if retval & 0xff:
+					retval = (retval & 0xff) << 8
+				else:
+					# Otherwise, return its exit code.
+					retval = retval >> 8
+				if retval == os.EX_OK and len(dbkey) != len(auxdbkeys):
+					# Don't trust bash's returncode if the
+					# number of lines is incorrect.
+					retval = 1
+				return retval
+			elif dbkey:
+				mysettings["dbkey"] = dbkey
+			else:
+				mysettings["dbkey"] = \
+					os.path.join(mysettings.depcachedir, "aux_db_key_temp")
+
+			return _spawn_phase(mydo, mysettings,
+				fd_pipes=fd_pipes, returnpid=returnpid)
+
+		# Validate dependency metadata here to ensure that ebuilds with invalid
+		# data are never installed via the ebuild command. Don't bother when
+		# returnpid == True since there's no need to do this every time emerge
+		# executes a phase.
+		if tree == "porttree":
+			rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
+			if rval != os.EX_OK:
+				return rval
+
+		# The info phase is special because it uses mkdtemp so any
+		# user (not necessarily in the portage group) can run it.
+		if mydo not in ('info',) and \
+			mydo not in _doebuild_commands_without_builddir:
+			rval = _check_temp_dir(mysettings)
+			if rval != os.EX_OK:
+				return rval
+
+		if mydo == "unmerge":
+			return unmerge(mysettings["CATEGORY"],
+				mysettings["PF"], myroot, mysettings, vartree=vartree)
+
+		# Build directory creation isn't required for any of these.
+		# In the fetch phase, the directory is needed only for RESTRICT=fetch
+		# in order to satisfy the sane $PWD requirement (from bug #239560)
+		# when pkg_nofetch is spawned.
+		have_build_dirs = False
+		if not parallel_fetchonly and \
+			mydo not in ('digest', 'fetch', 'help', 'manifest'):
+			if not returnpid and \
+				'PORTAGE_BUILDIR_LOCKED' not in mysettings:
+				builddir_lock = EbuildBuildDir(
+					scheduler=PollScheduler().sched_iface,
+					settings=mysettings)
+				builddir_lock.lock()
+			mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
+			if mystatus:
+				return mystatus
+			have_build_dirs = True
+
+			# emerge handles logging externally
+			if not returnpid:
+				# PORTAGE_LOG_FILE is set by the
+				# above prepare_build_dirs() call.
+				logfile = mysettings.get("PORTAGE_LOG_FILE")
+
+		if have_build_dirs:
+			rval = _prepare_env_file(mysettings)
+			if rval != os.EX_OK:
+				return rval
+
+		if eapi_exports_merge_type(mysettings["EAPI"]) and \
+			"MERGE_TYPE" not in mysettings.configdict["pkg"]:
+			if tree == "porttree":
+				mysettings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+				mysettings.configdict["pkg"]["MERGE_TYPE"] = "source"
+			elif tree == "bintree":
+				mysettings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+				mysettings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+		# NOTE: It's not possible to set REPLACED_BY_VERSION for prerm
+		#       and postrm here, since we don't necessarily know what
+		#       versions are being installed. This could be a problem
+		#       for API consumers if they don't use dblink.treewalk()
+		#       to execute prerm and postrm.
+		if eapi_exports_replace_vars(mysettings["EAPI"]) and \
+			(mydo in ("postinst", "preinst", "pretend", "setup") or \
+			("noauto" not in features and not returnpid and \
+			(mydo in actionmap_deps or mydo in ("merge", "package", "qmerge")))):
+			if not vartree:
+				writemsg("Warning: vartree not given to doebuild. " + \
+					"Cannot set REPLACING_VERSIONS in pkg_{pretend,setup}\n")
+			else:
+				vardb = vartree.dbapi
+				cpv = mysettings.mycpv
+				cp = portage.versions.cpv_getkey(cpv)
+				slot = mysettings["SLOT"]
+				cpv_slot = cp + ":" + slot
+				mysettings["REPLACING_VERSIONS"] = " ".join(
+					set(portage.versions.cpv_getversion(match) \
+						for match in vardb.match(cpv_slot) + \
+						vardb.match('='+cpv)))
+
+		# if any of these are being called, handle them -- running them out of
+		# the sandbox -- and stop now.
+		if mydo in ("config", "help", "info", "postinst",
+			"preinst", "pretend", "postrm", "prerm"):
+			return _spawn_phase(mydo, mysettings,
+				fd_pipes=fd_pipes, logfile=logfile, returnpid=returnpid)
+
+		mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
+
+		# Only try and fetch the files if we are going to need them ...
+		# otherwise, if user has FEATURES=noauto and they run `ebuild clean
+		# unpack compile install`, we will try and fetch 4 times :/
+		need_distfiles = tree == "porttree" and \
+			(mydo in ("fetch", "unpack") or \
+			mydo not in ("digest", "manifest") and "noauto" not in features)
+		alist = set(mysettings.configdict["pkg"].get("A", "").split())
+		if need_distfiles:
+
+			src_uri, = mydbapi.aux_get(mysettings.mycpv,
+				["SRC_URI"], mytree=os.path.dirname(os.path.dirname(
+				os.path.dirname(myebuild))))
+			metadata = {
+				"EAPI"    : mysettings["EAPI"],
+				"SRC_URI" : src_uri,
+			}
+			use = frozenset(mysettings["PORTAGE_USE"].split())
+			try:
+				alist = _parse_uri_map(mysettings.mycpv, metadata, use=use)
+				aalist = _parse_uri_map(mysettings.mycpv, metadata)
+			except InvalidDependString as e:
+				writemsg("!!! %s\n" % str(e), noiselevel=-1)
+				writemsg(_("!!! Invalid SRC_URI for '%s'.\n") % mycpv,
+					noiselevel=-1)
+				del e
+				return 1
+
+			if "mirror" in features or fetchall:
+				fetchme = aalist
+			else:
+				fetchme = alist
+
+			dist_digests = None
+			if mf is not None:
+				dist_digests = mf.getTypeDigests("DIST")
+			if not fetch(fetchme, mysettings, listonly=listonly,
+				fetchonly=fetchonly, allow_missing_digests=True,
+				digests=dist_digests):
+				spawn_nofetch(mydbapi, myebuild, settings=mysettings)
+				if listonly:
+					# The convention for listonly mode is to report
+					# success in any case, even though fetch() may
+					# return unsuccessfully in order to trigger the
+					# nofetch phase.
+					return 0
+				return 1
+
+		if mydo == "fetch":
+			# Files are already checked inside fetch(),
+			# so do not check them again.
+			checkme = []
+		else:
+			checkme = alist
+
+		if mydo == "fetch" and listonly:
+			return 0
+
+		try:
+			if mydo == "manifest":
+				mf = None
+				_doebuild_manifest_cache = None
+				return not digestgen(mysettings=mysettings, myportdb=mydbapi)
+			elif mydo == "digest":
+				mf = None
+				_doebuild_manifest_cache = None
+				return not digestgen(mysettings=mysettings, myportdb=mydbapi)
+			elif mydo != 'fetch' and \
+				"digest" in mysettings.features:
+				# Don't do this when called by emerge or when called just
+				# for fetch (especially parallel-fetch) since it's not needed
+				# and it can interfere with parallel tasks.
+				mf = None
+				_doebuild_manifest_cache = None
+				digestgen(mysettings=mysettings, myportdb=mydbapi)
+		except PermissionDenied as e:
+			writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
+			if mydo in ("digest", "manifest"):
+				return 1
+
+		# See above comment about fetching only when needed
+		if tree == 'porttree' and \
+			not digestcheck(checkme, mysettings, "strict" in features, mf=mf):
+			return 1
+
+		if mydo == "fetch":
+			return 0
+
+		# remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
+		if tree == 'porttree' and \
+			((mydo != "setup" and "noauto" not in features) \
+			or mydo in ("install", "unpack")):
+			_prepare_fake_distdir(mysettings, alist)
+
+		#initial dep checks complete; time to process main commands
+		actionmap = _spawn_actionmap(mysettings)
+
+		# merge the deps in so we have again a 'full' actionmap
+		# be glad when this can die.
+		for x in actionmap:
+			if len(actionmap_deps.get(x, [])):
+				actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
+
+		if mydo in actionmap:
+			bintree = None
+			if mydo == "package":
+				# Make sure the package directory exists before executing
+				# this phase. This can raise PermissionDenied if
+				# the current user doesn't have write access to $PKGDIR.
+				if hasattr(portage, 'db'):
+					bintree = portage.db[mysettings["ROOT"]]["bintree"]
+					mysettings["PORTAGE_BINPKG_TMPFILE"] = \
+						bintree.getname(mysettings.mycpv) + \
+						".%s" % (os.getpid(),)
+					bintree._ensure_dir(os.path.dirname(
+						mysettings["PORTAGE_BINPKG_TMPFILE"]))
+				else:
+					parent_dir = os.path.join(mysettings["PKGDIR"],
+						mysettings["CATEGORY"])
+					portage.util.ensure_dirs(parent_dir)
+					if not os.access(parent_dir, os.W_OK):
+						raise PermissionDenied(
+							"access('%s', os.W_OK)" % parent_dir)
+			retval = spawnebuild(mydo,
+				actionmap, mysettings, debug, logfile=logfile,
+				fd_pipes=fd_pipes, returnpid=returnpid)
+
+			if retval == os.EX_OK:
+				if mydo == "package" and bintree is not None:
+					bintree.inject(mysettings.mycpv,
+						filename=mysettings["PORTAGE_BINPKG_TMPFILE"])
+		elif mydo=="qmerge":
+			# check to ensure install was run.  this *only* pops up when users
+			# forget it and are using ebuild
+			if not os.path.exists(
+				os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
+				writemsg(_("!!! mydo=qmerge, but the install phase has not been run\n"),
+					noiselevel=-1)
+				return 1
+			# qmerge is a special phase that implies noclean.
+			if "noclean" not in mysettings.features:
+				mysettings.features.add("noclean")
+			#qmerge is specifically not supposed to do a runtime dep check
+			retval = merge(
+				mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
+				os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
+				myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
+				mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes)
+		elif mydo=="merge":
+			retval = spawnebuild("install", actionmap, mysettings, debug,
+				alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
+				returnpid=returnpid)
+			if retval != os.EX_OK:
+				# The merge phase handles this already.  Callers don't know how
+				# far this function got, so we have to call elog_process() here
+				# so that it's only called once.
+				elog_process(mysettings.mycpv, mysettings)
+			if retval == os.EX_OK:
+				retval = merge(mysettings["CATEGORY"], mysettings["PF"],
+					mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
+					"build-info"), myroot, mysettings,
+					myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
+					vartree=vartree, prev_mtimes=prev_mtimes)
+		else:
+			writemsg_stdout(_("!!! Unknown mydo: %s\n") % mydo, noiselevel=-1)
+			return 1
+
+		return retval
+
+	finally:
+
+		if builddir_lock is not None:
+			builddir_lock.unlock()
+		if tmpdir:
+			mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
+			shutil.rmtree(tmpdir)
+
+		mysettings.pop("REPLACING_VERSIONS", None)
+
+		# Make sure that DISTDIR is restored to its normal value before we return!
+		if "PORTAGE_ACTUAL_DISTDIR" in mysettings:
+			mysettings["DISTDIR"] = mysettings["PORTAGE_ACTUAL_DISTDIR"]
+			del mysettings["PORTAGE_ACTUAL_DISTDIR"]
+
+		if logfile and not returnpid:
+			try:
+				if os.stat(logfile).st_size == 0:
+					os.unlink(logfile)
+			except OSError:
+				pass
+
+		if mydo in ("digest", "manifest", "help"):
+			# If necessary, depend phase has been triggered by aux_get calls
+			# and the exemption is no longer needed.
+			portage._doebuild_manifest_exempt_depend -= 1
+
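+# A usage sketch for doebuild() (illustrative only; roughly what the
+# `ebuild` command does for a single phase, with a hypothetical path):
+#
+#   import portage
+#   settings = portage.config(clone=portage.settings)
+#   rc = doebuild("/usr/portage/app-misc/foo/foo-1.0.ebuild", "fetch",
+#       settings["ROOT"], settings, tree="porttree",
+#       mydbapi=portage.portdb)
+#   # rc follows the convention documented above: 0 for success,
+#   # 1 for error.
+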
+def _check_temp_dir(settings):
+	if "PORTAGE_TMPDIR" not in settings or \
+		not os.path.isdir(settings["PORTAGE_TMPDIR"]):
+		writemsg(_("The directory specified in your "
+			"PORTAGE_TMPDIR variable, '%s',\n"
+			"does not exist.  Please create this directory or "
+			"correct your PORTAGE_TMPDIR setting.\n") % \
+			settings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
+		return 1
+
+	# Since some people use a separate PORTAGE_TMPDIR mount, we prefer
+	# that location, as the checks below would otherwise be pointless
+	# for those people.
+	if os.path.exists(os.path.join(settings["PORTAGE_TMPDIR"], "portage")):
+		checkdir = os.path.join(settings["PORTAGE_TMPDIR"], "portage")
+	else:
+		checkdir = settings["PORTAGE_TMPDIR"]
+
+	if not os.access(checkdir, os.W_OK):
+		writemsg(_("%s is not writable.\n"
+			"Likely cause is that you've mounted it as readonly.\n") % checkdir,
+			noiselevel=-1)
+		return 1
+
+	else:
+		fd = tempfile.NamedTemporaryFile(prefix="exectest-", dir=checkdir)
+		os.chmod(fd.name, 0o755)
+		if not os.access(fd.name, os.X_OK):
+			writemsg(_("Can not execute files in %s\n"
+				"Likely cause is that you've mounted it with one of the\n"
+				"following mount options: 'noexec', 'user', 'users'\n\n"
+				"Please make sure that portage can execute files in this directory.\n") % checkdir,
+				noiselevel=-1)
+			return 1
+
+	return os.EX_OK
+
+def _prepare_env_file(settings):
+	"""
+	Extract environment.bz2 if it exists, but only if the destination
+	environment file doesn't already exist. There are lots of possible
+	states when doebuild() calls this function, and we want to avoid
+	clobbering an existing environment file.
+	"""
+
+	env_extractor = BinpkgEnvExtractor(background=False,
+		scheduler=PollScheduler().sched_iface, settings=settings)
+
+	if env_extractor.dest_env_exists():
+		# There are lots of possible states when doebuild()
+		# calls this function, and we want to avoid
+		# clobbering an existing environment file.
+		return os.EX_OK
+
+	if not env_extractor.saved_env_exists():
+		# If the environment.bz2 doesn't exist, then ebuild.sh will
+		# source the ebuild as a fallback.
+		return os.EX_OK
+
+	env_extractor.start()
+	env_extractor.wait()
+	return env_extractor.returncode
+
+def _prepare_fake_distdir(settings, alist):
+	orig_distdir = settings["DISTDIR"]
+	settings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir
+	edpath = settings["DISTDIR"] = \
+		os.path.join(settings["PORTAGE_BUILDDIR"], "distdir")
+	portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0o755)
+
+	# Remove any unexpected files or directories.
+	for x in os.listdir(edpath):
+		symlink_path = os.path.join(edpath, x)
+		st = os.lstat(symlink_path)
+		if x in alist and stat.S_ISLNK(st.st_mode):
+			continue
+		if stat.S_ISDIR(st.st_mode):
+			shutil.rmtree(symlink_path)
+		else:
+			os.unlink(symlink_path)
+
+	# Check for existing symlinks and recreate if necessary.
+	for x in alist:
+		symlink_path = os.path.join(edpath, x)
+		target = os.path.join(orig_distdir, x)
+		try:
+			link_target = os.readlink(symlink_path)
+		except OSError:
+			os.symlink(target, symlink_path)
+		else:
+			if link_target != target:
+				os.unlink(symlink_path)
+				os.symlink(target, symlink_path)
+
+def _spawn_actionmap(settings):
+	features = settings.features
+	restrict = settings["PORTAGE_RESTRICT"].split()
+	nosandbox = (("userpriv" in features) and \
+		("usersandbox" not in features) and \
+		"userpriv" not in restrict and \
+		"nouserpriv" not in restrict)
+	if nosandbox and ("userpriv" not in features or \
+		"userpriv" in restrict or \
+		"nouserpriv" in restrict):
+		nosandbox = ("sandbox" not in features and \
+			"usersandbox" not in features)
+
+	if "depcheck" in features or "depcheckstrict" in features:
+		nosandbox = True
+
+	if not portage.process.sandbox_capable:
+		nosandbox = True
+
+	sesandbox = settings.selinux_enabled() and \
+		"sesandbox" in features
+
+	droppriv = "userpriv" in features and \
+		"userpriv" not in restrict and \
+		secpass >= 2
+
+	fakeroot = "fakeroot" in features
+
+	portage_bin_path = settings["PORTAGE_BIN_PATH"]
+	ebuild_sh_binary = os.path.join(portage_bin_path,
+		os.path.basename(EBUILD_SH_BINARY))
+	misc_sh_binary = os.path.join(portage_bin_path,
+		os.path.basename(MISC_SH_BINARY))
+	ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
+	misc_sh = _shell_quote(misc_sh_binary) + " dyn_%s"
+
+	# args are passed to the spawn function
+	actionmap = {
+"pretend":  {"cmd":ebuild_sh, "args":{"droppriv":0,        "free":1,         "sesandbox":0,         "fakeroot":0}},
+"setup":    {"cmd":ebuild_sh, "args":{"droppriv":0,        "free":1,         "sesandbox":0,         "fakeroot":0}},
+"unpack":   {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0,         "sesandbox":sesandbox, "fakeroot":0}},
+"prepare":  {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0,         "sesandbox":sesandbox, "fakeroot":0}},
+"configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
+"compile":  {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
+"test":     {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
+"install":  {"cmd":ebuild_sh, "args":{"droppriv":0,        "free":0,         "sesandbox":sesandbox, "fakeroot":fakeroot}},
+"rpm":      {"cmd":misc_sh,   "args":{"droppriv":0,        "free":0,         "sesandbox":0,         "fakeroot":fakeroot}},
+"package":  {"cmd":misc_sh,   "args":{"droppriv":0,        "free":0,         "sesandbox":0,         "fakeroot":fakeroot}},
+		}
+
+	return actionmap
+
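+# The returned actionmap maps each phase name to a shell command template
+# plus spawn keyword arguments; _doebuild_spawn() consumes it like this
+# (illustrative sketch):
+#
+#   actionmap = _spawn_actionmap(settings)
+#   entry = actionmap["compile"]
+#   cmd = entry["cmd"] % "compile"  # e.g. ".../ebuild.sh compile"
+#   # entry["args"] supplies the droppriv/free/sesandbox/fakeroot
+#   # toggles that are merged into the spawn() keywords.
+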
+def _validate_deps(mysettings, myroot, mydo, mydbapi):
+
+	invalid_dep_exempt_phases = \
+		set(["clean", "cleanrm", "help", "prerm", "postrm"])
+	all_keys = set(Package.metadata_keys)
+	all_keys.add("SRC_URI")
+	all_keys = tuple(all_keys)
+	metadata = dict(zip(all_keys,
+		mydbapi.aux_get(mysettings.mycpv, all_keys)))
+
+	class FakeTree(object):
+		def __init__(self, mydb):
+			self.dbapi = mydb
+
+	root_config = RootConfig(mysettings, {"porttree":FakeTree(mydbapi)}, None)
+
+	pkg = Package(built=False, cpv=mysettings.mycpv,
+		metadata=metadata, root_config=root_config,
+		type_name="ebuild")
+
+	msgs = []
+	if pkg.invalid:
+		for k, v in pkg.invalid.items():
+			for msg in v:
+				msgs.append("  %s\n" % (msg,))
+
+	if msgs:
+		portage.util.writemsg_level(_("Error(s) in metadata for '%s':\n") % \
+			(mysettings.mycpv,), level=logging.ERROR, noiselevel=-1)
+		for x in msgs:
+			portage.util.writemsg_level(x,
+				level=logging.ERROR, noiselevel=-1)
+		if mydo not in invalid_dep_exempt_phases:
+			return 1
+
+	if not pkg.built and \
+		mydo not in ("digest", "help", "manifest") and \
+		pkg.metadata["REQUIRED_USE"] and \
+		eapi_has_required_use(pkg.metadata["EAPI"]):
+		result = check_required_use(pkg.metadata["REQUIRED_USE"],
+			pkg.use.enabled, pkg.iuse.is_valid_flag)
+		if not result:
+			reduced_noise = result.tounicode()
+			writemsg("\n  %s\n" % _("The following REQUIRED_USE flag" + \
+				" constraints are unsatisfied:"), noiselevel=-1)
+			writemsg("    %s\n" % reduced_noise,
+				noiselevel=-1)
+			normalized_required_use = \
+				" ".join(pkg.metadata["REQUIRED_USE"].split())
+			if reduced_noise != normalized_required_use:
+				writemsg("\n  %s\n" % _("The above constraints " + \
+					"are a subset of the following complete expression:"),
+					noiselevel=-1)
+				writemsg("    %s\n" % \
+					human_readable_required_use(normalized_required_use),
+					noiselevel=-1)
+			writemsg("\n", noiselevel=-1)
+			return 1
+
+	return os.EX_OK
+
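+# A sketch of the REQUIRED_USE check performed above (illustrative only;
+# the flags shown are hypothetical):
+#
+#   from portage.dep import check_required_use
+#   result = check_required_use("^^ ( gtk qt4 )", frozenset(["gtk"]),
+#       lambda flag: flag in ("gtk", "qt4"))
+#   bool(result)        # True: exactly one of gtk/qt4 is enabled
+#   result.tounicode()  # renders the unsatisfied subset when it fails
+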
+# XXX This would be to replace getstatusoutput completely.
+# XXX Issue: cannot block execution. Deadlock condition.
+def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakeroot=0, **keywords):
+	"""
+	Spawn a subprocess with extra portage-specific options.
+	Options include:
+
+	Sandbox: Sandbox means the spawned process will be limited in its ability to
+	read and write files (normally this means it is restricted to ${D}/)
+	SELinux Sandbox: Enables sandboxing via SELinux
+	Reduced Privileges: Drops privileges such that the process runs as portage:portage
+	instead of as root.
+
+	Notes: os.system cannot be used because it messes with signal handling.  Instead we
+	use the portage.process spawn* family of functions.
+
+	This function waits for the process to terminate.
+
+	@param mystring: Command to run
+	@type mystring: String
+	@param mysettings: Either a Dict of Key,Value pairs or an instance of portage.config
+	@type mysettings: Dictionary or config instance
+	@param debug: Ignored
+	@type debug: Boolean
+	@param free: Run this process without sandbox confinement (disables sandboxing)
+	@type free: Boolean
+	@param droppriv: Drop to portage:portage when running this command
+	@type droppriv: Boolean
+	@param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
+	@type sesandbox: Boolean
+	@param fakeroot: Run this command with faked root privileges
+	@type fakeroot: Boolean
+	@param keywords: Extra options encoded as a dict, to be passed to spawn
+	@type keywords: Dictionary
+	@rtype: Integer
+	@returns:
+	1. The return code of the spawned process.
+	"""
+
+	check_config_instance(mysettings)
+
+	fd_pipes = keywords.get("fd_pipes")
+	if fd_pipes is None:
+		fd_pipes = {
+			0:sys.stdin.fileno(),
+			1:sys.stdout.fileno(),
+			2:sys.stderr.fileno(),
+		}
+	# Output written by this process doesn't always get flushed, so
+	# stdout/stderr need to be flushed before allowing a child process
+	# to use them, so that output always shows in the correct order.
+	stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
+	for fd in fd_pipes.values():
+		if fd in stdout_filenos:
+			sys.stdout.flush()
+			sys.stderr.flush()
+			break
+
+	features = mysettings.features
+	# TODO: Enable fakeroot to be used together with droppriv.  The
+	# fake ownership/permissions will have to be converted to real
+	# permissions in the merge phase.
+	fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
+	if droppriv and not uid and portage_gid and portage_uid:
+		keywords.update({"uid":portage_uid,"gid":portage_gid,
+			"groups":userpriv_groups,"umask":0o02})
+	if not free:
+		free=((droppriv and "usersandbox" not in features) or \
+			(not droppriv and "sandbox" not in features and \
+			"usersandbox" not in features and not fakeroot))
+
+	if not free and not (fakeroot or portage.process.sandbox_capable):
+		free = True
+
+	if mysettings.mycpv is not None:
+		keywords["opt_name"] = "[%s]" % mysettings.mycpv
+	else:
+		keywords["opt_name"] = "[%s/%s]" % \
+			(mysettings.get("CATEGORY",""), mysettings.get("PF",""))
+
+	if "depcheck" in features or "depcheckstrict" in features:
+		keywords["opt_name"] += " bash"
+		spawn_func = portage.process.spawn_autodep
+	elif free or "SANDBOX_ACTIVE" in os.environ:
+		keywords["opt_name"] += " bash"
+		spawn_func = portage.process.spawn_bash
+	elif fakeroot:
+		keywords["opt_name"] += " fakeroot"
+		keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
+		spawn_func = portage.process.spawn_fakeroot
+	else:
+		keywords["opt_name"] += " sandbox"
+		spawn_func = portage.process.spawn_sandbox
+
+	if sesandbox:
+		spawn_func = selinux.spawn_wrapper(spawn_func,
+			mysettings["PORTAGE_SANDBOX_T"])
+
+	if keywords.get("returnpid"):
+		return spawn_func(mystring, env=mysettings.environ(), **keywords)
+
+	proc = EbuildSpawnProcess(
+		background=False, args=mystring,
+		scheduler=PollScheduler().sched_iface, spawn_func=spawn_func,
+		settings=mysettings, **keywords)
+
+	proc.start()
+	proc.wait()
+
+	return proc.returncode
+
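+# A call sketch for spawn() (illustrative only):
+#
+#   rc = spawn("emake -j1", settings, droppriv=1)
+#   # The command runs through the portage.process spawn* family with
+#   # sandbox/droppriv policy derived from FEATURES; rc is the child's
+#   # return code unless returnpid is passed in the keywords.
+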
+# parse actionmap to spawn ebuild with the appropriate args
+def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
+	logfile=None, fd_pipes=None, returnpid=False):
+
+	if returnpid:
+		warnings.warn("portage.spawnebuild() called " + \
+			"with returnpid parameter enabled. This usage will " + \
+			"not be supported in the future.",
+			DeprecationWarning, stacklevel=2)
+
+	if not returnpid and \
+		(alwaysdep or "noauto" not in mysettings.features):
+		# process dependency first
+		if "dep" in actionmap[mydo]:
+			retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
+				mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
+				fd_pipes=fd_pipes, returnpid=returnpid)
+			if retval:
+				return retval
+
+	eapi = mysettings["EAPI"]
+
+	if mydo in ("configure", "prepare") and not eapi_has_src_prepare_and_src_configure(eapi):
+		return os.EX_OK
+
+	if mydo == "pretend" and not eapi_has_pkg_pretend(eapi):
+		return os.EX_OK
+
+	return _spawn_phase(mydo, mysettings,
+		actionmap=actionmap, logfile=logfile,
+		fd_pipes=fd_pipes, returnpid=returnpid)
+
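+# spawnebuild() walks the "dep" chain that doebuild() injects into the
+# actionmap before running the requested phase (illustrative trace,
+# assuming "noauto" is not in FEATURES):
+#
+#   spawnebuild("compile", actionmap, settings, debug)
+#     -> "configure" -> "prepare" -> "unpack" -> "setup" -> "pretend"
+#   # each recursive call returns a nonzero code on failure, so later
+#   # phases never run after a failed dependency.
+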
+_post_phase_cmds = {
+
+	"install" : [
+		"install_qa_check",
+		"install_symlink_html_docs"],
+
+	"preinst" : [
+		"preinst_sfperms",
+		"preinst_selinux_labels",
+		"preinst_suid_scan",
+		"preinst_mask"]
+}
+
+def _post_phase_userpriv_perms(mysettings):
+	if "userpriv" in mysettings.features and secpass >= 2:
+		""" Privileged phases may have left files that need to be made
+		writable to a less privileged user."""
+		apply_recursive_permissions(mysettings["T"],
+			uid=portage_uid, gid=portage_gid, dirmode=0o70, dirmask=0,
+			filemode=0o60, filemask=0)
+
+def _check_build_log(mysettings, out=None):
+	"""
+	Search the content of $PORTAGE_LOG_FILE if it exists
+	and generate the following QA Notices when appropriate:
+
+	  * Automake "maintainer mode"
+	  * command not found
+	  * Unrecognized configure options
+	"""
+	logfile = mysettings.get("PORTAGE_LOG_FILE")
+	if logfile is None:
+		return
+	try:
+		f = open(_unicode_encode(logfile, encoding=_encodings['fs'],
+			errors='strict'), mode='rb')
+	except EnvironmentError:
+		return
+
+	if logfile.endswith('.gz'):
+		f = gzip.GzipFile(filename='', mode='rb', fileobj=f)
+
+	am_maintainer_mode = []
+	bash_command_not_found = []
+	bash_command_not_found_re = re.compile(
+		r'(.*): line (\d*): (.*): command not found$')
+	command_not_found_exclude_re = re.compile(r'/configure: line ')
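+	# e.g. "build.sh: line 42: gmake: command not found" matches, while
+	# anything from a path ending in /configure is excluded.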
+	helper_missing_file = []
+	helper_missing_file_re = re.compile(
+		r'^!!! (do|new).*: .* does not exist$')
+
+	configure_opts_warn = []
+	configure_opts_warn_re = re.compile(
+		r'^configure: WARNING: [Uu]nrecognized options: ')
+
+	# Exclude output from dev-libs/yaz-3.0.47 which looks like this:
+	#
+	#Configuration:
+	#  Automake:                   ${SHELL} /var/tmp/portage/dev-libs/yaz-3.0.47/work/yaz-3.0.47/config/missing --run automake-1.10
+	am_maintainer_mode_re = re.compile(r'/missing --run ')
+	am_maintainer_mode_exclude_re = \
+		re.compile(r'(/missing --run (autoheader|autotest|help2man|makeinfo)|^\s*Automake:\s)')
+
+	make_jobserver_re = \
+		re.compile(r'g?make\[\d+\]: warning: jobserver unavailable:')
+	make_jobserver = []
+
+	def _eerror(lines):
+		for line in lines:
+			eerror(line, phase="install", key=mysettings.mycpv, out=out)
+
+	try:
+		for line in f:
+			line = _unicode_decode(line)
+			if am_maintainer_mode_re.search(line) is not None and \
+				am_maintainer_mode_exclude_re.search(line) is None:
+				am_maintainer_mode.append(line.rstrip("\n"))
+
+			if bash_command_not_found_re.match(line) is not None and \
+				command_not_found_exclude_re.search(line) is None:
+				bash_command_not_found.append(line.rstrip("\n"))
+
+			if helper_missing_file_re.match(line) is not None:
+				helper_missing_file.append(line.rstrip("\n"))
+
+			if configure_opts_warn_re.match(line) is not None:
+				configure_opts_warn.append(line.rstrip("\n"))
+
+			if make_jobserver_re.match(line) is not None:
+				make_jobserver.append(line.rstrip("\n"))
+
+	except zlib.error as e:
+		_eerror(["portage encountered a zlib error: '%s'" % (e,),
+			"while reading the log file: '%s'" % logfile])
+	finally:
+		f.close()
+
+	def _eqawarn(lines):
+		for line in lines:
+			eqawarn(line, phase="install", key=mysettings.mycpv, out=out)
+	wrap_width = 70
+
+	if am_maintainer_mode:
+		msg = [_("QA Notice: Automake \"maintainer mode\" detected:")]
+		msg.append("")
+		msg.extend("\t" + line for line in am_maintainer_mode)
+		msg.append("")
+		msg.extend(wrap(_(
+			"If you patch Makefile.am, "
+			"configure.in,  or configure.ac then you "
+			"should use autotools.eclass and "
+			"eautomake or eautoreconf. Exceptions "
+			"are limited to system packages "
+			"for which it is impossible to run "
+			"autotools during stage building. "
+			"See http://www.gentoo.org/p"
+			"roj/en/qa/autofailure.xml for more information."),
+			wrap_width))
+		_eqawarn(msg)
+
+	if bash_command_not_found:
+		msg = [_("QA Notice: command not found:")]
+		msg.append("")
+		msg.extend("\t" + line for line in bash_command_not_found)
+		_eqawarn(msg)
+
+	if helper_missing_file:
+		msg = [_("QA Notice: file does not exist:")]
+		msg.append("")
+		msg.extend("\t" + line[4:] for line in helper_missing_file)
+		_eqawarn(msg)
+
+	if configure_opts_warn:
+		msg = [_("QA Notice: Unrecognized configure options:")]
+		msg.append("")
+		msg.extend("\t" + line for line in configure_opts_warn)
+		_eqawarn(msg)
+
+	if make_jobserver:
+		msg = [_("QA Notice: make jobserver unavailable:")]
+		msg.append("")
+		msg.extend("\t" + line for line in make_jobserver)
+		_eqawarn(msg)
+
+def _post_src_install_chost_fix(settings):
+	"""
+	It's possible that the ebuild has changed the
+	CHOST variable, so revert it to the initial
+	setting.
+	"""
+	if settings.get('CATEGORY') == 'virtual':
+		return
+
+	chost = settings.get('CHOST')
+	if chost:
+		write_atomic(os.path.join(settings['PORTAGE_BUILDDIR'],
+			'build-info', 'CHOST'), chost + '\n')
+
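+# Metadata keys whose USE-conditional parts are reduced against the final
+# USE flags before being written out to build-info below.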
+_vdb_use_conditional_keys = ('DEPEND', 'LICENSE', 'PDEPEND',
+	'PROPERTIES', 'PROVIDE', 'RDEPEND', 'RESTRICT',)
+_vdb_use_conditional_atoms = frozenset(['DEPEND', 'PDEPEND', 'RDEPEND'])
+
+def _preinst_bsdflags(mysettings):
+	if bsd_chflags:
+		# Save all the file flags for restoration later.
+		os.system("mtree -c -p %s -k flags > %s" % \
+			(_shell_quote(mysettings["D"]),
+			_shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
+
+		# Remove all the file flags to avoid EPERM errors.
+		os.system("chflags -R noschg,nouchg,nosappnd,nouappnd %s" % \
+			(_shell_quote(mysettings["D"]),))
+		os.system("chflags -R nosunlnk,nouunlnk %s 2>/dev/null" % \
+			(_shell_quote(mysettings["D"]),))
+
+
+def _postinst_bsdflags(mysettings):
+	if bsd_chflags:
+		# Restore all of the flags saved above.
+		os.system("mtree -e -p %s -U -k flags < %s > /dev/null" % \
+			(_shell_quote(mysettings["ROOT"]),
+			_shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
+
+def _post_src_install_uid_fix(mysettings, out):
+	"""
+	Files in $D with user and group bits that match the "portage"
+	user or group are automatically mapped to PORTAGE_INST_UID and
+	PORTAGE_INST_GID if necessary. The chown system call may clear
+	S_ISUID and S_ISGID bits, so those bits are restored if
+	necessary.
+	"""
+
+	os = _os_merge
+
+	inst_uid = int(mysettings["PORTAGE_INST_UID"])
+	inst_gid = int(mysettings["PORTAGE_INST_GID"])
+
+	_preinst_bsdflags(mysettings)
+
+	destdir = mysettings["D"]
+	unicode_errors = []
+
+	while True:
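+		# Restart the whole walk after any rename that repairs an
+		# undecodable path, since os.walk would otherwise continue
+		# with stale directory state.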
+
+		unicode_error = False
+		size = 0
+		counted_inodes = set()
+		fixlafiles_announced = False
+		fixlafiles = "fixlafiles" in mysettings.features
+
+		for parent, dirs, files in os.walk(destdir):
+			try:
+				parent = _unicode_decode(parent,
+					encoding=_encodings['merge'], errors='strict')
+			except UnicodeDecodeError:
+				new_parent = _unicode_decode(parent,
+					encoding=_encodings['merge'], errors='replace')
+				new_parent = _unicode_encode(new_parent,
+					encoding=_encodings['merge'], errors='backslashreplace')
+				new_parent = _unicode_decode(new_parent,
+					encoding=_encodings['merge'], errors='replace')
+				os.rename(parent, new_parent)
+				unicode_error = True
+				unicode_errors.append(new_parent[len(destdir):])
+				break
+
+			for fname in chain(dirs, files):
+				try:
+					fname = _unicode_decode(fname,
+						encoding=_encodings['merge'], errors='strict')
+				except UnicodeDecodeError:
+					fpath = _os.path.join(
+						parent.encode(_encodings['merge']), fname)
+					new_fname = _unicode_decode(fname,
+						encoding=_encodings['merge'], errors='replace')
+					new_fname = _unicode_encode(new_fname,
+						encoding=_encodings['merge'], errors='backslashreplace')
+					new_fname = _unicode_decode(new_fname,
+						encoding=_encodings['merge'], errors='replace')
+					new_fpath = os.path.join(parent, new_fname)
+					os.rename(fpath, new_fpath)
+					unicode_error = True
+					unicode_errors.append(new_fpath[len(destdir):])
+					fname = new_fname
+					fpath = new_fpath
+				else:
+					fpath = os.path.join(parent, fname)
+
+				if fixlafiles and \
+					fname.endswith(".la") and os.path.isfile(fpath):
+					f = open(_unicode_encode(fpath,
+						encoding=_encodings['merge'], errors='strict'),
+						mode='rb')
+					has_lafile_header = b'.la - a libtool library file' \
+						in f.readline()
+					f.seek(0)
+					contents = f.read()
+					f.close()
+					try:
+						needs_update, new_contents = rewrite_lafile(contents)
+					except portage.exception.InvalidData as e:
+						needs_update = False
+						if not fixlafiles_announced:
+							fixlafiles_announced = True
+							writemsg("Fixing .la files\n", fd=out)
+
+						# Suppress warnings if the file does not have the
+						# expected header (bug #340725). Even if the header is
+						# missing, we still call rewrite_lafile() since some
+						# valid libtool archives may not have the header.
+						msg = "   %s is not a valid libtool archive, skipping\n" % fpath[len(destdir):]
+						qa_msg = "QA Notice: invalid .la file found: %s, %s" % (fpath[len(destdir):], e)
+						if has_lafile_header:
+							writemsg(msg, fd=out)
+							eqawarn(qa_msg, key=mysettings.mycpv, out=out)
+
+					if needs_update:
+						if not fixlafiles_announced:
+							fixlafiles_announced = True
+							writemsg("Fixing .la files\n", fd=out)
+						writemsg("   %s\n" % fpath[len(destdir):], fd=out)
+						# write_atomic succeeds even in some cases in which
+						# a normal write might fail due to file permission
+						# settings on some operating systems such as HP-UX
+						write_atomic(_unicode_encode(fpath,
+							encoding=_encodings['merge'], errors='strict'),
+							new_contents, mode='wb')
+
+				mystat = os.lstat(fpath)
+				if stat.S_ISREG(mystat.st_mode) and \
+					mystat.st_ino not in counted_inodes:
+					counted_inodes.add(mystat.st_ino)
+					size += mystat.st_size
+				if mystat.st_uid != portage_uid and \
+					mystat.st_gid != portage_gid:
+					continue
+				myuid = -1
+				mygid = -1
+				if mystat.st_uid == portage_uid:
+					myuid = inst_uid
+				if mystat.st_gid == portage_gid:
+					mygid = inst_gid
+				apply_secpass_permissions(
+					_unicode_encode(fpath, encoding=_encodings['merge']),
+					uid=myuid, gid=mygid,
+					mode=mystat.st_mode, stat_cached=mystat,
+					follow_links=False)
+
+			if unicode_error:
+				break
+
+		if not unicode_error:
+			break
+
+	if unicode_errors:
+		for l in _merge_unicode_error(unicode_errors):
+			eerror(l, phase='install', key=mysettings.mycpv, out=out)
+
+	build_info_dir = os.path.join(mysettings['PORTAGE_BUILDDIR'],
+		'build-info')
+
+	io.open(_unicode_encode(os.path.join(build_info_dir,
+		'SIZE'), encoding=_encodings['fs'], errors='strict'),
+		mode='w', encoding=_encodings['repo.content'],
+		errors='strict').write(_unicode_decode(str(size) + '\n'))
+
+	io.open(_unicode_encode(os.path.join(build_info_dir,
+		'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'),
+		mode='w', encoding=_encodings['repo.content'],
+		errors='strict').write(_unicode_decode("%.0f\n" % (time.time(),)))
+
+	use = frozenset(mysettings['PORTAGE_USE'].split())
+	for k in _vdb_use_conditional_keys:
+		v = mysettings.configdict['pkg'].get(k)
+		filename = os.path.join(build_info_dir, k)
+		if v is None:
+			try:
+				os.unlink(filename)
+			except OSError:
+				pass
+			continue
+
+		if k.endswith('DEPEND'):
+			token_class = Atom
+		else:
+			token_class = None
+
+		v = use_reduce(v, uselist=use, token_class=token_class)
+		v = paren_enclose(v)
+		if not v:
+			try:
+				os.unlink(filename)
+			except OSError:
+				pass
+			continue
+		io.open(_unicode_encode(os.path.join(build_info_dir,
+			k), encoding=_encodings['fs'], errors='strict'),
+			mode='w', encoding=_encodings['repo.content'],
+			errors='strict').write(_unicode_decode(v + '\n'))
+
+	_reapply_bsdflags_to_image(mysettings)
+
+def _reapply_bsdflags_to_image(mysettings):
+	"""
+	Reapply flags saved and removed by _preinst_bsdflags.
+	"""
+	if bsd_chflags:
+		os.system("mtree -e -p %s -U -k flags < %s > /dev/null" % \
+			(_shell_quote(mysettings["D"]),
+			_shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
+
+def _post_src_install_soname_symlinks(mysettings, out):
+	"""
+	Check that libraries in $D have corresponding soname symlinks.
+	If symlinks are missing then create them and trigger a QA Notice.
+	This requires $PORTAGE_BUILDDIR/build-info/NEEDED.ELF.2 for
+	operation.
+	"""
+
+	image_dir = mysettings["D"]
+	needed_filename = os.path.join(mysettings["PORTAGE_BUILDDIR"],
+		"build-info", "NEEDED.ELF.2")
+
+	try:
+		lines = io.open(_unicode_encode(needed_filename,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['repo.content'],
+			errors='replace').readlines()
+	except IOError as e:
+		if e.errno not in (errno.ENOENT, errno.ESTALE):
+			raise
+		return
+
+	libpaths = set(portage.util.getlibpaths(
+		mysettings["ROOT"], env=mysettings))
+	libpath_inodes = set()
+	for libpath in libpaths:
+		libdir = os.path.join(mysettings["ROOT"], libpath.lstrip(os.sep))
+		try:
+			s = os.stat(libdir)
+		except OSError:
+			continue
+		else:
+			libpath_inodes.add((s.st_dev, s.st_ino))
+
+	is_libdir_cache = {}
+
+	def is_libdir(obj_parent):
+		try:
+			return is_libdir_cache[obj_parent]
+		except KeyError:
+			pass
+
+		rval = False
+		if obj_parent in libpaths:
+			rval = True
+		else:
+			parent_path = os.path.join(mysettings["ROOT"],
+				obj_parent.lstrip(os.sep))
+			try:
+				s = os.stat(parent_path)
+			except OSError:
+				pass
+			else:
+				if (s.st_dev, s.st_ino) in libpath_inodes:
+					rval = True
+
+		is_libdir_cache[obj_parent] = rval
+		return rval
+
+	missing_symlinks = []
+
+	# Parse NEEDED.ELF.2 like LinkageMapELF.rebuild() does.
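+	# Each line carries at least five ";"-separated fields; only fields
+	# 2 and 3 (the installed object path and its SONAME) are used here.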
+	for l in lines:
+		l = l.rstrip("\n")
+		if not l:
+			continue
+		fields = l.split(";")
+		if len(fields) < 5:
+			portage.util.writemsg_level(_("\nWrong number of fields " \
+				"in %s: %s\n\n") % (needed_filename, l),
+				level=logging.ERROR, noiselevel=-1)
+			continue
+
+		obj, soname = fields[1:3]
+		if not soname:
+			continue
+		if not is_libdir(os.path.dirname(obj)):
+			continue
+
+		obj_file_path = os.path.join(image_dir, obj.lstrip(os.sep))
+		sym_file_path = os.path.join(os.path.dirname(obj_file_path), soname)
+		try:
+			os.lstat(sym_file_path)
+		except OSError as e:
+			if e.errno not in (errno.ENOENT, errno.ESTALE):
+				raise
+		else:
+			continue
+
+		missing_symlinks.append((obj, soname))
+
+	if not missing_symlinks:
+		return
+
+	qa_msg = ["QA Notice: Missing soname symlink(s) " + \
+		"will be automatically created:"]
+	qa_msg.append("")
+	qa_msg.extend("\t%s -> %s" % (os.path.join(
+		os.path.dirname(obj).lstrip(os.sep), soname),
+		os.path.basename(obj))
+		for obj, soname in missing_symlinks)
+	qa_msg.append("")
+	for line in qa_msg:
+		eqawarn(line, key=mysettings.mycpv, out=out)
+
+	_preinst_bsdflags(mysettings)
+	for obj, soname in missing_symlinks:
+		obj_file_path = os.path.join(image_dir, obj.lstrip(os.sep))
+		sym_file_path = os.path.join(os.path.dirname(obj_file_path), soname)
+		os.symlink(os.path.basename(obj_file_path), sym_file_path)
+	_reapply_bsdflags_to_image(mysettings)
+
+def _merge_unicode_error(errors):
+	lines = []
+
+	msg = _("This package installs one or more file names containing "
+		"characters that do not match your current locale "
+		"settings. The current setting for filesystem encoding is '%s'.") \
+		% _encodings['merge']
+	lines.extend(wrap(msg, 72))
+
+	lines.append("")
+	errors.sort()
+	lines.extend("\t" + x for x in errors)
+	lines.append("")
+
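+	# Normalize spellings such as "UTF-8" and "utf_8" before comparing.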
+	if _encodings['merge'].lower().replace('_', '').replace('-', '') != 'utf8':
+		msg = _("For best results, UTF-8 encoding is recommended. See "
+			"the Gentoo Linux Localization Guide for instructions "
+			"about how to configure your locale for UTF-8 encoding:")
+		lines.extend(wrap(msg, 72))
+		lines.append("")
+		lines.append("\t" + \
+			"http://www.gentoo.org/doc/en/guide-localization.xml")
+		lines.append("")
+
+	return lines

diff --git a/portage_with_autodep/pym/portage/package/ebuild/fetch.py b/portage_with_autodep/pym/portage/package/ebuild/fetch.py
new file mode 100644
index 0000000..5cbbf87
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/fetch.py
@@ -0,0 +1,1129 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+__all__ = ['fetch']
+
+import errno
+import io
+import logging
+import random
+import re
+import shutil
+import stat
+import sys
+import tempfile
+
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.package.ebuild.config:check_config_instance,config',
+	'portage.package.ebuild.doebuild:doebuild_environment,' + \
+		'_doebuild_spawn',
+	'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+)
+
+from portage import OrderedDict, os, selinux, _encodings, \
+	_shell_quote, _unicode_encode
+from portage.checksum import hashfunc_map, perform_md5, verify_all
+from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
+	GLOBAL_CONFIG_PATH
+from portage.data import portage_gid, portage_uid, secpass, userpriv_groups
+from portage.exception import FileNotFound, OperationNotPermitted, \
+	PortageException, TryAgain
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.manifest import Manifest
+from portage.output import colorize, EOutput
+from portage.util import apply_recursive_permissions, \
+	apply_secpass_permissions, ensure_dirs, grabdict, shlex_split, \
+	varexpand, writemsg, writemsg_level, writemsg_stdout
+from portage.process import spawn
+
+_userpriv_spawn_kwargs = (
+	("uid",    portage_uid),
+	("gid",    portage_gid),
+	("groups", userpriv_groups),
+	("umask",  0o02),
+)
+
+def _spawn_fetch(settings, args, **kwargs):
+	"""
+	Spawn a process with appropriate settings for fetching, including
+	userfetch and selinux support.
+	"""
+
+	global _userpriv_spawn_kwargs
+
+	# Redirect all output to stdout since some fetchers like
+	# wget pollute stderr (if portage detects a problem then it
+	# can send its own message to stderr).
+	if "fd_pipes" not in kwargs:
+
+		kwargs["fd_pipes"] = {
+			0 : sys.stdin.fileno(),
+			1 : sys.stdout.fileno(),
+			2 : sys.stdout.fileno(),
+		}
+
+	if "userfetch" in settings.features and \
+		os.getuid() == 0 and portage_gid and portage_uid:
+		kwargs.update(_userpriv_spawn_kwargs)
+
+	spawn_func = spawn
+
+	if settings.selinux_enabled():
+		spawn_func = selinux.spawn_wrapper(spawn_func,
+			settings["PORTAGE_FETCH_T"])
+
+		# bash is an allowed entrypoint, while most binaries are not
+		if args[0] != BASH_BINARY:
+			args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
+
+	# Ensure that EBUILD_PHASE is set to fetch, so that config.environ()
+	# does not filter the calling environment (which may contain needed
+	# proxy variables, as in bug #315421).
+	phase_backup = settings.get('EBUILD_PHASE')
+	settings['EBUILD_PHASE'] = 'fetch'
+	try:
+		rval = spawn_func(args, env=settings.environ(), **kwargs)
+	finally:
+		if phase_backup is None:
+			settings.pop('EBUILD_PHASE', None)
+		else:
+			settings['EBUILD_PHASE'] = phase_backup
+
+	return rval
+
+_userpriv_test_write_file_cache = {}
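+# The test script appends nothing to the given path (creating the file if
+# necessary), captures the redirection's exit status, removes the file,
+# and exits with that status so the spawn's return code reflects
+# writability.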
+_userpriv_test_write_cmd_script = ">> %(file_path)s 2>/dev/null ; rval=$? ; " + \
+	"rm -f  %(file_path)s ; exit $rval"
+
+def _userpriv_test_write_file(settings, file_path):
+	"""
+	Drop privileges and try to open a file for writing. The file may or
+	may not exist, and the parent directory is assumed to exist. The file
+	is removed before returning.
+
+	@param settings: A config instance which is passed to _spawn_fetch()
+	@param file_path: A file path to open and write.
+	@return: True if write succeeds, False otherwise.
+	"""
+
+	global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
+	rval = _userpriv_test_write_file_cache.get(file_path)
+	if rval is not None:
+		return rval
+
+	args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
+		{"file_path" : _shell_quote(file_path)}]
+
+	returncode = _spawn_fetch(settings, args)
+
+	rval = returncode == os.EX_OK
+	_userpriv_test_write_file_cache[file_path] = rval
+	return rval
+
+def _checksum_failure_temp_file(distdir, basename):
+	"""
+	First try to find a duplicate temp file with the same checksum and return
+	that filename if available. Otherwise, use mkstemp to create a new unique
+	filename of the form basename._checksum_failure_.<random>, rename the given
+	file to it, and return the new filename. In any case, the original filename
+	will be renamed or removed before this function returns a temp filename.
+	"""
+
+	filename = os.path.join(distdir, basename)
+	size = os.stat(filename).st_size
+	checksum = None
+	tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
+	for temp_filename in os.listdir(distdir):
+		if not tempfile_re.match(temp_filename):
+			continue
+		temp_filename = os.path.join(distdir, temp_filename)
+		try:
+			if size != os.stat(temp_filename).st_size:
+				continue
+		except OSError:
+			continue
+		try:
+			temp_checksum = perform_md5(temp_filename)
+		except FileNotFound:
+			# Apparently the temp file disappeared. Let it go.
+			continue
+		if checksum is None:
+			checksum = perform_md5(filename)
+		if checksum == temp_checksum:
+			os.unlink(filename)
+			return temp_filename
+
+	fd, temp_filename = \
+		tempfile.mkstemp("", basename + "._checksum_failure_.", distdir)
+	os.close(fd)
+	os.rename(filename, temp_filename)
+	return temp_filename
+
+def _check_digests(filename, digests, show_errors=1):
+	"""
+	Check digests and display a message if an error occurs.
+	@return True if all digests match, False otherwise.
+	"""
+	verified_ok, reason = verify_all(filename, digests)
+	if not verified_ok:
+		if show_errors:
+			writemsg(_("!!! Previously fetched"
+				" file: '%s'\n") % filename, noiselevel=-1)
+			writemsg(_("!!! Reason: %s\n") % reason[0],
+				noiselevel=-1)
+			writemsg(_("!!! Got:      %s\n"
+				"!!! Expected: %s\n") % \
+				(reason[1], reason[2]), noiselevel=-1)
+		return False
+	return True
+
+def _check_distfile(filename, digests, eout, show_errors=1):
+	"""
+	@return a tuple of (match, stat_obj) where match is True if filename
+	matches all given digests (if any) and stat_obj is a stat result, or
+	None if the file does not exist.
+	"""
+	if digests is None:
+		digests = {}
+	size = digests.get("size")
+	if size is not None and len(digests) == 1:
+		digests = None
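+	# A Manifest that carries only a "size" entry provides nothing to
+	# checksum, so fall through to the size-only (and zero-byte) checks.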
+
+	try:
+		st = os.stat(filename)
+	except OSError:
+		return (False, None)
+	if size is not None and size != st.st_size:
+		return (False, st)
+	if not digests:
+		if size is not None:
+			eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
+			eout.eend(0)
+		elif st.st_size == 0:
+			# Zero-byte distfiles are always invalid.
+			return (False, st)
+	else:
+		if _check_digests(filename, digests, show_errors=show_errors):
+			eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
+				" ".join(sorted(digests))))
+			eout.eend(0)
+		else:
+			return (False, st)
+	return (True, st)
+
+_fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
+
+_size_suffix_map = {
+	''  : 0,
+	'K' : 10,
+	'M' : 20,
+	'G' : 30,
+	'T' : 40,
+	'P' : 50,
+	'E' : 60,
+	'Z' : 70,
+	'Y' : 80,
+}
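+
+# Worked example: PORTAGE_FETCH_RESUME_MIN_SIZE="350K" matches as
+# ("350", "K"), so the resume threshold computed in fetch() below is
+# 350 * 2**10 = 358400 bytes.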
+
+def fetch(myuris, mysettings, listonly=0, fetchonly=0,
+	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
+	allow_missing_digests=True):
+	"fetch files.  Will use digest file if available."
+
+	if not myuris:
+		return 1
+
+	features = mysettings.features
+	restrict = mysettings.get("PORTAGE_RESTRICT","").split()
+
+	userfetch = secpass >= 2 and "userfetch" in features
+	userpriv = secpass >= 2 and "userpriv" in features
+
+	# 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
+	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
+	if restrict_mirror:
+		if ("mirror" in features) and ("lmirror" not in features):
+			# lmirror should allow you to bypass mirror restrictions.
+			# XXX: This is not a good thing, and is temporary at best.
+			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
+			return 1
+
+	# Generally, downloading the same file repeatedly from
+	# every single available mirror is a waste of bandwidth
+	# and time, so there needs to be a cap.
+	checksum_failure_max_tries = 5
+	v = checksum_failure_max_tries
+	try:
+		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
+			checksum_failure_max_tries))
+	except (ValueError, OverflowError):
+		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
+			" contains non-integer value: '%s'\n") % \
+			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
+		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
+			"default value: %s\n") % checksum_failure_max_tries,
+			noiselevel=-1)
+		v = checksum_failure_max_tries
+	if v < 1:
+		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
+			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
+		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
+			"default value: %s\n") % checksum_failure_max_tries,
+			noiselevel=-1)
+		v = checksum_failure_max_tries
+	checksum_failure_max_tries = v
+	del v
+
+	fetch_resume_size_default = "350K"
+	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
+	if fetch_resume_size is not None:
+		fetch_resume_size = "".join(fetch_resume_size.split())
+		if not fetch_resume_size:
+			# If it's undefined or empty, silently use the default.
+			fetch_resume_size = fetch_resume_size_default
+		match = _fetch_resume_size_re.match(fetch_resume_size)
+		if match is None or \
+			(match.group(2).upper() not in _size_suffix_map):
+			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
+				" contains an unrecognized format: '%s'\n") % \
+				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
+			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
+				"default value: %s\n") % fetch_resume_size_default,
+				noiselevel=-1)
+			fetch_resume_size = None
+	if fetch_resume_size is None:
+		fetch_resume_size = fetch_resume_size_default
+		match = _fetch_resume_size_re.match(fetch_resume_size)
+	fetch_resume_size = int(match.group(1)) * \
+		2 ** _size_suffix_map[match.group(2).upper()]
+
+	# Behave like the package has RESTRICT="primaryuri" after a
+	# couple of checksum failures, to increase the probability
+	# of success before checksum_failure_max_tries is reached.
+	checksum_failure_primaryuri = 2
+	thirdpartymirrors = mysettings.thirdpartymirrors()
+
+	# In the background parallel-fetch process, it's safe to skip checksum
+	# verification of pre-existing files in $DISTDIR that have the correct
+	# file size. The parent process will verify their checksums prior to
+	# the unpack phase.
+
+	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
+	if parallel_fetchonly:
+		fetchonly = 1
+
+	check_config_instance(mysettings)
+
+	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
+		CUSTOM_MIRRORS_FILE), recursive=1)
+
+	mymirrors=[]
+
+	if listonly or ("distlocks" not in features):
+		use_locks = 0
+
+	fetch_to_ro = 0
+	if "skiprocheck" in features:
+		fetch_to_ro = 1
+
+	if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
+		if use_locks:
+			writemsg(colorize("BAD",
+				_("!!! For fetching to a read-only filesystem, "
+				"locking should be turned off.\n")), noiselevel=-1)
+			writemsg(_("!!! This can be done by adding -distlocks to "
+				"FEATURES in /etc/make.conf\n"), noiselevel=-1)
+#			use_locks = 0
+
+	# local mirrors are always added
+	if "local" in custommirrors:
+		mymirrors += custommirrors["local"]
+
+	if restrict_mirror:
+		# We don't add any mirrors.
+		pass
+	else:
+		if try_mirrors:
+			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
+
+	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
+	if skip_manifest:
+		allow_missing_digests = True
+	pkgdir = mysettings.get("O")
+	if digests is None and not (pkgdir is None or skip_manifest):
+		mydigests = Manifest(
+			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
+	elif digests is None or skip_manifest:
+		# no digests because fetch was not called for a specific package
+		mydigests = {}
+	else:
+		mydigests = digests
+
+	ro_distdirs = [x for x in \
+		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
+		if os.path.isdir(x)]
+
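+	# Pull local filesystem mirrors (entries beginning with "/") out of
+	# mymirrors; they are consulted via shutil.copyfile further below
+	# instead of being fetched over the network.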
+	fsmirrors = []
+	for x in range(len(mymirrors)-1,-1,-1):
+		if mymirrors[x] and mymirrors[x][0]=='/':
+			fsmirrors += [mymirrors[x]]
+			del mymirrors[x]
+
+	restrict_fetch = "fetch" in restrict
+	force_mirror = "force-mirror" in features and not restrict_mirror
+	custom_local_mirrors = custommirrors.get("local", [])
+	if restrict_fetch:
+		# With fetch restriction, a normal uri may only be fetched from
+		# custom local mirrors (if available).  A mirror:// uri may also
+		# be fetched from specific mirrors (effectively overriding fetch
+		# restriction, but only for specific mirrors).
+		locations = custom_local_mirrors
+	else:
+		locations = mymirrors
+
+	file_uri_tuples = []
+	# Check for 'items' attribute since OrderedDict is not a dict.
+	if hasattr(myuris, 'items'):
+		for myfile, uri_set in myuris.items():
+			for myuri in uri_set:
+				file_uri_tuples.append((myfile, myuri))
+	else:
+		for myuri in myuris:
+			file_uri_tuples.append((os.path.basename(myuri), myuri))
+
+	filedict = OrderedDict()
+	primaryuri_indexes={}
+	primaryuri_dict = {}
+	thirdpartymirror_uris = {}
+	for myfile, myuri in file_uri_tuples:
+		if myfile not in filedict:
+			filedict[myfile]=[]
+			for y in range(0,len(locations)):
+				filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
+		if myuri[:9]=="mirror://":
+			eidx = myuri.find("/", 9)
+			if eidx != -1:
+				mirrorname = myuri[9:eidx]
+				path = myuri[eidx+1:]
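+				# e.g. mirror://gentoo/foo.tar.gz yields
+				# mirrorname="gentoo" and path="foo.tar.gz".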
+
+				# Try user-defined mirrors first
+				if mirrorname in custommirrors:
+					for cmirr in custommirrors[mirrorname]:
+						filedict[myfile].append(
+							cmirr.rstrip("/") + "/" + path)
+
+				# now try the official mirrors
+				if mirrorname in thirdpartymirrors:
+					random.shuffle(thirdpartymirrors[mirrorname])
+
+					uris = [locmirr.rstrip("/") + "/" + path \
+						for locmirr in thirdpartymirrors[mirrorname]]
+					filedict[myfile].extend(uris)
+					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
+
+				if not filedict[myfile]:
+					writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
+			else:
+				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
+				writemsg("  %s\n" % (myuri), noiselevel=-1)
+		else:
+			if restrict_fetch or force_mirror:
+				# Only fetching from specific mirrors is allowed.
+				continue
+			if "primaryuri" in restrict:
+				# Use the source site first.
+				if myfile in primaryuri_indexes:
+					primaryuri_indexes[myfile] += 1
+				else:
+					primaryuri_indexes[myfile] = 0
+				filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
+			else:
+				filedict[myfile].append(myuri)
+			primaryuris = primaryuri_dict.get(myfile)
+			if primaryuris is None:
+				primaryuris = []
+				primaryuri_dict[myfile] = primaryuris
+			primaryuris.append(myuri)
+
+	# Prefer thirdpartymirrors over normal mirrors in cases when
+	# the file does not yet exist on the normal mirrors.
+	for myfile, uris in thirdpartymirror_uris.items():
+		primaryuri_dict.setdefault(myfile, []).extend(uris)
+
+	can_fetch=True
+
+	if listonly:
+		can_fetch = False
+
+	if can_fetch and not fetch_to_ro:
+		global _userpriv_test_write_file_cache
+		dirmode  = 0o070
+		filemode =   0o60
+		modemask =    0o2
+		dir_gid = portage_gid
+		if "FAKED_MODE" in mysettings:
+			# When inside fakeroot, directories with portage's gid appear
+			# to have root's gid. Therefore, use root's gid instead of
+			# portage's gid to avoid spurious permissions adjustments
+			# when inside fakeroot.
+			dir_gid = 0
+		distdir_dirs = [""]
+		try:
+			for x in distdir_dirs:
+				mydir = os.path.join(mysettings["DISTDIR"], x)
+				write_test_file = os.path.join(
+					mydir, ".__portage_test_write__")
+
+				try:
+					st = os.stat(mydir)
+				except OSError:
+					st = None
+
+				if st is not None and stat.S_ISDIR(st.st_mode):
+					if not (userfetch or userpriv):
+						continue
+					if _userpriv_test_write_file(mysettings, write_test_file):
+						continue
+
+				_userpriv_test_write_file_cache.pop(write_test_file, None)
+				if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
+					if st is None:
+						# The directory has just been created
+						# and therefore it must be empty.
+						continue
+					writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
+						noiselevel=-1)
+					def onerror(e):
+						raise # bail out on the first error that occurs during recursion
+					if not apply_recursive_permissions(mydir,
+						gid=dir_gid, dirmode=dirmode, dirmask=modemask,
+						filemode=filemode, filemask=modemask, onerror=onerror):
+						raise OperationNotPermitted(
+							_("Failed to apply recursive permissions for the portage group."))
+		except PortageException as e:
+			if not os.path.isdir(mysettings["DISTDIR"]):
+				writemsg("!!! %s\n" % str(e), noiselevel=-1)
+				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
+				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)
+
+	if can_fetch and \
+		not fetch_to_ro and \
+		not os.access(mysettings["DISTDIR"], os.W_OK):
+		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
+			noiselevel=-1)
+		can_fetch = False
+
+	distdir_writable = can_fetch and not fetch_to_ro
+	failed_files = set()
+	restrict_fetch_msg = False
+
+	for myfile in filedict:
+		"""
+		fetched  status
+		0        nonexistent
+		1        partially downloaded
+		2        completely downloaded
+		"""
+		fetched = 0
+
+		orig_digests = mydigests.get(myfile, {})
+
+		if not (allow_missing_digests or listonly):
+			verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
+			verifiable_hash_types.discard("size")
+			if not verifiable_hash_types:
+				expected = set(hashfunc_map)
+				expected.discard("size")
+				expected = " ".join(sorted(expected))
+				got = set(orig_digests)
+				got.discard("size")
+				got = " ".join(sorted(got))
+				reason = (_("Insufficient data for checksum verification"),
+					got, expected)
+				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
+					noiselevel=-1)
+				writemsg(_("!!! Reason: %s\n") % reason[0],
+					noiselevel=-1)
+				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
+					(reason[1], reason[2]), noiselevel=-1)
+
+				if fetchonly:
+					failed_files.add(myfile)
+					continue
+				else:
+					return 0
+
+		size = orig_digests.get("size")
+		if size == 0:
+			# Zero-byte distfiles are always invalid, so discard their digests.
+			del mydigests[myfile]
+			orig_digests.clear()
+			size = None
+		pruned_digests = orig_digests
+		if parallel_fetchonly:
+			pruned_digests = {}
+			if size is not None:
+				pruned_digests["size"] = size
+
+		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
+		has_space = True
+		has_space_superuser = True
+		file_lock = None
+		if listonly:
+			writemsg_stdout("\n", noiselevel=-1)
+		else:
+			# check if there is enough space in DISTDIR to completely store myfile
+			# overestimate the filesize so we aren't bitten by FS overhead
+			vfs_stat = None
+			if size is not None and hasattr(os, "statvfs"):
+				try:
+					vfs_stat = os.statvfs(mysettings["DISTDIR"])
+				except OSError as e:
+					writemsg_level("!!! statvfs('%s'): %s\n" %
+						(mysettings["DISTDIR"], e),
+						noiselevel=-1, level=logging.ERROR)
+					del e
+
+			if vfs_stat is not None:
+				try:
+					mysize = os.stat(myfile_path).st_size
+				except OSError as e:
+					if e.errno not in (errno.ENOENT, errno.ESTALE):
+						raise
+					del e
+					mysize = 0
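+				# Compare the bytes still needed (size - mysize, padded by
+				# one block) against f_bsize * f_bavail (space available to
+				# unprivileged users) and f_bsize * f_bfree (space available
+				# to the superuser).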
+				if (size - mysize + vfs_stat.f_bsize) >= \
+					(vfs_stat.f_bsize * vfs_stat.f_bavail):
+
+					if (size - mysize + vfs_stat.f_bsize) >= \
+						(vfs_stat.f_bsize * vfs_stat.f_bfree):
+						has_space_superuser = False
+
+					if not has_space_superuser:
+						has_space = False
+					elif secpass < 2:
+						has_space = False
+					elif userfetch:
+						has_space = False
+
+			if not has_space:
+				writemsg(_("!!! Insufficient space to store %s in %s\n") % \
+					(myfile, mysettings["DISTDIR"]), noiselevel=-1)
+
+				if has_space_superuser:
+					writemsg(_("!!! Insufficient privileges to use "
+						"remaining space.\n"), noiselevel=-1)
+					if userfetch:
+						writemsg(_("!!! You may set FEATURES=\"-userfetch\""
+							" in /etc/make.conf in order to fetch with\n"
+							"!!! superuser privileges.\n"), noiselevel=-1)
+
+			if distdir_writable and use_locks:
+
+				lock_kwargs = {}
+				if fetchonly:
+					lock_kwargs["flags"] = os.O_NONBLOCK
+
+				try:
+					file_lock = lockfile(myfile_path,
+						wantnewlockfile=1, **lock_kwargs)
+				except TryAgain:
+					writemsg(_(">>> File '%s' is already locked by "
+						"another fetcher. Continuing...\n") % myfile,
+						noiselevel=-1)
+					continue
+		try:
+			if not listonly:
+
+				eout = EOutput()
+				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
+				match, mystat = _check_distfile(
+					myfile_path, pruned_digests, eout)
+				if match:
+					if distdir_writable:
+						try:
+							apply_secpass_permissions(myfile_path,
+								gid=portage_gid, mode=0o664, mask=0o2,
+								stat_cached=mystat)
+						except PortageException as e:
+							if not os.access(myfile_path, os.R_OK):
+								writemsg(_("!!! Failed to adjust permissions:"
+									" %s\n") % str(e), noiselevel=-1)
+							del e
+					continue
+
+				if distdir_writable and mystat is None:
+					# Remove broken symlinks if necessary.
+					try:
+						os.unlink(myfile_path)
+					except OSError:
+						pass
+
+				if mystat is not None:
+					if stat.S_ISDIR(mystat.st_mode):
+						writemsg_level(
+							_("!!! Unable to fetch file since "
+							"a directory is in the way: \n"
+							"!!!   %s\n") % myfile_path,
+							level=logging.ERROR, noiselevel=-1)
+						return 0
+
+					if mystat.st_size == 0:
+						if distdir_writable:
+							try:
+								os.unlink(myfile_path)
+							except OSError:
+								pass
+					elif distdir_writable:
+						if mystat.st_size < fetch_resume_size and \
+							mystat.st_size < size:
+							# If the file already exists and the size does not
+							# match the existing digests, it may be that the
+							# user is attempting to update the digest. In this
+							# case, the digestgen() function will advise the
+							# user to use `ebuild --force foo.ebuild manifest`
+							# in order to force the old digests to be replaced.
+							# Since the user may want to keep this file, rename
+							# it instead of deleting it.
+							writemsg(_(">>> Renaming distfile with size "
+								"%d (smaller than " "PORTAGE_FETCH_RESU"
+								"ME_MIN_SIZE)\n") % mystat.st_size)
+							temp_filename = \
+								_checksum_failure_temp_file(
+								mysettings["DISTDIR"], myfile)
+							writemsg_stdout(_("Refetching... "
+								"File renamed to '%s'\n\n") % \
+								temp_filename, noiselevel=-1)
+						elif mystat.st_size >= size:
+							temp_filename = \
+								_checksum_failure_temp_file(
+								mysettings["DISTDIR"], myfile)
+							writemsg_stdout(_("Refetching... "
+								"File renamed to '%s'\n\n") % \
+								temp_filename, noiselevel=-1)
+
+				if distdir_writable and ro_distdirs:
+					readonly_file = None
+					for x in ro_distdirs:
+						filename = os.path.join(x, myfile)
+						match, mystat = _check_distfile(
+							filename, pruned_digests, eout)
+						if match:
+							readonly_file = filename
+							break
+					if readonly_file is not None:
+						try:
+							os.unlink(myfile_path)
+						except OSError as e:
+							if e.errno not in (errno.ENOENT, errno.ESTALE):
+								raise
+							del e
+						os.symlink(readonly_file, myfile_path)
+						continue
+
+				if fsmirrors and not os.path.exists(myfile_path) and has_space:
+					for mydir in fsmirrors:
+						mirror_file = os.path.join(mydir, myfile)
+						try:
+							shutil.copyfile(mirror_file, myfile_path)
+							writemsg(_("Local mirror has file: %s\n") % myfile)
+							break
+						except (IOError, OSError) as e:
+							if e.errno not in (errno.ENOENT, errno.ESTALE):
+								raise
+							del e
+
+				try:
+					mystat = os.stat(myfile_path)
+				except OSError as e:
+					if e.errno not in (errno.ENOENT, errno.ESTALE):
+						raise
+					del e
+				else:
+					try:
+						apply_secpass_permissions(
+							myfile_path, gid=portage_gid, mode=0o664, mask=0o2,
+							stat_cached=mystat)
+					except PortageException as e:
+						if not os.access(myfile_path, os.R_OK):
+							writemsg(_("!!! Failed to adjust permissions:"
+								" %s\n") % str(e), noiselevel=-1)
+
+					# If the file is empty then it's obviously invalid. Remove
+					# the empty file and try to download if possible.
+					if mystat.st_size == 0:
+						if distdir_writable:
+							try:
+								os.unlink(myfile_path)
+							except EnvironmentError:
+								pass
+					elif myfile not in mydigests:
+						# We don't have a digest, but the file exists.  We must
+						# assume that it is fully downloaded.
+						continue
+					else:
+						if mystat.st_size < mydigests[myfile]["size"] and \
+							not restrict_fetch:
+							fetched = 1 # Try to resume this download.
+						elif parallel_fetchonly and \
+							mystat.st_size == mydigests[myfile]["size"]:
+							eout = EOutput()
+							eout.quiet = \
+								mysettings.get("PORTAGE_QUIET") == "1"
+							eout.ebegin(
+								"%s size ;-)" % (myfile, ))
+							eout.eend(0)
+							continue
+						else:
+							verified_ok, reason = verify_all(
+								myfile_path, mydigests[myfile])
+							if not verified_ok:
+								writemsg(_("!!! Previously fetched"
+									" file: '%s'\n") % myfile, noiselevel=-1)
+								writemsg(_("!!! Reason: %s\n") % reason[0],
+									noiselevel=-1)
+								writemsg(_("!!! Got:      %s\n"
+									"!!! Expected: %s\n") % \
+									(reason[1], reason[2]), noiselevel=-1)
+								if reason[0] == _("Insufficient data for checksum verification"):
+									return 0
+								if distdir_writable:
+									temp_filename = \
+										_checksum_failure_temp_file(
+										mysettings["DISTDIR"], myfile)
+									writemsg_stdout(_("Refetching... "
+										"File renamed to '%s'\n\n") % \
+										temp_filename, noiselevel=-1)
+							else:
+								eout = EOutput()
+								eout.quiet = \
+									mysettings.get("PORTAGE_QUIET", None) == "1"
+								digests = mydigests.get(myfile)
+								if digests:
+									digests = list(digests)
+									digests.sort()
+									eout.ebegin(
+										"%s %s ;-)" % (myfile, " ".join(digests)))
+									eout.eend(0)
+								continue # fetch any remaining files
+
+			# Create a reversed list since that is optimal for list.pop().
+			uri_list = filedict[myfile][:]
+			uri_list.reverse()
+			checksum_failure_count = 0
+			tried_locations = set()
+			while uri_list:
+				loc = uri_list.pop()
+				# Eliminate duplicates here in case we've switched to
+				# "primaryuri" mode on the fly due to a checksum failure.
+				if loc in tried_locations:
+					continue
+				tried_locations.add(loc)
+				if listonly:
+					writemsg_stdout(loc+" ", noiselevel=-1)
+					continue
+				# allow different fetchcommands per protocol
+				protocol = loc[0:loc.find("://")]
+
+				global_config_path = GLOBAL_CONFIG_PATH
+				if mysettings['EPREFIX']:
+					global_config_path = os.path.join(mysettings['EPREFIX'],
+							GLOBAL_CONFIG_PATH.lstrip(os.sep))
+
+				missing_file_param = False
+				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
+				fetchcommand = mysettings.get(fetchcommand_var)
+				if fetchcommand is None:
+					fetchcommand_var = "FETCHCOMMAND"
+					fetchcommand = mysettings.get(fetchcommand_var)
+					if fetchcommand is None:
+						writemsg_level(
+							_("!!! %s is unset. It should "
+							"have been defined in\n!!! %s/make.globals.\n") \
+							% (fetchcommand_var, global_config_path),
+							level=logging.ERROR, noiselevel=-1)
+						return 0
+				if "${FILE}" not in fetchcommand:
+					writemsg_level(
+						_("!!! %s does not contain the required ${FILE}"
+						" parameter.\n") % fetchcommand_var,
+						level=logging.ERROR, noiselevel=-1)
+					missing_file_param = True
+
+				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
+				resumecommand = mysettings.get(resumecommand_var)
+				if resumecommand is None:
+					resumecommand_var = "RESUMECOMMAND"
+					resumecommand = mysettings.get(resumecommand_var)
+					if resumecommand is None:
+						writemsg_level(
+							_("!!! %s is unset. It should "
+							"have been defined in\n!!! %s/make.globals.\n") \
+							% (resumecommand_var, global_config_path),
+							level=logging.ERROR, noiselevel=-1)
+						return 0
+				if "${FILE}" not in resumecommand:
+					writemsg_level(
+						_("!!! %s does not contain the required ${FILE}"
+						" parameter.\n") % resumecommand_var,
+						level=logging.ERROR, noiselevel=-1)
+					missing_file_param = True
+
+				if missing_file_param:
+					writemsg_level(
+						_("!!! Refer to the make.conf(5) man page for "
+						"information about how to\n!!! correctly specify "
+						"FETCHCOMMAND and RESUMECOMMAND.\n"),
+						level=logging.ERROR, noiselevel=-1)
+					if myfile != os.path.basename(loc):
+						return 0
+
+				if not can_fetch:
+					if fetched != 2:
+						try:
+							mysize = os.stat(myfile_path).st_size
+						except OSError as e:
+							if e.errno not in (errno.ENOENT, errno.ESTALE):
+								raise
+							del e
+							mysize = 0
+
+						if mysize == 0:
+							writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
+								noiselevel=-1)
+						elif size is None or size > mysize:
+							writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
+								noiselevel=-1)
+						else:
+							writemsg(_("!!! File %s is incorrect size, "
+								"but unable to retry.\n") % myfile, noiselevel=-1)
+						return 0
+					else:
+						continue
+
+				if fetched != 2 and has_space:
+					#we either need to resume or start the download
+					if fetched == 1:
+						try:
+							mystat = os.stat(myfile_path)
+						except OSError as e:
+							if e.errno not in (errno.ENOENT, errno.ESTALE):
+								raise
+							del e
+							fetched = 0
+						else:
+							if mystat.st_size < fetch_resume_size:
+								writemsg(_(">>> Deleting distfile with size "
+									"%d (smaller than " "PORTAGE_FETCH_RESU"
+									"ME_MIN_SIZE)\n") % mystat.st_size)
+								try:
+									os.unlink(myfile_path)
+								except OSError as e:
+									if e.errno not in \
+										(errno.ENOENT, errno.ESTALE):
+										raise
+									del e
+								fetched = 0
+					if fetched == 1:
+						#resume mode:
+						writemsg(_(">>> Resuming download...\n"))
+						locfetch=resumecommand
+						command_var = resumecommand_var
+					else:
+						#normal mode:
+						locfetch=fetchcommand
+						command_var = fetchcommand_var
+					writemsg_stdout(_(">>> Downloading '%s'\n") % \
+						re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
+					variables = {
+						"DISTDIR": mysettings["DISTDIR"],
+						"URI":     loc,
+						"FILE":    myfile
+					}
+
+					myfetch = shlex_split(locfetch)
+					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
+					myret = -1
+					try:
+
+						myret = _spawn_fetch(mysettings, myfetch)
+
+					finally:
+						try:
+							apply_secpass_permissions(myfile_path,
+								gid=portage_gid, mode=0o664, mask=0o2)
+						except FileNotFound:
+							pass
+						except PortageException as e:
+							if not os.access(myfile_path, os.R_OK):
+								writemsg(_("!!! Failed to adjust permissions:"
+									" %s\n") % str(e), noiselevel=-1)
+							del e
+
+					# If the file is empty then it's obviously invalid.  Don't
+					# trust the return value from the fetcher.  Remove the
+					# empty file and try to download again.
+					try:
+						if os.stat(myfile_path).st_size == 0:
+							os.unlink(myfile_path)
+							fetched = 0
+							continue
+					except EnvironmentError:
+						pass
+
+					if mydigests is not None and myfile in mydigests:
+						try:
+							mystat = os.stat(myfile_path)
+						except OSError as e:
+							if e.errno not in (errno.ENOENT, errno.ESTALE):
+								raise
+							del e
+							fetched = 0
+						else:
+
+							if stat.S_ISDIR(mystat.st_mode):
+								# This can happen if FETCHCOMMAND erroneously
+								# contains wget's -P option where it should
+								# instead have -O.
+								writemsg_level(
+									_("!!! The command specified in the "
+									"%s variable appears to have\n!!! "
+									"created a directory instead of a "
+									"normal file.\n") % command_var,
+									level=logging.ERROR, noiselevel=-1)
+								writemsg_level(
+									_("!!! Refer to the make.conf(5) "
+									"man page for information about how "
+									"to\n!!! correctly specify "
+									"FETCHCOMMAND and RESUMECOMMAND.\n"),
+									level=logging.ERROR, noiselevel=-1)
+								return 0
+
+							# No exception? Then the file exists. Let
+							# digestcheck() report appropriately on any size
+							# or checksum errors.
+
+							# If the fetcher reported success and the file is
+							# too small, it's probably because the digest is
+							# bad (upstream changed the distfile).  In this
+							# case we don't want to attempt to resume. Show a
+							# digest verification failure so that the user gets
+							# a clue about what just happened.
+							if myret != os.EX_OK and \
+								mystat.st_size < mydigests[myfile]["size"]:
+								# Fetch failed... Try the next one... Kill 404 files though.
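+								# Heuristic: a small (<100000 byte) non-HTML
+								# distfile whose <title> mentions "404" or
+								# "not found" is taken to be an error page
+								# saved by the fetcher, so it is deleted and
+								# refetched.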
+								if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
+									html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
+									if html404.search(io.open(
+										_unicode_encode(myfile_path,
+										encoding=_encodings['fs'], errors='strict'),
+										mode='r', encoding=_encodings['content'], errors='replace'
+										).read()):
+										try:
+											os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+											writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
+											fetched = 0
+											continue
+										except (IOError, OSError):
+											pass
+								fetched = 1
+								continue
+							if True:
+								# File is the correct size--check the checksums for the fetched
+								# file NOW, for those users who don't have a stable/continuous
+								# net connection. This way we have a chance to try to download
+								# from another mirror...
+								verified_ok,reason = verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
+								if not verified_ok:
+									print(reason)
+									writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
+										noiselevel=-1)
+									writemsg(_("!!! Reason: %s\n") % reason[0],
+										noiselevel=-1)
+									writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
+										(reason[1], reason[2]), noiselevel=-1)
+									if reason[0] == _("Insufficient data for checksum verification"):
+										return 0
+									temp_filename = \
+										_checksum_failure_temp_file(
+										mysettings["DISTDIR"], myfile)
+									writemsg_stdout(_("Refetching... "
+										"File renamed to '%s'\n\n") % \
+										temp_filename, noiselevel=-1)
+									fetched=0
+									checksum_failure_count += 1
+									if checksum_failure_count == \
+										checksum_failure_primaryuri:
+										# Switch to "primaryuri" mode in order
+										# to increase the probability
+										# of success.
+										primaryuris = \
+											primaryuri_dict.get(myfile)
+										if primaryuris:
+											uri_list.extend(
+												reversed(primaryuris))
+									if checksum_failure_count >= \
+										checksum_failure_max_tries:
+										break
+								else:
+									eout = EOutput()
+									eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
+									digests = mydigests.get(myfile)
+									if digests:
+										eout.ebegin("%s %s ;-)" % \
+											(myfile, " ".join(sorted(digests))))
+										eout.eend(0)
+									fetched=2
+									break
+					else:
+						if not myret:
+							fetched=2
+							break
+						elif mydigests!=None:
+							writemsg(_("No digest file available and download failed.\n\n"),
+								noiselevel=-1)
+		finally:
+			if use_locks and file_lock:
+				unlockfile(file_lock)
+				file_lock = None
+
+		if listonly:
+			writemsg_stdout("\n", noiselevel=-1)
+		if fetched != 2:
+			if restrict_fetch and not restrict_fetch_msg:
+				restrict_fetch_msg = True
+				msg = _("\n!!! %s/%s"
+					" has fetch restriction turned on.\n"
+					"!!! This probably means that this "
+					"ebuild's files must be downloaded\n"
+					"!!! manually.  See the comments in"
+					" the ebuild for more information.\n\n") % \
+					(mysettings["CATEGORY"], mysettings["PF"])
+				writemsg_level(msg,
+					level=logging.ERROR, noiselevel=-1)
+			elif restrict_fetch:
+				pass
+			elif listonly:
+				pass
+			elif not filedict[myfile]:
+				writemsg(_("Warning: No mirrors available for file"
+					" '%s'\n") % (myfile), noiselevel=-1)
+			else:
+				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
+					noiselevel=-1)
+
+			if listonly:
+				failed_files.add(myfile)
+				continue
+			elif fetchonly:
+				failed_files.add(myfile)
+				continue
+			return 0
+	if failed_files:
+		return 0
+	return 1

diff --git a/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py b/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py
new file mode 100644
index 0000000..f2af638
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py
@@ -0,0 +1,124 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['getmaskingreason']
+
+import portage
+from portage import os
+from portage.const import USER_CONFIG_PATH
+from portage.dep import Atom, match_from_list, _slot_separator, _repo_separator
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage.repository.config import _gen_valid_repo
+from portage.util import grablines, normalize_path
+from portage.versions import catpkgsplit
+from _emerge.Package import Package
+
+def getmaskingreason(mycpv, metadata=None, settings=None,
+	portdb=None, return_location=False, myrepo=None):
+	"""
+	If specified, the myrepo argument is assumed to be valid. This
+	should be a safe assumption since portdbapi methods always
+	return valid repo names and valid "repository" metadata from
+	aux_get.
+	"""
+	if settings is None:
+		settings = portage.settings
+	if portdb is None:
+		portdb = portage.portdb
+	mysplit = catpkgsplit(mycpv)
+	if not mysplit:
+		raise ValueError(_("invalid CPV: %s") % mycpv)
+
+	if metadata is None:
+		db_keys = list(portdb._aux_cache_keys)
+		try:
+			metadata = dict(zip(db_keys,
+				portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
+		except KeyError:
+			if not portdb.cpv_exists(mycpv):
+				raise
+		else:
+			if myrepo is None:
+				myrepo = _gen_valid_repo(metadata["repository"])
+
+	elif myrepo is None:
+		myrepo = metadata.get("repository")
+		if myrepo is not None:
+			myrepo = _gen_valid_repo(metadata["repository"])
+
+	if metadata is not None and \
+		not portage.eapi_is_supported(metadata["EAPI"]):
+		# Return early since otherwise we might produce invalid
+		# results given that the EAPI is not supported. Also,
+		# metadata is mostly useless in this case since it doesn't
+		# contain essential things like SLOT.
+		if return_location:
+			return (None, None)
+		else:
+			return None
+
+	# Sometimes we can't access SLOT or repository due to corruption.
+	pkg = mycpv
+	if metadata is not None:
+		pkg = "".join((mycpv, _slot_separator, metadata["SLOT"]))
+	# At this point myrepo should be None, a valid name, or
+	# Package.UNKNOWN_REPO which we ignore.
+	if myrepo is not None and myrepo != Package.UNKNOWN_REPO:
+		pkg = "".join((pkg, _repo_separator, myrepo))
+	cpv_slot_list = [pkg]
+
+	mycp=mysplit[0]+"/"+mysplit[1]
+
+	# XXX- This is a temporary duplicate of code from the config constructor.
+	locations = [os.path.join(settings["PORTDIR"], "profiles")]
+	locations.extend(settings.profiles)
+	for ov in settings["PORTDIR_OVERLAY"].split():
+		profdir = os.path.join(normalize_path(ov), "profiles")
+		if os.path.isdir(profdir):
+			locations.append(profdir)
+	locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
+		USER_CONFIG_PATH))
+	locations.reverse()
+	pmasklists = []
+	for profile in locations:
+		pmask_filename = os.path.join(profile, "package.mask")
+		pmasklists.append((pmask_filename, grablines(pmask_filename, recursive=1)))
+
+	pmaskdict = settings._mask_manager._pmaskdict
+	if mycp in pmaskdict:
+		for x in pmaskdict[mycp]:
+			if match_from_list(x, cpv_slot_list):
+				x = x.without_repo
+				for pmask in pmasklists:
+					comment = ""
+					comment_valid = -1
+					pmask_filename = pmask[0]
+					for i in range(len(pmask[1])):
+						l = pmask[1][i].strip()
+						try:
+							l_atom = Atom(l, allow_repo=True,
+								allow_wildcard=True).without_repo
+						except InvalidAtom:
+							l_atom = None
+						if l == "":
+							comment = ""
+							comment_valid = -1
+						elif l[0] == "#":
+							comment += (l+"\n")
+							comment_valid = i + 1
+						elif l_atom == x:
+							if comment_valid != i:
+								comment = ""
+							if return_location:
+								return (comment, pmask_filename)
+							else:
+								return comment
+						elif comment_valid != -1:
+							# Apparently this comment applies to multiple masks, so
+							# it remains valid until a blank line is encountered.
+							comment_valid += 1
+	if return_location:
+		return (None, None)
+	else:
+		return None

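For orientation, here is a minimal usage sketch of the getmaskingreason()
helper added above. It is a sketch only: it assumes portage is importable
with a configured tree, and the CPV below is purely illustrative.

  # Hypothetical example, not part of the patch: look up the
  # package.mask comment (and its file) for a given CPV.
  from portage.package.ebuild.getmaskingreason import getmaskingreason

  comment, mask_file = getmaskingreason("sys-apps/portage-2.2",
      return_location=True)
  if comment is not None:
      print("masked in %s:\n%s" % (mask_file, comment))
  else:
      print("no package.mask entry found")
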
diff --git a/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py b/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py
new file mode 100644
index 0000000..4c65fcc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py
@@ -0,0 +1,174 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['getmaskingstatus']
+
+import sys
+
+import portage
+from portage import eapi_is_supported, _eapi_is_deprecated
+from portage.dep import match_from_list, _slot_separator, _repo_separator
+from portage.localization import _
+from portage.package.ebuild.config import config
+from portage.versions import catpkgsplit, cpv_getkey
+from _emerge.Package import Package
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+class _UnmaskHint(object):
+
+	__slots__ = ('key', 'value')
+
+	def __init__(self, key, value):
+		self.key = key
+		self.value = value
+
+class _MaskReason(object):
+
+	__slots__ = ('category', 'message', 'unmask_hint')
+
+	def __init__(self, category, message, unmask_hint=None):
+		self.category = category
+		self.message = message
+		self.unmask_hint = unmask_hint
+
+def getmaskingstatus(mycpv, settings=None, portdb=None, myrepo=None):
+	if settings is None:
+		settings = config(clone=portage.settings)
+	if portdb is None:
+		portdb = portage.portdb
+
+	return [mreason.message for \
+		mreason in _getmaskingstatus(mycpv, settings, portdb,myrepo)]
+
+def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
+
+	metadata = None
+	installed = False
+	if not isinstance(mycpv, basestring):
+		# emerge passed in a Package instance
+		pkg = mycpv
+		mycpv = pkg.cpv
+		metadata = pkg.metadata
+		installed = pkg.installed
+
+	mysplit = catpkgsplit(mycpv)
+	if not mysplit:
+		raise ValueError(_("invalid CPV: %s") % mycpv)
+	if metadata is None:
+		db_keys = list(portdb._aux_cache_keys)
+		try:
+			metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
+		except KeyError:
+			if not portdb.cpv_exists(mycpv):
+				raise
+			return [_MaskReason("corruption", "corruption")]
+		if "?" in metadata["LICENSE"]:
+			settings.setcpv(mycpv, mydb=metadata)
+			metadata["USE"] = settings["PORTAGE_USE"]
+		else:
+			metadata["USE"] = ""
+
+	rValue = []
+
+	# profile checking
+	if settings._getProfileMaskAtom(mycpv, metadata):
+		rValue.append(_MaskReason("profile", "profile"))
+
+	# package.mask checking
+	if settings._getMaskAtom(mycpv, metadata):
+		rValue.append(_MaskReason("package.mask", "package.mask", _UnmaskHint("p_mask", None)))
+
+	# keywords checking
+	eapi = metadata["EAPI"]
+	mygroups = settings._getKeywords(mycpv, metadata)
+	licenses = metadata["LICENSE"]
+	properties = metadata["PROPERTIES"]
+	if eapi.startswith("-"):
+		eapi = eapi[1:]
+	if not eapi_is_supported(eapi):
+		return [_MaskReason("EAPI", "EAPI %s" % eapi)]
+	elif _eapi_is_deprecated(eapi) and not installed:
+		return [_MaskReason("EAPI", "EAPI %s" % eapi)]
+	egroups = settings.configdict["backupenv"].get(
+		"ACCEPT_KEYWORDS", "").split()
+	global_accept_keywords = settings.get("ACCEPT_KEYWORDS", "")
+	pgroups = global_accept_keywords.split()
+	myarch = settings["ARCH"]
+	if pgroups and myarch not in pgroups:
+		"""For operating systems other than Linux, ARCH is not necessarily a
+		valid keyword."""
+		myarch = pgroups[0].lstrip("~")
+
+	# NOTE: This logic is copied from KeywordsManager.getMissingKeywords().
+	unmaskgroups = settings._keywords_manager.getPKeywords(mycpv,
+		metadata["SLOT"], metadata["repository"], global_accept_keywords)
+	pgroups.extend(unmaskgroups)
+	if unmaskgroups or egroups:
+		pgroups = settings._keywords_manager._getEgroups(egroups, pgroups)
+	else:
+		pgroups = set(pgroups)
+
+	kmask = "missing"
+	kmask_hint = None
+
+	if '**' in pgroups:
+		kmask = None
+	else:
+		for keyword in pgroups:
+			if keyword in mygroups:
+				kmask = None
+				break
+
+	if kmask:
+		for gp in mygroups:
+			if gp=="*":
+				kmask=None
+				break
+			elif gp=="-"+myarch and myarch in pgroups:
+				kmask="-"+myarch
+				break
+			elif gp=="~"+myarch and myarch in pgroups:
+				kmask="~"+myarch
+				kmask_hint = _UnmaskHint("unstable keyword", kmask)
+				break
+
+	if kmask == "missing":
+		kmask_hint = _UnmaskHint("unstable keyword", "**")
+
+	try:
+		missing_licenses = settings._getMissingLicenses(mycpv, metadata)
+		if missing_licenses:
+			allowed_tokens = set(["||", "(", ")"])
+			allowed_tokens.update(missing_licenses)
+			license_split = licenses.split()
+			license_split = [x for x in license_split \
+				if x in allowed_tokens]
+			msg = license_split[:]
+			msg.append("license(s)")
+			rValue.append(_MaskReason("LICENSE", " ".join(msg), _UnmaskHint("license", set(missing_licenses))))
+	except portage.exception.InvalidDependString as e:
+		rValue.append(_MaskReason("invalid", "LICENSE: "+str(e)))
+
+	try:
+		missing_properties = settings._getMissingProperties(mycpv, metadata)
+		if missing_properties:
+			allowed_tokens = set(["||", "(", ")"])
+			allowed_tokens.update(missing_properties)
+			properties_split = properties.split()
+			properties_split = [x for x in properties_split \
+					if x in allowed_tokens]
+			msg = properties_split[:]
+			msg.append("properties")
+			rValue.append(_MaskReason("PROPERTIES", " ".join(msg)))
+	except portage.exception.InvalidDependString as e:
+		rValue.append(_MaskReason("invalid", "PROPERTIES: "+str(e)))
+
+	# Only show KEYWORDS masks for installed packages
+	# if they're not masked for any other reason.
+	if kmask and (not installed or not rValue):
+		rValue.append(_MaskReason("KEYWORDS",
+			kmask + " keyword", unmask_hint=kmask_hint))
+
+	return rValue

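Similarly, a hedged sketch of getmaskingstatus() in use; again this assumes
an importable portage with a configured tree, and the CPV is only an
example.

  # Hypothetical example: print every reason a CPV is masked.
  from portage.package.ebuild.getmaskingstatus import getmaskingstatus

  for msg in getmaskingstatus("sys-apps/portage-2.2"):
      print("masked by: %s" % msg)
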
diff --git a/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py b/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py
new file mode 100644
index 0000000..616dc2e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py
@@ -0,0 +1,370 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['prepare_build_dirs']
+
+import errno
+import gzip
+import shutil
+import stat
+import time
+
+from portage import os, _encodings, _unicode_encode, _unicode_decode
+from portage.data import portage_gid, portage_uid, secpass
+from portage.exception import DirectoryNotFound, FileNotFound, \
+	OperationNotPermitted, PermissionDenied, PortageException
+from portage.localization import _
+from portage.output import colorize
+from portage.util import apply_recursive_permissions, \
+	apply_secpass_permissions, ensure_dirs, normalize_path, writemsg
+
+def prepare_build_dirs(myroot=None, settings=None, cleanup=False):
+	"""
+	The myroot parameter is ignored.
+	"""
+	myroot = None
+
+	if settings is None:
+		raise TypeError("settings argument is required")
+
+	mysettings = settings
+	clean_dirs = [mysettings["HOME"]]
+
+	# We enable cleanup when we want to make sure old cruft (such as the old
+	# environment) doesn't interfere with the current phase.
+	if cleanup and 'keeptemp' not in mysettings.features:
+		clean_dirs.append(mysettings["T"])
+
+	for clean_dir in clean_dirs:
+		try:
+			shutil.rmtree(clean_dir)
+		except OSError as oe:
+			if errno.ENOENT == oe.errno:
+				pass
+			elif errno.EPERM == oe.errno:
+				writemsg("%s\n" % oe, noiselevel=-1)
+				writemsg(_("Operation Not Permitted: rmtree('%s')\n") % \
+					clean_dir, noiselevel=-1)
+				return 1
+			else:
+				raise
+
+	def makedirs(dir_path):
+		try:
+			os.makedirs(dir_path)
+		except OSError as oe:
+			if errno.EEXIST == oe.errno:
+				pass
+			elif errno.EPERM == oe.errno:
+				writemsg("%s\n" % oe, noiselevel=-1)
+				writemsg(_("Operation Not Permitted: makedirs('%s')\n") % \
+					dir_path, noiselevel=-1)
+				return False
+			else:
+				raise
+		return True
+
+	mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
+
+	mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
+	mydirs.append(os.path.dirname(mydirs[-1]))
+
+	try:
+		for mydir in mydirs:
+			ensure_dirs(mydir)
+			try:
+				apply_secpass_permissions(mydir,
+					gid=portage_gid, uid=portage_uid, mode=0o70, mask=0)
+			except PortageException:
+				if not os.path.isdir(mydir):
+					raise
+		for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
+			"""These directories don't necessarily need to be group writable.
+			However, the setup phase is commonly run as a privileged user prior
+			to the other phases being run by an unprivileged user.  Currently,
+			we use the portage group to ensure that the unprivileged user still
+			has write access to these directories in any case."""
+			ensure_dirs(mysettings[dir_key], mode=0o775)
+			apply_secpass_permissions(mysettings[dir_key],
+				uid=portage_uid, gid=portage_gid)
+	except PermissionDenied as e:
+		writemsg(_("Permission Denied: %s\n") % str(e), noiselevel=-1)
+		return 1
+	except OperationNotPermitted as e:
+		writemsg(_("Operation Not Permitted: %s\n") % str(e), noiselevel=-1)
+		return 1
+	except FileNotFound as e:
+		writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
+		return 1
+
+	# Reset state for things like noauto and keepwork in FEATURES.
+	for x in ('.die_hooks',):
+		try:
+			os.unlink(os.path.join(mysettings['PORTAGE_BUILDDIR'], x))
+		except OSError:
+			pass
+
+	_prepare_workdir(mysettings)
+	if mysettings.get("EBUILD_PHASE") not in ("info", "fetch", "pretend"):
+		# Avoid spurious permissions adjustments when fetching with
+		# a temporary PORTAGE_TMPDIR setting (for fetchonly).
+		_prepare_features_dirs(mysettings)
+
+def _adjust_perms_msg(settings, msg):
+
+	def write(msg):
+		writemsg(msg, noiselevel=-1)
+
+	background = settings.get("PORTAGE_BACKGROUND") == "1"
+	log_path = settings.get("PORTAGE_LOG_FILE")
+	log_file = None
+
+	if background and log_path is not None:
+		try:
+			log_file = open(_unicode_encode(log_path,
+				encoding=_encodings['fs'], errors='strict'), mode='ab')
+		except IOError:
+			def write(msg):
+				pass
+		else:
+			if log_path.endswith('.gz'):
+				log_file =  gzip.GzipFile(filename='',
+					mode='ab', fileobj=log_file)
+			def write(msg):
+				log_file.write(_unicode_encode(msg))
+				log_file.flush()
+
+	try:
+		write(msg)
+	finally:
+		if log_file is not None:
+			log_file.close()
+
+def _prepare_features_dirs(mysettings):
+
+	# Use default ABI libdir in accordance with bug #355283.
+	libdir = None
+	default_abi = mysettings.get("DEFAULT_ABI")
+	if default_abi:
+		libdir = mysettings.get("LIBDIR_" + default_abi)
+	if not libdir:
+		libdir = "lib"
+
+	features_dirs = {
+		"ccache":{
+			"path_dir": "/usr/%s/ccache/bin" % (libdir,),
+			"basedir_var":"CCACHE_DIR",
+			"default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
+			"always_recurse":False},
+		"distcc":{
+			"path_dir": "/usr/%s/distcc/bin" % (libdir,),
+			"basedir_var":"DISTCC_DIR",
+			"default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
+			"subdirs":("lock", "state"),
+			"always_recurse":True}
+	}
+	dirmode  = 0o2070
+	filemode =   0o60
+	modemask =    0o2
+	restrict = mysettings.get("PORTAGE_RESTRICT","").split()
+	droppriv = secpass >= 2 and \
+		"userpriv" in mysettings.features and \
+		"userpriv" not in restrict
+	for myfeature, kwargs in features_dirs.items():
+		if myfeature in mysettings.features:
+			failure = False
+			basedir = mysettings.get(kwargs["basedir_var"])
+			if basedir is None or not basedir.strip():
+				basedir = kwargs["default_dir"]
+				mysettings[kwargs["basedir_var"]] = basedir
+			try:
+				path_dir = kwargs["path_dir"]
+				if not os.path.isdir(path_dir):
+					raise DirectoryNotFound(path_dir)
+
+				mydirs = [mysettings[kwargs["basedir_var"]]]
+				if "subdirs" in kwargs:
+					for subdir in kwargs["subdirs"]:
+						mydirs.append(os.path.join(basedir, subdir))
+				for mydir in mydirs:
+					modified = ensure_dirs(mydir)
+					# Generally, we only want to apply permissions for
+					# initial creation.  Otherwise, we don't know exactly what
+					# permissions the user wants, so should leave them as-is.
+					droppriv_fix = False
+					if droppriv:
+						st = os.stat(mydir)
+						if st.st_gid != portage_gid or \
+							not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
+							droppriv_fix = True
+						if not droppriv_fix:
+							# Check permissions of files in the directory.
+							for filename in os.listdir(mydir):
+								try:
+									subdir_st = os.lstat(
+										os.path.join(mydir, filename))
+								except OSError:
+									continue
+								if subdir_st.st_gid != portage_gid or \
+									((stat.S_ISDIR(subdir_st.st_mode) and \
+									not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
+									droppriv_fix = True
+									break
+
+					if droppriv_fix:
+						_adjust_perms_msg(mysettings,
+							colorize("WARN", " * ") + \
+							_("Adjusting permissions "
+							"for FEATURES=userpriv: '%s'\n") % mydir)
+					elif modified:
+						_adjust_perms_msg(mysettings,
+							colorize("WARN", " * ") + \
+							_("Adjusting permissions "
+							"for FEATURES=%s: '%s'\n") % (myfeature, mydir))
+
+					if modified or kwargs["always_recurse"] or droppriv_fix:
+						def onerror(e):
+							raise	# The feature is disabled if a single error
+									# occurs during permissions adjustment.
+						if not apply_recursive_permissions(mydir,
+						gid=portage_gid, dirmode=dirmode, dirmask=modemask,
+						filemode=filemode, filemask=modemask, onerror=onerror):
+							raise OperationNotPermitted(
+								_("Failed to apply recursive permissions for the portage group."))
+
+			except DirectoryNotFound as e:
+				failure = True
+				writemsg(_("\n!!! Directory does not exist: '%s'\n") % \
+					(e,), noiselevel=-1)
+				writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
+					noiselevel=-1)
+
+			except PortageException as e:
+				failure = True
+				writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
+				writemsg(_("!!! Failed resetting perms on %s='%s'\n") % \
+					(kwargs["basedir_var"], basedir), noiselevel=-1)
+				writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
+					noiselevel=-1)
+
+			if failure:
+				mysettings.features.remove(myfeature)
+				time.sleep(5)
+
+def _prepare_workdir(mysettings):
+	workdir_mode = 0o700
+	try:
+		mode = mysettings["PORTAGE_WORKDIR_MODE"]
+		if mode.isdigit():
+			parsed_mode = int(mode, 8)
+		elif mode == "":
+			raise KeyError()
+		else:
+			raise ValueError()
+		if parsed_mode & 0o7777 != parsed_mode:
+			raise ValueError("Invalid file mode: %s" % mode)
+		else:
+			workdir_mode = parsed_mode
+	except KeyError as e:
+		writemsg(_("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") % oct(workdir_mode))
+	except ValueError as e:
+		if len(str(e)) > 0:
+			writemsg("%s\n" % e)
+		writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") % \
+		(mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
+	mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
+	try:
+		apply_secpass_permissions(mysettings["WORKDIR"],
+		uid=portage_uid, gid=portage_gid, mode=workdir_mode)
+	except FileNotFound:
+		pass # ebuild.sh will create it
+
+	if mysettings.get("PORT_LOGDIR", "") == "":
+		while "PORT_LOGDIR" in mysettings:
+			del mysettings["PORT_LOGDIR"]
+	if "PORT_LOGDIR" in mysettings:
+		try:
+			modified = ensure_dirs(mysettings["PORT_LOGDIR"])
+			if modified:
+				# Only initialize group/mode if the directory doesn't
+				# exist, so that we don't override permissions if they
+				# were previously set by the administrator.
+				# NOTE: These permissions should be compatible with our
+				# default logrotate config as discussed in bug 374287.
+				apply_secpass_permissions(mysettings["PORT_LOGDIR"],
+					uid=portage_uid, gid=portage_gid, mode=0o2770)
+		except PortageException as e:
+			writemsg("!!! %s\n" % str(e), noiselevel=-1)
+			writemsg(_("!!! Permission issues with PORT_LOGDIR='%s'\n") % \
+				mysettings["PORT_LOGDIR"], noiselevel=-1)
+			writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
+			while "PORT_LOGDIR" in mysettings:
+				del mysettings["PORT_LOGDIR"]
+
+	compress_log_ext = ''
+	if 'compress-build-logs' in mysettings.features:
+		compress_log_ext = '.gz'
+
+	logdir_subdir_ok = False
+	if "PORT_LOGDIR" in mysettings and \
+		os.access(mysettings["PORT_LOGDIR"], os.W_OK):
+		logdir = normalize_path(mysettings["PORT_LOGDIR"])
+		logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
+		if not os.path.exists(logid_path):
+			open(_unicode_encode(logid_path), 'w')
+		logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S",
+			time.gmtime(os.stat(logid_path).st_mtime)),
+			encoding=_encodings['content'], errors='replace')
+
+		if "split-log" in mysettings.features:
+			log_subdir = os.path.join(logdir, "build", mysettings["CATEGORY"])
+			mysettings["PORTAGE_LOG_FILE"] = os.path.join(
+				log_subdir, "%s:%s.log%s" %
+				(mysettings["PF"], logid_time, compress_log_ext))
+		else:
+			log_subdir = logdir
+			mysettings["PORTAGE_LOG_FILE"] = os.path.join(
+				logdir, "%s:%s:%s.log%s" % \
+				(mysettings["CATEGORY"], mysettings["PF"], logid_time,
+				compress_log_ext))
+
+		if log_subdir is logdir:
+			logdir_subdir_ok = True
+		else:
+			try:
+				_ensure_log_subdirs(logdir, log_subdir)
+			except PortageException as e:
+				writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
+
+			if os.access(log_subdir, os.W_OK):
+				logdir_subdir_ok = True
+			else:
+				writemsg(_unicode_decode("!!! %s: %s\n") %
+					(_("Permission Denied"), log_subdir), noiselevel=-1)
+
+	if not logdir_subdir_ok:
+		# NOTE: When sesandbox is enabled, the local SELinux security policies
+		# may not allow output to be piped out of the sesandbox domain. The
+		# current policy will allow it to work when a pty is available, but
+		# not through a normal pipe. See bug #162404.
+		mysettings["PORTAGE_LOG_FILE"] = os.path.join(
+			mysettings["T"], "build.log%s" % compress_log_ext)
+
+def _ensure_log_subdirs(logdir, subdir):
+	"""
+	This assumes that logdir exists, and creates subdirectories down
+	to subdir as necessary. The gid of logdir is copied to all
+	subdirectories, along with 0o2070 mode bits if present. Both logdir
+	and subdir are assumed to be normalized absolute paths.
+	"""
+	st = os.stat(logdir)
+	gid = st.st_gid
+	grp_mode = 0o2070 & st.st_mode
+
+	logdir_split_len = len(logdir.split(os.sep))
+	subdir_split = subdir.split(os.sep)[logdir_split_len:]
+	subdir_split.reverse()
+	current = logdir
+	while subdir_split:
+		current = os.path.join(current, subdir_split.pop())
+		ensure_dirs(current, gid=gid, mode=grp_mode, mask=0)

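The PORTAGE_WORKDIR_MODE handling in _prepare_workdir() above reduces to a
small parsing rule: a digit string is parsed as octal and must fit within
0o7777; anything else falls back to 0o700. A standalone sketch of that rule
(the sample values are hypothetical):

  # Illustration of the octal-mode fallback logic used above.
  def parse_workdir_mode(mode, default=0o700):
      try:
          if not mode.isdigit():
              raise ValueError(mode)
          parsed = int(mode, 8)
          if parsed & 0o7777 != parsed:
              raise ValueError(mode)
          return parsed
      except ValueError:
          return default

  assert parse_workdir_mode("2770") == 0o2770
  assert parse_workdir_mode("bogus") == 0o700
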
diff --git a/portage_with_autodep/pym/portage/process.py b/portage_with_autodep/pym/portage/process.py
new file mode 100644
index 0000000..6866a2f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/process.py
@@ -0,0 +1,427 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+import atexit
+import signal
+import sys
+import traceback
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.util:dump_traceback',
+)
+
+from portage.const import BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY, AUTODEP_LIBRARY
+from portage.exception import CommandNotFound
+
+try:
+	import resource
+	max_fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+except ImportError:
+	max_fd_limit = 256
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+if os.path.isdir("/proc/%i/fd" % os.getpid()):
+	def get_open_fds():
+		return (int(fd) for fd in os.listdir("/proc/%i/fd" % os.getpid()) \
+			if fd.isdigit())
+else:
+	def get_open_fds():
+		return range(max_fd_limit)
+
+sandbox_capable = (os.path.isfile(SANDBOX_BINARY) and
+                   os.access(SANDBOX_BINARY, os.X_OK))
+
+autodep_capable = (os.path.isfile(AUTODEP_LIBRARY) and
+                   os.access(AUTODEP_LIBRARY, os.X_OK))
+
+fakeroot_capable = (os.path.isfile(FAKEROOT_BINARY) and
+                    os.access(FAKEROOT_BINARY, os.X_OK))
+
+def spawn_bash(mycommand, debug=False, opt_name=None, **keywords):
+	"""
+	Spawns a bash shell running a specific command
+	
+	@param mycommand: The command for bash to run
+	@type mycommand: String
+	@param debug: Turn bash debugging on (set -x)
+	@type debug: Boolean
+	@param opt_name: Name of the spawned process (defaults to binary name)
+	@type opt_name: String
+	@param keywords: Extra Dictionary arguments to pass to spawn
+	@type keywords: Dictionary
+	"""
+
+	args = [BASH_BINARY]
+	if not opt_name:
+		opt_name = os.path.basename(mycommand.split()[0])
+	if debug:
+		# Print commands and their arguments as they are executed.
+		args.append("-x")
+	args.append("-c")
+	args.append(mycommand)
+	return spawn(args, opt_name=opt_name, **keywords)
+
+def spawn_autodep(mycommand, opt_name=None, **keywords):
+	if not autodep_capable:
+		return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+	if "env" not in keywords or "LOG_SOCKET" not in keywords["env"]:
+		return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+
+	# Core part: tell the dynamic loader to preload the logging library.
+	keywords["env"]["LD_PRELOAD"] = AUTODEP_LIBRARY
+	return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+
+def spawn_sandbox(mycommand, opt_name=None, **keywords):
+	if not sandbox_capable:
+		return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+	args=[SANDBOX_BINARY]
+	if not opt_name:
+		opt_name = os.path.basename(mycommand.split()[0])
+	args.append(mycommand)
+	return spawn(args, opt_name=opt_name, **keywords)
+
+def spawn_fakeroot(mycommand, fakeroot_state=None, opt_name=None, **keywords):
+	args=[FAKEROOT_BINARY]
+	if not opt_name:
+		opt_name = os.path.basename(mycommand.split()[0])
+	if fakeroot_state:
+		open(fakeroot_state, "a").close()
+		args.append("-s")
+		args.append(fakeroot_state)
+		args.append("-i")
+		args.append(fakeroot_state)
+	args.append("--")
+	args.append(BASH_BINARY)
+	args.append("-c")
+	args.append(mycommand)
+	return spawn(args, opt_name=opt_name, **keywords)
+
+_exithandlers = []
+def atexit_register(func, *args, **kargs):
+	"""Wrapper around atexit.register that is needed in order to track
+	what is registered.  For example, when portage restarts itself via
+	os.execv, the atexit module does not work so we have to do it
+	manually by calling the run_exitfuncs() function in this module."""
+	_exithandlers.append((func, args, kargs))
+
+def run_exitfuncs():
+	"""This should behave identically to the routine performed by
+	the atexit module at exit time.  It's only necessary to call this
+	function when atexit will not work (because of os.execv, for
+	example)."""
+
+	# This function is a copy of the private atexit._run_exitfuncs()
+	# from the python 2.4.2 sources.  The only difference from the
+	# original function is in the output to stderr.
+	exc_info = None
+	while _exithandlers:
+		func, targs, kargs = _exithandlers.pop()
+		try:
+			func(*targs, **kargs)
+		except SystemExit:
+			exc_info = sys.exc_info()
+		except: # No idea what they called, so we need this broad except here.
+			dump_traceback("Error in portage.process.run_exitfuncs", noiselevel=0)
+			exc_info = sys.exc_info()
+
+	if exc_info is not None:
+		if sys.hexversion >= 0x3000000:
+			raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
+		else:
+			exec("raise exc_info[0], exc_info[1], exc_info[2]")
+
+atexit.register(run_exitfuncs)
+
+# We need to make sure that any processes spawned are killed off when
+# we exit. spawn() takes care of adding and removing pids to this list
+# as it creates and cleans up processes.
+spawned_pids = []
+def cleanup():
+	while spawned_pids:
+		pid = spawned_pids.pop()
+		try:
+			# With waitpid and WNOHANG, only check the
+			# first element of the tuple since the second
+			# element may vary (bug #337465).
+			if os.waitpid(pid, os.WNOHANG)[0] == 0:
+				os.kill(pid, signal.SIGTERM)
+				os.waitpid(pid, 0)
+		except OSError:
+			# This pid has been cleaned up outside
+			# of spawn().
+			pass
+
+atexit_register(cleanup)
+
+def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
+          uid=None, gid=None, groups=None, umask=None, logfile=None,
+          path_lookup=True, pre_exec=None):
+	"""
+	Spawns a given command.
+	
+	@param mycommand: the command to execute
+	@type mycommand: String or List (Popen style list)
+	@param env: A dict of Key=Value pairs for env variables
+	@type env: Dictionary
+	@param opt_name: an optional name for the spawned process (defaults to the binary name)
+	@type opt_name: String
+	@param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout } for example
+	@type fd_pipes: Dictionary
+	@param returnpid: Return the Process IDs for a successful spawn.
+	NOTE: This requires the caller to clean up all the PIDs; otherwise spawn will clean them up.
+	@type returnpid: Boolean
+	@param uid: User ID to spawn as; useful for dropping privileges
+	@type uid: Integer
+	@param gid: Group ID to spawn as; useful for dropping privileges
+	@type gid: Integer
+	@param groups: Group IDs to spawn in; useful for having the process run in multiple group contexts.
+	@type groups: List
+	@param umask: An integer representing the umask for the process (see man chmod for umask details)
+	@type umask: Integer
+	@param logfile: name of a file to use for logging purposes
+	@type logfile: String
+	@param path_lookup: If the binary is not fully specified then look for it in PATH
+	@type path_lookup: Boolean
+	@param pre_exec: A function to be called with no arguments just prior to the exec call.
+	@type pre_exec: callable
+	
+	logfile requires stdout and stderr to be assigned to this process (i.e. not pointed
+	   somewhere else).
+	
+	"""
+
+	# mycommand is either a str or a list
+	if isinstance(mycommand, basestring):
+		mycommand = mycommand.split()
+
+	if sys.hexversion < 0x3000000:
+		# Avoid a potential UnicodeEncodeError from os.execve().
+		env_bytes = {}
+		for k, v in env.items():
+			env_bytes[_unicode_encode(k, encoding=_encodings['content'])] = \
+				_unicode_encode(v, encoding=_encodings['content'])
+		env = env_bytes
+		del env_bytes
+
+	# If an absolute path to an executable file isn't given
+	# search for it unless we've been told not to.
+	binary = mycommand[0]
+	if binary not in (BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY) and \
+		(not os.path.isabs(binary) or not os.path.isfile(binary)
+	    or not os.access(binary, os.X_OK)):
+		binary = path_lookup and find_binary(binary) or None
+		if not binary:
+			raise CommandNotFound(mycommand[0])
+
+	# If we haven't been told what file descriptors to use
+	# default to propagating our stdin, stdout and stderr.
+	if fd_pipes is None:
+		fd_pipes = {
+			0:sys.stdin.fileno(),
+			1:sys.stdout.fileno(),
+			2:sys.stderr.fileno(),
+		}
+
+	# mypids will hold the pids of all processes created.
+	mypids = []
+
+	if logfile:
+		# Using a log file requires that stdout and stderr
+		# are assigned to the process we're running.
+		if 1 not in fd_pipes or 2 not in fd_pipes:
+			raise ValueError(fd_pipes)
+
+		# Create a pipe
+		(pr, pw) = os.pipe()
+
+		# Create a tee process, giving it our stdout and stderr
+		# as well as the read end of the pipe.
+		mypids.extend(spawn(('tee', '-i', '-a', logfile),
+		              returnpid=True, fd_pipes={0:pr,
+		              1:fd_pipes[1], 2:fd_pipes[2]}))
+
+		# We don't need the read end of the pipe, so close it.
+		os.close(pr)
+
+		# Assign the write end of the pipe to our stdout and stderr.
+		fd_pipes[1] = pw
+		fd_pipes[2] = pw
+
+	pid = os.fork()
+
+	if not pid:
+		try:
+			_exec(binary, mycommand, opt_name, fd_pipes,
+			      env, gid, groups, uid, umask, pre_exec)
+		except SystemExit:
+			raise
+		except Exception as e:
+			# We need to catch _any_ exception so that it doesn't
+			# propagate out of this function and cause exiting
+			# with anything other than os._exit()
+			sys.stderr.write("%s:\n   %s\n" % (e, " ".join(mycommand)))
+			traceback.print_exc()
+			sys.stderr.flush()
+			os._exit(1)
+
+	# Add the pid to our local and the global pid lists.
+	mypids.append(pid)
+	spawned_pids.append(pid)
+
+	# If we started a tee process the write side of the pipe is no
+	# longer needed, so close it.
+	if logfile:
+		os.close(pw)
+
+	# If the caller wants to handle cleaning up the processes, we tell
+	# it about all processes that were created.
+	if returnpid:
+		return mypids
+
+	# Otherwise we clean them up.
+	while mypids:
+
+		# Pull the last reader in the pipe chain. If all processes
+		# in the pipe are well behaved, it will die when the process
+		# it is reading from dies.
+		pid = mypids.pop(0)
+
+		# and wait for it.
+		retval = os.waitpid(pid, 0)[1]
+
+		# When it's done, we can remove it from the
+		# global pid list as well.
+		spawned_pids.remove(pid)
+
+		if retval:
+			# If it failed, kill off anything else that
+			# isn't dead yet.
+			for pid in mypids:
+				# With waitpid and WNOHANG, only check the
+				# first element of the tuple since the second
+				# element may vary (bug #337465).
+				if os.waitpid(pid, os.WNOHANG)[0] == 0:
+					os.kill(pid, signal.SIGTERM)
+					os.waitpid(pid, 0)
+				spawned_pids.remove(pid)
+
+			# If it got a signal, return the signal that was sent.
+			if (retval & 0xff):
+				return ((retval & 0xff) << 8)
+
+			# Otherwise, return its exit code.
+			return (retval >> 8)
+
+	# Everything succeeded
+	return 0
+
+def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
+	pre_exec):
+
+	"""
+	Execute a given binary with options
+	
+	@param binary: Name of program to execute
+	@type binary: String
+	@param mycommand: Options for program
+	@type mycommand: String
+	@param opt_name: Name of process (defaults to binary)
+	@type opt_name: String
+	@param fd_pipes: Mapping pipes to destination; { 0:0, 1:1, 2:2 }
+	@type fd_pipes: Dictionary
+	@param env: Key,Value mapping for Environmental Variables
+	@type env: Dictionary
+	@param gid: Group ID to run the process under
+	@type gid: Integer
+	@param groups: Groups the Process should be in.
+	@type groups: List
+	@param uid: User ID to run the process under
+	@type uid: Integer
+	@param umask: an int representing a unix umask (see man chmod for umask details)
+	@type umask: Integer
+	@param pre_exec: A function to be called with no arguments just prior to the exec call.
+	@type pre_exec: callable
+	@rtype: None
+	@returns: Never returns (calls os.execve)
+	"""
+	
+	# If the process we're creating hasn't been given a name
+	# assign it the name of the executable.
+	if not opt_name:
+		opt_name = os.path.basename(binary)
+
+	# Set up the command's argument list.
+	myargs = [opt_name]
+	myargs.extend(mycommand[1:])
+
+	# Use default signal handlers in order to avoid problems
+	# killing subprocesses as reported in bug #353239.
+	signal.signal(signal.SIGINT, signal.SIG_DFL)
+	signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+	# Quiet killing of subprocesses by SIGPIPE (see bug #309001).
+	signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+	# Avoid issues triggered by inheritance of SIGQUIT handler from
+	# the parent process (see bug #289486).
+	signal.signal(signal.SIGQUIT, signal.SIG_DFL)
+
+	_setup_pipes(fd_pipes)
+
+	# Set requested process permissions.
+	if gid:
+		os.setgid(gid)
+	if groups:
+		os.setgroups(groups)
+	if uid:
+		os.setuid(uid)
+	if umask:
+		os.umask(umask)
+	if pre_exec:
+		pre_exec()
+
+	# And switch to the new process.
+	os.execve(binary, myargs, env)
+
+def _setup_pipes(fd_pipes):
+	"""Setup pipes for a forked process."""
+	my_fds = {}
+	# To protect from cases where direct assignment could
+	# clobber needed fds ({1:2, 2:1}) we first dupe the fds
+	# into unused fds.
+	for fd in fd_pipes:
+		my_fds[fd] = os.dup(fd_pipes[fd])
+	# Then assign them to what they should be.
+	for fd in my_fds:
+		os.dup2(my_fds[fd], fd)
+	# Then close _all_ fds that haven't been explicitly
+	# requested to be kept open.
+	for fd in get_open_fds():
+		if fd not in my_fds:
+			try:
+				os.close(fd)
+			except OSError:
+				pass
+
+def find_binary(binary):
+	"""
+	Given a binary name, find the binary in PATH
+	
+	@param binary: Name of the binary to find
+	@type binary: String
+	@rtype: None or string
+	@returns: full path to binary or None if the binary could not be located.
+	"""
+	for path in os.environ.get("PATH", "").split(":"):
+		filename = "%s/%s" % (path, binary)
+		if os.access(filename, os.X_OK) and os.path.isfile(filename):
+			return filename
+	return None

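A short sketch of the spawn() API added above, as it might be driven from
the Python side. The log path here is hypothetical, and tee-based logging
requires stdout/stderr to be among the inherited fd_pipes (the default).

  # Hypothetical example: run a shell command with tee-based logging.
  from portage.process import find_binary, spawn_bash

  assert find_binary("sh") is not None   # PATH lookup helper
  rc = spawn_bash("echo hello", logfile="/tmp/spawn-demo.log")
  print("exit status: %d" % rc)
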
diff --git a/portage_with_autodep/pym/portage/proxy/__init__.py b/portage_with_autodep/pym/portage/proxy/__init__.py
new file mode 100644
index 0000000..f98c564
--- /dev/null
+++ b/portage_with_autodep/pym/portage/proxy/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/proxy/lazyimport.py b/portage_with_autodep/pym/portage/proxy/lazyimport.py
new file mode 100644
index 0000000..ad4a542
--- /dev/null
+++ b/portage_with_autodep/pym/portage/proxy/lazyimport.py
@@ -0,0 +1,212 @@
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['lazyimport']
+
+import sys
+import types
+
+try:
+	import threading
+except ImportError:
+	import dummy_threading as threading
+
+from portage.proxy.objectproxy import ObjectProxy
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+_module_proxies = {}
+_module_proxies_lock = threading.RLock()
+
+def _preload_portage_submodules():
+	"""
+	Load lazily referenced portage submodules into memory,
+	so imports won't fail during portage upgrade/downgrade.
+	Note that this recursively loads only the modules that
+	are lazily referenced by currently imported modules,
+	so some portage submodules may still remain unimported
+	after this function is called.
+	"""
+	imported = set()
+	while True:
+		remaining = False
+		for name in list(_module_proxies):
+			if name.startswith('portage.'):
+				if name in imported:
+					continue
+				imported.add(name)
+				remaining = True
+				__import__(name)
+				_unregister_module_proxy(name)
+		if not remaining:
+			break
+
+def _register_module_proxy(name, proxy):
+	_module_proxies_lock.acquire()
+	try:
+		proxy_list = _module_proxies.get(name)
+		if proxy_list is None:
+			proxy_list = []
+			_module_proxies[name] = proxy_list
+		proxy_list.append(proxy)
+	finally:
+		_module_proxies_lock.release()
+
+def _unregister_module_proxy(name):
+	"""
+	Destroy all proxies that reference the given module name. Also, check
+	for other proxies referenced by modules that have been imported and
+	destroy those proxies too. This way, destruction of a single proxy
+	can trigger destruction of all the rest. If a target module appears
+	to be partially imported (indicated when an AttributeError is caught),
+	this function will leave in place proxies that reference it.
+	"""
+	_module_proxies_lock.acquire()
+	try:
+		if name in _module_proxies:
+			modules = sys.modules
+			for name, proxy_list in list(_module_proxies.items()):
+				if name not in modules:
+					continue
+				# First delete this name from the dict so that
+				# if this same thread reenters below, it won't
+				# enter this path again.
+				del _module_proxies[name]
+				try:
+					while proxy_list:
+						proxy = proxy_list.pop()
+						object.__getattribute__(proxy, '_get_target')()
+				except AttributeError:
+					# Apparently the target module is only partially
+					# imported, so proxies that reference it cannot
+					# be destroyed yet.
+					proxy_list.append(proxy)
+					_module_proxies[name] = proxy_list
+	finally:
+		_module_proxies_lock.release()
+
+class _LazyImport(ObjectProxy):
+
+	__slots__ = ('_scope', '_alias', '_name', '_target')
+
+	def __init__(self, scope, alias, name):
+		ObjectProxy.__init__(self)
+		object.__setattr__(self, '_scope', scope)
+		object.__setattr__(self, '_alias', alias)
+		object.__setattr__(self, '_name', name)
+		_register_module_proxy(name, self)
+
+	def _get_target(self):
+		try:
+			return object.__getattribute__(self, '_target')
+		except AttributeError:
+			pass
+		name = object.__getattribute__(self, '_name')
+		__import__(name)
+		target = sys.modules[name]
+		object.__setattr__(self, '_target', target)
+		object.__getattribute__(self, '_scope')[
+			object.__getattribute__(self, '_alias')] = target
+		_unregister_module_proxy(name)
+		return target
+
+class _LazyImportFrom(_LazyImport):
+
+	__slots__ = ('_attr_name',)
+
+	def __init__(self, scope, name, attr_name, alias):
+		object.__setattr__(self, '_attr_name', attr_name)
+		_LazyImport.__init__(self, scope, alias, name)
+
+	def _get_target(self):
+		try:
+			return object.__getattribute__(self, '_target')
+		except AttributeError:
+			pass
+		name = object.__getattribute__(self, '_name')
+		attr_name = object.__getattribute__(self, '_attr_name')
+		__import__(name)
+		# If called by _unregister_module_proxy() and the target module is
+		# partially imported, then the following getattr call may raise an
+		# AttributeError for _unregister_module_proxy() to handle.
+		target = getattr(sys.modules[name], attr_name)
+		object.__setattr__(self, '_target', target)
+		object.__getattribute__(self, '_scope')[
+			object.__getattribute__(self, '_alias')] = target
+		_unregister_module_proxy(name)
+		return target
+
+def lazyimport(scope, *args):
+	"""
+	Create a proxy in the given scope in order to perform a lazy import.
+
+	Syntax         Result
+	foo            import foo
+	foo:bar,baz    from foo import bar, baz
+	foo:bar@baz    from foo import bar as baz
+
+	@param scope: the scope in which to place the import, typically globals()
+	@type scope: dict
+	@param args: module names to import
+	@type args: strings
+	"""
+
+	modules = sys.modules
+
+	for s in args:
+		parts = s.split(':', 1)
+		if len(parts) == 1:
+			name = s
+
+			if not name or not isinstance(name, basestring):
+				raise ValueError(name)
+
+			components = name.split('.')
+			parent_scope = scope
+			for i in range(len(components)):
+				alias = components[i]
+				if i < len(components) - 1:
+					parent_name = ".".join(components[:i+1])
+					__import__(parent_name)
+					mod = modules.get(parent_name)
+					if not isinstance(mod, types.ModuleType):
+						# raise an exception
+						__import__(name)
+					parent_scope[alias] = mod
+					parent_scope = mod.__dict__
+					continue
+
+				already_imported = modules.get(name)
+				if already_imported is not None:
+					parent_scope[alias] = already_imported
+				else:
+					parent_scope[alias] = \
+						_LazyImport(parent_scope, alias, name)
+
+		else:
+			name, fromlist = parts
+			already_imported = modules.get(name)
+			fromlist = fromlist.split(',')
+			for s in fromlist:
+				if not s:
+					# This happens if there's an extra comma in fromlist.
+					raise ValueError('Empty module attribute name')
+				alias = s.split('@', 1)
+				if len(alias) == 1:
+					alias = alias[0]
+					attr_name = alias
+				else:
+					attr_name, alias = alias
+				if already_imported is not None:
+					try:
+						scope[alias] = getattr(already_imported, attr_name)
+					except AttributeError:
+						# Apparently the target module is only partially
+						# imported, so create a proxy.
+						already_imported = None
+						scope[alias] = \
+							_LazyImportFrom(scope, name, attr_name, alias)
+				else:
+					scope[alias] = \
+						_LazyImportFrom(scope, name, attr_name, alias)

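The three syntaxes accepted by lazyimport() are easiest to see in a small
sketch; the stdlib modules below are chosen purely for illustration.

  # Hypothetical example of the documented syntaxes:
  #   'json'                -> import json
  #   'textwrap:wrap@_wrap' -> from textwrap import wrap as _wrap
  from portage.proxy.lazyimport import lazyimport

  lazyimport(globals(),
      'json',
      'textwrap:wrap@_wrap',
  )
  # The proxies resolve on first use:
  print(_wrap("lazy imports resolve on first attribute access", 20))
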
diff --git a/portage_with_autodep/pym/portage/proxy/objectproxy.py b/portage_with_autodep/pym/portage/proxy/objectproxy.py
new file mode 100644
index 0000000..92b36d1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/proxy/objectproxy.py
@@ -0,0 +1,91 @@
+# Copyright 2008-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+__all__ = ['ObjectProxy']
+
+class ObjectProxy(object):
+
+	"""
+	Object that acts as a proxy to another object, forwarding
+	attribute accesses and method calls. This can be useful
+	for implementing lazy initialization.
+	"""
+
+	__slots__ = ()
+
+	def _get_target(self):
+		raise NotImplementedError(self)
+
+	def __getattribute__(self, attr):
+		result = object.__getattribute__(self, '_get_target')()
+		return getattr(result, attr)
+
+	def __setattr__(self, attr, value):
+		result = object.__getattribute__(self, '_get_target')()
+		setattr(result, attr, value)
+
+	def __call__(self, *args, **kwargs):
+		result = object.__getattribute__(self, '_get_target')()
+		return result(*args, **kwargs)
+
+	def __setitem__(self, key, value):
+		object.__getattribute__(self, '_get_target')()[key] = value
+
+	def __getitem__(self, key):
+		return object.__getattribute__(self, '_get_target')()[key]
+
+	def __delitem__(self, key):
+		del object.__getattribute__(self, '_get_target')()[key]
+
+	def __contains__(self, key):
+		return key in object.__getattribute__(self, '_get_target')()
+
+	def __iter__(self):
+		return iter(object.__getattribute__(self, '_get_target')())
+
+	def __len__(self):
+		return len(object.__getattribute__(self, '_get_target')())
+
+	def __repr__(self):
+		return repr(object.__getattribute__(self, '_get_target')())
+
+	def __str__(self):
+		return str(object.__getattribute__(self, '_get_target')())
+
+	def __add__(self, other):
+		return self.__str__() + other
+
+	def __hash__(self):
+		return hash(object.__getattribute__(self, '_get_target')())
+
+	def __ge__(self, other):
+		return object.__getattribute__(self, '_get_target')() >= other
+
+	def __gt__(self, other):
+		return object.__getattribute__(self, '_get_target')() > other
+
+	def __le__(self, other):
+		return object.__getattribute__(self, '_get_target')() <= other
+
+	def __lt__(self, other):
+		return object.__getattribute__(self, '_get_target')() < other
+
+	def __eq__(self, other):
+		return object.__getattribute__(self, '_get_target')() == other
+
+	def __ne__(self, other):
+		return object.__getattribute__(self, '_get_target')() != other
+
+	def __bool__(self):
+		return bool(object.__getattribute__(self, '_get_target')())
+
+	if sys.hexversion < 0x3000000:
+		__nonzero__ = __bool__
+
+		def __unicode__(self):
+			return unicode(object.__getattribute__(self, '_get_target')())
+
+	def __int__(self):
+		return int(object.__getattribute__(self, '_get_target')())

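ObjectProxy's contract is just _get_target(); everything else forwards to
the target. A minimal sketch of a lazily-built proxy in that style (the
list here merely stands in for an expensive computation):

  # Hypothetical example: defer building the target until first use.
  from portage.proxy.objectproxy import ObjectProxy

  class LazyList(ObjectProxy):
      __slots__ = ('_target',)
      def _get_target(self):
          try:
              return object.__getattribute__(self, '_target')
          except AttributeError:
              target = [1, 2, 3]  # built on first access
              object.__setattr__(self, '_target', target)
              return target

  lazy = LazyList()
  print("len=%d, first=%r" % (len(lazy), lazy[0]))
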
diff --git a/portage_with_autodep/pym/portage/repository/__init__.py b/portage_with_autodep/pym/portage/repository/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/repository/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/repository/config.py b/portage_with_autodep/pym/portage/repository/config.py
new file mode 100644
index 0000000..9f0bb99
--- /dev/null
+++ b/portage_with_autodep/pym/portage/repository/config.py
@@ -0,0 +1,504 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import logging
+import re
+
+try:
+	from configparser import SafeConfigParser, ParsingError
+except ImportError:
+	from ConfigParser import SafeConfigParser, ParsingError
+from portage import os
+from portage.const import USER_CONFIG_PATH, REPO_NAME_LOC
+from portage.env.loaders import KeyValuePairFileLoader
+from portage.util import normalize_path, writemsg, writemsg_level, shlex_split
+from portage.localization import _
+from portage import _unicode_encode
+from portage import _encodings
+
+_repo_name_sub_re = re.compile(r'[^\w-]')
+
+def _gen_valid_repo(name):
+	"""
+	Substitute hyphen in place of characters that don't conform to PMS 3.1.5,
+	and strip hyphen from left side if necessary. This returns None if the
+	given name contains no valid characters.
+	"""
+	name = _repo_name_sub_re.sub(' ', name.strip())
+	name = '-'.join(name.split())
+	name = name.lstrip('-')
+	if not name:
+		name = None
+	return name
+
+class RepoConfig(object):
+	"""Stores config of one repository"""
+
+	__slots__ = ['aliases', 'eclass_overrides', 'eclass_locations', 'location', 'user_location', 'masters', 'main_repo',
+		'missing_repo_name', 'name', 'priority', 'sync', 'format']
+
+	def __init__(self, name, repo_opts):
+		"""Build a RepoConfig with the options in repo_opts.
+		   Try to read repo_name from the repository location; if
+		   it is not found, use the section name as the repository name."""
+		aliases = repo_opts.get('aliases')
+		if aliases is not None:
+			aliases = tuple(aliases.split())
+		self.aliases = aliases
+
+		eclass_overrides = repo_opts.get('eclass-overrides')
+		if eclass_overrides is not None:
+			eclass_overrides = tuple(eclass_overrides.split())
+		self.eclass_overrides = eclass_overrides
+		#Locations are computed later.
+		self.eclass_locations = None
+
+		#Masters are only read from layout.conf.
+		self.masters = None
+
+		#The main-repo key only makes sense for the 'DEFAULT' section.
+		self.main_repo = repo_opts.get('main-repo')
+
+		priority = repo_opts.get('priority')
+		if priority is not None:
+			try:
+				priority = int(priority)
+			except ValueError:
+				priority = None
+		self.priority = priority
+
+		sync = repo_opts.get('sync')
+		if sync is not None:
+			sync = sync.strip()
+		self.sync = sync
+
+		format = repo_opts.get('format')
+		if format is not None:
+			format = format.strip()
+		self.format = format
+
+		location = repo_opts.get('location')
+		self.user_location = location
+		if location is not None and location.strip():
+			if os.path.isdir(location):
+				location = os.path.realpath(location)
+		else:
+			location = None
+		self.location = location
+
+		missing = True
+		if self.location is not None:
+			name, missing = self._read_repo_name(self.location)
+			# We must ensure that the name conforms to PMS 3.1.5
+			# in order to avoid InvalidAtom exceptions when we
+			# use it to generate atoms.
+			name = _gen_valid_repo(name)
+			if not name:
+				# name only contains invalid characters
+				name = "x-" + os.path.basename(self.location)
+				name = _gen_valid_repo(name)
+				# If basename only contains whitespace then the
+				# end result is name = 'x-'.
+
+		elif name == "DEFAULT":
+			missing = False
+		self.name = name
+		self.missing_repo_name = missing
+
+	def update(self, new_repo):
+		"""Update repository with options in another RepoConfig"""
+		if new_repo.aliases is not None:
+			self.aliases = new_repo.aliases
+		if new_repo.eclass_overrides is not None:
+			self.eclass_overrides = new_repo.eclass_overrides
+		if new_repo.masters is not None:
+			self.masters = new_repo.masters
+		if new_repo.name is not None:
+			self.name = new_repo.name
+			self.missing_repo_name = new_repo.missing_repo_name
+		if new_repo.user_location is not None:
+			self.user_location = new_repo.user_location
+		if new_repo.location is not None:
+			self.location = new_repo.location
+		if new_repo.priority is not None:
+			self.priority = new_repo.priority
+		if new_repo.sync is not None:
+			self.sync = new_repo.sync
+
+	def _read_repo_name(self, repo_path):
+		"""
+		Read repo_name from repo_path.
+		Returns repo_name, missing.
+		"""
+		repo_name_path = os.path.join(repo_path, REPO_NAME_LOC)
+		try:
+			return io.open(
+				_unicode_encode(repo_name_path,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['repo.content'],
+				errors='replace').readline().strip(), False
+		except EnvironmentError:
+			return "x-" + os.path.basename(repo_path), True
+
+	def info_string(self):
+		"""
+		Returns a formatted string containing information about the repository.
+		Used by emerge --info.
+		"""
+		indent = " " * 4
+		repo_msg = []
+		repo_msg.append(self.name)
+		if self.format:
+			repo_msg.append(indent + "format: " + self.format)
+		if self.user_location:
+			repo_msg.append(indent + "location: " + self.user_location)
+		if self.sync:
+			repo_msg.append(indent + "sync: " + self.sync)
+		if self.masters:
+			repo_msg.append(indent + "masters: " + " ".join(master.name for master in self.masters))
+		if self.priority is not None:
+			repo_msg.append(indent + "priority: " + str(self.priority))
+		if self.aliases:
+			repo_msg.append(indent + "aliases: " + " ".join(self.aliases))
+		if self.eclass_overrides:
+			repo_msg.append(indent + "eclass_overrides: " + \
+				" ".join(self.eclass_overrides))
+		repo_msg.append("")
+		return "\n".join(repo_msg)
+
+class RepoConfigLoader(object):
+	"""Loads and stores the config of several repositories, read from PORTDIR_OVERLAY or repos.conf"""
+	def __init__(self, paths, settings):
+		"""Load config from files in paths"""
+		def parse(paths, prepos, ignored_map, ignored_location_map):
+			"""Parse files in paths to load config"""
+			parser = SafeConfigParser()
+			try:
+				parser.read(paths)
+			except ParsingError as e:
+				writemsg(_("!!! Error while reading repo config file: %s\n") % e, noiselevel=-1)
+			prepos['DEFAULT'] = RepoConfig("DEFAULT", parser.defaults())
+			for sname in parser.sections():
+				optdict = {}
+				for oname in parser.options(sname):
+					optdict[oname] = parser.get(sname, oname)
+
+				repo = RepoConfig(sname, optdict)
+				if repo.location and not os.path.exists(repo.location):
+					writemsg(_("!!! Invalid repos.conf entry '%s'"
+						" (not a dir): '%s'\n") % (sname, repo.location), noiselevel=-1)
+					continue
+
+				if repo.name in prepos:
+					old_location = prepos[repo.name].location
+					if old_location is not None and repo.location is not None and old_location != repo.location:
+						ignored_map.setdefault(repo.name, []).append(old_location)
+						ignored_location_map[old_location] = repo.name
+					prepos[repo.name].update(repo)
+				else:
+					prepos[repo.name] = repo
+
+		def add_overlays(portdir, portdir_overlay, prepos, ignored_map, ignored_location_map):
+			"""Add overlays in PORTDIR_OVERLAY as repositories"""
+			overlays = []
+			if portdir:
+				portdir = normalize_path(portdir)
+				overlays.append(portdir)
+			port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
+			overlays.extend(port_ov)
+			default_repo_opts = {}
+			if prepos['DEFAULT'].aliases is not None:
+				default_repo_opts['aliases'] = \
+					' '.join(prepos['DEFAULT'].aliases)
+			if prepos['DEFAULT'].eclass_overrides is not None:
+				default_repo_opts['eclass-overrides'] = \
+					' '.join(prepos['DEFAULT'].eclass_overrides)
+			if prepos['DEFAULT'].masters is not None:
+				default_repo_opts['masters'] = \
+					' '.join(prepos['DEFAULT'].masters)
+			if overlays:
+				#overlay priority is negative because we want them to be searched before any other repo
+				base_priority = 0
+				for ov in overlays:
+					if os.path.isdir(ov):
+						repo_opts = default_repo_opts.copy()
+						repo_opts['location'] = ov
+						repo = RepoConfig(None, repo_opts)
+						repo_conf_opts = prepos.get(repo.name)
+						if repo_conf_opts is not None:
+							if repo_conf_opts.aliases is not None:
+								repo_opts['aliases'] = \
+									' '.join(repo_conf_opts.aliases)
+							if repo_conf_opts.eclass_overrides is not None:
+								repo_opts['eclass-overrides'] = \
+									' '.join(repo_conf_opts.eclass_overrides)
+							if repo_conf_opts.masters is not None:
+								repo_opts['masters'] = \
+									' '.join(repo_conf_opts.masters)
+						repo = RepoConfig(repo.name, repo_opts)
+						if repo.name in prepos:
+							old_location = prepos[repo.name].location
+							if old_location is not None and old_location != repo.location:
+								ignored_map.setdefault(repo.name, []).append(old_location)
+								ignored_location_map[old_location] = repo.name
+								if old_location == portdir:
+									portdir = repo.user_location
+							prepos[repo.name].update(repo)
+							repo = prepos[repo.name]
+						else:
+							prepos[repo.name] = repo
+
+						if ov == portdir and portdir not in port_ov:
+							repo.priority = -1000
+						else:
+							repo.priority = base_priority
+							base_priority += 1
+
+					else:
+						writemsg(_("!!! Invalid PORTDIR_OVERLAY"
+							" (not a dir): '%s'\n") % ov, noiselevel=-1)
+
+			return portdir
+
+		def repo_priority(r):
+			"""
+			Key function for sorting repositories by priority.
+			None is treated as priority zero.
+			"""
+			x = prepos[r].priority
+			if x is None:
+				return 0
+			return x
+
+		prepos = {}
+		location_map = {}
+		treemap = {}
+		ignored_map = {}
+		ignored_location_map = {}
+
+		portdir = settings.get('PORTDIR', '')
+		portdir_overlay = settings.get('PORTDIR_OVERLAY', '')
+		parse(paths, prepos, ignored_map, ignored_location_map)
+		# If PORTDIR_OVERLAY contains a repo with the same repo_name as
+		# PORTDIR, then PORTDIR is overridden.
+		portdir = add_overlays(portdir, portdir_overlay, prepos,
+			ignored_map, ignored_location_map)
+		if portdir and portdir.strip():
+			portdir = os.path.realpath(portdir)
+
+		ignored_repos = tuple((repo_name, tuple(paths)) \
+			for repo_name, paths in ignored_map.items())
+
+		self.missing_repo_names = frozenset(repo.location
+			for repo in prepos.values()
+			if repo.location is not None and repo.missing_repo_name)
+
+		#Parse layout.conf and read masters key.
+		for repo in prepos.values():
+			if not repo.location:
+				continue
+			layout_filename = os.path.join(repo.location, "metadata", "layout.conf")
+			layout_file = KeyValuePairFileLoader(layout_filename, None, None)
+			layout_data, layout_errors = layout_file.load()
+
+			masters = layout_data.get('masters')
+			if masters and masters.strip():
+				masters = masters.split()
+			else:
+				masters = None
+			repo.masters = masters
+
+			aliases = layout_data.get('aliases')
+			if aliases and aliases.strip():
+				aliases = aliases.split()
+			else:
+				aliases = None
+			if aliases:
+				if repo.aliases:
+					aliases.extend(repo.aliases)
+				repo.aliases = tuple(sorted(set(aliases)))
+
+		#Take aliases into account.
+		new_prepos = {}
+		for repo_name, repo in prepos.items():
+			names = set()
+			names.add(repo_name)
+			if repo.aliases:
+				names.update(repo.aliases)
+
+			for name in names:
+				if name in new_prepos:
+					writemsg_level(_("!!! Repository name or alias '%s', " + \
+						"defined for repository '%s', overrides " + \
+						"existing alias or repository.\n") % (name, repo_name), level=logging.WARNING, noiselevel=-1)
+				new_prepos[name] = repo
+		prepos = new_prepos
+
+		for (name, r) in prepos.items():
+			if r.location is not None:
+				location_map[r.location] = name
+				treemap[name] = r.location
+
+		# filter duplicates from aliases, by only including
+		# items where repo.name == key
+		prepos_order = [repo.name for key, repo in prepos.items() \
+			if repo.name == key and repo.location is not None]
+		prepos_order.sort(key=repo_priority)
+
+		if portdir in location_map:
+			portdir_repo = prepos[location_map[portdir]]
+			portdir_sync = settings.get('SYNC', '')
+			#if SYNC variable is set and not overwritten by repos.conf
+			if portdir_sync and not portdir_repo.sync:
+				portdir_repo.sync = portdir_sync
+
+		if prepos['DEFAULT'].main_repo is None or \
+			prepos['DEFAULT'].main_repo not in prepos:
+			#setting main_repo if it was not set in repos.conf
+			if portdir in location_map:
+				prepos['DEFAULT'].main_repo = location_map[portdir]
+			elif portdir in ignored_location_map:
+				prepos['DEFAULT'].main_repo = ignored_location_map[portdir]
+			else:
+				prepos['DEFAULT'].main_repo = None
+				writemsg(_("!!! main-repo not set in DEFAULT and PORTDIR is empty. \n"), noiselevel=-1)
+
+		self.prepos = prepos
+		self.prepos_order = prepos_order
+		self.ignored_repos = ignored_repos
+		self.location_map = location_map
+		self.treemap = treemap
+		self._prepos_changed = True
+		self._repo_location_list = []
+
+		#The 'masters' key currently contains repo names. Replace them with the matching RepoConfig.
+		for repo_name, repo in prepos.items():
+			if repo_name == "DEFAULT":
+				continue
+			if repo.masters is None:
+				if self.mainRepo() and repo_name != self.mainRepo().name:
+					repo.masters = self.mainRepo(),
+				else:
+					repo.masters = ()
+			else:
+				if repo.masters and isinstance(repo.masters[0], RepoConfig):
+					# This one has already been processed
+					# because it has an alias.
+					continue
+				master_repos = []
+				for master_name in repo.masters:
+					if master_name not in prepos:
+						layout_filename = os.path.join(repo.user_location,
+							"metadata", "layout.conf")
+						writemsg_level(_("Unavailable repository '%s' " \
+							"referenced by masters entry in '%s'\n") % \
+							(master_name, layout_filename),
+							level=logging.ERROR, noiselevel=-1)
+					else:
+						master_repos.append(prepos[master_name])
+				repo.masters = tuple(master_repos)
+
+		#The 'eclass_overrides' key currently contains repo names. Replace them with the matching repo paths.
+		for repo_name, repo in prepos.items():
+			if repo_name == "DEFAULT":
+				continue
+
+			eclass_locations = []
+			eclass_locations.extend(master_repo.location for master_repo in repo.masters)
+			eclass_locations.append(repo.location)
+
+			if repo.eclass_overrides:
+				for other_repo_name in repo.eclass_overrides:
+					if other_repo_name in self.treemap:
+						eclass_locations.append(self.get_location_for_name(other_repo_name))
+					else:
+						writemsg_level(_("Unavailable repository '%s' " \
+							"referenced by eclass-overrides entry for " \
+							"'%s'\n") % (other_repo_name, repo_name), \
+							level=logging.ERROR, noiselevel=-1)
+			repo.eclass_locations = tuple(eclass_locations)
+
+		self._prepos_changed = True
+		self._repo_location_list = []
+
+		self._check_locations()
+
+	def repoLocationList(self):
+		"""Get a list of repositories location. Replaces PORTDIR_OVERLAY"""
+		if self._prepos_changed:
+			_repo_location_list = []
+			for repo in self.prepos_order:
+				if self.prepos[repo].location is not None:
+					_repo_location_list.append(self.prepos[repo].location)
+			self._repo_location_list = tuple(_repo_location_list)
+
+			self._prepos_changed = False
+		return self._repo_location_list
+
+	def repoUserLocationList(self):
+		"""Get a list of repositories location. Replaces PORTDIR_OVERLAY"""
+		user_location_list = []
+		for repo in self.prepos_order:
+			if self.prepos[repo].location is not None:
+				user_location_list.append(self.prepos[repo].user_location)
+		return tuple(user_location_list)
+
+	def mainRepoLocation(self):
+		"""Returns the location of main repo"""
+		main_repo = self.prepos['DEFAULT'].main_repo
+		if main_repo is not None and main_repo in self.prepos:
+			return self.prepos[main_repo].location
+		else:
+			return ''
+
+	def mainRepo(self):
+		"""Returns the main repo"""
+		main_repo = self.prepos['DEFAULT'].main_repo
+		if main_repo is None:
+			return None
+		return self.prepos[main_repo]
+
+	def _check_locations(self):
+		"""Check if repositories location are correct and show a warning message if not"""
+		for (name, r) in self.prepos.items():
+			if name != 'DEFAULT':
+				if r.location is None:
+					writemsg(_("!!! Location not set for repository %s\n") % name, noiselevel=-1)
+				else:
+					if not os.path.isdir(r.location):
+						self.prepos_order.remove(name)
+						writemsg(_("!!! Invalid Repository Location"
+							" (not a dir): '%s'\n") % r.location, noiselevel=-1)
+
+	def repos_with_profiles(self):
+		for repo_name in self.prepos_order:
+			repo = self.prepos[repo_name]
+			if repo.format != "unavailable":
+				yield repo
+
+	def get_name_for_location(self, location):
+		return self.location_map[location]
+
+	def get_location_for_name(self, repo_name):
+		if repo_name is None:
+			# This simplifies code in places where
+			# we want to be able to pass in Atom.repo
+			# even if it is None.
+			return None
+		return self.treemap[repo_name]
+
+	def __getitem__(self, repo_name):
+		return self.prepos[repo_name]
+
+	def __iter__(self):
+		for repo_name in self.prepos_order:
+			yield self.prepos[repo_name]
+
+def load_repository_config(settings):
+	#~ repoconfigpaths = [os.path.join(settings.global_config_path, "repos.conf")]
+	repoconfigpaths = []
+	if settings.local_config:
+		repoconfigpaths.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
+			USER_CONFIG_PATH, "repos.conf"))
+	return RepoConfigLoader(repoconfigpaths, settings)
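
Note: the masters and aliases values handled above come from a repository's
metadata/layout.conf, a plain key = value file read with KeyValuePairFileLoader;
both values are whitespace-separated lists. A minimal sketch, assuming a
hypothetical overlay rooted at /usr/local/portage:

    # /usr/local/portage/metadata/layout.conf (hypothetical path)
    masters = gentoo
    aliases = my-overlay local-overlay

Master names are later resolved to RepoConfig objects, and unresolved entries
are reported via writemsg_level as the code above shows.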

diff --git a/portage_with_autodep/pym/portage/tests/__init__.py b/portage_with_autodep/pym/portage/tests/__init__.py
new file mode 100644
index 0000000..a647aa2
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/__init__.py
@@ -0,0 +1,244 @@
+# tests/__init__.py -- Portage Unit Test functionality
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+import time
+import unittest
+
+try:
+	from unittest.runner import _TextTestResult # new in python-2.7
+except ImportError:
+	from unittest import _TextTestResult
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+
+def main():
+
+	TEST_FILE = b'__test__'
+	svn_dirname = b'.svn'
+	suite = unittest.TestSuite()
+	basedir = os.path.dirname(os.path.realpath(__file__))
+	testDirs = []
+
+	if len(sys.argv) > 1:
+		suite.addTests(getTestFromCommandLine(sys.argv[1:], basedir))
+		return TextTestRunner(verbosity=2).run(suite)
+
+	# the os.walk help mentions relative paths as being quirky
+	# I was tired of adding dirs to the list, so now we add __test__
+	# to each dir we want tested.
+	for root, dirs, files in os.walk(basedir):
+		if svn_dirname in dirs:
+			dirs.remove(svn_dirname)
+		try:
+			root = _unicode_decode(root,
+				encoding=_encodings['fs'], errors='strict')
+		except UnicodeDecodeError:
+			continue
+
+		if TEST_FILE in files:
+			testDirs.append(root)
+
+	for mydir in testDirs:
+		suite.addTests(getTests(os.path.join(basedir, mydir), basedir) )
+	return TextTestRunner(verbosity=2).run(suite)
+
+def my_import(name):
+	mod = __import__(name)
+	components = name.split('.')
+	for comp in components[1:]:
+		mod = getattr(mod, comp)
+	return mod
+
+def getTestFromCommandLine(args, base_path):
+	ret = []
+	for arg in args:
+		realpath = os.path.realpath(arg)
+		path = os.path.dirname(realpath)
+		f = realpath[len(path)+1:]
+
+		if not f.startswith("test") or not f.endswith(".py"):
+			raise Exception("Invalid argument: '%s'" % arg)
+
+		mymodule = f[:-3]
+
+		parent_path = path[len(base_path)+1:]
+		parent_module = ".".join(("portage", "tests", parent_path))
+		parent_module = parent_module.replace('/', '.')
+
+		# Make the trailing / a . for module importing
+		modname = ".".join((parent_module, mymodule))
+		mod = my_import(modname)
+		ret.append(unittest.TestLoader().loadTestsFromModule(mod))
+	return ret
+
+def getTests(path, base_path):
+	"""
+
+	path is the path to a given subdir ( 'portage/' for example)
+	This does a simple filter on files in that dir to give us modules
+	to import
+
+	"""
+	files = os.listdir(path)
+	files = [ f[:-3] for f in files if f.startswith("test") and f.endswith(".py") ]
+	parent_path = path[len(base_path)+1:]
+	parent_module = ".".join(("portage", "tests", parent_path))
+	parent_module = parent_module.replace('/', '.')
+	result = []
+	for mymodule in files:
+		# Make the trailing / a . for module importing
+		modname = ".".join((parent_module, mymodule))
+		mod = my_import(modname)
+		result.append(unittest.TestLoader().loadTestsFromModule(mod))
+	return result
+
+class TextTestResult(_TextTestResult):
+	"""
+	We need a subclass of unittest._TextTestResult to handle tests with TODO
+
+	This just adds an addTodo method that can be used to add tests
+	that are marked TODO; these can be displayed later
+	by the test runner.
+	"""
+
+	def __init__(self, stream, descriptions, verbosity):
+		super(TextTestResult, self).__init__(stream, descriptions, verbosity)
+		self.todoed = []
+
+	def addTodo(self, test, info):
+		self.todoed.append((test,info))
+		if self.showAll:
+			self.stream.writeln("TODO")
+		elif self.dots:
+			self.stream.write(".")
+
+	def printErrors(self):
+		if self.dots or self.showAll:
+			self.stream.writeln()
+			self.printErrorList('ERROR', self.errors)
+			self.printErrorList('FAIL', self.failures)
+			self.printErrorList('TODO', self.todoed)
+
+class TestCase(unittest.TestCase):
+	"""
+	We need a way to mark a unit test as "ok to fail"
+	This way someone can add a broken test and mark it as failed
+	and then fix the code later.  This may not be a great approach
+	(broken code!!??!11oneone) but it does happen at times.
+	"""
+	
+	def __init__(self, methodName='runTest'):
+		# This method exists because unittest.py in python 2.4 stores
+		# the methodName as __testMethodName while 2.5 uses
+		# _testMethodName.
+		self._testMethodName = methodName
+		unittest.TestCase.__init__(self, methodName)
+		self.todo = False
+		
+	def defaultTestResult(self):
+		return TextTestResult()
+
+	def run(self, result=None):
+		if result is None: result = self.defaultTestResult()
+		result.startTest(self)
+		testMethod = getattr(self, self._testMethodName)
+		try:
+			try:
+				self.setUp()
+			except SystemExit:
+				raise
+			except KeyboardInterrupt:
+				raise
+			except:
+				result.addError(self, sys.exc_info())
+				return
+			ok = False
+			try:
+				testMethod()
+				ok = True
+			except self.failureException:
+				if self.todo:
+					result.addTodo(self,"%s: TODO" % testMethod)
+				else:
+					result.addFailure(self, sys.exc_info())
+			except (KeyboardInterrupt, SystemExit):
+				raise
+			except:
+				result.addError(self, sys.exc_info())
+			try:
+				self.tearDown()
+			except SystemExit:
+				raise
+			except KeyboardInterrupt:
+				raise
+			except:
+				result.addError(self, sys.exc_info())
+				ok = False
+			if ok: result.addSuccess(self)
+		finally:
+			result.stopTest(self)
+
+	def assertRaisesMsg(self, msg, excClass, callableObj, *args, **kwargs):
+		"""Fail unless an exception of class excClass is thrown
+		   by callableObj when invoked with arguments args and keyword
+		   arguments kwargs. If a different type of exception is
+		   thrown, it will not be caught, and the test case will be
+		   deemed to have suffered an error, exactly as for an
+		   unexpected exception.
+		"""
+		try:
+			callableObj(*args, **kwargs)
+		except excClass:
+			return
+		else:
+			if hasattr(excClass, '__name__'): excName = excClass.__name__
+			else: excName = str(excClass)
+			raise self.failureException("%s not raised: %s" % (excName, msg))
+			
+class TextTestRunner(unittest.TextTestRunner):
+	"""
+	We subclass unittest.TextTestRunner to use our TextTestResult, so tests marked TODO are reported as TODO rather than FAIL
+	"""
+	
+	def _makeResult(self):
+		return TextTestResult(self.stream, self.descriptions, self.verbosity)
+
+	def run(self, test):
+		"""
+		Run the given test case or test suite.
+		"""
+		result = self._makeResult()
+		startTime = time.time()
+		test(result)
+		stopTime = time.time()
+		timeTaken = stopTime - startTime
+		result.printErrors()
+		self.stream.writeln(result.separator2)
+		run = result.testsRun
+		self.stream.writeln("Ran %d test%s in %.3fs" %
+							(run, run != 1 and "s" or "", timeTaken))
+		self.stream.writeln()
+		if not result.wasSuccessful():
+			self.stream.write("FAILED (")
+			failed = len(result.failures)
+			errored = len(result.errors)
+			if failed:
+				self.stream.write("failures=%d" % failed)
+			if errored:
+				if failed: self.stream.write(", ")
+				self.stream.write("errors=%d" % errored)
+			self.stream.writeln(")")
+		else:
+			self.stream.writeln("OK")
+		return result
+	
+test_cps = ['sys-apps/portage','virtual/portage']
+test_versions = ['1.0', '1.0-r1','2.3_p4','1.0_alpha57']
+test_slots = [ None, '1','gentoo-sources-2.6.17','spankywashere']
+test_usedeps = ['foo','-bar', ('foo','bar'),
+	('foo','-bar'), ('foo?', '!bar?') ]
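
Note: the todo flag consumed by TestCase.run() above lets a known-broken test
report TODO instead of FAIL. A minimal sketch of a test module using it (the
class name and failing assertion are hypothetical):

    from portage.tests import TestCase

    class BrokenFeature(TestCase):
        def testKnownFailure(self):
            self.todo = True  # a failureException is recorded via addTodo()
            self.assertEqual(1, 2)

TextTestResult.printErrors() then lists the case under TODO alongside the
usual ERROR and FAIL sections.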

diff --git a/portage_with_autodep/pym/portage/tests/bin/__init__.py b/portage_with_autodep/pym/portage/tests/bin/__init__.py
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/bin/__test__ b/portage_with_autodep/pym/portage/tests/bin/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/bin/setup_env.py b/portage_with_autodep/pym/portage/tests/bin/setup_env.py
new file mode 100644
index 0000000..e07643d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/bin/setup_env.py
@@ -0,0 +1,85 @@
+# setup_env.py -- Make sure bin subdir has sane env for testing
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage import shutil
+from portage.tests import TestCase
+from portage.process import spawn
+
+basepath = os.path.join(os.path.dirname(os.path.dirname(
+	os.path.abspath(__file__))),
+	"..", "..", "..")
+bindir = os.path.join(basepath, "bin")
+pymdir = os.path.join(basepath, "pym")
+basedir = None
+env = None
+
+def binTestsCleanup():
+	global basedir
+	if basedir is None:
+		return
+	if os.access(basedir, os.W_OK):
+		shutil.rmtree(basedir)
+		basedir = None
+
+def binTestsInit():
+	binTestsCleanup()
+	global basedir, env
+	basedir = tempfile.mkdtemp()
+	env = os.environ.copy()
+	env["D"] = os.path.join(basedir, "image")
+	env["T"] = os.path.join(basedir, "temp")
+	env["S"] = os.path.join(basedir, "workdir")
+	env["PF"] = "portage-tests-0.09-r1"
+	env["PATH"] = bindir + ":" + env["PATH"]
+	env["PORTAGE_BIN_PATH"] = bindir
+	env["PORTAGE_PYM_PATH"] = pymdir
+	os.mkdir(env["D"])
+	os.mkdir(env["T"])
+	os.mkdir(env["S"])
+
+class BinTestCase(TestCase):
+	def init(self):
+		binTestsInit()
+	def cleanup(self):
+		binTestsCleanup()
+
+def _exists_in_D(path):
+	# Note: do not use os.path.join() here; path begins with / and join would discard D
+	return os.access(env["D"] + path, os.W_OK)
+def exists_in_D(path):
+	if not _exists_in_D(path):
+		raise TestCase.failureException
+def xexists_in_D(path):
+	if _exists_in_D(path):
+		raise TestCase.failureException
+
+def portage_func(func, args, exit_status=0):
+	# we don't care about the output of the programs,
+	# just their exit value and the state of $D
+	global env
+	f = open('/dev/null', 'wb')
+	fd_pipes = {0:0,1:f.fileno(),2:f.fileno()}
+	def pre_exec():
+		os.chdir(env["S"])
+	spawn([func] + args.split(), env=env,
+		fd_pipes=fd_pipes, pre_exec=pre_exec)
+	f.close()
+
+def create_portage_wrapper(bin):
+	def derived_func(*args):
+		newargs = list(args)
+		newargs.insert(0, bin)
+		return portage_func(*newargs)
+	return derived_func
+
+for bin in os.listdir(os.path.join(bindir, "ebuild-helpers")):
+	if bin.startswith("do") or \
+	   bin.startswith("new") or \
+	   bin.startswith("prep") or \
+	   bin in ["ecompress","ecompressdir","fowners","fperms"]:
+		globals()[bin] = create_portage_wrapper(
+			os.path.join(bindir, "ebuild-helpers", bin))
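
Note: the loop above generates one module-level wrapper per ebuild helper, so
test modules can import and call helpers directly. A minimal sketch of the
intended flow (the file names are hypothetical):

    from portage.tests.bin.setup_env import BinTestCase, dobin, exists_in_D

    # inside a BinTestCase method, after self.init():
    #   dobin("some-file")                 # spawns bin/ebuild-helpers/dobin in $S
    #   exists_in_D("/usr/bin/some-file")  # then inspects the image dir

portage_func() discards helper output on purpose: only the resulting state of
$D is inspected by these tests.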

diff --git a/portage_with_autodep/pym/portage/tests/bin/test_dobin.py b/portage_with_autodep/pym/portage/tests/bin/test_dobin.py
new file mode 100644
index 0000000..6f50d7a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/bin/test_dobin.py
@@ -0,0 +1,16 @@
+# test_dobin.py -- Portage Unit Testing Functionality
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests.bin.setup_env import BinTestCase, dobin, xexists_in_D
+
+class DoBin(BinTestCase):
+	def testDoBin(self):
+		self.init()
+		try:
+			dobin("does-not-exist", 1)
+			xexists_in_D("does-not-exist")
+			xexists_in_D("/bin/does-not-exist")
+			xexists_in_D("/usr/bin/does-not-exist")
+		finally:
+			self.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/bin/test_dodir.py b/portage_with_autodep/pym/portage/tests/bin/test_dodir.py
new file mode 100644
index 0000000..f4eb9b2
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/bin/test_dodir.py
@@ -0,0 +1,16 @@
+# test_dodir.py -- Portage Unit Testing Functionality
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests.bin.setup_env import BinTestCase, dodir, exists_in_D
+
+class DoDir(BinTestCase):
+	def testDoDir(self):
+		self.init()
+		try:
+			dodir("usr /usr")
+			exists_in_D("/usr")
+			dodir("/var/lib/moocow")
+			exists_in_D("/var/lib/moocow")
+		finally:
+			self.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/dbapi/__init__.py b/portage_with_autodep/pym/portage/tests/dbapi/__init__.py
new file mode 100644
index 0000000..532918b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dbapi/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/dbapi/__test__ b/portage_with_autodep/pym/portage/tests/dbapi/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/dbapi/test_fakedbapi.py b/portage_with_autodep/pym/portage/tests/dbapi/test_fakedbapi.py
new file mode 100644
index 0000000..a2c5f77
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dbapi/test_fakedbapi.py
@@ -0,0 +1,58 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import tempfile
+
+from portage import os
+from portage.dbapi.virtual import fakedbapi
+from portage.package.ebuild.config import config
+from portage.tests import TestCase
+
+class TestFakedbapi(TestCase):
+
+	def testFakedbapi(self):
+		packages = (
+			("sys-apps/portage-2.1.10", {
+				"EAPI"         : "2",
+				"IUSE"         : "ipc doc",
+				"repository"   : "gentoo",
+				"SLOT"         : "0",
+				"USE"          : "ipc missing-iuse",
+			}),
+			("virtual/package-manager-0", {
+				"EAPI"         : "0",
+				"repository"   : "gentoo",
+				"SLOT"         : "0",
+			}),
+		)
+
+		match_tests = (
+			("sys-apps/portage:0[ipc]",             ["sys-apps/portage-2.1.10"]),
+			("sys-apps/portage:0[-ipc]",            []),
+			("sys-apps/portage:0[doc]",             []),
+			("sys-apps/portage:0[-doc]",            ["sys-apps/portage-2.1.10"]),
+			("sys-apps/portage:0",                  ["sys-apps/portage-2.1.10"]),
+			("sys-apps/portage:0[missing-iuse]",    []),
+			("sys-apps/portage:0[-missing-iuse]",   []),
+			("sys-apps/portage:0::gentoo[ipc]",     ["sys-apps/portage-2.1.10"]),
+			("sys-apps/portage:0::multilib[ipc]",   []),
+			("virtual/package-manager",             ["virtual/package-manager-0"]),
+		)
+
+		tempdir = tempfile.mkdtemp()
+		try:
+			portdir = os.path.join(tempdir, "usr/portage")
+			os.makedirs(portdir)
+			env = {
+				"PORTDIR": portdir,
+			}
+			fakedb = fakedbapi(settings=config(config_profile_path="",
+				env=env, _eprefix=tempdir))
+			for cpv, metadata in packages:
+				fakedb.cpv_inject(cpv, metadata=metadata)
+
+			for atom, expected_result in match_tests:
+				self.assertEqual( fakedb.match(atom), expected_result )
+		finally:
+			shutil.rmtree(tempdir)

diff --git a/portage_with_autodep/pym/portage/tests/dep/__init__.py b/portage_with_autodep/pym/portage/tests/dep/__init__.py
new file mode 100644
index 0000000..9c3f524
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.dep/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/dep/__test__ b/portage_with_autodep/pym/portage/tests/dep/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/dep/testAtom.py b/portage_with_autodep/pym/portage/tests/dep/testAtom.py
new file mode 100644
index 0000000..092cacf
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/testAtom.py
@@ -0,0 +1,315 @@
+# Copyright 2006, 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import Atom
+from portage.exception import InvalidAtom
+
+class TestAtom(TestCase):
+
+	def testAtom(self):
+
+		tests = (
+			( "=sys-apps/portage-2.1-r1:0[doc,a=,!b=,c?,!d?,-e]",
+				('=',  'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', None), False, False ),
+			( "=sys-apps/portage-2.1-r1*:0[doc]",
+				('=*',  'sys-apps/portage', '2.1-r1', '0', '[doc]', None), False, False ),
+			( "sys-apps/portage:0[doc]",
+				(None,  'sys-apps/portage', None, '0', '[doc]', None), False, False ),
+			( "*/*",
+				(None,  '*/*', None, None, None, None), True, False ),
+			( "sys-apps/*",
+				(None,  'sys-apps/*', None, None, None, None), True, False ),
+			( "*/portage",
+				(None,  '*/portage', None, None, None, None), True, False ),
+			( "s*s-*/portage:1",
+				(None,  's*s-*/portage', None, '1', None, None), True, False ),
+			( "*/po*ge:2",
+				(None,  '*/po*ge', None, '2', None, None), True, False ),
+			( "!dev-libs/A",
+				(None,  'dev-libs/A', None, None, None, None), True, True ),
+			( "!!dev-libs/A",
+				(None,  'dev-libs/A', None, None, None, None), True, True ),
+			( "dev-libs/A[foo(+)]",
+				(None,  'dev-libs/A', None, None, "[foo(+)]", None), True, True ),
+			( "dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+				(None,  'dev-libs/A', None, None, "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True ),
+			( "dev-libs/A:2[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+				(None,  'dev-libs/A', None, "2", "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True ),
+
+			( "=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]",
+				('=',  'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', 'repo_name'), False, True ),
+			( "=sys-apps/portage-2.1-r1*:0::repo_name[doc]",
+				('=*',  'sys-apps/portage', '2.1-r1', '0', '[doc]', 'repo_name'), False, True ),
+			( "sys-apps/portage:0::repo_name[doc]",
+				(None,  'sys-apps/portage', None, '0', '[doc]', 'repo_name'), False, True ),
+
+			( "*/*::repo_name",
+				(None,  '*/*', None, None, None, 'repo_name'), True, True ),
+			( "sys-apps/*::repo_name",
+				(None,  'sys-apps/*', None, None, None, 'repo_name'), True, True ),
+			( "*/portage::repo_name",
+				(None,  '*/portage', None, None, None, 'repo_name'), True, True ),
+			( "s*s-*/portage:1::repo_name",
+				(None,  's*s-*/portage', None, '1', None, 'repo_name'), True, True ),
+		)
+		
+		tests_xfail = (
+			( Atom("sys-apps/portage"), False, False ),
+			( "cat/pkg[a!]", False, False ),
+			( "cat/pkg[!a]", False, False ),
+			( "cat/pkg[!a!]", False, False ),
+			( "cat/pkg[!a-]", False, False ),
+			( "cat/pkg[-a=]", False, False ),
+			( "cat/pkg[-a?]", False, False ),
+			( "cat/pkg[-a!]", False, False ),
+			( "cat/pkg[=a]", False, False ),
+			( "cat/pkg[=a=]", False, False ),
+			( "cat/pkg[=a?]", False, False ),
+			( "cat/pkg[=a!]", False, False ),
+			( "cat/pkg[=a-]", False, False ),
+			( "cat/pkg[?a]", False, False ),
+			( "cat/pkg[?a=]", False, False ),
+			( "cat/pkg[?a?]", False, False ),
+			( "cat/pkg[?a!]", False, False ),
+			( "cat/pkg[?a-]", False, False ),
+			( "sys-apps/portage[doc]:0", False, False ),
+			( "*/*", False, False ),
+			( "sys-apps/*", False, False ),
+			( "*/portage", False, False ),
+			( "*/**", True, False ),
+			( "*/portage[use]", True, False ),
+			( "cat/pkg[a()]", False, False ),
+			( "cat/pkg[a(]", False, False ),
+			( "cat/pkg[a)]", False, False ),
+			( "cat/pkg[a(,b]", False, False ),
+			( "cat/pkg[a),b]", False, False ),
+			( "cat/pkg[a(*)]", False, False ),
+			( "cat/pkg[a(*)]", True, False ),
+			( "cat/pkg[a(+-)]", False, False ),
+			( "cat/pkg[a()]", False, False ),
+			( "cat/pkg[(+)a]", False, False ),
+			( "cat/pkg[a=(+)]", False, False ),
+			( "cat/pkg[!(+)a=]", False, False ),
+			( "cat/pkg[!a=(+)]", False, False ),
+			( "cat/pkg[a?(+)]", False, False ),
+			( "cat/pkg[!a?(+)]", False, False ),
+			( "cat/pkg[!(+)a?]", False, False ),
+			( "cat/pkg[-(+)a]", False, False ),
+			( "cat/pkg[a(+),-a]", False, False ),
+			( "cat/pkg[a(-),-a]", False, False ),
+			( "cat/pkg[-a,a(+)]", False, False ),
+			( "cat/pkg[-a,a(-)]", False, False ),
+			( "cat/pkg[-a(+),a(-)]", False, False ),
+			( "cat/pkg[-a(-),a(+)]", False, False ),
+			( "sys-apps/portage[doc]::repo_name", False, False ),
+			( "sys-apps/portage:0[doc]::repo_name", False, False ),
+			( "sys-apps/portage[doc]:0::repo_name", False, False ),
+			( "=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]", False, False ),
+			( "=sys-apps/portage-2.1-r1*:0::repo_name[doc]", False, False ),
+			( "sys-apps/portage:0::repo_name[doc]", False, False ),
+			( "*/*::repo_name", True, False ),
+		)
+
+		for atom, parts, allow_wildcard, allow_repo in tests:
+			a = Atom(atom, allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+			op, cp, ver, slot, use, repo = parts
+			self.assertEqual( op, a.operator,
+				msg="Atom('%s').operator = %s == '%s'" % ( atom, a.operator, op ) )
+			self.assertEqual( cp, a.cp,
+				msg="Atom('%s').cp = %s == '%s'" % ( atom, a.cp, cp ) )
+			if ver is not None:
+				cpv = "%s-%s" % (cp, ver)
+			else:
+				cpv = cp
+			self.assertEqual( cpv, a.cpv,
+				msg="Atom('%s').cpv = %s == '%s'" % ( atom, a.cpv, cpv ) )
+			self.assertEqual( slot, a.slot,
+				msg="Atom('%s').slot = %s == '%s'" % ( atom, a.slot, slot ) )
+			self.assertEqual( repo, a.repo,
+				msg="Atom('%s').repo == %s == '%s'" % ( atom, a.repo, repo ) )
+
+			if a.use:
+				returned_use = str(a.use)
+			else:
+				returned_use = None
+			self.assertEqual( use, returned_use,
+				msg="Atom('%s').use = %s == '%s'" % ( atom, returned_use, use ) )
+
+		for atom, allow_wildcard, allow_repo in tests_xfail:
+			self.assertRaisesMsg(atom, (InvalidAtom, TypeError), Atom, atom, \
+				allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+
+	def test_intersects(self):
+		test_cases = (
+			("dev-libs/A", "dev-libs/A", True),
+			("dev-libs/A", "dev-libs/B", False),
+			("dev-libs/A", "sci-libs/A", False),
+			("dev-libs/A[foo]", "sci-libs/A[bar]", False),
+			("dev-libs/A[foo(+)]", "sci-libs/A[foo(-)]", False),
+			("=dev-libs/A-1", "=dev-libs/A-1-r1", False),
+			("~dev-libs/A-1", "=dev-libs/A-1", False),
+			("=dev-libs/A-1:1", "=dev-libs/A-1", True),
+			("=dev-libs/A-1:1", "=dev-libs/A-1:1", True),
+			("=dev-libs/A-1:1", "=dev-libs/A-1:2", False),
+		)
+
+		for atom, other, expected_result in test_cases:
+			self.assertEqual(Atom(atom).intersects(Atom(other)), expected_result, \
+				"%s and %s should intersect: %s" % (atom, other, expected_result))
+
+	def test_violated_conditionals(self):
+		test_cases = (
+			("dev-libs/A", ["foo"], ["foo"], None, "dev-libs/A"),
+			("dev-libs/A[foo]", [], ["foo"], None, "dev-libs/A[foo]"),
+			("dev-libs/A[foo]", ["foo"], ["foo"], None, "dev-libs/A"),
+			("dev-libs/A[foo]", [], ["foo"], [], "dev-libs/A[foo]"),
+			("dev-libs/A[foo]", ["foo"], ["foo"], [], "dev-libs/A"),
+
+			("dev-libs/A:0[foo]", ["foo"], ["foo"], [], "dev-libs/A:0"),
+
+			("dev-libs/A[foo,-bar]", [], ["foo", "bar"], None, "dev-libs/A[foo]"),
+			("dev-libs/A[-foo,bar]", [], ["foo", "bar"], None, "dev-libs/A[bar]"),
+
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
+
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[!c=]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,b=,!c=]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,!e?]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,-f]"),
+
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], ["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c=]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], ["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a,!c=]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], ["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a,!c=]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], ["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a,!c=]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], ["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a,!c=]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], ["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a,!c=,-f]"),
+
+			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"], ["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c(+)=]"),
+			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"], ["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a(-),!c(-)=]"),
+			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"], ["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a(+),!c(+)=]"),
+			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], ["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a(-),!c(-)=]"),
+			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"], ["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a(+),!c(+)=]"),
+			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"], ["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
+
+			("dev-libs/A[a(+),b(+)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"], ["a"], ["a"], "dev-libs/A[b(+)=,!e(+)?]"),
+			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"], ["b"], ["b"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
+			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"], ["c"], ["c"], "dev-libs/A[!c(+)=,!e(+)?]"),
+			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], ["d"], ["d"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
+			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"], ["e"], ["e"], "dev-libs/A"),
+			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"], ["f"], ["f"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
+
+			#Some more test cases to trigger all remaining code paths
+			("dev-libs/B[x?]", [], ["x"], ["x"], "dev-libs/B[x?]"),
+			("dev-libs/B[x(+)?]", [], [], ["x"], "dev-libs/B"),
+			("dev-libs/B[x(-)?]", [], [], ["x"], "dev-libs/B[x(-)?]"),
+
+			("dev-libs/C[x=]", [], ["x"], ["x"], "dev-libs/C[x=]"),
+			("dev-libs/C[x(+)=]", [], [], ["x"], "dev-libs/C"),
+			("dev-libs/C[x(-)=]", [], [], ["x"], "dev-libs/C[x(-)=]"),
+
+			("dev-libs/D[!x=]", [], ["x"], ["x"], "dev-libs/D"),
+			("dev-libs/D[!x(+)=]", [], [], ["x"], "dev-libs/D[!x(+)=]"),
+			("dev-libs/D[!x(-)=]", [], [], ["x"], "dev-libs/D"),
+
+			#Missing IUSE test cases
+			("dev-libs/B[x]", [], [], [], "dev-libs/B[x]"),
+			("dev-libs/B[-x]", [], [], [], "dev-libs/B[-x]"),
+			("dev-libs/B[x?]", [], [], [], "dev-libs/B[x?]"),
+			("dev-libs/B[x=]", [], [], [], "dev-libs/B[x=]"),
+			("dev-libs/B[!x=]", [], [], ["x"], "dev-libs/B[!x=]"),
+			("dev-libs/B[!x?]", [], [], ["x"], "dev-libs/B[!x?]"),
+		)
+		
+		test_cases_xfail = (
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c", "d", "e", "f"], None),
+		)
+
+		class use_flag_validator(object):
+			def __init__(self, iuse):
+				self.iuse = iuse
+
+			def is_valid_flag(self, flag):
+				return flag in self.iuse
+
+		for atom, other_use, iuse, parent_use, expected_violated_atom in test_cases:
+			a = Atom(atom)
+			validator = use_flag_validator(iuse)
+			violated_atom = a.violated_conditionals(other_use, validator.is_valid_flag, parent_use)
+			if parent_use is None:
+				fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
+					(atom, " ".join(other_use), " ".join(iuse), "None", str(violated_atom), expected_violated_atom)
+			else:
+				fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
+					(atom, " ".join(other_use), " ".join(iuse), " ".join(parent_use), str(violated_atom), expected_violated_atom)
+			self.assertEqual(str(violated_atom), expected_violated_atom, fail_msg)
+
+		for atom, other_use, iuse, parent_use in test_cases_xfail:
+			a = Atom(atom)
+			validator = use_flag_validator(iuse)
+			self.assertRaisesMsg(atom, InvalidAtom, \
+				a.violated_conditionals, other_use, validator.is_valid_flag, parent_use)
+
+	def test_evaluate_conditionals(self):
+		test_cases = (
+			("dev-libs/A[foo]", [], "dev-libs/A[foo]"),
+			("dev-libs/A[foo]", ["foo"], "dev-libs/A[foo]"),
+
+			("dev-libs/A:0[foo=]", ["foo"], "dev-libs/A:0[foo]"),
+
+			("dev-libs/A[foo,-bar]", [], "dev-libs/A[foo,-bar]"),
+			("dev-libs/A[-foo,bar]", [], "dev-libs/A[-foo,bar]"),
+
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], "dev-libs/A[a,-b,c,-e,-f]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], "dev-libs/A[a,-b,c,-e,-f]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], "dev-libs/A[a,b,c,-e,-f]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], "dev-libs/A[a,-b,-c,-e,-f]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], "dev-libs/A[a,-b,c,d,-e,-f]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], "dev-libs/A[a,-b,c,-f]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], "dev-libs/A[a,-b,c,-e,-f]"),
+			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], "dev-libs/A[a(-),-b(+),c(-),d(+),-e(-),-f(+)]"),
+			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["f"], "dev-libs/A[a(+),-b(-),c(+),-e(+),-f(-)]"),
+		)
+
+		for atom, use, expected_atom in test_cases:
+			a = Atom(atom)
+			b = a.evaluate_conditionals(use)
+			self.assertEqual(str(b), expected_atom)
+			self.assertEqual(str(b.unevaluated_atom), atom)
+
+	def test__eval_qa_conditionals(self):
+		test_cases = (
+			("dev-libs/A[foo]", [], [], "dev-libs/A[foo]"),
+			("dev-libs/A[foo]", ["foo"], [], "dev-libs/A[foo]"),
+			("dev-libs/A[foo]", [], ["foo"], "dev-libs/A[foo]"),
+
+			("dev-libs/A:0[foo]", [], [], "dev-libs/A:0[foo]"),
+			("dev-libs/A:0[foo]", ["foo"], [], "dev-libs/A:0[foo]"),
+			("dev-libs/A:0[foo]", [], ["foo"], "dev-libs/A:0[foo]"),
+			("dev-libs/A:0[foo=]", [], ["foo"], "dev-libs/A:0[foo]"),
+
+			("dev-libs/A[foo,-bar]", ["foo"], ["bar"], "dev-libs/A[foo,-bar]"),
+			("dev-libs/A[-foo,bar]", ["foo", "bar"], [], "dev-libs/A[-foo,bar]"),
+
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a", "b", "c"], [], "dev-libs/A[a,-b,c,d,-e,-f]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c"], "dev-libs/A[a,b,-c,d,-e,-f]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d", "e", "f"], [], "dev-libs/A[a,b,-b,c,-c,-e,-f]"),
+			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["d", "e", "f"], "dev-libs/A[a,b,-b,c,-c,d,-f]"),
+			
+			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", \
+				["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a(-),-b(+),c(-),-e(-),-f(+)]"),
+			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", \
+				[], ["a", "b", "c", "d", "e", "f"], "dev-libs/A[a(+),b(-),-c(+),d(-),-f(-)]"),
+		)
+
+		for atom, use_mask, use_force, expected_atom in test_cases:
+			a = Atom(atom)
+			b = a._eval_qa_conditionals(use_mask, use_force)
+			self.assertEqual(str(b), expected_atom)
+			self.assertEqual(str(b.unevaluated_atom), atom)
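
Note: the parts tuples above map directly onto Atom attributes. A minimal
sketch of the accessors this test exercises:

    from portage.dep import Atom

    a = Atom("=sys-apps/portage-2.1-r1:0[doc]")
    a.operator   # '='
    a.cp         # 'sys-apps/portage'
    a.cpv        # 'sys-apps/portage-2.1-r1'
    a.slot       # '0'
    str(a.use)   # '[doc]'
    a.repo       # None here; pass allow_repo=True to accept a ::repo suffix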

diff --git a/portage_with_autodep/pym/portage/tests/dep/testCheckRequiredUse.py b/portage_with_autodep/pym/portage/tests/dep/testCheckRequiredUse.py
new file mode 100644
index 0000000..54791e0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/testCheckRequiredUse.py
@@ -0,0 +1,219 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import check_required_use
+from portage.exception import InvalidDependString
+
+class TestCheckRequiredUse(TestCase):
+
+	def testCheckRequiredUse(self):
+		test_cases = (
+			( "|| ( a b )", [], ["a", "b"], False),
+			( "|| ( a b )", ["a"], ["a", "b"], True),
+			( "|| ( a b )", ["b"], ["a", "b"], True),
+			( "|| ( a b )", ["a", "b"], ["a", "b"], True),
+
+			( "^^ ( a b )", [], ["a", "b"], False),
+			( "^^ ( a b )", ["a"], ["a", "b"], True),
+			( "^^ ( a b )", ["b"], ["a", "b"], True),
+			( "^^ ( a b )", ["a", "b"], ["a", "b"], False),
+
+			( "^^ ( || ( a b ) c )", [], ["a", "b", "c"], False),
+			( "^^ ( || ( a b ) c )", ["a"], ["a", "b", "c"], True),
+
+			( "^^ ( || ( ( a b ) ) ( c ) )", [], ["a", "b", "c"], False),
+			( "( ^^ ( ( || ( ( a ) ( b ) ) ) ( ( c ) ) ) )", ["a"], ["a", "b", "c"], True),
+
+			( "a || ( b c )", ["a"], ["a", "b", "c"], False),
+			( "|| ( b c ) a", ["a"], ["a", "b", "c"], False),
+
+			( "|| ( a b c )", ["a"], ["a", "b", "c"], True),
+			( "|| ( a b c )", ["b"], ["a", "b", "c"], True),
+			( "|| ( a b c )", ["c"], ["a", "b", "c"], True),
+
+			( "^^ ( a b c )", ["a"], ["a", "b", "c"], True),
+			( "^^ ( a b c )", ["b"], ["a", "b", "c"], True),
+			( "^^ ( a b c )", ["c"], ["a", "b", "c"], True),
+			( "^^ ( a b c )", ["a", "b"], ["a", "b", "c"], False),
+			( "^^ ( a b c )", ["b", "c"], ["a", "b", "c"], False),
+			( "^^ ( a b c )", ["a", "c"], ["a", "b", "c"], False),
+			( "^^ ( a b c )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+			( "a? ( ^^ ( b c ) )", [], ["a", "b", "c"], True),
+			( "a? ( ^^ ( b c ) )", ["a"], ["a", "b", "c"], False),
+			( "a? ( ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
+			( "a? ( ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+			( "a? ( ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
+			( "a? ( ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+			( "^^ ( a? ( !b ) !c? ( d ) )", [], ["a", "b", "c", "d"], False),
+			( "^^ ( a? ( !b ) !c? ( d ) )", ["a"], ["a", "b", "c", "d"], True),
+			( "^^ ( a? ( !b ) !c? ( d ) )", ["c"], ["a", "b", "c", "d"], True),
+			( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "c"], ["a", "b", "c", "d"], True),
+			( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "c"], ["a", "b", "c", "d"], False),
+			( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
+			( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "d"], ["a", "b", "c", "d"], False),
+
+			( "|| ( ^^ ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
+			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
+			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
+			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
+			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], True),
+			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
+			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+			( "^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
+			( "^^ ( || ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
+			( "^^ ( || ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], False),
+			( "^^ ( || ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+			( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], False),
+			( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], False),
+			( "^^ ( || ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
+			( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], True),
+
+			( "|| ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], True),
+			( "|| ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
+			( "|| ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
+			( "|| ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
+			( "|| ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
+			( "|| ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
+			( "|| ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
+			( "|| ( ( a b ) c )", [], ["a", "b", "c"], False),
+
+			( "^^ ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], False),
+			( "^^ ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
+			( "^^ ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
+			( "^^ ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
+			( "^^ ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
+			( "^^ ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
+			( "^^ ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
+			( "^^ ( ( a b ) c )", [], ["a", "b", "c"], False),
+		)
+
+		test_cases_xfail = (
+			( "^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b"]),
+			( "^^ ( || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
+			( "^^( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+			( "^^ || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
+			( "^^ ( ( || ) ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+			( "^^ ( || ( a b ) ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+		)
+
+		for required_use, use, iuse, expected in test_cases:
+			self.assertEqual(bool(check_required_use(required_use, use, iuse.__contains__)), \
+				expected, required_use + ", USE = " + " ".join(use))
+
+		for required_use, use, iuse in test_cases_xfail:
+			self.assertRaisesMsg(required_use + ", USE = " + " ".join(use), \
+				InvalidDependString, check_required_use, required_use, use, iuse.__contains__)
+
+	def testCheckRequiredUseFilterSatisfied(self):
+		"""
+		Test filtering of satisfied parts of REQUIRED_USE,
+		in order to reduce noise for bug #353234.
+		"""
+		test_cases = (
+			(
+				"bindist? ( !amr !faac !win32codecs ) cdio? ( !cdparanoia !cddb ) dvdnav? ( dvd )",
+				("cdio", "cdparanoia"),
+				"cdio? ( !cdparanoia )"
+			),
+			(
+				"|| ( !amr !faac !win32codecs ) cdio? ( !cdparanoia !cddb ) ^^ ( foo bar )",
+				["cdio", "cdparanoia", "foo"],
+				"cdio? ( !cdparanoia )"
+			),
+			(
+				"^^ ( || ( a b ) c )",
+				("a", "b", "c"),
+				"^^ ( || ( a b ) c )"
+			),
+			(
+				"^^ ( || ( ( a b ) ) ( c ) )",
+				("a", "b", "c"),
+				"^^ ( ( a b ) c )"
+			),
+			(
+				"a? ( ( c e ) ( b d ) )",
+				("a", "c", "e"),
+				"a? ( b d )"
+			),
+			(
+				"a? ( ( c e ) ( b d ) )",
+				("a", "b", "c", "e"),
+				"a? ( d )"
+			),
+			(
+				"a? ( ( c e ) ( c e b c d e c ) )",
+				("a", "c", "e"),
+				"a? ( b d )"
+			),
+			(
+				"^^ ( || ( a b ) ^^ ( b c ) )",
+				("a", "b"),
+				"^^ ( || ( a b ) ^^ ( b c ) )"
+			),
+			(
+				"^^ ( || ( a b ) ^^ ( b c ) )",
+				["a", "c"],
+				"^^ ( || ( a b ) ^^ ( b c ) )"
+			),
+			(
+				"^^ ( || ( a b ) ^^ ( b c ) )",
+				["b", "c"],
+				""
+			),
+			(
+				"^^ ( || ( a b ) ^^ ( b c ) )",
+				["a", "b", "c"],
+				""
+			),
+			(
+				"^^ ( ( a b c ) ( b c d ) )",
+				["a", "b", "c"],
+				""
+			),
+			(
+				"^^ ( ( a b c ) ( b c d ) )",
+				["a", "b", "c", "d"],
+				"^^ ( ( a b c ) ( b c d ) )"
+			),
+			(
+				"^^ ( ( a b c ) ( b c !d ) )",
+				["a", "b", "c"],
+				"^^ ( ( a b c ) ( b c !d ) )"
+			),
+			(
+				"^^ ( ( a b c ) ( b c !d ) )",
+				["a", "b", "c", "d"],
+				""
+			),
+			(
+				"( ( ( a ) ) ( ( ( b c ) ) ) )",
+				[""],
+				"a b c"
+			),
+			(
+				"|| ( ( ( ( a ) ) ( ( ( b c ) ) ) ) )",
+				[""],
+				"a b c"
+			),
+			(
+				"|| ( ( a ( ( ) ( ) ) ( ( ) ) ( b ( ) c ) ) )",
+				[""],
+				"a b c"
+			),
+			(
+				"|| ( ( a b c ) ) || ( ( d e f ) )",
+				[""],
+				"a b c d e f"
+			),
+		)
+		for required_use, use, expected in test_cases:
+			result = check_required_use(required_use, use, lambda k: True).tounicode()
+			self.assertEqual(result, expected,
+				"REQUIRED_USE = '%s', USE = '%s', '%s' != '%s'" % \
+				(required_use, " ".join(use), result, expected))
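
Note: check_required_use() evaluates a REQUIRED_USE string against the enabled
flags and returns an object that is false when the constraint is violated;
tounicode() renders whatever remains unsatisfied. A minimal sketch mirroring
the cases above:

    from portage.dep import check_required_use

    # exactly-one-of is violated when both flags are enabled
    result = check_required_use("^^ ( a b )", ["a", "b"], lambda flag: True)
    bool(result)        # False
    result.tounicode()  # renders the still-violated remainder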

diff --git a/portage_with_autodep/pym/portage/tests/dep/testExtendedAtomDict.py b/portage_with_autodep/pym/portage/tests/dep/testExtendedAtomDict.py
new file mode 100644
index 0000000..69d092e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/testExtendedAtomDict.py
@@ -0,0 +1,18 @@
+# testExtendedAtomDict.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import ExtendedAtomDict
+
+class TestExtendedAtomDict(TestCase):
+
+	def testExtendedAtomDict(self):
+		d = ExtendedAtomDict(dict)
+		d["*/*"] = { "test1": "x" }
+		d["dev-libs/*"] = { "test2": "y" }
+		d.setdefault("sys-apps/portage", {})["test3"] = "z"
+		self.assertEqual(d.get("dev-libs/A"), { "test1": "x", "test2": "y" })
+		self.assertEqual(d.get("sys-apps/portage"), { "test1": "x", "test3": "z" })
+		self.assertEqual(d["dev-libs/*"], { "test2": "y" })
+		self.assertEqual(d["sys-apps/portage"], {'test1': 'x', 'test3': 'z'})

diff --git a/portage_with_autodep/pym/portage/tests/dep/testExtractAffectingUSE.py b/portage_with_autodep/pym/portage/tests/dep/testExtractAffectingUSE.py
new file mode 100644
index 0000000..026a552
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/testExtractAffectingUSE.py
@@ -0,0 +1,75 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import extract_affecting_use
+from portage.exception import InvalidDependString
+
+class TestExtractAffectingUSE(TestCase):
+
+	def testExtractAffectingUSE(self):
+		test_cases = (
+			("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "A", ("a",)),
+			("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "B", ("b",)),
+			("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "C", ("c",)),
+			("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "D", ("d",)),
+			
+			("a? ( b? ( AB ) )", "AB", ("a", "b")),
+			("a? ( b? ( c? ( ABC ) ) )", "ABC", ("a", "b", "c")),
+
+			("a? ( A b? ( c? ( ABC ) AB ) )", "A", ("a",)),
+			("a? ( A b? ( c? ( ABC ) AB ) )", "AB", ("a", "b")),
+			("a? ( A b? ( c? ( ABC ) AB ) )", "ABC", ("a", "b", "c")),
+			("a? ( A b? ( c? ( ABC ) AB ) ) X", "X", []),
+			("X a? ( A b? ( c? ( ABC ) AB ) )", "X", []),
+
+			("ab? ( || ( A B ) )", "A", ("ab",)),
+			("!ab? ( || ( A B ) )", "B", ("ab",)),
+			("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "A", ("ab",)),
+			("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "B", ("ab", "b")),
+			("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "C", ("ab", "b")),
+
+			("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "A", ("ab",)),
+			("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "B", ("ab", "b")),
+			("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "C", ("ab", "b")),
+
+			("a? ( A )", "B", []),
+
+			("a? ( || ( A B ) )", "B", ["a"]),
+
+			# test USE dep defaults for bug #363073
+			("a? ( >=dev-lang/php-5.2[pcre(+)] )", ">=dev-lang/php-5.2[pcre(+)]", ["a"]),
+		)
+
+		test_cases_xfail = (
+			("? ( A )", "A"),
+			("!? ( A )", "A"),
+			("( A", "A"),
+			("A )", "A"),
+			
+			("||( A B )", "A"),
+			("|| (A B )", "A"),
+			("|| ( A B)", "A"),
+			("|| ( A B", "A"),
+			("|| A B )", "A"),
+			("|| A B", "A"),
+			("|| ( A B ) )", "A"),
+			("|| || B C", "A"),
+			("|| ( A B || )", "A"),
+			("a? A", "A"),
+			("( || ( || || ( A ) foo? ( B ) ) )", "A"),
+			("( || ( || bar? ( A ) foo? ( B ) ) )", "A"),
+		)
+
+		for dep, atom, expected in test_cases:
+			expected = set(expected)
+			result = extract_affecting_use(dep, atom, eapi="0")
+			fail_msg = "dep: " + dep + ", atom: " + atom + ", got: " + \
+				" ".join(sorted(result)) + ", expected: " + " ".join(sorted(expected))
+			self.assertEqual(result, expected, fail_msg)
+
+		for dep, atom in test_cases_xfail:
+			fail_msg = "dep: " + dep + ", atom: " + atom + \
+				", expected: InvalidDependString"
+			self.assertRaisesMsg(fail_msg, \
+				InvalidDependString, extract_affecting_use, dep, atom, eapi="0")
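
Note: extract_affecting_use() answers which USE conditionals guard a given
atom inside a dependency string. A minimal sketch taken from the cases above:

    from portage.dep import extract_affecting_use

    extract_affecting_use("a? ( b? ( AB ) )", "AB", eapi="0")
    # -> set(['a', 'b'])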

diff --git a/portage_with_autodep/pym/portage/tests/dep/testStandalone.py b/portage_with_autodep/pym/portage/tests/dep/testStandalone.py
new file mode 100644
index 0000000..e9f01df
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/testStandalone.py
@@ -0,0 +1,36 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import cpvequal
+from portage.exception import PortageException
+
+class TestStandalone(TestCase):
+	""" Test some small functions portage.dep
+	"""
+
+	def testCPVequal(self):
+
+		test_cases = (
+			( "sys-apps/portage-2.1","sys-apps/portage-2.1", True ),
+			( "sys-apps/portage-2.1","sys-apps/portage-2.0", False ),
+			( "sys-apps/portage-2.1","sys-apps/portage-2.1-r1", False ),
+			( "sys-apps/portage-2.1-r1","sys-apps/portage-2.1", False ),
+			( "sys-apps/portage-2.1_alpha3","sys-apps/portage-2.1", False ),
+			( "sys-apps/portage-2.1_alpha3_p6","sys-apps/portage-2.1_alpha3", False ),
+			( "sys-apps/portage-2.1","sys-apps/X-2.1", False ),
+			( "sys-apps/portage-2.1","portage-2.1", False ),
+		)
+		
+		test_cases_xfail = (
+			( "sys-apps/portage","sys-apps/portage" ),
+			( "sys-apps/portage-2.1-6","sys-apps/portage-2.1-6" ),
+		)
+
+		for cpv1, cpv2, expected_result in test_cases:
+			self.assertEqual(cpvequal(cpv1, cpv2), expected_result)
+
+		for cpv1, cpv2 in test_cases_xfail:
+			self.assertRaisesMsg("cpvequal("+cpv1+", "+cpv2+")", \
+				PortageException, cpvequal, cpv1, cpv2)
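
Note: cpvequal() compares two fully versioned cpv strings and raises
PortageException when either side lacks a valid version, which is what the
xfail cases assert. A minimal sketch:

    from portage.dep import cpvequal

    cpvequal("sys-apps/portage-2.1", "sys-apps/portage-2.1")     # True
    cpvequal("sys-apps/portage-2.1", "sys-apps/portage-2.1-r1")  # False
    cpvequal("sys-apps/portage", "sys-apps/portage")             # raises PortageException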

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_best_match_to_list.py b/portage_with_autodep/pym/portage/tests/dep/test_best_match_to_list.py
new file mode 100644
index 0000000..d050adc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_best_match_to_list.py
@@ -0,0 +1,43 @@
+# test_best_match_to_list.py -- Portage Unit Testing Functionality
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import Atom, best_match_to_list
+
+class Test_best_match_to_list(TestCase):
+
+	def best_match_to_list_wrapper(self, mypkg, mylist):
+		"""
+		This function uses best_match_to_list to create sorted
+		list of matching atoms.
+		"""
+		ret = []
+		while mylist:
+			m = best_match_to_list(mypkg, mylist)
+			if m is not None:
+				ret.append(m)
+				mylist.remove(m)
+			else:
+				break
+
+		return ret
+
+	def testBest_match_to_list(self):
+		tests = [
+					("dev-libs/A-1", [Atom("dev-libs/A"), Atom("=dev-libs/A-1")], \
+						[Atom("=dev-libs/A-1"), Atom("dev-libs/A")]),
+					("dev-libs/A-1", [Atom("dev-libs/B"), Atom("=dev-libs/A-1:0")], \
+						[Atom("=dev-libs/A-1:0")]),
+					("dev-libs/A-1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=dev-libs/A-1:0")], \
+						[Atom("=dev-libs/A-1:0"), Atom("dev-libs/*", allow_wildcard=True)]),
+					("dev-libs/A-1:0", [Atom("dev-*/*", allow_wildcard=True), Atom("dev-*/*:0", allow_wildcard=True),\
+						Atom("dev-libs/A"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A:0"), \
+						Atom("=dev-libs/A-1*"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1")], \
+						[Atom("=dev-libs/A-1"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1*"), \
+						Atom("dev-libs/A:0"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A"), \
+						Atom("dev-*/*:0", allow_wildcard=True), Atom("dev-*/*", allow_wildcard=True)])
+				]
+
+		for pkg, atom_list, result in tests:
+			self.assertEqual( self.best_match_to_list_wrapper( pkg, atom_list ), result )
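
Note: best_match_to_list() returns the single most specific atom in mylist
that matches mypkg, which is why the wrapper above can drain the list into a
specificity-sorted ordering. A minimal sketch:

    from portage.dep import Atom, best_match_to_list

    best_match_to_list("dev-libs/A-1",
        [Atom("dev-libs/A"), Atom("=dev-libs/A-1")])
    # -> Atom("=dev-libs/A-1"), the more specific of the two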

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_dep_getcpv.py b/portage_with_autodep/pym/portage/tests/dep/test_dep_getcpv.py
new file mode 100644
index 0000000..8a0a8aa
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_dep_getcpv.py
@@ -0,0 +1,35 @@
+# test_dep_getcpv.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getcpv
+
+class DepGetCPV(TestCase):
+	""" A simple testcase for isvalidatom
+	"""
+
+	def testDepGetCPV(self):
+		
+		prefix_ops = ["<", ">", "=", "~", "<=", 
+			      ">=", "!=", "!<", "!>", "!~"]
+
+		bad_prefix_ops = [ ">~", "<~", "~>", "~<" ]
+		postfix_ops = [ ("=", "*"), ]
+
+		cpvs = ["sys-apps/portage-2.1", "sys-apps/portage-2.1",
+				"sys-apps/portage-2.1"]
+		slots = [None, ":foo", ":2"]
+		for cpv in cpvs:
+			for slot in slots:
+				for prefix in prefix_ops:
+					mycpv = prefix + cpv
+					if slot:
+						mycpv += slot
+					self.assertEqual( dep_getcpv( mycpv ), cpv )
+
+				for prefix, postfix in postfix_ops:
+					mycpv = prefix + cpv + postfix
+					if slot:
+						mycpv += slot
+					self.assertEqual( dep_getcpv( mycpv ), cpv )
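
Note: dep_getcpv() strips the operator prefix, any '*' postfix and any :slot
suffix, leaving the bare cpv. A minimal sketch:

    from portage.dep import dep_getcpv

    dep_getcpv(">=sys-apps/portage-2.1")    # 'sys-apps/portage-2.1'
    dep_getcpv("=sys-apps/portage-2.1*:2")  # 'sys-apps/portage-2.1'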

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_dep_getrepo.py b/portage_with_autodep/pym/portage/tests/dep/test_dep_getrepo.py
new file mode 100644
index 0000000..78ead8c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_dep_getrepo.py
@@ -0,0 +1,29 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getrepo
+
+class DepGetRepo(TestCase):
+	""" A simple testcase for isvalidatom
+	"""
+
+	def testDepGetRepo(self):
+
+		repo_char = "::"
+		repos = ( "a", "repo-name", "repo_name", "repo123", None )
+		cpvs = ["sys-apps/portage"]
+		versions = ["2.1.1","2.1-r1", None]
+		uses = ["[use]", None]
+		for cpv in cpvs:
+			for version in versions:
+				for use in uses:
+					for repo in repos:
+						pkg = cpv
+						if version:
+							pkg = '=' + pkg + '-' + version
+						if repo is not None:
+							pkg = pkg + repo_char + repo
+						if use:
+							pkg = pkg + use
+						self.assertEqual( dep_getrepo( pkg ), repo )

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_dep_getslot.py b/portage_with_autodep/pym/portage/tests/dep/test_dep_getslot.py
new file mode 100644
index 0000000..206cecc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_dep_getslot.py
@@ -0,0 +1,28 @@
+# test_dep_getslot.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getslot
+
+class DepGetSlot(TestCase):
+	""" A simple testcase for isvalidatom
+	"""
+
+	def testDepGetSlot(self):
+
+		slot_char = ":"
+		slots = ( "a", "1.2", "1", "IloveVapier", None )
+		cpvs = ["sys-apps/portage"]
+		versions = ["2.1.1","2.1-r1"]
+		for cpv in cpvs:
+			for version in versions:
+				for slot in slots:
+					mycpv = cpv
+					if version:
+						mycpv = '=' + mycpv + '-' + version
+					if slot is not None:
+						self.assertEqual( dep_getslot( 
+							mycpv + slot_char + slot ), slot )
+					else:
+						self.assertEqual( dep_getslot( mycpv ), slot )
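
Note: dep_getslot(), like dep_getrepo() above, extracts an optional suffix
(:slot here, ::repo there) and returns None when it is absent. A minimal
sketch covering both helpers:

    from portage.dep import dep_getrepo, dep_getslot

    dep_getslot("=sys-apps/portage-2.1-r1:1")       # '1'
    dep_getslot("=sys-apps/portage-2.1-r1")         # None
    dep_getrepo("=sys-apps/portage-2.1.1::gentoo")  # 'gentoo'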

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_dep_getusedeps.py b/portage_with_autodep/pym/portage/tests/dep/test_dep_getusedeps.py
new file mode 100644
index 0000000..d2494f7
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_dep_getusedeps.py
@@ -0,0 +1,35 @@
+# test_dep_getusedeps.py -- Portage Unit Testing Functionality
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getusedeps
+
+from portage.tests import test_cps, test_slots, test_versions, test_usedeps
+
+class DepGetUseDeps(TestCase):
+	""" A simple testcase for dep_getusedeps
+	"""
+
+	def testDepGetUseDeps(self):
+
+		for mycpv in test_cps:
+			for version in test_versions:
+				for slot in test_slots:
+					for use in test_usedeps:
+						cpv = mycpv[:]
+						if version:
+							cpv += version
+						if slot:
+							cpv += ":" + slot
+						if isinstance(use, tuple):
+							cpv += "[%s]" % (",".join(use),)
+							self.assertEqual( dep_getusedeps(
+								cpv ), use )
+						else:
+							if len(use):
+								self.assertEqual( dep_getusedeps(
+									cpv + "[" + use + "]" ), (use,) )
+							else:
+								self.assertEqual( dep_getusedeps(
+									cpv + "[" + use + "]" ), () )

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_get_operator.py b/portage_with_autodep/pym/portage/tests/dep/test_get_operator.py
new file mode 100644
index 0000000..4f9848f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_get_operator.py
@@ -0,0 +1,33 @@
+# test_get_operator.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import get_operator
+
+class GetOperator(TestCase):
+
+	def testGetOperator(self):
+
+		# get_operator does not validate operators
+		tests = [ ( "~", "~" ), ( "=", "=" ), ( ">", ">" ),
+			  ( ">=", ">=" ), ( "<=", "<=" ),
+		]
+
+		test_cpvs = ["sys-apps/portage-2.1"]
+		slots = [ None,"1","linux-2.5.6" ]
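+		# e.g. get_operator(">=sys-apps/portage-2.1:1") -> ">=" and
+		# get_operator("sys-apps/portage") -> None.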
+		for cpv in test_cpvs:
+			for test in tests:
+				for slot in slots:
+					atom = cpv[:]
+					if slot:
+						atom += ":" + slot
+					result = get_operator( test[0] + atom )
+					self.assertEqual( result, test[1],
+						msg="get_operator(%s) != %s" % (test[0] + atom, test[1]) )
+
+		result = get_operator( "sys-apps/portage" )
+		self.assertEqual( result, None )
+
+		result = get_operator( "=sys-apps/portage-2.1*" )
+		self.assertEqual( result , "=*" )

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_get_required_use_flags.py b/portage_with_autodep/pym/portage/tests/dep/test_get_required_use_flags.py
new file mode 100644
index 0000000..06f8110
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_get_required_use_flags.py
@@ -0,0 +1,45 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import get_required_use_flags
+from portage.exception import InvalidDependString
+
+class TestCheckRequiredUse(TestCase):
+
+	def testCheckRequiredUse(self):
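+		# get_required_use_flags() should return the set of all USE
+		# flags referenced in a REQUIRED_USE string, including flags
+		# that only appear as conditionals, e.g. 'a' in 'a? ( b )'.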
+		test_cases = (
+			("a b c", ["a", "b", "c"]),
+
+			("|| ( a b c )", ["a", "b", "c"]),
+			("^^ ( a b c )", ["a", "b", "c"]),
+
+			("|| ( a b ^^ ( d e f ) )", ["a", "b", "d", "e", "f"]),
+			("^^ ( a b || ( d e f ) )", ["a", "b", "d", "e", "f"]),
+
+			("( ^^ ( a ( b ) ( || ( ( d e ) ( f ) ) ) ) )", ["a", "b", "d", "e", "f"]),
+
+			("a? ( ^^ ( b c ) )", ["a", "b", "c"]),
+			("a? ( ^^ ( !b !d? ( c ) ) )", ["a", "b", "c", "d"]),
+		)
+
+		test_cases_xfail = (
+			("^^ ( || ( a b ) ^^ ( b c )"),
+			("^^( || ( a b ) ^^ ( b c ) )"),
+			("^^ || ( a b ) ^^ ( b c )"),
+			("^^ ( ( || ) ( a b ) ^^ ( b c ) )"),
+			("^^ ( || ( a b ) ) ^^ ( b c ) )"),
+		)
+
+		for required_use, expected in test_cases:
+			result = get_required_use_flags(required_use)
+			expected = set(expected)
+			self.assertEqual(result, expected, \
+				"REQUIRED_USE: '%s', expected: '%s', got: '%s'" % (required_use, expected, result))
+
+		for required_use in test_cases_xfail:
+			self.assertRaisesMsg("REQUIRED_USE: '%s'" % (required_use,), \
+				InvalidDependString, get_required_use_flags, required_use)

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_isjustname.py b/portage_with_autodep/pym/portage/tests/dep/test_isjustname.py
new file mode 100644
index 0000000..c16fb54
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_isjustname.py
@@ -0,0 +1,24 @@
+# test_isjustname.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import isjustname
+
+class IsJustName(TestCase):
+
+	def testIsJustName(self):
+
+		cats = ( "", "sys-apps/", "foo/", "virtual/" )
+		pkgs = ( "portage", "paludis", "pkgcore", "notARealPkg" )
+		vers = ( "", "-2.0-r3", "-1.0_pre2", "-3.1b" )
+
+		for pkg in pkgs:
+			for cat in cats:
+				for ver in vers:
+					if len(ver):
+						self.assertFalse( isjustname( cat + pkg + ver ),
+						msg="isjustname(%s) is True!" % (cat + pkg + ver) )
+					else:
+						self.assertTrue( isjustname( cat + pkg + ver ),
+						msg="isjustname(%s) is False!" % (cat + pkg + ver) )

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_isvalidatom.py b/portage_with_autodep/pym/portage/tests/dep/test_isvalidatom.py
new file mode 100644
index 0000000..173ab0d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_isvalidatom.py
@@ -0,0 +1,137 @@
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import isvalidatom
+
+class IsValidAtomTestCase(object):
+	def __init__(self, atom, expected, allow_wildcard=False, allow_repo=False):
+		self.atom = atom
+		self.expected = expected
+		self.allow_wildcard = allow_wildcard
+		self.allow_repo = allow_repo
+
+class IsValidAtom(TestCase):
+
+	def testIsValidAtom(self):
+
+		test_cases = (
+			IsValidAtomTestCase("sys-apps/portage", True),
+			IsValidAtomTestCase("=sys-apps/portage-2.1", True),
+			IsValidAtomTestCase("=sys-apps/portage-2.1*", True),
+			IsValidAtomTestCase(">=sys-apps/portage-2.1", True),
+			IsValidAtomTestCase("<=sys-apps/portage-2.1", True),
+			IsValidAtomTestCase(">sys-apps/portage-2.1", True),
+			IsValidAtomTestCase("<sys-apps/portage-2.1", True),
+			IsValidAtomTestCase("~sys-apps/portage-2.1", True),
+			IsValidAtomTestCase("sys-apps/portage:foo", True),
+			IsValidAtomTestCase("sys-apps/portage-2.1:foo", False),
+			IsValidAtomTestCase("sys-apps/portage-2.1:", False),
+			IsValidAtomTestCase("sys-apps/portage-2.1:[foo]", False),
+
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar?,!baz?,!doc=,build=]", True),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[doc?]", True),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc?]", True),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[doc=]", True),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc=]", True),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc]", False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc]", False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc=]", False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc?]", False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc?]", False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc=]", False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc!=]", False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc=]", False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar][-baz][doc?][!build?]", False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?]", True),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?,]", False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[,bar,-baz,doc?,!build?]", False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz][doc?,!build?]", False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar][doc,build]", False),
+			IsValidAtomTestCase(">~cate-gory/foo-1.0", False),
+			IsValidAtomTestCase(">~category/foo-1.0", False),
+			IsValidAtomTestCase("<~category/foo-1.0", False),
+			IsValidAtomTestCase("###cat/foo-1.0", False),
+			IsValidAtomTestCase("~sys-apps/portage", False),
+			IsValidAtomTestCase("portage", False),
+			IsValidAtomTestCase("=portage", False),
+			IsValidAtomTestCase(">=portage-2.1", False),
+			IsValidAtomTestCase("~portage-2.1", False),
+			IsValidAtomTestCase("=portage-2.1*", False),
+			IsValidAtomTestCase("null/portage", True),
+			IsValidAtomTestCase("null/portage*:0", False),
+			IsValidAtomTestCase(">=null/portage-2.1", True),
+			IsValidAtomTestCase(">=null/portage", False),
+			IsValidAtomTestCase(">null/portage", False),
+			IsValidAtomTestCase("=null/portage*", False),
+			IsValidAtomTestCase("=null/portage", False),
+			IsValidAtomTestCase("~null/portage", False),
+			IsValidAtomTestCase("<=null/portage", False),
+			IsValidAtomTestCase("<null/portage", False),
+			IsValidAtomTestCase("~null/portage-2.1", True),
+			IsValidAtomTestCase("=null/portage-2.1*", True),
+			IsValidAtomTestCase("null/portage-2.1*", False),
+			IsValidAtomTestCase("app-doc/php-docs-20071125", False),
+			IsValidAtomTestCase("app-doc/php-docs-20071125-r2", False),
+			IsValidAtomTestCase("foo/-z-1", False),
+
+			# These are invalid because pkg name must not end in hyphen
+			# followed by numbers
+			IsValidAtomTestCase("=foo/bar-1-r1-1-r1", False),
+			IsValidAtomTestCase("=foo/bar-123-1", False),
+			IsValidAtomTestCase("=foo/bar-123-1*", False),
+			IsValidAtomTestCase("foo/bar-123", False),
+			IsValidAtomTestCase("=foo/bar-123-1-r1", False),
+			IsValidAtomTestCase("=foo/bar-123-1-r1*", False),
+			IsValidAtomTestCase("foo/bar-123-r1", False),
+			IsValidAtomTestCase("foo/bar-1", False),
+
+			IsValidAtomTestCase("=foo/bar--baz-1-r1", True),
+			IsValidAtomTestCase("=foo/bar-baz--1-r1", True),
+			IsValidAtomTestCase("=foo/bar-baz---1-r1", True),
+			IsValidAtomTestCase("=foo/bar-baz---1", True),
+			IsValidAtomTestCase("=foo/bar-baz-1--r1", False),
+			IsValidAtomTestCase("games-strategy/ufo2000", True),
+			IsValidAtomTestCase("~games-strategy/ufo2000-0.1", True),
+			IsValidAtomTestCase("=media-libs/x264-20060810", True),
+			IsValidAtomTestCase("foo/b", True),
+			IsValidAtomTestCase("app-text/7plus", True),
+			IsValidAtomTestCase("foo/666", True),
+			IsValidAtomTestCase("=dev-libs/poppler-qt3-0.11*", True),
+
+			 #Testing atoms with repositories
+			IsValidAtomTestCase("sys-apps/portage::repo_123-name", True, allow_repo=True),
+			IsValidAtomTestCase("=sys-apps/portage-2.1::repo", True, allow_repo=True),
+			IsValidAtomTestCase("=sys-apps/portage-2.1*::repo", True, allow_repo=True),
+			IsValidAtomTestCase("sys-apps/portage:foo::repo", True, allow_repo=True),
+			IsValidAtomTestCase("sys-apps/portage-2.1:foo::repo", False, allow_repo=True),
+			IsValidAtomTestCase("sys-apps/portage-2.1:::repo", False, allow_repo=True),
+			IsValidAtomTestCase("sys-apps/portage-2.1:::repo[foo]", False, allow_repo=True),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[bar?,!baz?,!doc=,build=]", True, allow_repo=True),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[doc?]", True, allow_repo=True),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[!doc]", False, allow_repo=True),
+			IsValidAtomTestCase("###cat/foo-1.0::repo", False, allow_repo=True),
+			IsValidAtomTestCase("~sys-apps/portage::repo", False, allow_repo=True),
+			IsValidAtomTestCase("portage::repo", False, allow_repo=True),
+			IsValidAtomTestCase("=portage::repo", False, allow_repo=True),
+			IsValidAtomTestCase("null/portage::repo", True, allow_repo=True),
+			IsValidAtomTestCase("app-doc/php-docs-20071125::repo", False, allow_repo=True),
+			IsValidAtomTestCase("=foo/bar-1-r1-1-r1::repo", False, allow_repo=True),
+
+			IsValidAtomTestCase("sys-apps/portage::repo_123-name", False, allow_repo=False),
+			IsValidAtomTestCase("=sys-apps/portage-2.1::repo", False, allow_repo=False),
+			IsValidAtomTestCase("=sys-apps/portage-2.1*::repo", False, allow_repo=False),
+			IsValidAtomTestCase("sys-apps/portage:foo::repo", False, allow_repo=False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[bar?,!baz?,!doc=,build=]", False, allow_repo=False),
+			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[doc?]", False, allow_repo=False),
+			IsValidAtomTestCase("null/portage::repo", False, allow_repo=False),
+		)
+
+		for test_case in test_cases:
+			if test_case.expected:
+				atom_type = "valid"
+			else:
+				atom_type = "invalid"
+			self.assertEqual( bool(isvalidatom(test_case.atom, allow_wildcard=test_case.allow_wildcard, \
+				allow_repo=test_case.allow_repo)), test_case.expected,
+				msg="isvalidatom(%s) != %s" % ( test_case.atom, test_case.expected ) )

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_match_from_list.py b/portage_with_autodep/pym/portage/tests/dep/test_match_from_list.py
new file mode 100644
index 0000000..afba414
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_match_from_list.py
@@ -0,0 +1,110 @@
+# Copyright 2006, 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.tests import TestCase
+from portage.dep import Atom, match_from_list, _repo_separator
+from portage.versions import catpkgsplit
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+class Package(object):
+	"""
+	Provides a minimal subset of attributes of _emerge.Package.Package
+	"""
+	def __init__(self, atom):
+		atom = Atom(atom, allow_repo=True)
+		self.cp = atom.cp
+		self.cpv = atom.cpv
+		self.cpv_split = catpkgsplit(self.cpv)
+		self.slot = atom.slot
+		self.repo = atom.repo
+		if atom.use:
+			self.use = self._use_class(atom.use.enabled)
+			self.iuse = self._iuse_class(atom.use.required)
+		else:
+			self.use = self._use_class([])
+			self.iuse = self._iuse_class([])
+
+	class _use_class(object):
+		def __init__(self, use):
+			self.enabled = frozenset(use)
+
+	class _iuse_class(object):
+		def __init__(self, iuse):
+			self.all = frozenset(iuse)
+
+		def is_valid_flag(self, flags):
+			if isinstance(flags, basestring):
+				flags = [flags]
+			for flag in flags:
+				if flag not in self.all:
+					return False
+			return True
+
+class Test_match_from_list(TestCase):
+
+	def testMatch_from_list(self):
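+		# Each case is (atom, list of cpv strings or Package objects,
+		# expected list of matches).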
+		tests = (
+			("=sys-apps/portage-45*", [], [] ),
+			("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+			("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+			("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+			("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+			("=sys-apps/portage-045", ["sys-apps/portage-046"], [] ),
+			("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"] ),
+			("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], [] ),
+			("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+			("<=sys-apps/portage-045", ["sys-apps/portage-046"], [] ),
+			("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+			("<sys-apps/portage-046", ["sys-apps/portage-046"], [] ),
+			(">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+			(">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], [] ),
+			(">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
+			(">sys-apps/portage-047", ["sys-apps/portage-046-r1"], [] ),
+			("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"] ),
+			("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], [] ),
+			("=sys-fs/udev-1*", ["sys-fs/udev-123"], ["sys-fs/udev-123"]),
+			("=sys-fs/udev-4*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
+			("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
+			("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
+			("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
+			("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"] ),
+			("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"] ),
+			("dev-libs/*", ["sys-apps/portage-2.1.2"], [] ),
+			("*/tar", ["sys-apps/portage-2.1.2"], [] ),
+			("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"] ),
+			("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"] ),
+
+			("dev-libs/A[foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-1"] ),
+			("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-2"] ),
+			("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")], [] ),
+			("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], [] ),
+			("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")], [] ),
+			("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")], ["dev-libs/A-2"] ),
+			("dev-libs/A[foo,bar(+)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"] ),
+			("dev-libs/A[foo,bar(-)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], [] ),
+			("dev-libs/A[foo,-bar(-)]", [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"] ),
+
+			("dev-libs/A::repo1", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo1"] ),
+			("dev-libs/A::repo2", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo2"] ),
+			("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[foo]"), Package("=dev-libs/A-1::repo2[-foo]")], [] ),
+			("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[-foo]"), Package("=dev-libs/A-1::repo2[foo]")], ["dev-libs/A-1::repo2"] ),
+			("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:1::repo1"), Package("=dev-libs/A-1:2::repo2")], [] ),
+			("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:2::repo1"), Package("=dev-libs/A-1:1::repo2[foo]")], ["dev-libs/A-1::repo2"] ),
+		)
+
+		for atom, cpv_list, expected_result in tests:
+			result = []
+			for pkg in match_from_list( atom, cpv_list ):
+				if isinstance(pkg, Package):
+					if pkg.repo:
+						result.append(pkg.cpv + _repo_separator + pkg.repo)
+					else:
+						result.append(pkg.cpv)
+				else:
+					result.append(pkg)
+			self.assertEqual( result, expected_result )

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_paren_reduce.py b/portage_with_autodep/pym/portage/tests/dep/test_paren_reduce.py
new file mode 100644
index 0000000..9a147a0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_paren_reduce.py
@@ -0,0 +1,66 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import paren_reduce
+from portage.exception import InvalidDependString
+
+class TestParenReduce(TestCase):
+
+	def testParenReduce(self):
+
+		test_cases = (
+			( "A", ["A"]),
+			( "( A )", ["A"]),
+			( "|| ( A B )", [ "||", ["A", "B"] ]),
+			( "|| ( A || ( B C ) )", [ "||", ["A", "||", ["B", "C"]]]),
+			( "|| ( A || ( B C D ) )", [ "||", ["A", "||", ["B", "C", "D"]] ]),
+			( "|| ( A || ( B || ( C D ) E ) )", [ "||", ["A", "||", ["B", "||", ["C", "D"], "E"]] ]),
+			( "a? ( A )", ["a?", ["A"]]),
+			
+			( "( || ( ( ( A ) B ) ) )", ["A", "B"]),
+			( "( || ( || ( ( A ) B ) ) )", [ "||", ["A", "B"] ]),
+			( "|| ( A )", ["A"]),
+			( "( || ( || ( || ( A ) foo? ( B ) ) ) )", [ "||", ["A", "foo?", ["B"] ]]),
+			( "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )", [ "||", ["bar?", ["A"], "foo?", ["B"] ]]),
+			( "A || ( ) foo? ( ) B", ["A", "B"]),
+
+			( "|| ( A ) || ( B )", ["A", "B"]),
+			( "foo? ( A ) foo? ( B )", ["foo?", ["A"], "foo?", ["B"]]),
+
+			( "|| ( ( A B ) C )", [ "||", [ ["A", "B"], "C"] ]),
+			( "|| ( ( A B ) ( C ) )", [ "||", [ ["A", "B"], "C"] ]),
+			# test USE dep defaults for bug #354003
+			( ">=dev-lang/php-5.2[pcre(+)]", [ ">=dev-lang/php-5.2[pcre(+)]" ]),
+		)
+		
+		test_cases_xfail = (
+			"( A",
+			"A )",
+
+			"||( A B )",
+			"|| (A B )",
+			"|| ( A B)",
+			"|| ( A B",
+			"|| A B )",
+
+			"|| A B",
+			"|| ( A B ) )",
+			"|| || B C",
+			
+			"|| ( A B || )",
+			
+			"a? A",
+			
+			( "( || ( || || ( A ) foo? ( B ) ) )"),
+			( "( || ( || bar? ( A ) foo? ( B ) ) )"),
+		)
+
+		for dep_str, expected_result in test_cases:
+			self.assertEqual(paren_reduce(dep_str), expected_result,
+				"input: '%s' result: %s != %s" % (dep_str,
+				paren_reduce(dep_str), expected_result))
+
+		for dep_str in test_cases_xfail:
+			self.assertRaisesMsg(dep_str,
+				InvalidDependString, paren_reduce, dep_str)

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_use_reduce.py b/portage_with_autodep/pym/portage/tests/dep/test_use_reduce.py
new file mode 100644
index 0000000..1618430
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/dep/test_use_reduce.py
@@ -0,0 +1,638 @@
+# Copyright 2009-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.exception import InvalidDependString
+from portage.dep import Atom, use_reduce
+
+class UseReduceTestCase(object):
+	def __init__(self, deparray, uselist=[], masklist=[], \
+		matchall=0, excludeall=[], is_src_uri=False, \
+		eapi="0", opconvert=False, flat=False, expected_result=None, \
+			is_valid_flag=None, token_class=None):
+		self.deparray = deparray
+		self.uselist = uselist
+		self.masklist = masklist
+		self.matchall = matchall
+		self.excludeall = excludeall
+		self.is_src_uri = is_src_uri
+		self.eapi = eapi
+		self.opconvert = opconvert
+		self.flat = flat
+		self.is_valid_flag = is_valid_flag
+		self.token_class = token_class
+		self.expected_result = expected_result
+
+	def run(self):
+		try:
+			return use_reduce(self.deparray, self.uselist, self.masklist, \
+				self.matchall, self.excludeall, self.is_src_uri, self.eapi, \
+				self.opconvert, self.flat, self.is_valid_flag, self.token_class)
+		except InvalidDependString as e:
+			raise InvalidDependString("%s: %s" % (e, self.deparray))
+
+class UseReduce(TestCase):
+
+	def always_true(self, unused_parameter):
+		return True
+
+	def always_false(self, unused_parameter):
+		return False
+
+	def testUseReduce(self):
+
+		EAPI_WITH_SRC_URI_ARROWS = "2"
+		EAPI_WITHOUT_SRC_URI_ARROWS = "0"
+
+		test_cases = (
+			UseReduceTestCase(
+				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+				uselist = ["a", "b", "c", "d"],
+				expected_result = ["A", "B"]
+				),
+			UseReduceTestCase(
+				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+				uselist = ["a", "b", "c"],
+				expected_result = ["A", "B", "D"]
+				),
+			UseReduceTestCase(
+				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+				uselist = ["b", "c"],
+				expected_result = ["B", "D"]
+				),
+
+			UseReduceTestCase(
+				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+				matchall = True,
+				expected_result = ["A", "B", "C", "D"]
+				),
+			UseReduceTestCase(
+				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+				masklist = ["a", "c"],
+				expected_result = ["C", "D"]
+				),
+			UseReduceTestCase(
+				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+				matchall = True,
+				masklist = ["a", "c"],
+				expected_result = ["B", "C", "D"]
+				),
+			UseReduceTestCase(
+				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+				uselist = ["a", "b"],
+				masklist = ["a", "c"],
+				expected_result = ["B", "C", "D"]
+				),
+			UseReduceTestCase(
+				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+				excludeall = ["a", "c"],
+				expected_result = ["D"]
+				),
+			UseReduceTestCase(
+				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+				uselist = ["b"],
+				excludeall = ["a", "c"],
+				expected_result = ["B", "D"]
+				),
+			UseReduceTestCase(
+				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+				matchall = True,
+				excludeall = ["a", "c"],
+				expected_result = ["A", "B", "D"]
+				),
+			UseReduceTestCase(
+				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+				matchall = True,
+				excludeall = ["a", "c"],
+				masklist = ["b"],
+				expected_result = ["A", "D"]
+				),
+
+			
+			UseReduceTestCase(
+				"a? ( b? ( AB ) )",
+				uselist = ["a", "b"],
+				expected_result = ["AB"]
+				),
+			UseReduceTestCase(
+				"a? ( b? ( AB ) C )",
+				uselist = ["a"],
+				expected_result = ["C"]
+				),
+			UseReduceTestCase(
+				"a? ( b? ( || ( AB CD ) ) )",
+				uselist = ["a", "b"],
+				expected_result = ["||", ["AB", "CD"]]
+				),
+			UseReduceTestCase(
+				"|| ( || ( a? ( A ) b? ( B ) ) )",
+				uselist = ["a", "b"],
+				expected_result = ["||", ["A", "B"]]
+				),
+			UseReduceTestCase(
+				"|| ( || ( a? ( A ) b? ( B ) ) )",
+				uselist = ["a"],
+				expected_result = ["A"]
+				),
+			UseReduceTestCase(
+				"|| ( || ( a? ( A ) b? ( B ) ) )",
+				uselist = [],
+				expected_result = []
+				),
+			UseReduceTestCase(
+				"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+				uselist = [],
+				expected_result = []
+				),
+			UseReduceTestCase(
+				"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+				uselist = ["a"],
+				expected_result = ["A"]
+				),
+			UseReduceTestCase(
+				"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+				uselist = ["b"],
+				expected_result = ["B"]
+				),
+			UseReduceTestCase(
+				"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+				uselist = ["c"],
+				expected_result = []
+				),
+			UseReduceTestCase(
+				"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+				uselist = ["a", "c"],
+				expected_result = ["||", [ "A", "C"]]
+				),
+			
+			#paren_reduce tests
+			UseReduceTestCase(
+				"A",
+				expected_result = ["A"]),
+			UseReduceTestCase(
+				"( A )",
+				expected_result = ["A"]),
+			UseReduceTestCase(
+				"|| ( A B )",
+				expected_result = [ "||", ["A", "B"] ]),
+			UseReduceTestCase(
+				"|| ( ( A B ) C )",
+				expected_result = [ "||", [ ["A", "B"], "C"] ]),
+			UseReduceTestCase(
+				"|| ( ( A B ) ( C ) )",
+				expected_result = [ "||", [ ["A", "B"], "C"] ]),
+			UseReduceTestCase(
+				"|| ( A || ( B C ) )",
+				expected_result = [ "||", ["A", "B", "C"]]),
+			UseReduceTestCase(
+				"|| ( A || ( B C D ) )",
+				expected_result = [ "||", ["A", "B", "C", "D"] ]),
+			UseReduceTestCase(
+				"|| ( A || ( B || ( C D ) E ) )",
+				expected_result = [ "||", ["A", "B", "C", "D", "E"] ]),
+			UseReduceTestCase(
+				"( || ( ( ( A ) B ) ) )",
+				expected_result = ["A", "B"] ),
+			UseReduceTestCase(
+				"( || ( || ( ( A ) B ) ) )",
+				expected_result = [ "||", ["A", "B"] ]),
+			UseReduceTestCase(
+				"( || ( || ( ( A ) B ) ) )",
+				expected_result = [ "||", ["A", "B"] ]),
+			UseReduceTestCase(
+				"|| ( A )",
+				expected_result = ["A"]),
+			UseReduceTestCase(
+				"( || ( || ( || ( A ) foo? ( B ) ) ) )",
+				expected_result = ["A"]),
+			UseReduceTestCase(
+				"( || ( || ( || ( A ) foo? ( B ) ) ) )",
+				uselist = ["foo"],
+				expected_result = [ "||", ["A", "B"] ]),
+			UseReduceTestCase(
+				"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+				expected_result = []),
+			UseReduceTestCase(
+				"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+				uselist = ["foo", "bar"],
+				expected_result = [ "||", [ "A", "B" ] ]),
+			UseReduceTestCase(
+				"A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
+				expected_result = ["A", "B"]),
+			UseReduceTestCase(
+				"|| ( A ) || ( B )",
+				expected_result = ["A", "B"]),
+			UseReduceTestCase(
+				"foo? ( A ) foo? ( B )",
+				expected_result = []),
+			UseReduceTestCase(
+				"foo? ( A ) foo? ( B )",
+				uselist = ["foo"],
+				expected_result = ["A", "B"]),
+			UseReduceTestCase(
+				"|| ( A B ) C",
+				expected_result = ['||', ['A', 'B'], 'C']),
+			UseReduceTestCase(
+				"A || ( B C )",
+				expected_result = ['A', '||', ['B', 'C']]),
+
+			#SRC_URI stuff
+			UseReduceTestCase(
+				"http://foo/bar -> blah.tbz2",
+				is_src_uri = True,
+				eapi = EAPI_WITH_SRC_URI_ARROWS,
+				expected_result = ["http://foo/bar", "->", "blah.tbz2"]),
+			UseReduceTestCase(
+				"foo? ( http://foo/bar -> blah.tbz2 )",
+				uselist = [],
+				is_src_uri = True,
+				eapi = EAPI_WITH_SRC_URI_ARROWS,
+				expected_result = []),
+			UseReduceTestCase(
+				"foo? ( http://foo/bar -> blah.tbz2 )",
+				uselist = ["foo"],
+				is_src_uri = True,
+				eapi = EAPI_WITH_SRC_URI_ARROWS,
+				expected_result = ["http://foo/bar", "->", "blah.tbz2"]),
+			UseReduceTestCase(
+				"http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
+				uselist = [],
+				is_src_uri = True,
+				eapi = EAPI_WITH_SRC_URI_ARROWS,
+				expected_result = ["http://foo/bar", "->", "bar.tbz2"]),
+			UseReduceTestCase(
+				"http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
+				uselist = ["foo"],
+				is_src_uri = True,
+				eapi = EAPI_WITH_SRC_URI_ARROWS,
+				expected_result = ["http://foo/bar", "->", "bar.tbz2", "ftp://foo/a"]),
+			UseReduceTestCase(
+				"http://foo.com/foo http://foo/bar -> blah.tbz2",
+				uselist = ["foo"],
+				is_src_uri = True,
+				eapi = EAPI_WITH_SRC_URI_ARROWS,
+				expected_result = ["http://foo.com/foo", "http://foo/bar", "->", "blah.tbz2"]),
+
+			#opconvert tests
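+			# With opconvert=True the operator is moved inside its list,
+			# so "|| ( A B )" reduces to [['||', 'A', 'B']] instead of
+			# the default ['||', ['A', 'B']].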
+			UseReduceTestCase(
+				"A",
+				opconvert = True,
+				expected_result = ["A"]),
+			UseReduceTestCase(
+				"( A )",
+				opconvert = True,
+				expected_result = ["A"]),
+			UseReduceTestCase(
+				"|| ( A B )",
+				opconvert = True,
+				expected_result = [['||', 'A', 'B']]),
+			UseReduceTestCase(
+				"|| ( ( A B ) C )",
+				opconvert = True,
+				expected_result = [['||', ['A', 'B'], 'C']]),
+			UseReduceTestCase(
+				"|| ( A || ( B C ) )",
+				opconvert = True,
+				expected_result = [['||', 'A', 'B', 'C']]),
+			UseReduceTestCase(
+				"|| ( A || ( B C D ) )",
+				opconvert = True,
+				expected_result = [['||', 'A', 'B', 'C', 'D']]),
+			UseReduceTestCase(
+				"|| ( A || ( B || ( C D ) E ) )",
+				expected_result = [ "||", ["A", "B", "C", "D", "E"] ]),
+			UseReduceTestCase(
+				"( || ( ( ( A ) B ) ) )",
+				opconvert = True,
+				expected_result = [ "A", "B" ] ),
+			UseReduceTestCase(
+				"( || ( || ( ( A ) B ) ) )",
+				opconvert = True,
+				expected_result = [['||', 'A', 'B']]),
+			UseReduceTestCase(
+				"|| ( A B ) C",
+				opconvert = True,
+				expected_result = [['||', 'A', 'B'], 'C']),
+			UseReduceTestCase(
+				"A || ( B C )",
+				opconvert = True,
+				expected_result = ['A', ['||', 'B', 'C']]),
+			UseReduceTestCase(
+				"A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
+				uselist = ["foo", "bar"],
+				opconvert = True,
+				expected_result = ['A', ['||', 'B', 'C', 'D', 'E'], 'G']),
+			UseReduceTestCase(
+				"A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
+				uselist = ["foo", "bar"],
+				opconvert = False,
+				expected_result = ['A', '||', ['B', 'C', 'D', 'E'], 'G']),
+
+			UseReduceTestCase(
+				"|| ( A )",
+				opconvert = True,
+				expected_result = ["A"]),
+			UseReduceTestCase(
+				"( || ( || ( || ( A ) foo? ( B ) ) ) )",
+				expected_result = ["A"]),
+			UseReduceTestCase(
+				"( || ( || ( || ( A ) foo? ( B ) ) ) )",
+				uselist = ["foo"],
+				opconvert = True,
+				expected_result = [['||', 'A', 'B']]),
+			UseReduceTestCase(
+				"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+				opconvert = True,
+				expected_result = []),
+			UseReduceTestCase(
+				"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+				uselist = ["foo", "bar"],
+				opconvert = True,
+				expected_result = [['||', 'A', 'B']]),
+			UseReduceTestCase(
+				"A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
+				opconvert = True,
+				expected_result = ["A", "B"]),
+			UseReduceTestCase(
+				"|| ( A ) || ( B )",
+				opconvert = True,
+				expected_result = ["A", "B"]),
+			UseReduceTestCase(
+				"foo? ( A ) foo? ( B )",
+				opconvert = True,
+				expected_result = []),
+			UseReduceTestCase(
+				"foo? ( A ) foo? ( B )",
+				uselist = ["foo"],
+				opconvert = True,
+				expected_result = ["A", "B"]),
+			UseReduceTestCase(
+				"|| ( foo? ( || ( A B ) ) )",
+				uselist = ["foo"],
+				opconvert = True,
+				expected_result = [['||', 'A', 'B']]),
+
+			UseReduceTestCase(
+				"|| ( ( A B ) foo? ( || ( C D ) ) )",
+				uselist = ["foo"],
+				opconvert = True,
+				expected_result = [['||', ['A', 'B'], 'C', 'D']]),
+
+			UseReduceTestCase(
+				"|| ( ( A B ) foo? ( || ( C D ) ) )",
+				uselist = ["foo"],
+				opconvert = False,
+				expected_result = ['||', [['A', 'B'], 'C', 'D']]),
+
+			UseReduceTestCase(
+				"|| ( ( A B ) || ( C D ) )",
+				expected_result = ['||', [['A', 'B'], 'C', 'D']]),
+
+			UseReduceTestCase(
+				"|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
+				expected_result = ['||', [['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
+
+			UseReduceTestCase(
+				"|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
+				opconvert = True,
+				expected_result = [['||', ['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
+
+			UseReduceTestCase(
+				"|| ( foo? ( A B ) )",
+				uselist = ["foo"],
+				expected_result = ['A', 'B']),
+
+			UseReduceTestCase(
+				"|| ( || ( foo? ( A B ) ) )",
+				uselist = ["foo"],
+				expected_result = ['A', 'B']),
+
+			UseReduceTestCase(
+				"|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
+				uselist = ["a", "b", "c", "d", "e", "f"],
+				expected_result = ['A', 'B']),
+
+			UseReduceTestCase(
+				"|| ( || ( ( || ( a? ( ( b? ( c? ( || ( || ( || ( ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) ) ) ) )",
+				uselist = ["a", "b", "c", "d", "e", "f"],
+				expected_result = ['A', 'B']),
+
+			UseReduceTestCase(
+				"|| ( ( A ( || ( B ) ) ) )",
+				expected_result = ['A', 'B']),
+
+			UseReduceTestCase(
+				"|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
+				uselist = ["foo", "bar", "baz"],
+				expected_result = ['||', [['A', 'B'], ['C', 'D', '||', ['E', ['F', 'G'], 'H']]]]),
+
+			UseReduceTestCase(
+				"|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
+				uselist = ["foo", "bar", "baz"],
+				opconvert = True,
+				expected_result = [['||', ['A', 'B'], ['C', 'D', ['||', 'E', ['F', 'G'], 'H']]]]),
+
+			UseReduceTestCase(
+				"|| ( foo? ( A B ) )",
+				uselist = ["foo"],
+				opconvert=True,
+				expected_result = ['A', 'B']),
+
+			UseReduceTestCase(
+				"|| ( || ( foo? ( A B ) ) )",
+				uselist = ["foo"],
+				opconvert=True,
+				expected_result = ['A', 'B']),
+
+			UseReduceTestCase(
+				"|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
+				uselist = ["a", "b", "c", "d", "e", "f"],
+				opconvert=True,
+				expected_result = ['A', 'B']),
+
+			#flat test
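+			# With flat=True no nested lists are built: every token,
+			# including '||', ends up in a single flat list, so
+			# "|| ( A B )" reduces to ['||', 'A', 'B'].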
+			UseReduceTestCase(
+				"A",
+				flat = True,
+				expected_result = ["A"]),
+			UseReduceTestCase(
+				"( A )",
+				flat = True,
+				expected_result = ["A"]),
+			UseReduceTestCase(
+				"|| ( A B )",
+				flat = True,
+				expected_result = [ "||", "A", "B" ] ),
+			UseReduceTestCase(
+				"|| ( A || ( B C ) )",
+				flat = True,
+				expected_result = [ "||", "A", "||", "B", "C" ]),
+			UseReduceTestCase(
+				"|| ( A || ( B C D ) )",
+				flat = True,
+				expected_result = [ "||", "A", "||", "B", "C", "D" ]),
+			UseReduceTestCase(
+				"|| ( A || ( B || ( C D ) E ) )",
+				flat = True,
+				expected_result = [ "||", "A", "||", "B", "||", "C", "D", "E" ]),
+			UseReduceTestCase(
+				"( || ( ( ( A ) B ) ) )",
+				flat = True,
+				expected_result = [ "||", "A", "B"] ),
+			UseReduceTestCase(
+				"( || ( || ( ( A ) B ) ) )",
+				flat = True,
+				expected_result = [ "||", "||", "A", "B" ]),
+			UseReduceTestCase(
+				"( || ( || ( ( A ) B ) ) )",
+				flat = True,
+				expected_result = [ "||", "||", "A", "B" ]),
+			UseReduceTestCase(
+				"|| ( A )",
+				flat = True,
+				expected_result = ["||", "A"]),
+			UseReduceTestCase(
+				"( || ( || ( || ( A ) foo? ( B ) ) ) )",
+				expected_result = ["A"]),
+			UseReduceTestCase(
+				"( || ( || ( || ( A ) foo? ( B ) ) ) )",
+				uselist = ["foo"],
+				flat = True,
+				expected_result = [ "||", "||","||", "A", "B" ]),
+			UseReduceTestCase(
+				"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+				flat = True,
+				expected_result = ["||", "||","||"]),
+			UseReduceTestCase(
+				"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+				uselist = ["foo", "bar"],
+				flat = True,
+				expected_result = [ "||", "||", "A", "||", "B" ]),
+			UseReduceTestCase(
+				"A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
+				flat = True,
+				expected_result = ["A", "||", "B"]),
+			UseReduceTestCase(
+				"|| ( A ) || ( B )",
+				flat = True,
+				expected_result = ["||", "A", "||", "B"]),
+			UseReduceTestCase(
+				"foo? ( A ) foo? ( B )",
+				flat = True,
+				expected_result = []),
+			UseReduceTestCase(
+				"foo? ( A ) foo? ( B )",
+				uselist = ["foo"],
+				flat = True,
+				expected_result = ["A", "B"]),
+
+			#use flag validation
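+			# is_valid_flag is consulted for each conditional; flags it
+			# rejects make use_reduce raise (see the xfail cases below).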
+			UseReduceTestCase(
+				"foo? ( A )",
+				uselist = ["foo"],
+				is_valid_flag = self.always_true,
+				expected_result = ["A"]),
+			UseReduceTestCase(
+				"foo? ( A )",
+				is_valid_flag = self.always_true,
+				expected_result = []),
+
+			#token_class
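+			# With token_class=Atom every token is converted into an
+			# Atom instance; tokens that are not valid atoms make
+			# use_reduce raise (see the xfail cases below).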
+			UseReduceTestCase(
+				"foo? ( dev-libs/A )",
+				uselist = ["foo"],
+				token_class=Atom,
+				expected_result = ["dev-libs/A"]),
+			UseReduceTestCase(
+				"foo? ( dev-libs/A )",
+				token_class=Atom,
+				expected_result = []),
+		)
+		
+		test_cases_xfail = (
+			UseReduceTestCase("? ( A )"),
+			UseReduceTestCase("!? ( A )"),
+			UseReduceTestCase("( A"),
+			UseReduceTestCase("A )"),
+			UseReduceTestCase("||( A B )"),
+			UseReduceTestCase("|| (A B )"),
+			UseReduceTestCase("|| ( A B)"),
+			UseReduceTestCase("|| ( A B"),
+			UseReduceTestCase("|| A B )"),
+			UseReduceTestCase("|| A B"),
+			UseReduceTestCase("|| ( A B ) )"),
+			UseReduceTestCase("|| || B C"),
+			UseReduceTestCase("|| ( A B || )"),
+			UseReduceTestCase("a? A"),
+			UseReduceTestCase("( || ( || || ( A ) foo? ( B ) ) )"),
+			UseReduceTestCase("( || ( || bar? ( A ) foo? ( B ) ) )"),
+			UseReduceTestCase("foo?"),
+			UseReduceTestCase("foo? || ( A )"),
+			UseReduceTestCase("|| ( )"),
+			UseReduceTestCase("foo? ( )"),
+
+			#SRC_URI stuff
+			UseReduceTestCase("http://foo/bar -> blah.tbz2", is_src_uri = True, eapi = EAPI_WITHOUT_SRC_URI_ARROWS),
+			UseReduceTestCase("|| ( http://foo/bar -> blah.tbz2 )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+			UseReduceTestCase("http://foo/bar -> foo? ( ftp://foo/a )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+			UseReduceTestCase("http://foo/bar blah.tbz2 ->", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+			UseReduceTestCase("-> http://foo/bar blah.tbz2 )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+			UseReduceTestCase("http://foo/bar ->", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+			UseReduceTestCase("http://foo/bar -> foo? ( http://foo.com/foo )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+			UseReduceTestCase("foo? ( http://foo/bar -> ) blah.tbz2", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+			UseReduceTestCase("http://foo/bar -> foo/blah.tbz2", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+			UseReduceTestCase("http://foo/bar -> -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
+			
+			UseReduceTestCase("http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri = False, eapi = EAPI_WITH_SRC_URI_ARROWS),
+
+			UseReduceTestCase(
+				"A",
+				opconvert = True,
+				flat = True),
+
+			#use flag validation
+			UseReduceTestCase("1.0? ( A )"),
+			UseReduceTestCase("!1.0? ( A )"),
+			UseReduceTestCase("!? ( A )"),
+			UseReduceTestCase("!?? ( A )"),
+			UseReduceTestCase(
+				"foo? ( A )",
+				is_valid_flag = self.always_false,
+				),
+			UseReduceTestCase(
+				"foo? ( A )",
+				uselist = ["foo"],
+				is_valid_flag = self.always_false,
+				),
+
+			#token_class
+			UseReduceTestCase(
+				"foo? ( A )",
+				uselist = ["foo"],
+				token_class=Atom),
+			UseReduceTestCase(
+				"A(B",
+				token_class=Atom),
+		)
+
+		for test_case in test_cases:
+			# If it fails then show the input, since lots of our
+			# test cases have the same output but different input,
+			# making it difficult to deduce which test has failed.
+			self.assertEqual(test_case.run(), test_case.expected_result,
+				"input: '%s' result: %s != %s" % (test_case.deparray,
+				test_case.run(), test_case.expected_result))
+
+		for test_case in test_cases_xfail:
+			self.assertRaisesMsg(test_case.deparray, (InvalidDependString, ValueError), test_case.run)

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/__init__.py b/portage_with_autodep/pym/portage/tests/ebuild/__init__.py
new file mode 100644
index 0000000..e2d487e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 1998-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/__test__ b/portage_with_autodep/pym/portage/tests/ebuild/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_array_fromfile_eof.py b/portage_with_autodep/pym/portage/tests/ebuild/test_array_fromfile_eof.py
new file mode 100644
index 0000000..d8277f2
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/test_array_fromfile_eof.py
@@ -0,0 +1,46 @@
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import array
+import tempfile
+
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.tests import TestCase
+
+class ArrayFromfileEofTestCase(TestCase):
+
+	def testArrayFromfileEof(self):
+		# This tests if the following python issue is fixed
+		# in the currently running version of python:
+		#   http://bugs.python.org/issue5334
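+		# array.fromfile() raises EOFError when fewer items than
+		# requested are available; the affected python versions also
+		# discarded the data that had already been read.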
+
+		input_data = "an arbitrary string"
+		input_bytes = _unicode_encode(input_data,
+			encoding='utf_8', errors='strict')
+		f = tempfile.TemporaryFile()
+		f.write(input_bytes)
+
+		f.seek(0)
+		data = []
+		eof = False
+		while not eof:
+			a = array.array('B')
+			try:
+				a.fromfile(f, len(input_bytes) + 1)
+			except (EOFError, IOError):
+				# python-3.0 lost data here
+				eof = True
+
+			if not a:
+				eof = True
+			else:
+				data.append(_unicode_decode(a.tostring(),
+					encoding='utf_8', errors='strict'))
+
+		f.close()
+
+		self.assertEqual(input_data, ''.join(data))

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_config.py b/portage_with_autodep/pym/portage/tests/ebuild/test_config.py
new file mode 100644
index 0000000..7bec8c6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/test_config.py
@@ -0,0 +1,198 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.package.ebuild.config import config
+from portage.package.ebuild._config.LicenseManager import LicenseManager
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class ConfigTestCase(TestCase):
+
+	def testClone(self):
+		"""
+		Test the clone via constructor.
+		"""
+
+		ebuilds = {
+			"dev-libs/A-1": { },
+		}
+
+		playground = ResolverPlayground(ebuilds=ebuilds)
+		try:
+			settings = config(clone=playground.settings)
+			result = playground.run(["=dev-libs/A-1"])
+			pkg, existing_node = result.depgraph._select_package(
+				playground.root, "=dev-libs/A-1")
+			settings.setcpv(pkg)
+
+			# clone after setcpv tests deepcopy of LazyItemsDict
+			settings2 = config(clone=settings)
+		finally:
+			playground.cleanup()
+
+	def testFeaturesMutation(self):
+		"""
+		Test whether mutation of config.features updates the FEATURES
+		variable and persists through config.regenerate() calls. Also
+		verify that features_set._prune_overrides() works correctly.
+		"""
+		playground = ResolverPlayground()
+		try:
+			settings = config(clone=playground.settings)
+
+			settings.features.add('noclean')
+			self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+			settings.regenerate()
+			self.assertEqual('noclean' in settings['FEATURES'].split(),True)
+
+			settings.features.discard('noclean')
+			self.assertEqual('noclean' in settings['FEATURES'].split(), False)
+			settings.regenerate()
+			self.assertEqual('noclean' in settings['FEATURES'].split(), False)
+
+			settings.features.add('noclean')
+			self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+			settings.regenerate()
+			self.assertEqual('noclean' in settings['FEATURES'].split(),True)
+
+			# before: ['noclean', '-noclean', 'noclean']
+			settings.features._prune_overrides()
+			#  after: ['noclean']
+			self.assertEqual(settings._features_overrides.count('noclean'), 1)
+			self.assertEqual(settings._features_overrides.count('-noclean'), 0)
+
+			settings.features.remove('noclean')
+
+			# before: ['noclean', '-noclean']
+			settings.features._prune_overrides()
+			#  after: ['-noclean']
+			self.assertEqual(settings._features_overrides.count('noclean'), 0)
+			self.assertEqual(settings._features_overrides.count('-noclean'), 1)
+		finally:
+			playground.cleanup()
+
+	def testLicenseManager(self):
+
+		user_config = {
+			"package.license":
+				(
+					"dev-libs/* TEST",
+					"dev-libs/A -TEST2", 
+					"=dev-libs/A-2 TEST3 @TEST",
+					"*/* @EULA TEST2",
+					"=dev-libs/C-1 *",
+					"=dev-libs/C-2 -*",
+				),
+		}
+
+		playground = ResolverPlayground(user_config=user_config)
+		try:
+			portage.util.noiselimit = -2
+
+			license_group_locations = (os.path.join(playground.portdir, "profiles"),)
+			pkg_license = os.path.join(playground.eroot, "etc", "portage")
+
+			lic_man = LicenseManager(license_group_locations, pkg_license)
+
+			self.assertEqual(lic_man._accept_license_str, None)
+			self.assertEqual(lic_man._accept_license, None)
+			self.assertEqual(lic_man._license_groups, {"EULA": frozenset(["TEST"])})
+			self.assertEqual(lic_man._undef_lic_groups, set(["TEST"]))
+
+			self.assertEqual(lic_man.extract_global_changes(), "TEST TEST2")
+			self.assertEqual(lic_man.extract_global_changes(), "")
+
+			lic_man.set_accept_license_str("TEST TEST2")
+			self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/B-1", "0", None), ["TEST", "TEST2", "TEST"])
+			self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/A-1", "0", None), ["TEST", "TEST2", "TEST", "-TEST2"])
+			self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/A-2", "0", None), ["TEST", "TEST2", "TEST", "-TEST2", "TEST3", "@TEST"])
+
+			self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/B-1", [], "TEST", "0", None), "TEST")
+			self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/A-1", [], "-TEST2", "0", None), "")
+			self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/A-2", [], "|| ( TEST TEST2 )", "0", None), "TEST")
+			self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/C-1", [], "TEST5", "0", None), "TEST5")
+			self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/C-2", [], "TEST2", "0", None), "")
+
+			self.assertEqual(lic_man.getMissingLicenses("dev-libs/B-1", [], "TEST", "0", None), [])
+			self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-1", [], "-TEST2", "0", None), ["-TEST2"])
+			self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-2", [], "|| ( TEST TEST2 )", "0", None), [])
+			self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-3", [], "|| ( TEST2 || ( TEST3 TEST4 ) )", "0", None), ["TEST2", "TEST3", "TEST4"])
+			self.assertEqual(lic_man.getMissingLicenses("dev-libs/C-1", [], "TEST5", "0", None), [])
+			self.assertEqual(lic_man.getMissingLicenses("dev-libs/C-2", [], "TEST2", "0", None), ["TEST2"])
+			self.assertEqual(lic_man.getMissingLicenses("dev-libs/D-1", [], "", "0", None), [])
+		finally:
+			portage.util.noiselimit = 0
+			playground.cleanup()
+
+	def testPackageMaskOrder(self):
+
+		ebuilds = {
+			"dev-libs/A-1": { },
+			"dev-libs/B-1": { },
+			"dev-libs/C-1": { },
+			"dev-libs/D-1": { },
+			"dev-libs/E-1": { },
+		}
+
+		repo_configs = {
+			"test_repo": {
+				"package.mask":
+					(
+						"dev-libs/A",
+						"dev-libs/C",
+					),
+			}
+		}
+
+		profile = {
+			"package.mask":
+				(
+					"-dev-libs/A",
+					"dev-libs/B",
+					"-dev-libs/B",
+					"dev-libs/D",
+				),
+		}
+
+		user_config = {
+			"package.mask":
+				(
+					"-dev-libs/C",
+					"-dev-libs/D",
+					"dev-libs/E",
+				),
+		}
+
+		test_cases = (
+				ResolverPlaygroundTestCase(
+					["dev-libs/A"],
+					options = { "--autounmask": 'n' },
+					success = False),
+				ResolverPlaygroundTestCase(
+					["dev-libs/B"],
+					success = True,
+					mergelist = ["dev-libs/B-1"]),
+				ResolverPlaygroundTestCase(
+					["dev-libs/C"],
+					success = True,
+					mergelist = ["dev-libs/C-1"]),
+				ResolverPlaygroundTestCase(
+					["dev-libs/D"],
+					success = True,
+					mergelist = ["dev-libs/D-1"]),
+				ResolverPlaygroundTestCase(
+					["dev-libs/E"],
+					options = { "--autounmask": 'n' },
+					success = False),
+		)
+
+		playground = ResolverPlayground(ebuilds=ebuilds, repo_configs=repo_configs, \
+			profile=profile, user_config=user_config)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_doebuild_spawn.py b/portage_with_autodep/pym/portage/tests/ebuild/test_doebuild_spawn.py
new file mode 100644
index 0000000..ed08b2a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/test_doebuild_spawn.py
@@ -0,0 +1,82 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage import _python_interpreter
+from portage import _shell_quote
+from portage.const import EBUILD_SH_BINARY
+from portage.package.ebuild.config import config
+from portage.package.ebuild.doebuild import spawn as doebuild_spawn
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.Package import Package
+from _emerge.PollScheduler import PollScheduler
+
+class DoebuildSpawnTestCase(TestCase):
+	"""
+	Invoke portage.package.ebuild.doebuild.spawn() with a
+	minimal environment. This gives coverage to some of
+	the ebuild execution internals, like ebuild.sh,
+	AbstractEbuildProcess, and EbuildIpcDaemon.
+	"""
+
+	def testDoebuildSpawn(self):
+		playground = ResolverPlayground()
+		try:
+			settings = config(clone=playground.settings)
+			cpv = 'sys-apps/portage-2.1'
+			metadata = {
+				'EAPI'      : '2',
+				'INHERITED' : 'python eutils',
+				'IUSE'      : 'build doc epydoc python3 selinux',
+				'LICENSE'   : 'GPL-2',
+				'PROVIDE'   : 'virtual/portage',
+				'RDEPEND'   : '>=app-shells/bash-3.2_p17 >=dev-lang/python-2.6',
+				'SLOT'      : '0',
+			}
+			root_config = playground.trees[playground.root]['root_config']
+			pkg = Package(built=False, cpv=cpv, installed=False,
+				metadata=metadata, root_config=root_config,
+				type_name='ebuild')
+			settings.setcpv(pkg)
+			settings['PORTAGE_PYTHON'] = _python_interpreter
+			settings['PORTAGE_BUILDDIR'] = os.path.join(
+				settings['PORTAGE_TMPDIR'], cpv)
+			settings['T'] = os.path.join(
+				settings['PORTAGE_BUILDDIR'], 'temp')
+			for x in ('PORTAGE_BUILDDIR', 'T'):
+				os.makedirs(settings[x])
+			# Create a fake environment, to pretend as if the ebuild
+			# has been sourced already.
+			open(os.path.join(settings['T'], 'environment'), 'wb')
+
+			scheduler = PollScheduler().sched_iface
+			for phase in ('_internal_test',):
+
+				# Test EbuildSpawnProcess by calling doebuild.spawn() with
+				# returnpid=False. This case is no longer used by portage
+				# internals since EbuildPhase is used instead and that passes
+				# returnpid=True to doebuild.spawn().
+				rval = doebuild_spawn("%s %s" % (_shell_quote(
+					os.path.join(settings["PORTAGE_BIN_PATH"],
+					os.path.basename(EBUILD_SH_BINARY))), phase),
+					settings, free=1)
+				self.assertEqual(rval, os.EX_OK)
+
+				ebuild_phase = EbuildPhase(background=False,
+					phase=phase, scheduler=scheduler,
+					settings=settings)
+				ebuild_phase.start()
+				ebuild_phase.wait()
+				self.assertEqual(ebuild_phase.returncode, os.EX_OK)
+
+			ebuild_phase = MiscFunctionsProcess(background=False,
+				commands=['success_hooks'],
+				scheduler=scheduler, settings=settings)
+			ebuild_phase.start()
+			ebuild_phase.wait()
+			self.assertEqual(ebuild_phase.returncode, os.EX_OK)
+		finally:
+			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_ipc_daemon.py b/portage_with_autodep/pym/portage/tests/ebuild/test_ipc_daemon.py
new file mode 100644
index 0000000..b5b4796
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/test_ipc_daemon.py
@@ -0,0 +1,127 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import tempfile
+import time
+from portage import os
+from portage import _python_interpreter
+from portage.tests import TestCase
+from portage.const import PORTAGE_BIN_PATH
+from portage.const import PORTAGE_PYM_PATH
+from portage.const import BASH_BINARY
+from portage.package.ebuild._ipc.ExitCommand import ExitCommand
+from portage.util import ensure_dirs
+from _emerge.SpawnProcess import SpawnProcess
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
+from _emerge.TaskScheduler import TaskScheduler
+
+class IpcDaemonTestCase(TestCase):
+
+	_SCHEDULE_TIMEOUT = 40000 # 40 seconds
+
+	def testIpcDaemon(self):
+		tmpdir = tempfile.mkdtemp()
+		build_dir = None
+		try:
+			env = {}
+
+			# Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
+			# need to be inherited by ebuild subprocesses.
+			if 'PORTAGE_USERNAME' in os.environ:
+				env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
+			if 'PORTAGE_GRPNAME' in os.environ:
+				env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
+
+			env['PORTAGE_PYTHON'] = _python_interpreter
+			env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
+			env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
+			env['PORTAGE_BUILDDIR'] = os.path.join(tmpdir, 'cat', 'pkg-1')
+
+			task_scheduler = TaskScheduler(max_jobs=2)
+			build_dir = EbuildBuildDir(
+				scheduler=task_scheduler.sched_iface,
+				settings=env)
+			build_dir.lock()
+			ensure_dirs(env['PORTAGE_BUILDDIR'])
+
+			input_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_in')
+			output_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_out')
+			os.mkfifo(input_fifo)
+			os.mkfifo(output_fifo)
+
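+			# The daemon listens on these fifos while the spawned
+			# ebuild-ipc client sends an 'exit' command; the callback
+			# defined below records that the command was received.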
+			for exitcode in (0, 1, 2):
+				exit_command = ExitCommand()
+				commands = {'exit' : exit_command}
+				daemon = EbuildIpcDaemon(commands=commands,
+					input_fifo=input_fifo,
+					output_fifo=output_fifo,
+					scheduler=task_scheduler.sched_iface)
+				proc = SpawnProcess(
+					args=[BASH_BINARY, "-c",
+					'"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
+					env=env, scheduler=task_scheduler.sched_iface)
+
+				self.received_command = False
+				def exit_command_callback():
+					self.received_command = True
+					proc.cancel()
+					daemon.cancel()
+
+				exit_command.reply_hook = exit_command_callback
+				task_scheduler.add(daemon)
+				task_scheduler.add(proc)
+				start_time = time.time()
+				task_scheduler.run(timeout=self._SCHEDULE_TIMEOUT)
+				task_scheduler.clear()
+
+				self.assertEqual(self.received_command, True,
+					"command not received after %d seconds" % \
+					(time.time() - start_time,))
+				self.assertEqual(proc.isAlive(), False)
+				self.assertEqual(daemon.isAlive(), False)
+				self.assertEqual(exit_command.exitcode, exitcode)
+
+			# Intentionally short timeout test for QueueScheduler.run()
+			sleep_time_s = 10      # 10.000 seconds
+			short_timeout_ms = 10  #  0.010 seconds
+
+			for i in range(3):
+				exit_command = ExitCommand()
+				commands = {'exit' : exit_command}
+				daemon = EbuildIpcDaemon(commands=commands,
+					input_fifo=input_fifo,
+					output_fifo=output_fifo,
+					scheduler=task_scheduler.sched_iface)
+				proc = SpawnProcess(
+					args=[BASH_BINARY, "-c", 'exec sleep %d' % sleep_time_s],
+					env=env, scheduler=task_scheduler.sched_iface)
+
+				self.received_command = False
+				def exit_command_callback():
+					self.received_command = True
+					proc.cancel()
+					daemon.cancel()
+
+				exit_command.reply_hook = exit_command_callback
+				task_scheduler.add(daemon)
+				task_scheduler.add(proc)
+				start_time = time.time()
+				task_scheduler.run(timeout=short_timeout_ms)
+				task_scheduler.clear()
+
+				self.assertEqual(self.received_command, False,
+					"command received after %d seconds" % \
+					(time.time() - start_time,))
+				self.assertEqual(proc.isAlive(), False)
+				self.assertEqual(daemon.isAlive(), False)
+				self.assertEqual(proc.returncode == os.EX_OK, False)
+
+		finally:
+			if build_dir is not None:
+				build_dir.unlock()
+			shutil.rmtree(tmpdir)

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_pty_eof.py b/portage_with_autodep/pym/portage/tests/ebuild/test_pty_eof.py
new file mode 100644
index 0000000..4b6ff21
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/test_pty_eof.py
@@ -0,0 +1,32 @@
+# Copyright 2009-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util._pty import _can_test_pty_eof, _test_pty_eof
+
+class PtyEofFdopenBufferedTestCase(TestCase):
+
+	def testPtyEofFdopenBuffered(self):
+		# This tests if the following python issue is fixed yet:
+		#   http://bugs.python.org/issue5380
+		# Since it might not be fixed, mark as todo.
+		self.todo = True
+		# The result is only valid if openpty does not raise EnvironmentError.
+		if _can_test_pty_eof():
+			try:
+				self.assertEqual(_test_pty_eof(fdopen_buffered=True), True)
+			except EnvironmentError:
+				pass
+
+class PtyEofFdopenUnBufferedTestCase(TestCase):
+	def testPtyEofFdopenUnBuffered(self):
+		# New development: It appears that array.fromfile() is usable
+		# with python3 as long as fdopen is called with a bufsize
+		# argument of 0.
+
+		# The result is only valid if openpty does not raise EnvironmentError.
+		if _can_test_pty_eof():
+			try:
+				self.assertEqual(_test_pty_eof(), True)
+			except EnvironmentError:
+				pass

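A rough standalone illustration of the buffering distinction these two tests
probe, assuming Linux pty semantics (EOF on the master side surfaces as EIO
once the slave is closed):

    import os
    import pty

    master_fd, slave_fd = pty.openpty()
    os.write(slave_fd, b'hello')
    os.close(slave_fd)

    # bufsize=0 requests an unbuffered binary file object; with default
    # buffering, http://bugs.python.org/issue5380 could make EOF raise
    # instead of returning cleanly.
    master_file = os.fdopen(master_fd, 'rb', 0)
    try:
        data = master_file.read(5)  # b'hello'
        master_file.read(1)         # EOF: raises EIO on Linux
    except EnvironmentError:
        pass
    master_file.close()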
diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_spawn.py b/portage_with_autodep/pym/portage/tests/ebuild/test_spawn.py
new file mode 100644
index 0000000..fea4738
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/ebuild/test_spawn.py
@@ -0,0 +1,52 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import sys
+import tempfile
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.const import BASH_BINARY
+from portage.tests import TestCase
+from _emerge.SpawnProcess import SpawnProcess
+from _emerge.PollScheduler import PollScheduler
+
+class SpawnTestCase(TestCase):
+
+	def testLogfile(self):
+		logfile = None
+		try:
+			fd, logfile = tempfile.mkstemp()
+			os.close(fd)
+			null_fd = os.open('/dev/null', os.O_RDWR)
+			test_string = 2 * "blah blah blah\n"
+			scheduler = PollScheduler().sched_iface
+			proc = SpawnProcess(
+				args=[BASH_BINARY, "-c",
+				"echo -n '%s'" % test_string],
+				env={}, fd_pipes={0:sys.stdin.fileno(), 1:null_fd, 2:null_fd},
+				scheduler=scheduler,
+				logfile=logfile)
+			proc.start()
+			os.close(null_fd)
+			self.assertEqual(proc.wait(), os.EX_OK)
+			f = io.open(_unicode_encode(logfile,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['content'], errors='strict')
+			log_content = f.read()
+			f.close()
+			# When logging passes through a pty, this comparison will fail
+			# unless the oflag terminal attributes have the termios.OPOST
+			# bit disabled. Otherwise, transformations such as \n -> \r\n
+			# may occur.
+			self.assertEqual(test_string, log_content)
+		finally:
+			if logfile:
+				try:
+					os.unlink(logfile)
+				except EnvironmentError as e:
+					if e.errno != errno.ENOENT:
+						raise
+					del e

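The termios detail mentioned in the comment above boils down to clearing one
oflag bit; a hedged sketch of what "OPOST disabled" means for a pty file
descriptor:

    import termios

    def disable_opost(fd):
        attrs = termios.tcgetattr(fd)
        attrs[1] &= ~termios.OPOST  # attrs[1] is the oflag field
        termios.tcsetattr(fd, termios.TCSANOW, attrs)
    # With OPOST cleared, output post-processing such as the
    # \n -> \r\n translation no longer happens.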
diff --git a/portage_with_autodep/pym/portage/tests/env/__init__.py b/portage_with_autodep/pym/portage/tests/env/__init__.py
new file mode 100644
index 0000000..cbeabe5
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/__init__.py
@@ -0,0 +1,4 @@
+# tests/portage/env/__init__.py -- Portage Unit Test functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+

diff --git a/portage_with_autodep/pym/portage/tests/env/__test__ b/portage_with_autodep/pym/portage/tests/env/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/env/config/__init__.py b/portage_with_autodep/pym/portage/tests/env/config/__init__.py
new file mode 100644
index 0000000..ef5cc43
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/config/__init__.py
@@ -0,0 +1,4 @@
+# tests/portage/env/config/__init__.py -- Portage Unit Test functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+

diff --git a/portage_with_autodep/pym/portage/tests/env/config/__test__ b/portage_with_autodep/pym/portage/tests/env/config/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/env/config/test_PackageKeywordsFile.py b/portage_with_autodep/pym/portage/tests/env/config/test_PackageKeywordsFile.py
new file mode 100644
index 0000000..f1e9e98
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/config/test_PackageKeywordsFile.py
@@ -0,0 +1,40 @@
+# test_PackageKeywordsFile.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.env.config import PackageKeywordsFile
+from tempfile import mkstemp
+
+class PackageKeywordsFileTestCase(TestCase):
+
+	cpv = ['sys-apps/portage']
+	keywords = ['~x86', 'amd64', '-mips']
+	
+	def testPackageKeywordsFile(self):
+		"""
+		A simple test to ensure the load works properly
+		"""
+
+		self.BuildFile()
+		try:
+			f = PackageKeywordsFile(self.fname)
+			f.load()
+			i = 0
+			for cpv, keyword in f.items():
+				self.assertEqual( cpv, self.cpv[i] )
+				for k in keyword:
+					self.assertTrue(k in self.keywords)
+				i = i + 1
+		finally:
+			self.NukeFile()
+	
+	def BuildFile(self):
+		fd, self.fname = mkstemp()
+		f = os.fdopen(fd, 'w')
+		for c in self.cpv:
+			f.write("%s %s\n" % (c,' '.join(self.keywords)))
+		f.close()
+
+	def NukeFile(self):
+		os.unlink(self.fname)

diff --git a/portage_with_autodep/pym/portage/tests/env/config/test_PackageMaskFile.py b/portage_with_autodep/pym/portage/tests/env/config/test_PackageMaskFile.py
new file mode 100644
index 0000000..0c5b30f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/config/test_PackageMaskFile.py
@@ -0,0 +1,29 @@
+# test_PackageMaskFile.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.env.config import PackageMaskFile
+from portage.tests import TestCase, test_cps
+from tempfile import mkstemp
+
+class PackageMaskFileTestCase(TestCase):
+	
+	def testPackageMaskFile(self):
+		self.BuildFile()
+		try:
+			f = PackageMaskFile(self.fname)
+			f.load()
+			for atom in f:
+				self.assertTrue(atom in test_cps)
+		finally:
+			self.NukeFile()
+	
+	def BuildFile(self):
+		fd, self.fname = mkstemp()
+		f = os.fdopen(fd, 'w')
+		f.write("\n".join(test_cps))
+		f.close()
+	
+	def NukeFile(self):
+		os.unlink(self.fname)

diff --git a/portage_with_autodep/pym/portage/tests/env/config/test_PackageUseFile.py b/portage_with_autodep/pym/portage/tests/env/config/test_PackageUseFile.py
new file mode 100644
index 0000000..7a38067
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/config/test_PackageUseFile.py
@@ -0,0 +1,37 @@
+# test_PackageUseFile.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.env.config import PackageUseFile
+from tempfile import mkstemp
+
+
+class PackageUseFileTestCase(TestCase):
+
+	cpv = 'sys-apps/portage'
+	useflags = ['cdrom', 'far', 'boo', 'flag', 'blat']
+	
+	def testPackageUseFile(self):
+		"""
+		A simple test to ensure the load works properly
+		"""
+		self.BuildFile()
+		try:
+			f = PackageUseFile(self.fname)
+			f.load()
+			for cpv, use in f.items():
+				self.assertEqual( cpv, self.cpv )
+				for flag in use:
+					self.assertTrue(flag in self.useflags)
+		finally:
+			self.NukeFile()
+
+	def BuildFile(self):
+		fd, self.fname = mkstemp()
+		f = os.fdopen(fd, 'w')
+		f.write("%s %s" % (self.cpv, ' '.join(self.useflags)))
+		f.close()
+	
+	def NukeFile(self):
+		os.unlink(self.fname)

diff --git a/portage_with_autodep/pym/portage/tests/env/config/test_PortageModulesFile.py b/portage_with_autodep/pym/portage/tests/env/config/test_PortageModulesFile.py
new file mode 100644
index 0000000..2cd1a8a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/env/config/test_PortageModulesFile.py
@@ -0,0 +1,39 @@
+# Copyright 2006-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.env.config import PortageModulesFile
+from tempfile import mkstemp
+
+class PortageModulesFileTestCase(TestCase):
+
+	keys = ['foo.bar','baz','bob','extra_key']
+	invalid_keys = ['',""]
+	modules = ['spanky','zmedico','antarus','ricer','5','6']
+
+	def setUp(self):
+		self.items = {}
+		for k, v in zip(self.keys + self.invalid_keys, 
+				self.modules):
+			self.items[k] = v
+
+	def testPortageModulesFile(self):
+		self.BuildFile()
+		f = PortageModulesFile(self.fname)
+		f.load()
+		for k in self.keys:
+			self.assertEqual(f[k], self.items[k])
+		for ik in self.invalid_keys:
+			self.assertEqual(False, ik in f)
+		self.NukeFile()
+
+	def BuildFile(self):
+		fd, self.fname = mkstemp()
+		f = os.fdopen(fd, 'w')
+		for k, v in self.items.items():
+			f.write('%s=%s\n' % (k,v))
+		f.close()
+
+	def NukeFile(self):
+		os.unlink(self.fname)

diff --git a/portage_with_autodep/pym/portage/tests/lafilefixer/__init__.py b/portage_with_autodep/pym/portage/tests/lafilefixer/__init__.py
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/lafilefixer/__test__ b/portage_with_autodep/pym/portage/tests/lafilefixer/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/lafilefixer/test_lafilefixer.py b/portage_with_autodep/pym/portage/tests/lafilefixer/test_lafilefixer.py
new file mode 100644
index 0000000..0bcffaa
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lafilefixer/test_lafilefixer.py
@@ -0,0 +1,145 @@
+# test_lafilefixer.py -- Portage Unit Testing Functionality
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.exception import InvalidData
+
+class test_lafilefixer(TestCase):
+
+	def get_test_cases_clean(self):
+		yield b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' -lm'\n" + \
+			b"current=6\n" + \
+			b"age=0\n" + \
+			b"revision=2\n" + \
+			b"installed=yes\n" + \
+			b"dlopen=''\n" + \
+			b"dlpreopen=''\n" + \
+			b"libdir='/usr/lib64'\n"
+		yield b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' -lm'\n" + \
+			b"current=6\n" + \
+			b"age=0\n" + \
+			b"revision=2\n" + \
+			b"installed=yes\n" + \
+			b"dlopen=''\n" + \
+			b"dlpreopen=''\n" + \
+			b"libdir='/usr/lib64'\n"
+		yield b"dependency_libs=' liba.la /usr/lib64/bar.la -lc'\n"
+
+	def get_test_cases_update(self):
+		#.la -> -l*
+		yield b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc'\n", \
+			b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' -L/usr/lib64 -la -lb -lc'\n"
+		#move stuff into inherited_linker_flags
+		yield b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' /usr/lib64/liba.la -pthread /usr/lib64/libb.la -lc'\n" + \
+			b"inherited_linker_flags=''\n", \
+			b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' -L/usr/lib64 -la -lb -lc'\n" + \
+			b"inherited_linker_flags=' -pthread'\n"
+		#reorder 
+		yield b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' /usr/lib64/liba.la -R/usr/lib64 /usr/lib64/libb.la -lc'\n", \
+			b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' -R/usr/lib64 -L/usr/lib64 -la -lb -lc'\n"
+		#remove duplicates from dependency_libs (the original version didn't do it for inherited_linker_flags)
+		yield b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libc.la -pthread -mt" + \
+			b" -L/usr/lib -R/usr/lib64 -lc /usr/lib64/libb.la -lc'\n" +\
+			b"inherited_linker_flags=' -pthread -pthread'\n", \
+			b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' -R/usr/lib64 -L/usr/lib64 -L/usr/lib -la -lc -lb'\n" +\
+			b"inherited_linker_flags=' -pthread -pthread -mt'\n"
+		#-L rewriting
+		yield b"dependency_libs=' -L/usr/X11R6/lib'\n", \
+			b"dependency_libs=' -L/usr/lib'\n"
+		yield b"dependency_libs=' -L/usr/local/lib'\n", \
+			b"dependency_libs=' -L/usr/lib'\n"
+		yield b"dependency_libs=' -L/usr/lib64/pkgconfig/../..'\n", \
+			b"dependency_libs=' -L/usr'\n"
+		yield b"dependency_libs=' -L/usr/lib/pkgconfig/..'\n", \
+			b"dependency_libs=' -L/usr/lib'\n"
+		yield b"dependency_libs=' -L/usr/lib/pkgconfig/../.. -L/usr/lib/pkgconfig/..'\n", \
+			b"dependency_libs=' -L/usr -L/usr/lib'\n"
+		#we once got a backtrace on this one
+		yield b"dependency_libs=' /usr/lib64/libMagickCore.la -L/usr/lib64 -llcms2 /usr/lib64/libtiff.la " + \
+			b"-ljbig -lc /usr/lib64/libfreetype.la /usr/lib64/libjpeg.la /usr/lib64/libXext.la " + \
+			b"/usr/lib64/libXt.la /usr/lib64/libSM.la -lICE -luuid /usr/lib64/libICE.la /usr/lib64/libX11.la " + \
+			b"/usr/lib64/libxcb.la /usr/lib64/libXau.la /usr/lib64/libXdmcp.la -lbz2 -lz -lm " + \
+			b"/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.4/libgomp.la -lrt -lpthread /usr/lib64/libltdl.la -ldl " + \
+			b"/usr/lib64/libfpx.la -lstdc++'", \
+			b"dependency_libs=' -L/usr/lib64 -L/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.4 -lMagickCore -llcms2 " + \
+			b"-ltiff -ljbig -lc -lfreetype -ljpeg -lXext -lXt -lSM -lICE -luuid -lX11 -lxcb -lXau -lXdmcp " + \
+			b"-lbz2 -lz -lm -lgomp -lrt -lpthread -lltdl -ldl -lfpx -lstdc++'"
+
+
+	def get_test_cases_broken(self):
+		yield b""
+		#no dependency_libs
+		yield b"dlname='libfoo.so.1'\n" + \
+			b"current=6\n" + \
+			b"age=0\n" + \
+			b"revision=2\n"
+		#broken dependency_libs
+		yield b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc' \n"
+		#broken dependency_libs
+		yield b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc\n"
+		#crap in dependency_libs
+		yield b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n"
+		#dependency_libs twice
+		yield b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n" +\
+			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n"
+		#inherited_linker_flags twice
+		yield b"dlname='libfoo.so.1'\n" + \
+			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+			b"old_library='libpdf.a'\n" + \
+			b"inherited_linker_flags=''\n" +\
+			b"inherited_linker_flags=''\n"
+
+	def testlafilefixer(self):
+		from portage.util.lafilefixer import _parse_lafile_contents, rewrite_lafile
+
+		for clean_contents in self.get_test_cases_clean():
+			self.assertEqual(rewrite_lafile(clean_contents), (False, None))
+
+		for original_contents, fixed_contents in self.get_test_cases_update():
+			self.assertEqual(rewrite_lafile(original_contents), (True, fixed_contents))
+
+		for broken_contents in self.get_test_cases_broken():
+			self.assertRaises(InvalidData, rewrite_lafile, broken_contents)

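For reference, the contract the assertions above rely on, as a small usage
sketch (the .la contents here are illustrative):

    from portage.util.lafilefixer import rewrite_lafile

    contents = b"dependency_libs=' /usr/lib64/liba.la -lc'\n"
    changed, fixed = rewrite_lafile(contents)
    # changed is True when a rewrite was needed; fixed then holds the
    # new file contents as bytes, otherwise it is None. Unparsable
    # input raises portage.exception.InvalidData instead.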
diff --git a/portage_with_autodep/pym/portage/tests/lazyimport/__init__.py b/portage_with_autodep/pym/portage/tests/lazyimport/__init__.py
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/lazyimport/__test__ b/portage_with_autodep/pym/portage/tests/lazyimport/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py b/portage_with_autodep/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
new file mode 100644
index 0000000..08ccfa7
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
@@ -0,0 +1,81 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import re
+import portage
+from portage import os
+from portage.const import PORTAGE_PYM_PATH
+from portage.tests import TestCase
+
+from _emerge.PollScheduler import PollScheduler
+from _emerge.PipeReader import PipeReader
+from _emerge.SpawnProcess import SpawnProcess
+
+class LazyImportPortageBaselineTestCase(TestCase):
+
+	_module_re = re.compile(r'^(portage|repoman|_emerge)\.')
+
+	_baseline_imports = frozenset([
+		'portage.const', 'portage.localization',
+		'portage.proxy', 'portage.proxy.lazyimport',
+		'portage.proxy.objectproxy',
+		'portage._selinux',
+	])
+
+	_baseline_import_cmd = [portage._python_interpreter, '-c', '''
+import os
+import sys
+sys.path.insert(0, os.environ["PORTAGE_PYM_PATH"])
+import portage
+sys.stdout.write(" ".join(k for k in sys.modules
+	if sys.modules[k] is not None))
+''']
+
+	def testLazyImportPortageBaseline(self):
+		"""
+		Check what modules are imported by a baseline module import.
+		"""
+
+		env = os.environ.copy()
+		pythonpath = env.get('PYTHONPATH')
+		if pythonpath is not None and not pythonpath.strip():
+			pythonpath = None
+		if pythonpath is None:
+			pythonpath = ''
+		else:
+			pythonpath = ':' + pythonpath
+		pythonpath = PORTAGE_PYM_PATH + pythonpath
+		env['PYTHONPATH'] = pythonpath
+
+		# If python is patched to insert the path of the
+		# currently installed portage module into sys.path,
+		# then the above PYTHONPATH override doesn't help.
+		env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
+
+		scheduler = PollScheduler().sched_iface
+		master_fd, slave_fd = os.pipe()
+		master_file = os.fdopen(master_fd, 'rb', 0)
+		slave_file = os.fdopen(slave_fd, 'wb')
+		producer = SpawnProcess(
+			args=self._baseline_import_cmd,
+			env=env, fd_pipes={1:slave_fd},
+			scheduler=scheduler)
+		producer.start()
+		slave_file.close()
+
+		consumer = PipeReader(
+			input_files={"producer" : master_file},
+			scheduler=scheduler)
+
+		consumer.start()
+		consumer.wait()
+		self.assertEqual(producer.wait(), os.EX_OK)
+		self.assertEqual(consumer.wait(), os.EX_OK)
+
+		output = consumer.getvalue().decode('ascii', 'replace').split()
+
+		unexpected_modules = " ".join(sorted(x for x in output \
+			if self._module_re.match(x) is not None and \
+			x not in self._baseline_imports))
+
+		self.assertEqual("", unexpected_modules)

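The child process above amounts to this standalone check, which can be handy
when auditing import-time behavior by hand:

    import subprocess
    import sys

    out = subprocess.check_output([sys.executable, '-c',
        'import sys, portage; '
        'print(" ".join(k for k in sys.modules if sys.modules[k]))'])
    # Any portage.*, repoman.* or _emerge.* name outside the expected
    # baseline indicates a module imported eagerly rather than lazily.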
diff --git a/portage_with_autodep/pym/portage/tests/lazyimport/test_preload_portage_submodules.py b/portage_with_autodep/pym/portage/tests/lazyimport/test_preload_portage_submodules.py
new file mode 100644
index 0000000..9d20eba
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lazyimport/test_preload_portage_submodules.py
@@ -0,0 +1,16 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.tests import TestCase
+
+class PreloadPortageSubmodulesTestCase(TestCase):
+
+	def testPreloadPortageSubmodules(self):
+		"""
+		Verify that _preload_portage_submodules() doesn't leave any
+		remaining proxies that refer to the portage.* namespace.
+		"""
+		portage.proxy.lazyimport._preload_portage_submodules()
+		for name in portage.proxy.lazyimport._module_proxies:
+			self.assertEqual(name.startswith('portage.'), False)

diff --git a/portage_with_autodep/pym/portage/tests/lint/__init__.py b/portage_with_autodep/pym/portage/tests/lint/__init__.py
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/lint/__test__ b/portage_with_autodep/pym/portage/tests/lint/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/lint/test_bash_syntax.py b/portage_with_autodep/pym/portage/tests/lint/test_bash_syntax.py
new file mode 100644
index 0000000..aef8d74
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lint/test_bash_syntax.py
@@ -0,0 +1,42 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import stat
+
+from portage.const import BASH_BINARY, PORTAGE_BIN_PATH
+from portage.tests import TestCase
+from portage import os
+from portage import subprocess_getstatusoutput
+from portage import _encodings
+from portage import _shell_quote
+from portage import _unicode_decode, _unicode_encode
+
+class BashSyntaxTestCase(TestCase):
+
+	def testBashSyntax(self):
+		for parent, dirs, files in os.walk(PORTAGE_BIN_PATH):
+			parent = _unicode_decode(parent,
+				encoding=_encodings['fs'], errors='strict')
+			for x in files:
+				x = _unicode_decode(x,
+					encoding=_encodings['fs'], errors='strict')
+				# str.split('.') drops the dot, so compare bare extensions
+				ext = x.split('.')[-1]
+				if ext in ('py', 'pyc', 'pyo'):
+					continue
+				x = os.path.join(parent, x)
+				st = os.lstat(x)
+				if not stat.S_ISREG(st.st_mode):
+					continue
+
+				# Check for bash shebang
+				f = open(_unicode_encode(x,
+					encoding=_encodings['fs'], errors='strict'), 'rb')
+				line = _unicode_decode(f.readline(),
+					encoding=_encodings['content'], errors='replace')
+				f.close()
+				if line[:2] == '#!' and \
+					'bash' in line:
+					cmd = "%s -n %s" % (_shell_quote(BASH_BINARY), _shell_quote(x))
+					status, output = subprocess_getstatusoutput(cmd)
+					self.assertEqual(os.WIFEXITED(status) and \
+						os.WEXITSTATUS(status) == os.EX_OK, True, msg=output)

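The per-file check performed above reduces to bash's built-in syntax mode; a
minimal standalone equivalent (the script path is hypothetical):

    import subprocess

    def bash_syntax_ok(path, bash='/bin/bash'):
        # 'bash -n' parses the script without executing it; exit
        # status 0 means the syntax is valid.
        return subprocess.call([bash, '-n', path]) == 0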
diff --git a/portage_with_autodep/pym/portage/tests/lint/test_compile_modules.py b/portage_with_autodep/pym/portage/tests/lint/test_compile_modules.py
new file mode 100644
index 0000000..f90a666
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lint/test_compile_modules.py
@@ -0,0 +1,46 @@
+# Copyright 2009-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import itertools
+import stat
+
+from portage.const import PORTAGE_BIN_PATH, PORTAGE_PYM_PATH
+from portage.tests import TestCase
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+
+import py_compile
+
+class CompileModulesTestCase(TestCase):
+
+	def testCompileModules(self):
+		for parent, dirs, files in itertools.chain(
+			os.walk(PORTAGE_BIN_PATH),
+			os.walk(PORTAGE_PYM_PATH)):
+			parent = _unicode_decode(parent,
+				encoding=_encodings['fs'], errors='strict')
+			for x in files:
+				x = _unicode_decode(x,
+					encoding=_encodings['fs'], errors='strict')
+				if x[-4:] in ('.pyc', '.pyo'):
+					continue
+				x = os.path.join(parent, x)
+				st = os.lstat(x)
+				if not stat.S_ISREG(st.st_mode):
+					continue
+				do_compile = False
+				if x[-3:] == '.py':
+					do_compile = True
+				else:
+					# Check for python shebang
+					f = open(_unicode_encode(x,
+						encoding=_encodings['fs'], errors='strict'), 'rb')
+					line = _unicode_decode(f.readline(),
+						encoding=_encodings['content'], errors='replace')
+					f.close()
+					if line[:2] == '#!' and \
+						'python' in line:
+						do_compile = True
+				if do_compile:
+					py_compile.compile(x, cfile='/dev/null', doraise=True)

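py_compile with doraise=True is the piece doing the real work above; in
isolation it looks like this:

    import py_compile

    def compiles(path):
        try:
            py_compile.compile(path, cfile='/dev/null', doraise=True)
            return True
        except py_compile.PyCompileError:
            return False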
diff --git a/portage_with_autodep/pym/portage/tests/lint/test_import_modules.py b/portage_with_autodep/pym/portage/tests/lint/test_import_modules.py
new file mode 100644
index 0000000..8d257c5
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/lint/test_import_modules.py
@@ -0,0 +1,40 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.const import PORTAGE_PYM_PATH
+from portage.tests import TestCase
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+
+class ImportModulesTestCase(TestCase):
+
+	def testImportModules(self):
+		expected_failures = frozenset((
+		))
+
+		for mod in self._iter_modules(PORTAGE_PYM_PATH):
+			try:
+				__import__(mod)
+			except ImportError as e:
+				if mod not in expected_failures:
+					self.assertTrue(False, "failed to import '%s': %s" % (mod, e))
+				del e
+
+	def _iter_modules(self, base_dir):
+		for parent, dirs, files in os.walk(base_dir):
+			parent = _unicode_decode(parent,
+				encoding=_encodings['fs'], errors='strict')
+			parent_mod = parent[len(PORTAGE_PYM_PATH)+1:]
+			parent_mod = parent_mod.replace("/", ".")
+			for x in files:
+				x = _unicode_decode(x,
+					encoding=_encodings['fs'], errors='strict')
+				if x[-3:] != '.py':
+					continue
+				x = x[:-3]
+				if x[-8:] == '__init__':
+					x = parent_mod
+				else:
+					x = parent_mod + "." + x
+				yield x

diff --git a/portage_with_autodep/pym/portage/tests/locks/__init__.py b/portage_with_autodep/pym/portage/tests/locks/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/locks/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/locks/__test__ b/portage_with_autodep/pym/portage/tests/locks/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/locks/test_asynchronous_lock.py b/portage_with_autodep/pym/portage/tests/locks/test_asynchronous_lock.py
new file mode 100644
index 0000000..8946caf
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/locks/test_asynchronous_lock.py
@@ -0,0 +1,124 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import signal
+import tempfile
+
+from portage import os
+from portage.tests import TestCase
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.PollScheduler import PollScheduler
+
+class AsynchronousLockTestCase(TestCase):
+
+	def testAsynchronousLock(self):
+		scheduler = PollScheduler().sched_iface
+		tempdir = tempfile.mkdtemp()
+		try:
+			path = os.path.join(tempdir, 'lock_me')
+			for force_async in (True, False):
+				for force_dummy in (True, False):
+					async_lock = AsynchronousLock(path=path,
+						scheduler=scheduler, _force_async=force_async,
+						_force_thread=True,
+						_force_dummy=force_dummy)
+					async_lock.start()
+					self.assertEqual(async_lock.wait(), os.EX_OK)
+					self.assertEqual(async_lock.returncode, os.EX_OK)
+					async_lock.unlock()
+
+				async_lock = AsynchronousLock(path=path,
+					scheduler=scheduler, _force_async=force_async,
+					_force_process=True)
+				async_lock.start()
+				self.assertEqual(async_lock.wait(), os.EX_OK)
+				self.assertEqual(async_lock.returncode, os.EX_OK)
+				async_lock.unlock()
+
+		finally:
+			shutil.rmtree(tempdir)
+
+	def testAsynchronousLockWait(self):
+		scheduler = PollScheduler().sched_iface
+		tempdir = tempfile.mkdtemp()
+		try:
+			path = os.path.join(tempdir, 'lock_me')
+			lock1 = AsynchronousLock(path=path, scheduler=scheduler)
+			lock1.start()
+			self.assertEqual(lock1.wait(), os.EX_OK)
+			self.assertEqual(lock1.returncode, os.EX_OK)
+
+			# lock2 requires _force_async=True since the portage.locks
+			# module is not designed to work as intended here if the
+			# same process tries to lock the same file more than
+			# once concurrently.
+			lock2 = AsynchronousLock(path=path, scheduler=scheduler,
+				_force_async=True, _force_process=True)
+			lock2.start()
+			# lock2 should be waiting for lock1 to release
+			self.assertEqual(lock2.poll(), None)
+			self.assertEqual(lock2.returncode, None)
+
+			lock1.unlock()
+			self.assertEqual(lock2.wait(), os.EX_OK)
+			self.assertEqual(lock2.returncode, os.EX_OK)
+			lock2.unlock()
+		finally:
+			shutil.rmtree(tempdir)
+
+	def testAsynchronousLockWaitCancel(self):
+		scheduler = PollScheduler().sched_iface
+		tempdir = tempfile.mkdtemp()
+		try:
+			path = os.path.join(tempdir, 'lock_me')
+			lock1 = AsynchronousLock(path=path, scheduler=scheduler)
+			lock1.start()
+			self.assertEqual(lock1.wait(), os.EX_OK)
+			self.assertEqual(lock1.returncode, os.EX_OK)
+			lock2 = AsynchronousLock(path=path, scheduler=scheduler,
+				_force_async=True, _force_process=True)
+			lock2.start()
+			# lock2 should be waiting for lock1 to release
+			self.assertEqual(lock2.poll(), None)
+			self.assertEqual(lock2.returncode, None)
+
+			# Cancel lock2 and then check wait() and returncode results.
+			lock2.cancel()
+			self.assertEqual(lock2.wait() == os.EX_OK, False)
+			self.assertEqual(lock2.returncode == os.EX_OK, False)
+			self.assertEqual(lock2.returncode is None, False)
+			lock1.unlock()
+		finally:
+			shutil.rmtree(tempdir)
+
+	def testAsynchronousLockWaitKill(self):
+		scheduler = PollScheduler().sched_iface
+		tempdir = tempfile.mkdtemp()
+		try:
+			path = os.path.join(tempdir, 'lock_me')
+			lock1 = AsynchronousLock(path=path, scheduler=scheduler)
+			lock1.start()
+			self.assertEqual(lock1.wait(), os.EX_OK)
+			self.assertEqual(lock1.returncode, os.EX_OK)
+			lock2 = AsynchronousLock(path=path, scheduler=scheduler,
+				_force_async=True, _force_process=True)
+			lock2.start()
+			# lock2 should be waiting for lock1 to release
+			self.assertEqual(lock2.poll(), None)
+			self.assertEqual(lock2.returncode, None)
+
+			# Kill lock2's process and then check wait() and
+			# returncode results. This is intended to simulate
+			# a SIGINT sent via the controlling tty.
+			self.assertEqual(lock2._imp is not None, True)
+			self.assertEqual(lock2._imp._proc is not None, True)
+			self.assertEqual(lock2._imp._proc.pid is not None, True)
+			lock2._imp._kill_test = True
+			os.kill(lock2._imp._proc.pid, signal.SIGTERM)
+			self.assertEqual(lock2.wait() == os.EX_OK, False)
+			self.assertEqual(lock2.returncode == os.EX_OK, False)
+			self.assertEqual(lock2.returncode is None, False)
+			lock1.unlock()
+		finally:
+			shutil.rmtree(tempdir)

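Condensed from the first test above, the basic lock/unlock cycle (the path is
hypothetical; the scheduler is obtained as in the tests):

    lock = AsynchronousLock(path='/tmp/lock_me', scheduler=scheduler)
    lock.start()
    if lock.wait() == os.EX_OK:
        # lock held; lock.returncode is also os.EX_OK here
        lock.unlock()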
diff --git a/portage_with_autodep/pym/portage/tests/locks/test_lock_nonblock.py b/portage_with_autodep/pym/portage/tests/locks/test_lock_nonblock.py
new file mode 100644
index 0000000..d5748ad
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/locks/test_lock_nonblock.py
@@ -0,0 +1,46 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import tempfile
+import traceback
+
+import portage
+from portage import os
+from portage.tests import TestCase
+
+class LockNonblockTestCase(TestCase):
+
+	def testLockNonblock(self):
+		tempdir = tempfile.mkdtemp()
+		try:
+			path = os.path.join(tempdir, 'lock_me')
+			lock1 = portage.locks.lockfile(path)
+			pid = os.fork()
+			if pid == 0:
+				portage.process._setup_pipes({0:0, 1:1, 2:2})
+				rval = 2
+				try:
+					try:
+						lock2 = portage.locks.lockfile(path, flags=os.O_NONBLOCK)
+					except portage.exception.TryAgain:
+						rval = os.EX_OK
+					else:
+						rval = 1
+						portage.locks.unlockfile(lock2)
+				except SystemExit:
+					raise
+				except:
+					traceback.print_exc()
+				finally:
+					os._exit(rval)
+
+			self.assertEqual(pid > 0, True)
+			pid, status = os.waitpid(pid, 0)
+			self.assertEqual(os.WIFEXITED(status), True)
+			self.assertEqual(os.WEXITSTATUS(status), os.EX_OK)
+
+			portage.locks.unlockfile(lock1)
+		finally:
+			shutil.rmtree(tempdir)
+

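The child's side of the test, shown standalone; assumes portage.locks is
importable and the lock path (hypothetical here) is writable:

    import os
    import portage.locks
    import portage.exception

    try:
        lock = portage.locks.lockfile('/tmp/lock_me', flags=os.O_NONBLOCK)
    except portage.exception.TryAgain:
        pass  # another process holds the lock; O_NONBLOCK means don't wait
    else:
        portage.locks.unlockfile(lock)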
diff --git a/portage_with_autodep/pym/portage/tests/news/__init__.py b/portage_with_autodep/pym/portage/tests/news/__init__.py
new file mode 100644
index 0000000..28a753f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/news/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.news/__init__.py -- Portage Unit Test functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/news/__test__ b/portage_with_autodep/pym/portage/tests/news/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/news/test_NewsItem.py b/portage_with_autodep/pym/portage/tests/news/test_NewsItem.py
new file mode 100644
index 0000000..a4e76f3
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/news/test_NewsItem.py
@@ -0,0 +1,95 @@
+# test_NewsItem.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.news import NewsItem
+from portage.dbapi.virtual import testdbapi
+from tempfile import mkstemp
+# TODO(antarus) Make newsitem use a loader so we can load using a string instead of a tempfile
+
+class NewsItemTestCase(TestCase):
+	"""These tests suck: they use your running config instead of making their own"""
+	fakeItem = """
+Title: YourSQL Upgrades from 4.0 to 4.1
+Author: Ciaran McCreesh <ciaranm@gentoo.org>
+Content-Type: text/plain
+Posted: 01-Nov-2005
+Revision: 1
+#Display-If-Installed:
+#Display-If-Profile:
+#Display-If-Arch:
+
+YourSQL databases created using YourSQL version 4.0 are incompatible
+with YourSQL version 4.1 or later. There is no reliable way to
+automate the database format conversion, so action from the system
+administrator is required before an upgrade can take place.
+
+Please see the Gentoo YourSQL Upgrade Guide for instructions:
+
+    http://www.gentoo.org/doc/en/yoursql-upgrading.xml
+
+Also see the official YourSQL documentation:
+
+    http://dev.yoursql.com/doc/refman/4.1/en/upgrading-from-4-0.html
+
+After upgrading, you should also recompile any packages which link
+against YourSQL:
+
+    revdep-rebuild --library=libyoursqlclient.so.12
+
+The revdep-rebuild tool is provided by app-portage/gentoolkit.
+"""
+	def setUp(self):
+		self.profile = "/usr/portage/profiles/default-linux/x86/2007.0/"
+		self.keywords = "x86"
+		# Use fake/test dbapi to avoid slow tests
+		self.vardb = testdbapi()
+		# self.vardb.inject_cpv('sys-apps/portage-2.0', { 'SLOT' : 0 })
+		# Consumers only use ARCH, so avoid portage.settings by using a dict
+		self.settings = { 'ARCH' : 'x86' }
+
+	def testDisplayIfProfile(self):
+		tmpItem = self.fakeItem[:].replace("#Display-If-Profile:", "Display-If-Profile: %s" %
+			self.profile)
+
+		item = self._processItem(tmpItem)
+		try:
+			self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
+				msg="Expected %s to be relevant, but it was not!" % tmpItem)
+		finally:
+			os.unlink(item.path)
+
+	def testDisplayIfInstalled(self):
+		tmpItem = self.fakeItem[:].replace("#Display-If-Installed:", "Display-If-Installed: %s" %
+			"sys-apps/portage")
+
+		try:
+			item = self._processItem(tmpItem)
+			self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
+				msg="Expected %s to be relevant, but it was not!" % tmpItem)
+		finally:
+			os.unlink(item.path)
+
+	def testDisplayIfKeyword(self):
+		tmpItem = self.fakeItem[:].replace("#Display-If-Keyword:", "Display-If-Keyword: %s" %
+			self.keywords)
+
+		try:
+			item = self._processItem(tmpItem)
+			self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
+				msg="Expected %s to be relevant, but it was not!" % tmpItem)
+		finally:
+			os.unlink(item.path)
+
+	def _processItem(self, item):
+		filename = None
+		fd, filename = mkstemp()
+		f = os.fdopen(fd, 'w')
+		f.write(item)
+		f.close()
+		try:
+			return NewsItem(filename, 0)
+		except TypeError:
+			self.fail("Error while processing news item %s" % filename)

diff --git a/portage_with_autodep/pym/portage/tests/process/__init__.py b/portage_with_autodep/pym/portage/tests/process/__init__.py
new file mode 100644
index 0000000..d19e353
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/process/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 1998-2008 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/process/__test__ b/portage_with_autodep/pym/portage/tests/process/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/process/test_poll.py b/portage_with_autodep/pym/portage/tests/process/test_poll.py
new file mode 100644
index 0000000..ee6ee0c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/process/test_poll.py
@@ -0,0 +1,39 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from _emerge.PollScheduler import PollScheduler
+from _emerge.PipeReader import PipeReader
+from _emerge.SpawnProcess import SpawnProcess
+
+class PipeReaderTestCase(TestCase):
+
+	def testPipeReader(self):
+		"""
+		Use a poll loop to read data from a pipe and assert that
+		the data written to the pipe is identical to the data
+		read from the pipe.
+		"""
+
+		test_string = 2 * "blah blah blah\n"
+
+		scheduler = PollScheduler().sched_iface
+		master_fd, slave_fd = os.pipe()
+		master_file = os.fdopen(master_fd, 'rb', 0)
+		slave_file = os.fdopen(slave_fd, 'wb')
+		producer = SpawnProcess(
+			args=["bash", "-c", "echo -n '%s'" % test_string],
+			env=os.environ, fd_pipes={1:slave_fd},
+			scheduler=scheduler)
+		producer.start()
+		slave_file.close()
+
+		consumer = PipeReader(
+			input_files={"producer" : master_file},
+			scheduler=scheduler)
+
+		consumer.start()
+		consumer.wait()
+		output = consumer.getvalue().decode('ascii', 'replace')
+		self.assertEqual(test_string, output)

diff --git a/portage_with_autodep/pym/portage/tests/resolver/ResolverPlayground.py b/portage_with_autodep/pym/portage/tests/resolver/ResolverPlayground.py
new file mode 100644
index 0000000..6a8e3c1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/ResolverPlayground.py
@@ -0,0 +1,690 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from itertools import permutations
+import shutil
+import sys
+import tempfile
+import portage
+from portage import os
+from portage.const import PORTAGE_BASE_PATH
+from portage.dbapi.vartree import vartree
+from portage.dbapi.porttree import portagetree
+from portage.dbapi.bintree import binarytree
+from portage.dep import Atom, _repo_separator
+from portage.package.ebuild.config import config
+from portage.package.ebuild.digestgen import digestgen
+from portage._sets import load_default_config
+from portage._sets.base import InternalPackageSet
+from portage.versions import catsplit
+
+import _emerge
+from _emerge.actions import calc_depclean
+from _emerge.Blocker import Blocker
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.depgraph import backtrack_depgraph
+from _emerge.RootConfig import RootConfig
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+class ResolverPlayground(object):
+	"""
+	This class creates the necessary files on disk and the needed
+	settings instances, etc. for the resolver to do its work.
+	"""
+
+	config_files = frozenset(("package.use", "package.mask", "package.keywords", \
+		"package.unmask", "package.properties", "package.license", "use.mask", "use.force"))
+
+	def __init__(self, ebuilds={}, installed={}, profile={}, repo_configs={}, \
+		user_config={}, sets={}, world=[], debug=False):
+		"""
+		ebuilds: cpv -> metadata mapping simulating available ebuilds. 
+		installed: cpv -> metadata mapping simulating installed packages.
+			If a metadata key is missing, it gets a default value.
+		profile: settings defined by the profile.
+		"""
+		self.debug = debug
+		self.root = "/"
+		self.eprefix = tempfile.mkdtemp()
+		self.eroot = self.root + self.eprefix.lstrip(os.sep) + os.sep
+		self.portdir = os.path.join(self.eroot, "usr/portage")
+		self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
+		os.makedirs(self.portdir)
+		os.makedirs(self.vdbdir)
+
+		if not debug:
+			portage.util.noiselimit = -2
+
+		self.repo_dirs = {}
+		#Make sure the main repo is always created
+		self._get_repo_dir("test_repo")
+
+		self._create_ebuilds(ebuilds)
+		self._create_installed(installed)
+		self._create_profile(ebuilds, installed, profile, repo_configs, user_config, sets)
+		self._create_world(world)
+
+		self.settings, self.trees = self._load_config()
+
+		self._create_ebuild_manifests(ebuilds)
+		
+		portage.util.noiselimit = 0
+
+	def _get_repo_dir(self, repo):
+		"""
+		Create the repo directory if needed.
+		"""
+		if repo not in self.repo_dirs:
+			if repo == "test_repo":
+				repo_path = self.portdir
+			else:
+				repo_path = os.path.join(self.eroot, "usr", "local", repo)
+
+			self.repo_dirs[repo] = repo_path
+			profile_path = os.path.join(repo_path, "profiles")
+
+			try:
+				os.makedirs(profile_path)
+			except os.error:
+				pass
+
+			repo_name_file = os.path.join(profile_path, "repo_name")
+			f = open(repo_name_file, "w")
+			f.write("%s\n" % repo)
+			f.close()
+
+		return self.repo_dirs[repo]
+
+	def _create_ebuilds(self, ebuilds):
+		for cpv in ebuilds:
+			a = Atom("=" + cpv, allow_repo=True)
+			repo = a.repo
+			if repo is None:
+				repo = "test_repo"
+
+			metadata = ebuilds[cpv].copy()
+			eapi = metadata.pop("EAPI", 0)
+			lic = metadata.pop("LICENSE", "")
+			properties = metadata.pop("PROPERTIES", "")
+			slot = metadata.pop("SLOT", 0)
+			keywords = metadata.pop("KEYWORDS", "x86")
+			iuse = metadata.pop("IUSE", "")
+			depend = metadata.pop("DEPEND", "")
+			rdepend = metadata.pop("RDEPEND", None)
+			pdepend = metadata.pop("PDEPEND", None)
+			required_use = metadata.pop("REQUIRED_USE", None)
+
+			if metadata:
+				raise ValueError("metadata of ebuild '%s' contains unknown keys: %s" % (cpv, metadata.keys()))
+
+			repo_dir = self._get_repo_dir(repo)
+			ebuild_dir = os.path.join(repo_dir, a.cp)
+			ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
+			try:
+				os.makedirs(ebuild_dir)
+			except os.error:
+				pass
+
+			f = open(ebuild_path, "w")
+			f.write('EAPI="' + str(eapi) + '"\n')
+			f.write('LICENSE="' + str(lic) + '"\n')
+			f.write('PROPERTIES="' + str(properties) + '"\n')
+			f.write('SLOT="' + str(slot) + '"\n')
+			f.write('KEYWORDS="' + str(keywords) + '"\n')
+			f.write('IUSE="' + str(iuse) + '"\n')
+			f.write('DEPEND="' + str(depend) + '"\n')
+			if rdepend is not None:
+				f.write('RDEPEND="' + str(rdepend) + '"\n')
+			if pdepend is not None:
+				f.write('PDEPEND="' + str(pdepend) + '"\n')
+			if required_use is not None:
+				f.write('REQUIRED_USE="' + str(required_use) + '"\n')
+			f.close()
+
+	def _create_ebuild_manifests(self, ebuilds):
+		tmpsettings = config(clone=self.settings)
+		tmpsettings['PORTAGE_QUIET'] = '1'
+		for cpv in ebuilds:
+			a = Atom("=" + cpv, allow_repo=True)
+			repo = a.repo
+			if repo is None:
+				repo = "test_repo"
+
+			repo_dir = self._get_repo_dir(repo)
+			ebuild_dir = os.path.join(repo_dir, a.cp)
+			ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
+
+			portdb = self.trees[self.root]["porttree"].dbapi
+			tmpsettings['O'] = ebuild_dir
+			if not digestgen(mysettings=tmpsettings, myportdb=portdb):
+				raise AssertionError('digest creation failed for %s' % ebuild_path)
+
+	def _create_installed(self, installed):
+		for cpv in installed:
+			a = Atom("=" + cpv, allow_repo=True)
+			repo = a.repo
+			if repo is None:
+				repo = "test_repo"
+
+			vdb_pkg_dir = os.path.join(self.vdbdir, a.cpv)
+			try:
+				os.makedirs(vdb_pkg_dir)
+			except os.error:
+				pass
+
+			metadata = installed[cpv].copy()
+			eapi = metadata.pop("EAPI", 0)
+			lic = metadata.pop("LICENSE", "")
+			properties = metadata.pop("PROPERTIES", "")
+			slot = metadata.pop("SLOT", 0)
+			keywords = metadata.pop("KEYWORDS", "~x86")
+			iuse = metadata.pop("IUSE", "")
+			use = metadata.pop("USE", "")
+			depend = metadata.pop("DEPEND", "")
+			rdepend = metadata.pop("RDEPEND", None)
+			pdepend = metadata.pop("PDEPEND", None)
+			required_use = metadata.pop("REQUIRED_USE", None)
+
+			if metadata:
+				raise ValueError("metadata of installed '%s' contains unknown keys: %s" % (cpv, metadata.keys()))
+
+			def write_key(key, value):
+				f = open(os.path.join(vdb_pkg_dir, key), "w")
+				f.write(str(value) + "\n")
+				f.close()
+			
+			write_key("EAPI", eapi)
+			write_key("LICENSE", lic)
+			write_key("PROPERTIES", properties)
+			write_key("SLOT", slot)
+			write_key("LICENSE", lic)
+			write_key("PROPERTIES", properties)
+			write_key("repository", repo)
+			write_key("KEYWORDS", keywords)
+			write_key("IUSE", iuse)
+			write_key("USE", use)
+			write_key("DEPEND", depend)
+			if rdepend is not None:
+				write_key("RDEPEND", rdepend)
+			if pdepend is not None:
+				write_key("PDEPEND", pdepend)
+			if required_use is not None:
+				write_key("REQUIRED_USE", required_use)
+
+	def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):
+
+		for repo in self.repo_dirs:
+			repo_dir = self._get_repo_dir(repo)
+			profile_dir = os.path.join(repo_dir, "profiles")
+
+			#Create $REPO/profiles/categories
+			categories = set()
+			for cpv in ebuilds:
+				ebuilds_repo = Atom("="+cpv, allow_repo=True).repo
+				if ebuilds_repo is None:
+					ebuilds_repo = "test_repo"
+				if ebuilds_repo == repo:
+					categories.add(catsplit(cpv)[0])
+
+			categories_file = os.path.join(profile_dir, "categories")
+			f = open(categories_file, "w")
+			for cat in categories:
+				f.write(cat + "\n")
+			f.close()
+			
+			#Create $REPO/profiles/license_groups
+			license_file = os.path.join(profile_dir, "license_groups")
+			f = open(license_file, "w")
+			f.write("EULA TEST\n")
+			f.close()
+
+			repo_config = repo_configs.get(repo) 
+			if repo_config:
+				for config_file, lines in repo_config.items():
+					if config_file not in self.config_files:
+						raise ValueError("Unknown config file: '%s'" % config_file)
+		
+					file_name = os.path.join(profile_dir, config_file)
+					f = open(file_name, "w")
+					for line in lines:
+						f.write("%s\n" % line)
+					f.close()
+
+			#Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
+			os.makedirs(os.path.join(repo_dir, "eclass"))
+
+			if repo == "test_repo":
+				#Create a minimal profile in /usr/portage
+				sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
+				os.makedirs(sub_profile_dir)
+
+				eapi_file = os.path.join(sub_profile_dir, "eapi")
+				f = open(eapi_file, "w")
+				f.write("0\n")
+				f.close()
+
+				make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
+				f = open(make_defaults_file, "w")
+				f.write("ARCH=\"x86\"\n")
+				f.write("ACCEPT_KEYWORDS=\"x86\"\n")
+				f.close()
+
+				use_force_file = os.path.join(sub_profile_dir, "use.force")
+				f = open(use_force_file, "w")
+				f.write("x86\n")
+				f.close()
+
+				if profile:
+					for config_file, lines in profile.items():
+						if config_file not in self.config_files:
+							raise ValueError("Unknown config file: '%s'" % config_file)
+
+						file_name = os.path.join(sub_profile_dir, config_file)
+						f = open(file_name, "w")
+						for line in lines:
+							f.write("%s\n" % line)
+						f.close()
+
+				#Create profile symlink
+				os.makedirs(os.path.join(self.eroot, "etc"))
+				os.symlink(sub_profile_dir, os.path.join(self.eroot, "etc", "make.profile"))
+
+		user_config_dir = os.path.join(self.eroot, "etc", "portage")
+
+		try:
+			os.makedirs(user_config_dir)
+		except os.error:
+			pass
+
+		repos_conf_file = os.path.join(user_config_dir, "repos.conf")		
+		f = open(repos_conf_file, "w")
+		priority = 0
+		for repo in sorted(self.repo_dirs.keys()):
+			f.write("[%s]\n" % repo)
+			f.write("LOCATION=%s\n" % self.repo_dirs[repo])
+			if repo == "test_repo":
+				f.write("PRIORITY=%s\n" % -1000)
+			else:
+				f.write("PRIORITY=%s\n" % priority)
+				priority += 1
+		f.close()
+
+		for config_file, lines in user_config.items():
+			if config_file not in self.config_files:
+				raise ValueError("Unknown config file: '%s'" % config_file)
+
+			file_name = os.path.join(user_config_dir, config_file)
+			f = open(file_name, "w")
+			for line in lines:
+				f.write("%s\n" % line)
+			f.close()
+
+		#Create /usr/share/portage/config/sets/portage.conf
+		default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")
+		
+		try:
+			os.makedirs(default_sets_conf_dir)
+		except os.error:
+			pass
+
+		provided_sets_portage_conf = \
+			os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf")
+		os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))
+
+		set_config_dir = os.path.join(user_config_dir, "sets")
+
+		try:
+			os.makedirs(set_config_dir)
+		except os.error:
+			pass
+
+		for sets_file, lines in sets.items():
+			file_name = os.path.join(set_config_dir, sets_file)
+			f = open(file_name, "w")
+			for line in lines:
+				f.write("%s\n" % line)
+			f.close()
+
+	def _create_world(self, world):
+		#Create /var/lib/portage/world
+		var_lib_portage = os.path.join(self.eroot, "var", "lib", "portage")
+		os.makedirs(var_lib_portage)
+
+		world_file = os.path.join(var_lib_portage, "world")
+
+		f = open(world_file, "w")
+		for atom in world:
+			f.write("%s\n" % atom)
+		f.close()
+
+	def _load_config(self):
+		portdir_overlay = []
+		for repo_name in sorted(self.repo_dirs):
+			path = self.repo_dirs[repo_name]
+			if path != self.portdir:
+				portdir_overlay.append(path)
+
+		env = {
+			"ACCEPT_KEYWORDS": "x86",
+			"PORTDIR": self.portdir,
+			"PORTDIR_OVERLAY": " ".join(portdir_overlay),
+			'PORTAGE_TMPDIR'       : os.path.join(self.eroot, 'var/tmp'),
+		}
+
+		# Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
+		# need to be inherited by ebuild subprocesses.
+		if 'PORTAGE_USERNAME' in os.environ:
+			env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
+		if 'PORTAGE_GRPNAME' in os.environ:
+			env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
+
+		settings = config(_eprefix=self.eprefix, env=env)
+		settings.lock()
+
+		trees = {
+			self.root: {
+					"vartree": vartree(settings=settings),
+					"porttree": portagetree(self.root, settings=settings),
+					"bintree": binarytree(self.root,
+						os.path.join(self.eroot, "usr/portage/packages"),
+						settings=settings)
+				}
+			}
+
+		for root, root_trees in trees.items():
+			settings = root_trees["vartree"].settings
+			settings._init_dirs()
+			setconfig = load_default_config(settings, root_trees)
+			root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
+		
+		return settings, trees
+
+	def run(self, atoms, options={}, action=None):
+		options = options.copy()
+		options["--pretend"] = True
+		if self.debug:
+			options["--debug"] = True
+
+		global_noiselimit = portage.util.noiselimit
+		global_emergelog_disable = _emerge.emergelog._disable
+		try:
+
+			if not self.debug:
+				portage.util.noiselimit = -2
+			_emerge.emergelog._disable = True
+
+			if options.get("--depclean"):
+				rval, cleanlist, ordered, req_pkg_count = \
+					calc_depclean(self.settings, self.trees, None,
+					options, "depclean", InternalPackageSet(initial_atoms=atoms, allow_wildcard=True), None)
+				result = ResolverPlaygroundDepcleanResult( \
+					atoms, rval, cleanlist, ordered, req_pkg_count)
+			else:
+				params = create_depgraph_params(options, action)
+				success, depgraph, favorites = backtrack_depgraph(
+					self.settings, self.trees, options, params, action, atoms, None)
+				depgraph._show_merge_list()
+				depgraph.display_problems()
+				result = ResolverPlaygroundResult(atoms, success, depgraph, favorites)
+		finally:
+			portage.util.noiselimit = global_noiselimit
+			_emerge.emergelog._disable = global_emergelog_disable
+
+		return result
+
+	def run_TestCase(self, test_case):
+		if not isinstance(test_case, ResolverPlaygroundTestCase):
+			raise TypeError("ResolverPlayground needs a ResolverPlaygroundTestCase")
+		for atoms in test_case.requests:
+			result = self.run(atoms, test_case.options, test_case.action)
+			if not test_case.compare_with_result(result):
+				return
+
+	def cleanup(self):
+		portdb = self.trees[self.root]["porttree"].dbapi
+		portdb.close_caches()
+		portage.dbapi.porttree.portdbapi.portdbapi_instances.remove(portdb)
+		if self.debug:
+			print("\nEROOT=%s" % self.eroot)
+		else:
+			shutil.rmtree(self.eroot)
+
+class ResolverPlaygroundTestCase(object):
+
+	def __init__(self, request, **kwargs):
+		self.all_permutations = kwargs.pop("all_permutations", False)
+		self.ignore_mergelist_order = kwargs.pop("ignore_mergelist_order", False)
+		self.ambiguous_merge_order = kwargs.pop("ambiguous_merge_order", False)
+		self.check_repo_names = kwargs.pop("check_repo_names", False)
+		self.merge_order_assertions = kwargs.pop("merge_order_assertions", False)
+
+		if self.all_permutations:
+			self.requests = list(permutations(request))
+		else:
+			self.requests = [request]
+
+		self.options = kwargs.pop("options", {})
+		self.action = kwargs.pop("action", None)
+		self.test_success = True
+		self.fail_msg = None
+		self._checks = kwargs.copy()
+
+	def compare_with_result(self, result):
+		checks = dict.fromkeys(result.checks)
+		for key, value in self._checks.items():
+			if key not in checks:
+				raise KeyError("Not an available check: '%s'" % key)
+			checks[key] = value
+
+		fail_msgs = []
+		for key, value in checks.items():
+			got = getattr(result, key)
+			expected = value
+
+			if key in result.optional_checks and expected is None:
+				continue
+
+			if key == "mergelist":
+				if not self.check_repo_names:
+					#Strip repo names if we don't check them
+					if got:
+						new_got = []
+						for cpv in got:
+							if cpv[:1] == "!":
+								new_got.append(cpv)
+								continue
+							a = Atom("="+cpv, allow_repo=True)
+							new_got.append(a.cpv)
+						got = new_got
+					if expected:
+						new_expected = []
+						for obj in expected:
+							if isinstance(obj, basestring):
+								if obj[:1] == "!":
+									new_expected.append(obj)
+									continue
+								a = Atom("="+obj, allow_repo=True)
+								new_expected.append(a.cpv)
+								continue
+							new_expected.append(set())
+							for cpv in obj:
+								if cpv[:1] != "!":
+									cpv = Atom("="+cpv, allow_repo=True).cpv
+								new_expected[-1].add(cpv)
+						expected = new_expected
+				if self.ignore_mergelist_order and got is not None:
+					got = set(got)
+					expected = set(expected)
+
+				if self.ambiguous_merge_order and got:
+					expected_stack = list(reversed(expected))
+					got_stack = list(reversed(got))
+					new_expected = []
+					match = True
+					while got_stack and expected_stack:
+						got_token = got_stack.pop()
+						expected_obj = expected_stack.pop()
+						if isinstance(expected_obj, basestring):
+							new_expected.append(expected_obj)
+							if got_token == expected_obj:
+								continue
+							# result doesn't match, so stop early
+							match = False
+							break
+						expected_obj = set(expected_obj)
+						try:
+							expected_obj.remove(got_token)
+						except KeyError:
+							# result doesn't match, so stop early
+							match = False
+							break
+						new_expected.append(got_token)
+						while got_stack and expected_obj:
+							got_token = got_stack.pop()
+							try:
+								expected_obj.remove(got_token)
+							except KeyError:
+								match = False
+								break
+							new_expected.append(got_token)
+						if not match:
+							# result doesn't match, so stop early
+							break
+						if expected_obj:
+							# result does not match, so stop early
+							match = False
+							new_expected.append(tuple(expected_obj))
+							break
+					if expected_stack:
+						# result does not match, add leftovers to new_expected
+						match = False
+						expected_stack.reverse()
+						new_expected.extend(expected_stack)
+					expected = new_expected
+
+					if match and self.merge_order_assertions:
+						for node1, node2 in self.merge_order_assertions:
+							if not (got.index(node1) < got.index(node2)):
+								fail_msgs.append("atoms: (" + \
+									", ".join(result.atoms) + "), key: " + \
+									("merge_order_assertions, expected: %s" % \
+									str((node1, node2))) + \
+									", got: " + str(got))
+
+			elif key in ("unstable_keywords", "needed_p_mask_changes") and expected is not None:
+				expected = set(expected)
+
+			if got != expected:
+				fail_msgs.append("atoms: (" + ", ".join(result.atoms) + "), key: " + \
+					key + ", expected: " + str(expected) + ", got: " + str(got))
+		if fail_msgs:
+			self.test_success = False
+			self.fail_msg = "\n".join(fail_msgs)
+			return False
+		return True
+
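+# A minimal driver sketch (illustrative only; the resolver test modules in
+# this commit are the authoritative examples of this API):
+#
+#	playground = ResolverPlayground(ebuilds={"dev-libs/A-1": {}})
+#	try:
+#		test_case = ResolverPlaygroundTestCase(
+#			["dev-libs/A"],
+#			success = True,
+#			mergelist = ["dev-libs/A-1"])
+#		playground.run_TestCase(test_case)
+#		assert test_case.test_success, test_case.fail_msg
+#	finally:
+#		playground.cleanup()
+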
+class ResolverPlaygroundResult(object):
+
+	checks = (
+		"success", "mergelist", "use_changes", "license_changes",
+		"unstable_keywords", "slot_collision_solutions",
+		"circular_dependency_solutions", "needed_p_mask_changes",
+		)
+	optional_checks = (
+		)
+
+	def __init__(self, atoms, success, mydepgraph, favorites):
+		self.atoms = atoms
+		self.success = success
+		self.depgraph = mydepgraph
+		self.favorites = favorites
+		self.mergelist = None
+		self.use_changes = None
+		self.license_changes = None
+		self.unstable_keywords = None
+		self.needed_p_mask_changes = None
+		self.slot_collision_solutions = None
+		self.circular_dependency_solutions = None
+
+		if self.depgraph._dynamic_config._serialized_tasks_cache is not None:
+			self.mergelist = []
+			for x in self.depgraph._dynamic_config._serialized_tasks_cache:
+				if isinstance(x, Blocker):
+					self.mergelist.append(x.atom)
+				else:
+					repo_str = ""
+					if x.metadata["repository"] != "test_repo":
+						repo_str = _repo_separator + x.metadata["repository"]
+					self.mergelist.append(x.cpv + repo_str)
+
+		if self.depgraph._dynamic_config._needed_use_config_changes:
+			self.use_changes = {}
+			for pkg, needed_use_config_changes in \
+				self.depgraph._dynamic_config._needed_use_config_changes.items():
+				new_use, changes = needed_use_config_changes
+				self.use_changes[pkg.cpv] = changes
+
+		if self.depgraph._dynamic_config._needed_unstable_keywords:
+			self.unstable_keywords = set()
+			for pkg in self.depgraph._dynamic_config._needed_unstable_keywords:
+				self.unstable_keywords.add(pkg.cpv)
+
+		if self.depgraph._dynamic_config._needed_p_mask_changes:
+			self.needed_p_mask_changes = set()
+			for pkg in self.depgraph._dynamic_config._needed_p_mask_changes:
+				self.needed_p_mask_changes.add(pkg.cpv)
+
+		if self.depgraph._dynamic_config._needed_license_changes:
+			self.license_changes = {}
+			for pkg, missing_licenses in self.depgraph._dynamic_config._needed_license_changes.items():
+				self.license_changes[pkg.cpv] = missing_licenses
+
+		if self.depgraph._dynamic_config._slot_conflict_handler is not None:
+			self.slot_collision_solutions = []
+			handler = self.depgraph._dynamic_config._slot_conflict_handler
+
+			for change in handler.changes:
+				new_change = {}
+				for pkg in change:
+					new_change[pkg.cpv] = change[pkg]
+				self.slot_collision_solutions.append(new_change)
+
+		if self.depgraph._dynamic_config._circular_dependency_handler is not None:
+			handler = self.depgraph._dynamic_config._circular_dependency_handler
+			sol = handler.solutions
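+			#handler.solutions maps package objects to sets of possible
+			#(flag, value) change-sets; re-key it by cpv so test cases can
+			#state their expectations as plain strings.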
+			self.circular_dependency_solutions = dict(zip([x.cpv for x in sol.keys()], sol.values()))
+
+class ResolverPlaygroundDepcleanResult(object):
+
+	checks = (
+		"success", "cleanlist", "ordered", "req_pkg_count",
+		)
+	optional_checks = (
+		"ordered", "req_pkg_count",
+		)
+
+	def __init__(self, atoms, rval, cleanlist, ordered, req_pkg_count):
+		self.atoms = atoms
+		self.success = rval == 0
+		self.cleanlist = cleanlist
+		self.ordered = ordered
+		self.req_pkg_count = req_pkg_count
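+
+	#"ordered" and "req_pkg_count" are optional_checks, so
+	#compare_with_result skips them when a test case leaves them unset.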

diff --git a/portage_with_autodep/pym/portage/tests/resolver/__init__.py b/portage_with_autodep/pym/portage/tests/resolver/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/resolver/__test__ b/portage_with_autodep/pym/portage/tests/resolver/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_autounmask.py b/portage_with_autodep/pym/portage/tests/resolver/test_autounmask.py
new file mode 100644
index 0000000..54c435f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_autounmask.py
@@ -0,0 +1,326 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class AutounmaskTestCase(TestCase):
+
+	def testAutounmask(self):
+
+		ebuilds = {
+			#ebuilds to test use changes
+			"dev-libs/A-1": { "SLOT": 1, "DEPEND": "dev-libs/B[foo]", "EAPI": 2}, 
+			"dev-libs/A-2": { "SLOT": 2, "DEPEND": "dev-libs/B[bar]", "EAPI": 2}, 
+			"dev-libs/B-1": { "DEPEND": "foo? ( dev-libs/C ) bar? ( dev-libs/D )", "IUSE": "foo bar"}, 
+			"dev-libs/C-1": {},
+			"dev-libs/D-1": {},
+
+			#ebuilds to test if we allow changing of masked or forced flags
+			"dev-libs/E-1": { "SLOT": 1, "DEPEND": "dev-libs/F[masked-flag]", "EAPI": 2},
+			"dev-libs/E-2": { "SLOT": 2, "DEPEND": "dev-libs/G[-forced-flag]", "EAPI": 2},
+			"dev-libs/F-1": { "IUSE": "masked-flag"},
+			"dev-libs/G-1": { "IUSE": "forced-flag"},
+
+			#ebuilds to test keyword changes
+			"app-misc/Z-1": { "KEYWORDS": "~x86", "DEPEND": "app-misc/Y" },
+			"app-misc/Y-1": { "KEYWORDS": "~x86" },
+			"app-misc/W-1": {},
+			"app-misc/W-2": { "KEYWORDS": "~x86" },
+			"app-misc/V-1": { "KEYWORDS": "~x86", "DEPEND": ">=app-misc/W-2"},
+
+			#ebuilds to test mask and keyword changes
+			"app-text/A-1": {},
+			"app-text/B-1": { "KEYWORDS": "~x86" },
+			"app-text/C-1": { "KEYWORDS": "" },
+			"app-text/D-1": { "KEYWORDS": "~x86" },
+			"app-text/D-2": { "KEYWORDS": "" },
+
+			#ebuilds for mixed test for || dep handling
+			"sci-libs/K-1": { "DEPEND": " || ( sci-libs/L[bar] || ( sci-libs/M sci-libs/P ) )", "EAPI": 2},
+			"sci-libs/K-2": { "DEPEND": " || ( sci-libs/L[bar] || ( sci-libs/P sci-libs/M ) )", "EAPI": 2},
+			"sci-libs/K-3": { "DEPEND": " || ( sci-libs/M || ( sci-libs/L[bar] sci-libs/P ) )", "EAPI": 2},
+			"sci-libs/K-4": { "DEPEND": " || ( sci-libs/M || ( sci-libs/P sci-libs/L[bar] ) )", "EAPI": 2},
+			"sci-libs/K-5": { "DEPEND": " || ( sci-libs/P || ( sci-libs/L[bar] sci-libs/M ) )", "EAPI": 2},
+			"sci-libs/K-6": { "DEPEND": " || ( sci-libs/P || ( sci-libs/M sci-libs/L[bar] ) )", "EAPI": 2},
+			"sci-libs/K-7": { "DEPEND": " || ( sci-libs/M sci-libs/L[bar] )", "EAPI": 2},
+			"sci-libs/K-8": { "DEPEND": " || ( sci-libs/L[bar] sci-libs/M )", "EAPI": 2},
+
+			"sci-libs/L-1": { "IUSE": "bar" },
+			"sci-libs/M-1": { "KEYWORDS": "~x86" },
+			"sci-libs/P-1": { },
+
+			#ebuilds to test these nice "required by cat/pkg[foo]" messages
+			"dev-util/Q-1": { "DEPEND": "foo? ( dev-util/R[bar] )", "IUSE": "+foo", "EAPI": 2 },
+			"dev-util/Q-2": { "RDEPEND": "!foo? ( dev-util/R[bar] )", "IUSE": "foo", "EAPI": 2 },
+			"dev-util/R-1": { "IUSE": "bar" },
+
+			#ebuilds to test interaction with REQUIRED_USE
+			"app-portage/A-1": { "DEPEND": "app-portage/B[foo]", "EAPI": 2 }, 
+			"app-portage/A-2": { "DEPEND": "app-portage/B[foo=]", "IUSE": "+foo", "REQUIRED_USE": "foo", "EAPI": "4" }, 
+
+			"app-portage/B-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" }, 
+			"app-portage/C-1": { "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+			}
+
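+		#The use_changes expected below mirror the package.use entries that
+		#--autounmask suggests, e.g. {"dev-libs/B-1": {"foo": True}}
+		#corresponds to a suggested '=dev-libs/B-1 foo' line.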
+		test_cases = (
+				#Test USE changes.
+				#The simple case.
+
+				ResolverPlaygroundTestCase(
+					["dev-libs/A:1"],
+					options = {"--autounmask": "n"},
+					success = False),
+				ResolverPlaygroundTestCase(
+					["dev-libs/A:1"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
+					use_changes = { "dev-libs/B-1": {"foo": True} } ),
+
+				#Make sure we restart if needed.
+				ResolverPlaygroundTestCase(
+					["dev-libs/A:1", "dev-libs/B"],
+					options = {"--autounmask": True},
+					all_permutations = True,
+					success = False,
+					mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
+					use_changes = { "dev-libs/B-1": {"foo": True} } ),
+				ResolverPlaygroundTestCase(
+					["dev-libs/A:1", "dev-libs/A:2", "dev-libs/B"],
+					options = {"--autounmask": True},
+					all_permutations = True,
+					success = False,
+					mergelist = ["dev-libs/D-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", "dev-libs/A-2"],
+					ignore_mergelist_order = True,
+					use_changes = { "dev-libs/B-1": {"foo": True, "bar": True} } ),
+
+				#Test keywording.
+				#The simple case.
+
+				ResolverPlaygroundTestCase(
+					["app-misc/Z"],
+					options = {"--autounmask": "n"},
+					success = False),
+				ResolverPlaygroundTestCase(
+					["app-misc/Z"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["app-misc/Y-1", "app-misc/Z-1"],
+					unstable_keywords = ["app-misc/Y-1", "app-misc/Z-1"]),
+
+				#Make sure that the backtracking for slot conflicts handles our mess.
+
+				ResolverPlaygroundTestCase(
+					["=app-misc/V-1", "app-misc/W"],
+					options = {"--autounmask": True},
+					all_permutations = True,
+					success = False,
+					mergelist = ["app-misc/W-2", "app-misc/V-1"],
+					unstable_keywords = ["app-misc/W-2", "app-misc/V-1"]),
+
+				#Mixed testing
+				#Make sure we don't change use for something in a || dep if there is another choice
+				#that needs no change.
+				
+				ResolverPlaygroundTestCase(
+					["=sci-libs/K-1"],
+					options = {"--autounmask": True},
+					success = True,
+					mergelist = ["sci-libs/P-1", "sci-libs/K-1"]),
+				ResolverPlaygroundTestCase(
+					["=sci-libs/K-2"],
+					options = {"--autounmask": True},
+					success = True,
+					mergelist = ["sci-libs/P-1", "sci-libs/K-2"]),
+				ResolverPlaygroundTestCase(
+					["=sci-libs/K-3"],
+					options = {"--autounmask": True},
+					success = True,
+					mergelist = ["sci-libs/P-1", "sci-libs/K-3"]),
+				ResolverPlaygroundTestCase(
+					["=sci-libs/K-4"],
+					options = {"--autounmask": True},
+					success = True,
+					mergelist = ["sci-libs/P-1", "sci-libs/K-4"]),
+				ResolverPlaygroundTestCase(
+					["=sci-libs/K-5"],
+					options = {"--autounmask": True},
+					success = True,
+					mergelist = ["sci-libs/P-1", "sci-libs/K-5"]),
+				ResolverPlaygroundTestCase(
+					["=sci-libs/K-6"],
+					options = {"--autounmask": True},
+					success = True,
+					mergelist = ["sci-libs/P-1", "sci-libs/K-6"]),
+
+				#Make sure we prefer use changes over keyword changes.
+				ResolverPlaygroundTestCase(
+					["=sci-libs/K-7"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["sci-libs/L-1", "sci-libs/K-7"],
+					use_changes = { "sci-libs/L-1": { "bar": True } }),
+				ResolverPlaygroundTestCase(
+					["=sci-libs/K-8"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["sci-libs/L-1", "sci-libs/K-8"],
+					use_changes = { "sci-libs/L-1": { "bar": True } }),
+
+				#Test these nice "required by cat/pkg[foo]" messages.
+				ResolverPlaygroundTestCase(
+					["=dev-util/Q-1"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["dev-util/R-1", "dev-util/Q-1"],
+					use_changes = { "dev-util/R-1": { "bar": True } }),
+				ResolverPlaygroundTestCase(
+					["=dev-util/Q-2"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["dev-util/R-1", "dev-util/Q-2"],
+					use_changes = { "dev-util/R-1": { "bar": True } }),
+
+				#Test interaction with REQUIRED_USE.
+				ResolverPlaygroundTestCase(
+					["=app-portage/A-1"],
+					options = { "--autounmask": True },
+					use_changes = None,
+					success = False),
+				ResolverPlaygroundTestCase(
+					["=app-portage/A-2"],
+					options = { "--autounmask": True },
+					use_changes = None,
+					success = False),
+				ResolverPlaygroundTestCase(
+					["=app-portage/C-1"],
+					options = { "--autounmask": True },
+					use_changes = None,
+					success = False),
+
+				#Make sure we don't change masked/forced flags.
+				ResolverPlaygroundTestCase(
+					["dev-libs/E:1"],
+					options = {"--autounmask": True},
+					use_changes = None,
+					success = False),
+				ResolverPlaygroundTestCase(
+					["dev-libs/E:2"],
+					options = {"--autounmask": True},
+					use_changes = None,
+					success = False),
+
+				#Test mask and keyword changes.
+				ResolverPlaygroundTestCase(
+					["app-text/A"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["app-text/A-1"],
+					needed_p_mask_changes = ["app-text/A-1"]),
+				ResolverPlaygroundTestCase(
+					["app-text/B"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["app-text/B-1"],
+					unstable_keywords = ["app-text/B-1"],
+					needed_p_mask_changes = ["app-text/B-1"]),
+				ResolverPlaygroundTestCase(
+					["app-text/C"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["app-text/C-1"],
+					unstable_keywords = ["app-text/C-1"],
+					needed_p_mask_changes = ["app-text/C-1"]),
+				#Make sure an unstable keyword is preferred over a missing keyword.
+				ResolverPlaygroundTestCase(
+					["app-text/D"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["app-text/D-1"],
+					unstable_keywords = ["app-text/D-1"]),
+				#Test missing keyword
+				ResolverPlaygroundTestCase(
+					["=app-text/D-2"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["app-text/D-2"],
+					unstable_keywords = ["app-text/D-2"])
+			)
+
+		profile = {
+			"use.mask":
+				(
+					"masked-flag",
+				),
+			"use.force":
+				(
+					"forced-flag",
+				),
+			"package.mask":
+				(
+					"app-text/A",
+					"app-text/B",
+					"app-text/C",
+				),
+		}
+
+		playground = ResolverPlayground(ebuilds=ebuilds, profile=profile)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()
+
+	def testAutounmaskForLicenses(self):
+
+		ebuilds = {
+			"dev-libs/A-1": { "LICENSE": "TEST" },
+			"dev-libs/B-1": { "LICENSE": "TEST", "IUSE": "foo", "KEYWORDS": "~x86"},
+			"dev-libs/C-1": { "DEPEND": "dev-libs/B[foo]", "EAPI": 2 },
+			
+			"dev-libs/D-1": { "DEPEND": "dev-libs/E dev-libs/F", "LICENSE": "TEST" },
+			"dev-libs/E-1": { "LICENSE": "TEST" },
+			"dev-libs/E-2": { "LICENSE": "TEST" },
+			"dev-libs/F-1": { "DEPEND": "=dev-libs/E-1", "LICENSE": "TEST" },
+			}
+
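+		#license_changes are keyed by cpv with the set of licenses that
+		#still need to be accepted, mirroring the package.license
+		#suggestions printed by --autounmask.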
+		test_cases = (
+				ResolverPlaygroundTestCase(
+					["=dev-libs/A-1"],
+					options = {"--autounmask": "n"},
+					success = False),
+				ResolverPlaygroundTestCase(
+					["=dev-libs/A-1"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["dev-libs/A-1"],
+					license_changes = { "dev-libs/A-1": set(["TEST"]) }),
+
+				#Test license+keyword+use change at once.
+				ResolverPlaygroundTestCase(
+					["=dev-libs/C-1"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["dev-libs/B-1", "dev-libs/C-1"],
+					license_changes = { "dev-libs/B-1": set(["TEST"]) },
+					unstable_keywords = ["dev-libs/B-1"],
+					use_changes = { "dev-libs/B-1": { "foo": True } }),
+
+				#Test license with backtracking.
+				ResolverPlaygroundTestCase(
+					["=dev-libs/D-1"],
+					options = {"--autounmask": True},
+					success = False,
+					mergelist = ["dev-libs/E-1", "dev-libs/F-1", "dev-libs/D-1"],
+					license_changes = { "dev-libs/D-1": set(["TEST"]), "dev-libs/E-1": set(["TEST"]), "dev-libs/E-2": set(["TEST"]), "dev-libs/F-1": set(["TEST"]) }),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_backtracking.py b/portage_with_autodep/pym/portage/tests/resolver/test_backtracking.py
new file mode 100644
index 0000000..fc49306
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_backtracking.py
@@ -0,0 +1,169 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class BacktrackingTestCase(TestCase):
+
+	def testBacktracking(self):
+		ebuilds = {
+			"dev-libs/A-1": {},
+			"dev-libs/A-2": {},
+			"dev-libs/B-1": { "DEPEND": "dev-libs/A" },
+			}
+
+		test_cases = (
+				ResolverPlaygroundTestCase(
+					["=dev-libs/A-1", "dev-libs/B"],
+					all_permutations = True,
+					mergelist = ["dev-libs/A-1", "dev-libs/B-1"],
+					success = True),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds)
+
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()
+
+
+	def testHittingTheBacktrackLimit(self):
+		ebuilds = {
+			"dev-libs/A-1": {},
+			"dev-libs/A-2": {},
+			"dev-libs/B-1": {},
+			"dev-libs/B-2": {},
+			"dev-libs/C-1": { "DEPEND": "dev-libs/A dev-libs/B" },
+			"dev-libs/D-1": { "DEPEND": "=dev-libs/A-1 =dev-libs/B-1" },
+			}
+
+		test_cases = (
+				ResolverPlaygroundTestCase(
+					["dev-libs/C", "dev-libs/D"],
+					all_permutations = True,
+					mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+					ignore_mergelist_order = True,
+					success = True),
+				#This one hits the backtrack limit. Be aware that this depends on the argument order.
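+				#With --backtrack=1 the conflict between D's =A-1/=B-1 deps
+				#and the best versions pulled in for C can no longer be
+				#solved by masking, so both versions of A and B remain in
+				#the merge list and no slot collision solutions are found.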
+				ResolverPlaygroundTestCase(
+					["dev-libs/D", "dev-libs/C"],
+					options = { "--backtrack": 1 },
+					mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/A-2", "dev-libs/B-2", "dev-libs/C-1", "dev-libs/D-1"],
+					ignore_mergelist_order = True,
+					slot_collision_solutions = [],
+					success = False),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds)
+
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()
+
+
+	def testBacktrackingGoodVersionFirst(self):
+		"""
+		When backtracking due to slot conflicts, we previously masked the
+		version that had been pulled in first. This is not always a good
+		idea; mask the highest version instead.
+		"""
+
+		ebuilds = {
+			"dev-libs/A-1": { "DEPEND": "=dev-libs/C-1 dev-libs/B" },
+			"dev-libs/B-1": { "DEPEND": "=dev-libs/C-1" },
+			"dev-libs/B-2": { "DEPEND": "=dev-libs/C-2" },
+			"dev-libs/C-1": { },
+			"dev-libs/C-2": { },
+			}
+
+		test_cases = (
+				ResolverPlaygroundTestCase(
+					["dev-libs/A"],
+					mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", ],
+					success = True),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds)
+
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()
+
+	def testBacktrackWithoutUpdates(self):
+		"""
+		If --update is not given, we might have to mask the old installed version later.
+		"""
+
+		ebuilds = {
+			"dev-libs/A-1": { "DEPEND": "dev-libs/Z" },
+			"dev-libs/B-1": { "DEPEND": ">=dev-libs/Z-2" },
+			"dev-libs/Z-1": { },
+			"dev-libs/Z-2": { },
+			}
+
+		installed = {
+			"dev-libs/Z-1": { "USE": "" },
+			}
+
+		test_cases = (
+				ResolverPlaygroundTestCase(
+					["dev-libs/B", "dev-libs/A"],
+					all_permutations = True,
+					mergelist = ["dev-libs/Z-2", "dev-libs/B-1", "dev-libs/A-1", ],
+					ignore_mergelist_order = True,
+					success = True),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()
+
+	def testBacktrackMissedUpdates(self):
+		"""
+		An update is missed due to a dependency on an older version.
+		"""
+
+		ebuilds = {
+			"dev-libs/A-1": { },
+			"dev-libs/A-2": { },
+			"dev-libs/B-1": { "RDEPEND": "<=dev-libs/A-1" },
+			}
+
+		installed = {
+			"dev-libs/A-1": { "USE": "" },
+			"dev-libs/B-1": { "USE": "", "RDEPEND": "<=dev-libs/A-1" },
+			}
+
+		options = {'--update' : True, '--deep' : True, '--selective' : True}
+
+		test_cases = (
+				ResolverPlaygroundTestCase(
+					["dev-libs/A", "dev-libs/B"],
+					options = options,
+					all_permutations = True,
+					mergelist = [],
+					success = True),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_circular_dependencies.py b/portage_with_autodep/pym/portage/tests/resolver/test_circular_dependencies.py
new file mode 100644
index 0000000..f8331ac
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_circular_dependencies.py
@@ -0,0 +1,84 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class CircularDependencyTestCase(TestCase):
+
+	#TODO:
+	#	use config change by autounmask
+	#	conflict on parent's parent
+	#	difference in RDEPEND and DEPEND
+	#	is there anything other than buildtime and runtime priority?
+	#	play with use.{mask,force}
+	#	play with REQUIRED_USE
+	
+
+	def testCircularDependency(self):
+
+		ebuilds = {
+			"dev-libs/Z-1": { "DEPEND": "foo? ( !bar? ( dev-libs/Y ) )", "IUSE": "+foo bar", "EAPI": 1 }, 
+			"dev-libs/Z-2": { "DEPEND": "foo? ( dev-libs/Y ) !bar? ( dev-libs/Y )", "IUSE": "+foo bar", "EAPI": 1 }, 
+			"dev-libs/Z-3": { "DEPEND": "foo? ( !bar? ( dev-libs/Y ) ) foo? ( dev-libs/Y ) !bar? ( dev-libs/Y )", "IUSE": "+foo bar", "EAPI": 1 }, 
+			"dev-libs/Y-1": { "DEPEND": "dev-libs/Z" },
+			"dev-libs/W-1": { "DEPEND": "dev-libs/Z[foo] dev-libs/Y", "EAPI": 2 },
+			"dev-libs/W-2": { "DEPEND": "dev-libs/Z[foo=] dev-libs/Y", "IUSE": "+foo", "EAPI": 2 },
+			"dev-libs/W-3": { "DEPEND": "dev-libs/Z[bar] dev-libs/Y", "EAPI": 2 },
+
+			"app-misc/A-1": { "DEPEND": "foo? ( =app-misc/B-1 )", "IUSE": "+foo bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+			"app-misc/A-2": { "DEPEND": "foo? ( =app-misc/B-2 ) bar? ( =app-misc/B-2 )", "IUSE": "+foo bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+			"app-misc/B-1": { "DEPEND": "=app-misc/A-1" },
+			"app-misc/B-2": { "DEPEND": "=app-misc/A-2" },
+			}
+
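+		#A circular_dependency_solutions entry maps a package in the cycle
+		#to alternative frozensets of (flag, value) changes that would
+		#break the cycle; an empty dict means no USE change can help.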
+		test_cases = (
+			#Simple tests
+			ResolverPlaygroundTestCase(
+				["=dev-libs/Z-1"],
+				circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False)]), frozenset([("bar", True)])])},
+				success = False),
+			ResolverPlaygroundTestCase(
+				["=dev-libs/Z-2"],
+				circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+				success = False),
+			ResolverPlaygroundTestCase(
+				["=dev-libs/Z-3"],
+				circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+				success = False),
+
+			#Conflict on parent
+			ResolverPlaygroundTestCase(
+				["=dev-libs/W-1"],
+				circular_dependency_solutions = {},
+				success = False),
+			ResolverPlaygroundTestCase(
+				["=dev-libs/W-2"],
+				circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+				success = False),
+
+			#Conflict with autounmask
+			ResolverPlaygroundTestCase(
+				["=dev-libs/W-3"],
+				circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False)])])},
+				use_changes = { "dev-libs/Z-3": {"bar": True}},
+				success = False),
+
+			#Conflict with REQUIRED_USE
+			ResolverPlaygroundTestCase(
+				["=app-misc/B-1"],
+				circular_dependency_solutions = { "app-misc/B-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+				success = False),
+			ResolverPlaygroundTestCase(
+				["=app-misc/B-2"],
+				circular_dependency_solutions = {},
+				success = False),
+		)
+
+		playground = ResolverPlayground(ebuilds=ebuilds)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_depclean.py b/portage_with_autodep/pym/portage/tests/resolver/test_depclean.py
new file mode 100644
index 0000000..ba70144
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_depclean.py
@@ -0,0 +1,285 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SimpleDepcleanTestCase(TestCase):
+
+	def testSimpleDepclean(self):
+		ebuilds = {
+			"dev-libs/A-1": {},
+			"dev-libs/B-1": {},
+			}
+		installed = {
+			"dev-libs/A-1": {},
+			"dev-libs/B-1": {},
+			}
+
+		world = (
+			"dev-libs/A",
+			)
+
+		test_cases = (
+			ResolverPlaygroundTestCase(
+				[],
+				options = {"--depclean": True},
+				success = True,
+				cleanlist = ["dev-libs/B-1"]),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()
+
+class DepcleanWithDepsTestCase(TestCase):
+
+	def testDepcleanWithDeps(self):
+		ebuilds = {
+			"dev-libs/A-1": { "RDEPEND": "dev-libs/C" },
+			"dev-libs/B-1": { "RDEPEND": "dev-libs/D" },
+			"dev-libs/C-1": {},
+			"dev-libs/D-1": { "RDEPEND": "dev-libs/E" },
+			"dev-libs/E-1": { "RDEPEND": "dev-libs/F" },
+			"dev-libs/F-1": {},
+			}
+		installed = {
+			"dev-libs/A-1": { "RDEPEND": "dev-libs/C" },
+			"dev-libs/B-1": { "RDEPEND": "dev-libs/D" },
+			"dev-libs/C-1": {},
+			"dev-libs/D-1": { "RDEPEND": "dev-libs/E" },
+			"dev-libs/E-1": { "RDEPEND": "dev-libs/F" },
+			"dev-libs/F-1": {},
+			}
+
+		world = (
+			"dev-libs/A",
+			)
+
+		test_cases = (
+			ResolverPlaygroundTestCase(
+				[],
+				options = {"--depclean": True},
+				success = True,
+				cleanlist = ["dev-libs/B-1", "dev-libs/D-1",
+					"dev-libs/E-1", "dev-libs/F-1"]),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()
+
+
+class DepcleanWithInstalledMaskedTestCase(TestCase):
+
+	def testDepcleanWithInstalledMasked(self):
+		"""
+		Test case for bug 332719.
+		emerge --depclean ignores that B is masked by license and removes C.
+		The next 'emerge -uDN world' then doesn't pick B and installs C again.
+		"""
+		ebuilds = {
+			"dev-libs/A-1": { "RDEPEND": "|| ( dev-libs/B dev-libs/C )" },
+			"dev-libs/B-1": { "LICENSE": "TEST", "KEYWORDS": "x86" },
+			"dev-libs/C-1": { "KEYWORDS": "x86" },
+			}
+		installed = {
+			"dev-libs/A-1": { "RDEPEND": "|| ( dev-libs/B dev-libs/C )" },
+			"dev-libs/B-1": { "LICENSE": "TEST", "KEYWORDS": "x86" },
+			"dev-libs/C-1": { "KEYWORDS": "x86" },
+			}
+
+		world = (
+			"dev-libs/A",
+			)
+
+		test_cases = (
+			ResolverPlaygroundTestCase(
+				[],
+				options = {"--depclean": True},
+				success = True,
+				#cleanlist = ["dev-libs/C-1"]),
+				cleanlist = ["dev-libs/B-1"]),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()
+
+class DepcleanInstalledKeywordMaskedSlotTestCase(TestCase):
+
+	def testDepcleanInstalledKeywordMaskedSlot(self):
+		"""
+		Verify that depclean removes a newer slot that is
+		masked by KEYWORDS (see bug #350285).
+		"""
+		ebuilds = {
+			"dev-libs/A-1": { "RDEPEND": "|| ( =dev-libs/B-2.7* =dev-libs/B-2.6* )" },
+			"dev-libs/B-2.6": { "SLOT":"2.6", "KEYWORDS": "x86" },
+			"dev-libs/B-2.7": { "SLOT":"2.7", "KEYWORDS": "~x86" },
+			}
+		installed = {
+			"dev-libs/A-1": { "EAPI" : "3", "RDEPEND": "|| ( dev-libs/B:2.7 dev-libs/B:2.6 )" },
+			"dev-libs/B-2.6": { "SLOT":"2.6", "KEYWORDS": "x86" },
+			"dev-libs/B-2.7": { "SLOT":"2.7", "KEYWORDS": "~x86" },
+			}
+
+		world = (
+			"dev-libs/A",
+			)
+
+		test_cases = (
+			ResolverPlaygroundTestCase(
+				[],
+				options = {"--depclean": True},
+				success = True,
+				cleanlist = ["dev-libs/B-2.7"]),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()
+
+class DepcleanWithExcludeTestCase(TestCase):
+
+	def testDepcleanWithExclude(self):
+
+		installed = {
+			"dev-libs/A-1": {},
+			"dev-libs/B-1": { "RDEPEND": "dev-libs/A" },
+			}
+
+		test_cases = (
+			#Without --exclude.
+			ResolverPlaygroundTestCase(
+				[],
+				options = {"--depclean": True},
+				success = True,
+				cleanlist = ["dev-libs/B-1", "dev-libs/A-1"]),
+			ResolverPlaygroundTestCase(
+				["dev-libs/A"],
+				options = {"--depclean": True},
+				success = True,
+				cleanlist = []),
+			ResolverPlaygroundTestCase(
+				["dev-libs/B"],
+				options = {"--depclean": True},
+				success = True,
+				cleanlist = ["dev-libs/B-1"]),
+
+			#With --exclude
+			ResolverPlaygroundTestCase(
+				[],
+				options = {"--depclean": True, "--exclude": ["dev-libs/A"]},
+				success = True,
+				cleanlist = ["dev-libs/B-1"]),
+			ResolverPlaygroundTestCase(
+				["dev-libs/B"],
+				options = {"--depclean": True, "--exclude": ["dev-libs/B"]},
+				success = True,
+				cleanlist = []),
+			)
+
+		playground = ResolverPlayground(installed=installed)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()
+
+class DepcleanWithExcludeAndSlotsTestCase(TestCase):
+
+	def testDepcleanWithExcludeAndSlots(self):
+
+		installed = {
+			"dev-libs/Z-1": { "SLOT": 1},
+			"dev-libs/Z-2": { "SLOT": 2},
+			"dev-libs/Y-1": { "RDEPEND": "=dev-libs/Z-1", "SLOT": 1 },
+			"dev-libs/Y-2": { "RDEPEND": "=dev-libs/Z-2", "SLOT": 2 },
+			}
+
+		world = ["dev-libs/Y"]
+
+		test_cases = (
+			#Without --exclude.
+			ResolverPlaygroundTestCase(
+				[],
+				options = {"--depclean": True},
+				success = True,
+				cleanlist = ["dev-libs/Y-1", "dev-libs/Z-1"]),
+			ResolverPlaygroundTestCase(
+				[],
+				options = {"--depclean": True, "--exclude": ["dev-libs/Z"]},
+				success = True,
+				cleanlist = ["dev-libs/Y-1"]),
+			ResolverPlaygroundTestCase(
+				[],
+				options = {"--depclean": True, "--exclude": ["dev-libs/Y"]},
+				success = True,
+				cleanlist = []),
+			)
+
+		playground = ResolverPlayground(installed=installed, world=world)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()
+
+class DepcleanAndWildcardsTestCase(TestCase):
+
+	def testDepcleanAndWildcards(self):
+
+		installed = {
+			"dev-libs/A-1": { "RDEPEND": "dev-libs/B" },
+			"dev-libs/B-1": {},
+			}
+
+		test_cases = (
+			ResolverPlaygroundTestCase(
+				["*/*"],
+				options = {"--depclean": True},
+				success = True,
+				cleanlist = ["dev-libs/A-1", "dev-libs/B-1"]),
+			ResolverPlaygroundTestCase(
+				["dev-libs/*"],
+				options = {"--depclean": True},
+				success = True,
+				cleanlist = ["dev-libs/A-1", "dev-libs/B-1"]),
+			ResolverPlaygroundTestCase(
+				["*/A"],
+				options = {"--depclean": True},
+				success = True,
+				cleanlist = ["dev-libs/A-1"]),
+			ResolverPlaygroundTestCase(
+				["*/B"],
+				options = {"--depclean": True},
+				success = True,
+				cleanlist = []),
+			)
+
+		playground = ResolverPlayground(installed=installed)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_depth.py b/portage_with_autodep/pym/portage/tests/resolver/test_depth.py
new file mode 100644
index 0000000..cb1e2dd
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_depth.py
@@ -0,0 +1,252 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+	ResolverPlaygroundTestCase)
+
+class ResolverDepthTestCase(TestCase):
+
+	def testResolverDepth(self):
+
+		ebuilds = {
+			"dev-libs/A-1": {"RDEPEND" : "dev-libs/B"},
+			"dev-libs/A-2": {"RDEPEND" : "dev-libs/B"},
+			"dev-libs/B-1": {"RDEPEND" : "dev-libs/C"},
+			"dev-libs/B-2": {"RDEPEND" : "dev-libs/C"},
+			"dev-libs/C-1": {},
+			"dev-libs/C-2": {},
+
+			"virtual/libusb-0"         : {"EAPI" :"2", "SLOT" : "0", "RDEPEND" : "|| ( >=dev-libs/libusb-0.1.12-r1:0 dev-libs/libusb-compat >=sys-freebsd/freebsd-lib-8.0[usb] )"},
+			"virtual/libusb-1"         : {"EAPI" :"2", "SLOT" : "1", "RDEPEND" : ">=dev-libs/libusb-1.0.4:1"},
+			"dev-libs/libusb-0.1.13"   : {},
+			"dev-libs/libusb-1.0.5"    : {"SLOT":"1"},
+			"dev-libs/libusb-compat-1" : {},
+			"sys-freebsd/freebsd-lib-8": {"IUSE" : "+usb"},
+
+			"sys-fs/udev-164"          : {"EAPI" : "1", "RDEPEND" : "virtual/libusb:0"},
+
+			"virtual/jre-1.5.0"        : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.5.0* =virtual/jdk-1.5.0* )"},
+			"virtual/jre-1.5.0-r1"     : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.5.0* =virtual/jdk-1.5.0* )"},
+			"virtual/jre-1.6.0"        : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.6.0* =virtual/jdk-1.6.0* )"},
+			"virtual/jre-1.6.0-r1"     : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.6.0* =virtual/jdk-1.6.0* )"},
+			"virtual/jdk-1.5.0"        : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
+			"virtual/jdk-1.5.0-r1"     : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
+			"virtual/jdk-1.6.0"        : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+			"virtual/jdk-1.6.0-r1"     : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+			"dev-java/gcj-jdk-4.5"     : {},
+			"dev-java/gcj-jdk-4.5-r1"  : {},
+			"dev-java/icedtea-6.1"     : {},
+			"dev-java/icedtea-6.1-r1"  : {},
+			"dev-java/sun-jdk-1.5"     : {"SLOT" : "1.5"},
+			"dev-java/sun-jdk-1.6"     : {"SLOT" : "1.6"},
+			"dev-java/sun-jre-bin-1.5" : {"SLOT" : "1.5"},
+			"dev-java/sun-jre-bin-1.6" : {"SLOT" : "1.6"},
+
+			"dev-java/ant-core-1.8"   : {"DEPEND"  : ">=virtual/jdk-1.4"},
+			"dev-db/hsqldb-1.8"       : {"RDEPEND" : ">=virtual/jre-1.6"},
+			}
+
+		installed = {
+			"dev-libs/A-1": {"RDEPEND" : "dev-libs/B"},
+			"dev-libs/B-1": {"RDEPEND" : "dev-libs/C"},
+			"dev-libs/C-1": {},
+
+			"virtual/jre-1.5.0"       : {"SLOT" : "1.5", "RDEPEND" : "|| ( =virtual/jdk-1.5.0* =dev-java/sun-jre-bin-1.5.0* )"},
+			"virtual/jre-1.6.0"       : {"SLOT" : "1.6", "RDEPEND" : "|| ( =virtual/jdk-1.6.0* =dev-java/sun-jre-bin-1.6.0* )"},
+			"virtual/jdk-1.5.0"       : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
+			"virtual/jdk-1.6.0"       : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+			"dev-java/gcj-jdk-4.5"    : {},
+			"dev-java/icedtea-6.1"    : {},
+
+			"virtual/libusb-0"         : {"EAPI" :"2", "SLOT" : "0", "RDEPEND" : "|| ( >=dev-libs/libusb-0.1.12-r1:0 dev-libs/libusb-compat >=sys-freebsd/freebsd-lib-8.0[usb] )"},
+			}
+
+		world = ["dev-libs/A"]
+
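+		#Semantics exercised below: --deep=N limits dependency traversal
+		#to N levels below the arguments, while --deep=True stands for an
+		#unbounded depth.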
+		test_cases = (
+			ResolverPlaygroundTestCase(
+				["dev-libs/A"],
+				options = {"--update": True, "--deep": 0},
+				success = True,
+				mergelist = ["dev-libs/A-2"]),
+
+			ResolverPlaygroundTestCase(
+				["dev-libs/A"],
+				options = {"--update": True, "--deep": 1},
+				success = True,
+				mergelist = ["dev-libs/B-2", "dev-libs/A-2"]),
+
+			ResolverPlaygroundTestCase(
+				["dev-libs/A"],
+				options = {"--update": True, "--deep": 2},
+				success = True,
+				mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
+
+			ResolverPlaygroundTestCase(
+				["@world"],
+				options = {"--update": True, "--deep": True},
+				success = True,
+				mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
+
+			ResolverPlaygroundTestCase(
+				["@world"],
+				options = {"--emptytree": True},
+				success = True,
+				mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
+
+			ResolverPlaygroundTestCase(
+				["@world"],
+				options = {"--selective": True, "--deep": True},
+				success = True,
+				mergelist = []),
+
+			ResolverPlaygroundTestCase(
+				["dev-libs/A"],
+				options = {"--deep": 2},
+				success = True,
+				mergelist = ["dev-libs/A-2"]),
+
+			ResolverPlaygroundTestCase(
+				["virtual/jre"],
+				options = {},
+				success = True,
+				mergelist = ['virtual/jre-1.6.0-r1']),
+
+			ResolverPlaygroundTestCase(
+				["virtual/jre"],
+				options = {"--deep" : True},
+				success = True,
+				mergelist = ['virtual/jre-1.6.0-r1']),
+
+			# Test bug #141118, where we avoid pulling in
+			# redundant deps, satisfying nested virtuals
+			# as efficiently as possible.
+			ResolverPlaygroundTestCase(
+				["virtual/jre"],
+				options = {"--selective" : True, "--deep" : True},
+				success = True,
+				mergelist = []),
+
+			# Test bug #150361, where depgraph._greedy_slots()
+			# is triggered by --update with AtomArg.
+			ResolverPlaygroundTestCase(
+				["virtual/jre"],
+				options = {"--update" : True},
+				success = True,
+				ambiguous_merge_order = True,
+				mergelist = [('virtual/jre-1.6.0-r1', 'virtual/jre-1.5.0-r1')]),
+
+			# Recursively traversed virtual dependencies, and their
+			# direct dependencies, are considered to have the same
+			# depth as direct dependencies.
+			ResolverPlaygroundTestCase(
+				["virtual/jre"],
+				options = {"--update" : True, "--deep" : 1},
+				success = True,
+				ambiguous_merge_order = True,
+				merge_order_assertions=(('dev-java/icedtea-6.1-r1', 'virtual/jdk-1.6.0-r1'), ('virtual/jdk-1.6.0-r1', 'virtual/jre-1.6.0-r1'),
+					('dev-java/gcj-jdk-4.5-r1', 'virtual/jdk-1.5.0-r1'), ('virtual/jdk-1.5.0-r1', 'virtual/jre-1.5.0-r1')),
+				mergelist = [('dev-java/icedtea-6.1-r1', 'dev-java/gcj-jdk-4.5-r1', 'virtual/jdk-1.6.0-r1', 'virtual/jdk-1.5.0-r1', 'virtual/jre-1.6.0-r1', 'virtual/jre-1.5.0-r1')]),
+
+			ResolverPlaygroundTestCase(
+				["virtual/jre:1.5"],
+				options = {"--update" : True},
+				success = True,
+				mergelist = ['virtual/jre-1.5.0-r1']),
+
+			ResolverPlaygroundTestCase(
+				["virtual/jre:1.6"],
+				options = {"--update" : True},
+				success = True,
+				mergelist = ['virtual/jre-1.6.0-r1']),
+
+			# Test that we don't pull in any unnecessary updates
+			# when --update is not specified, even though we
+			# specified --deep.
+			ResolverPlaygroundTestCase(
+				["dev-java/ant-core"],
+				options = {"--deep" : True},
+				success = True,
+				mergelist = ["dev-java/ant-core-1.8"]),
+
+			ResolverPlaygroundTestCase(
+				["dev-java/ant-core"],
+				options = {"--update" : True},
+				success = True,
+				mergelist = ["dev-java/ant-core-1.8"]),
+
+			# Recursively traversed virtual dependencies, and their
+			# direct dependencies, are considered to have the same
+			# depth as direct dependencies.
+			ResolverPlaygroundTestCase(
+				["dev-java/ant-core"],
+				options = {"--update" : True, "--deep" : 1},
+				success = True,
+				mergelist = ['dev-java/icedtea-6.1-r1', 'virtual/jdk-1.6.0-r1', 'dev-java/ant-core-1.8']),
+
+			ResolverPlaygroundTestCase(
+				["dev-db/hsqldb"],
+				options = {"--deep" : True},
+				success = True,
+				mergelist = ["dev-db/hsqldb-1.8"]),
+
+			# Don't traverse deps of an installed package with --deep=0,
+			# even if it's a virtual.
+			ResolverPlaygroundTestCase(
+				["virtual/libusb:0"],
+				options = {"--selective" : True, "--deep" : 0},
+				success = True,
+				mergelist = []),
+
+			# Satisfy unsatisfied dep of installed package with --deep=1.
+			ResolverPlaygroundTestCase(
+				["virtual/libusb:0"],
+				options = {"--selective" : True, "--deep" : 1},
+				success = True,
+				mergelist = ['dev-libs/libusb-0.1.13']),
+
+			# Pull in direct dep of virtual, even with --deep=0.
+			ResolverPlaygroundTestCase(
+				["sys-fs/udev"],
+				options = {"--deep" : 0},
+				success = True,
+				mergelist = ['dev-libs/libusb-0.1.13', 'sys-fs/udev-164']),
+
+			# Test --nodeps with direct virtual deps.
+			ResolverPlaygroundTestCase(
+				["sys-fs/udev"],
+				options = {"--nodeps" : True},
+				success = True,
+				mergelist = ["sys-fs/udev-164"]),
+
+			# Test that --nodeps overrides --deep.
+			ResolverPlaygroundTestCase(
+				["sys-fs/udev"],
+				options = {"--nodeps" : True, "--deep" : True},
+				success = True,
+				mergelist = ["sys-fs/udev-164"]),
+
+			# Test that --nodeps overrides --emptytree.
+			ResolverPlaygroundTestCase(
+				["sys-fs/udev"],
+				options = {"--nodeps" : True, "--emptytree" : True},
+				success = True,
+				mergelist = ["sys-fs/udev-164"]),
+
+			# Test --emptytree with virtuals.
+			ResolverPlaygroundTestCase(
+				["sys-fs/udev"],
+				options = {"--emptytree" : True},
+				success = True,
+				mergelist = ['dev-libs/libusb-0.1.13', 'virtual/libusb-0', 'sys-fs/udev-164']),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+			world=world)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_eapi.py b/portage_with_autodep/pym/portage/tests/resolver/test_eapi.py
new file mode 100644
index 0000000..525b585
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_eapi.py
@@ -0,0 +1,115 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class EAPITestCase(TestCase):
+
+	def testEAPI(self):
+
+		ebuilds = {
+			#EAPI-1: IUSE-defaults
+			"dev-libs/A-1.0": { "EAPI": 0, "IUSE": "+foo" }, 
+			"dev-libs/A-1.1": { "EAPI": 1, "IUSE": "+foo" }, 
+			"dev-libs/A-1.2": { "EAPI": 2, "IUSE": "+foo" }, 
+			"dev-libs/A-1.3": { "EAPI": 3, "IUSE": "+foo" }, 
+			"dev-libs/A-1.4": { "EAPI": "4", "IUSE": "+foo" }, 
+
+			#EAPI-1: slot deps
+			"dev-libs/A-2.0": { "EAPI": 0, "DEPEND": "dev-libs/B:0" }, 
+			"dev-libs/A-2.1": { "EAPI": 1, "DEPEND": "dev-libs/B:0" }, 
+			"dev-libs/A-2.2": { "EAPI": 2, "DEPEND": "dev-libs/B:0" }, 
+			"dev-libs/A-2.3": { "EAPI": 3, "DEPEND": "dev-libs/B:0" }, 
+			"dev-libs/A-2.4": { "EAPI": "4", "DEPEND": "dev-libs/B:0" }, 
+
+			#EAPI-2: use deps
+			"dev-libs/A-3.0": { "EAPI": 0, "DEPEND": "dev-libs/B[foo]" }, 
+			"dev-libs/A-3.1": { "EAPI": 1, "DEPEND": "dev-libs/B[foo]" }, 
+			"dev-libs/A-3.2": { "EAPI": 2, "DEPEND": "dev-libs/B[foo]" }, 
+			"dev-libs/A-3.3": { "EAPI": 3, "DEPEND": "dev-libs/B[foo]" }, 
+			"dev-libs/A-3.4": { "EAPI": "4", "DEPEND": "dev-libs/B[foo]" }, 
+
+			#EAPI-2: strong blocks
+			"dev-libs/A-4.0": { "EAPI": 0, "DEPEND": "!!dev-libs/B" }, 
+			"dev-libs/A-4.1": { "EAPI": 1, "DEPEND": "!!dev-libs/B" }, 
+			"dev-libs/A-4.2": { "EAPI": 2, "DEPEND": "!!dev-libs/B" }, 
+			"dev-libs/A-4.3": { "EAPI": 3, "DEPEND": "!!dev-libs/B" }, 
+			"dev-libs/A-4.4": { "EAPI": "4", "DEPEND": "!!dev-libs/B" }, 
+
+			#EAPI-4: slot operator deps
+			#~ "dev-libs/A-5.0": { "EAPI": 0, "DEPEND": "dev-libs/B:*" }, 
+			#~ "dev-libs/A-5.1": { "EAPI": 1, "DEPEND": "dev-libs/B:*" }, 
+			#~ "dev-libs/A-5.2": { "EAPI": 2, "DEPEND": "dev-libs/B:*" }, 
+			#~ "dev-libs/A-5.3": { "EAPI": 3, "DEPEND": "dev-libs/B:*" }, 
+			#~ "dev-libs/A-5.4": { "EAPI": "4", "DEPEND": "dev-libs/B:*" }, 
+
+			#EAPI-4: use dep defaults
+			"dev-libs/A-6.0": { "EAPI": 0, "DEPEND": "dev-libs/B[bar(+)]" }, 
+			"dev-libs/A-6.1": { "EAPI": 1, "DEPEND": "dev-libs/B[bar(+)]" }, 
+			"dev-libs/A-6.2": { "EAPI": 2, "DEPEND": "dev-libs/B[bar(+)]" }, 
+			"dev-libs/A-6.3": { "EAPI": 3, "DEPEND": "dev-libs/B[bar(+)]" }, 
+			"dev-libs/A-6.4": { "EAPI": "4", "DEPEND": "dev-libs/B[bar(+)]" }, 
+			
+			#EAPI-4: REQUIRED_USE
+			"dev-libs/A-7.0": { "EAPI": 0, "IUSE": "foo bar", "REQUIRED_USE": "|| ( foo bar )" }, 
+			"dev-libs/A-7.1": { "EAPI": 1, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" }, 
+			"dev-libs/A-7.2": { "EAPI": 2, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" }, 
+			"dev-libs/A-7.3": { "EAPI": 3, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" }, 
+			"dev-libs/A-7.4": { "EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" }, 
+
+			"dev-libs/B-1": {"EAPI": 1, "IUSE": "+foo"}, 
+			}
+
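+		#Expected pattern: slot deps need EAPI 1, use deps and strong
+		#blocks need EAPI 2, and use dep defaults plus REQUIRED_USE need
+		#EAPI 4; ebuilds declaring an older EAPI must fail to resolve.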
+		test_cases = (
+			ResolverPlaygroundTestCase(["=dev-libs/A-1.0"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-1.1"], success = True, mergelist = ["dev-libs/A-1.1"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-1.2"], success = True, mergelist = ["dev-libs/A-1.2"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-1.3"], success = True, mergelist = ["dev-libs/A-1.3"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-1.4"], success = True, mergelist = ["dev-libs/A-1.4"]),
+
+			ResolverPlaygroundTestCase(["=dev-libs/A-2.0"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-2.1"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.1"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-2.2"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.2"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-2.3"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.3"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-2.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.4"]),
+
+			ResolverPlaygroundTestCase(["=dev-libs/A-3.0"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-3.1"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-3.2"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.2"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-3.3"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.3"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-3.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.4"]),
+
+			ResolverPlaygroundTestCase(["=dev-libs/A-4.0"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-4.1"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-4.2"], success = True, mergelist = ["dev-libs/A-4.2"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-4.3"], success = True, mergelist = ["dev-libs/A-4.3"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-4.4"], success = True, mergelist = ["dev-libs/A-4.4"]),
+
+			ResolverPlaygroundTestCase(["=dev-libs/A-5.0"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-5.1"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-5.2"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-5.3"], success = False),
+			# not implemented: EAPI-4: slot operator deps
+			#~ ResolverPlaygroundTestCase(["=dev-libs/A-5.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-5.4"]),
+
+			ResolverPlaygroundTestCase(["=dev-libs/A-6.0"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-6.1"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-6.2"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-6.3"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-6.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-6.4"]),
+			
+			ResolverPlaygroundTestCase(["=dev-libs/A-7.0"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-7.1"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-7.2"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-7.3"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-7.4"], success = True, mergelist = ["dev-libs/A-7.4"]),
+		)
+
+		playground = ResolverPlayground(ebuilds=ebuilds)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_merge_order.py b/portage_with_autodep/pym/portage/tests/resolver/test_merge_order.py
new file mode 100644
index 0000000..0a52c81
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_merge_order.py
@@ -0,0 +1,453 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+	ResolverPlaygroundTestCase)
+
+class MergeOrderTestCase(TestCase):
+
+	def testMergeOrder(self):
+		ebuilds = {
+			"app-misc/blocker-buildtime-a-1" : {},
+			"app-misc/blocker-buildtime-unbuilt-a-1" : {
+				"DEPEND" : "!app-misc/installed-blocker-a",
+			},
+			"app-misc/blocker-buildtime-unbuilt-hard-a-1" : {
+				"EAPI"   : "2",
+				"DEPEND" : "!!app-misc/installed-blocker-a",
+			},
+			"app-misc/blocker-update-order-a-1" : {},
+			"app-misc/blocker-update-order-hard-a-1" : {},
+			"app-misc/blocker-update-order-hard-unsolvable-a-1" : {},
+			"app-misc/blocker-runtime-a-1" : {},
+			"app-misc/blocker-runtime-b-1" : {},
+			"app-misc/blocker-runtime-hard-a-1" : {},
+			"app-misc/circ-buildtime-a-0": {},
+			"app-misc/circ-buildtime-a-1": {
+				"RDEPEND": "app-misc/circ-buildtime-b",
+			},
+			"app-misc/circ-buildtime-b-1": {
+				"RDEPEND": "app-misc/circ-buildtime-c",
+			},
+			"app-misc/circ-buildtime-c-1": {
+				"DEPEND": "app-misc/circ-buildtime-a",
+			},
+			"app-misc/circ-buildtime-unsolvable-a-1": {
+				"RDEPEND": "app-misc/circ-buildtime-unsolvable-b",
+			},
+			"app-misc/circ-buildtime-unsolvable-b-1": {
+				"RDEPEND": "app-misc/circ-buildtime-unsolvable-c",
+			},
+			"app-misc/circ-buildtime-unsolvable-c-1": {
+				"DEPEND": "app-misc/circ-buildtime-unsolvable-a",
+			},
+			"app-misc/circ-post-runtime-a-1": {
+				"PDEPEND": "app-misc/circ-post-runtime-b",
+			},
+			"app-misc/circ-post-runtime-b-1": {
+				"RDEPEND": "app-misc/circ-post-runtime-c",
+			},
+			"app-misc/circ-post-runtime-c-1": {
+				"RDEPEND": "app-misc/circ-post-runtime-a",
+			},
+			"app-misc/circ-runtime-a-1": {
+				"RDEPEND": "app-misc/circ-runtime-b",
+			},
+			"app-misc/circ-runtime-b-1": {
+				"RDEPEND": "app-misc/circ-runtime-c",
+			},
+			"app-misc/circ-runtime-c-1": {
+				"RDEPEND": "app-misc/circ-runtime-a",
+			},
+			"app-misc/circ-satisfied-a-0": {
+				"RDEPEND": "app-misc/circ-satisfied-b",
+			},
+			"app-misc/circ-satisfied-a-1": {
+				"RDEPEND": "app-misc/circ-satisfied-b",
+			},
+			"app-misc/circ-satisfied-b-0": {
+				"RDEPEND": "app-misc/circ-satisfied-c",
+			},
+			"app-misc/circ-satisfied-b-1": {
+				"RDEPEND": "app-misc/circ-satisfied-c",
+			},
+			"app-misc/circ-satisfied-c-0": {
+				"DEPEND": "app-misc/circ-satisfied-a",
+				"RDEPEND": "app-misc/circ-satisfied-a",
+			},
+			"app-misc/circ-satisfied-c-1": {
+				"DEPEND": "app-misc/circ-satisfied-a",
+				"RDEPEND": "app-misc/circ-satisfied-a",
+			},
+			"app-misc/circ-smallest-a-1": {
+				"RDEPEND": "app-misc/circ-smallest-b",
+			},
+			"app-misc/circ-smallest-b-1": {
+				"RDEPEND": "app-misc/circ-smallest-a",
+			},
+			"app-misc/circ-smallest-c-1": {
+				"RDEPEND": "app-misc/circ-smallest-d",
+			},
+			"app-misc/circ-smallest-d-1": {
+				"RDEPEND": "app-misc/circ-smallest-e",
+			},
+			"app-misc/circ-smallest-e-1": {
+				"RDEPEND": "app-misc/circ-smallest-c",
+			},
+			"app-misc/circ-smallest-f-1": {
+				"RDEPEND": "app-misc/circ-smallest-g app-misc/circ-smallest-a app-misc/circ-smallest-c",
+			},
+			"app-misc/circ-smallest-g-1": {
+				"RDEPEND": "app-misc/circ-smallest-f",
+			},
+			"app-misc/installed-blocker-a-1" : {
+				"EAPI"   : "2",
+				"DEPEND" : "!app-misc/blocker-buildtime-a",
+				"RDEPEND" : "!app-misc/blocker-runtime-a !app-misc/blocker-runtime-b !!app-misc/blocker-runtime-hard-a",
+			},
+			"app-misc/installed-old-version-blocks-a-1" : {
+				"RDEPEND" : "!app-misc/blocker-update-order-a",
+			},
+			"app-misc/installed-old-version-blocks-a-2" : {},
+			"app-misc/installed-old-version-blocks-hard-a-1" : {
+				"EAPI"    : "2",
+				"RDEPEND" : "!!app-misc/blocker-update-order-hard-a",
+			},
+			"app-misc/installed-old-version-blocks-hard-a-2" : {},
+			"app-misc/installed-old-version-blocks-hard-unsolvable-a-1" : {
+				"EAPI"    : "2",
+				"RDEPEND" : "!!app-misc/blocker-update-order-hard-unsolvable-a",
+			},
+			"app-misc/installed-old-version-blocks-hard-unsolvable-a-2" : {
+				"DEPEND"  : "app-misc/blocker-update-order-hard-unsolvable-a",
+				"RDEPEND" : "",
+			},
+			"app-misc/some-app-a-1": {
+				"RDEPEND": "app-misc/circ-runtime-a app-misc/circ-runtime-b",
+			},
+			"app-misc/some-app-b-1": {
+				"RDEPEND": "app-misc/circ-post-runtime-a app-misc/circ-post-runtime-b",
+			},
+			"app-misc/some-app-c-1": {
+				"RDEPEND": "app-misc/circ-buildtime-a app-misc/circ-buildtime-b",
+			},
+			"app-admin/eselect-python-20100321" : {},
+			"sys-apps/portage-2.1.9.42" : {
+				"DEPEND"  : "dev-lang/python",
+				"RDEPEND" : "dev-lang/python",
+			},
+			"sys-apps/portage-2.1.9.49" : {
+				"DEPEND"  : "dev-lang/python >=app-admin/eselect-python-20091230",
+				"RDEPEND" : "dev-lang/python",
+			},
+			"dev-lang/python-3.1" : {},
+			"dev-lang/python-3.2" : {},
+			"virtual/libc-0" : {
+				"RDEPEND" : "sys-libs/glibc",
+			},
+			"sys-devel/gcc-4.5.2" : {},
+			"sys-devel/binutils-2.18" : {},
+			"sys-devel/binutils-2.20.1" : {},
+			"sys-libs/glibc-2.11" : {
+				"DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
+				"RDEPEND": "",
+			},
+			"sys-libs/glibc-2.13" : {
+				"DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
+				"RDEPEND": "",
+			},
+			"virtual/os-headers-0" : {
+				"RDEPEND" : "sys-kernel/linux-headers",
+			},
+			"sys-kernel/linux-headers-2.6.38": {
+				"DEPEND" : "app-arch/xz-utils",
+				"RDEPEND": "",
+			},
+			"sys-kernel/linux-headers-2.6.39": {
+				"DEPEND" : "app-arch/xz-utils",
+				"RDEPEND": "",
+			},
+			"app-arch/xz-utils-5.0.1" : {},
+			"app-arch/xz-utils-5.0.2" : {},
+			"dev-util/pkgconfig-0.25-r2" : {},
+			"kde-base/kdelibs-3.5.7" : {
+				"PDEPEND" : "kde-misc/kdnssd-avahi",
+			},
+			"kde-misc/kdnssd-avahi-0.1.2" : {
+				"DEPEND"  : "kde-base/kdelibs app-arch/xz-utils dev-util/pkgconfig",
+				"RDEPEND" : "kde-base/kdelibs",
+			},
+			"kde-base/kdnssd-3.5.7" : {
+				"DEPEND"  : "kde-base/kdelibs",
+				"RDEPEND" : "kde-base/kdelibs",
+			},
+			"kde-base/libkdegames-3.5.7" : {
+				"DEPEND"  : "kde-base/kdelibs",
+				"RDEPEND" : "kde-base/kdelibs",
+			},
+			"kde-base/kmines-3.5.7" : {
+				"DEPEND"  : "kde-base/libkdegames",
+				"RDEPEND" : "kde-base/libkdegames",
+			},
+			"media-video/libav-0.7_pre20110327" : {
+				"EAPI" : "2",
+				"IUSE" : "X +encode",
+				"RDEPEND" : "!media-video/ffmpeg",
+			},
+			"media-video/ffmpeg-0.7_rc1" : {
+				"EAPI" : "2",
+				"IUSE" : "X +encode",
+			},
+			"virtual/ffmpeg-0.6.90" : {
+				"EAPI" : "2",
+				"IUSE" : "X +encode",
+				"RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
+			},
+		}
+
+		installed = {
+			"app-misc/circ-buildtime-a-0": {},
+			"app-misc/circ-satisfied-a-0": {
+				"RDEPEND": "app-misc/circ-satisfied-b",
+			},
+			"app-misc/circ-satisfied-b-0": {
+				"RDEPEND": "app-misc/circ-satisfied-c",
+			},
+			"app-misc/circ-satisfied-c-0": {
+				"DEPEND": "app-misc/circ-satisfied-a",
+				"RDEPEND": "app-misc/circ-satisfied-a",
+			},
+			"app-misc/installed-blocker-a-1" : {
+				"EAPI"   : "2",
+				"DEPEND" : "!app-misc/blocker-buildtime-a",
+				"RDEPEND" : "!app-misc/blocker-runtime-a !app-misc/blocker-runtime-b !!app-misc/blocker-runtime-hard-a",
+			},
+			"app-misc/installed-old-version-blocks-a-1" : {
+				"RDEPEND" : "!app-misc/blocker-update-order-a",
+			},
+			"app-misc/installed-old-version-blocks-hard-a-1" : {
+				"EAPI"    : "2",
+				"RDEPEND" : "!!app-misc/blocker-update-order-hard-a",
+			},
+			"app-misc/installed-old-version-blocks-hard-unsolvable-a-1" : {
+				"EAPI"    : "2",
+				"RDEPEND" : "!!app-misc/blocker-update-order-hard-unsolvable-a",
+			},
+			"sys-apps/portage-2.1.9.42" : {
+				"DEPEND"  : "dev-lang/python",
+				"RDEPEND" : "dev-lang/python",
+			},
+			"dev-lang/python-3.1" : {},
+			"virtual/libc-0" : {
+				"RDEPEND" : "sys-libs/glibc",
+			},
+			"sys-devel/binutils-2.18" : {},
+			"sys-libs/glibc-2.11" : {
+				"DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
+				"RDEPEND": "",
+			},
+			"virtual/os-headers-0" : {
+				"RDEPEND" : "sys-kernel/linux-headers",
+			},
+			"sys-kernel/linux-headers-2.6.38": {
+				"DEPEND" : "app-arch/xz-utils",
+				"RDEPEND": "",
+			},
+			"app-arch/xz-utils-5.0.1" : {},
+			"media-video/ffmpeg-0.7_rc1" : {
+				"EAPI" : "2",
+				"IUSE" : "X +encode",
+				"USE" : "encode",
+			},
+			"virtual/ffmpeg-0.6.90" : {
+				"EAPI" : "2",
+				"IUSE" : "X +encode",
+				"USE" : "encode",
+				"RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
+			},
+		}
+
+		test_cases = (
+			ResolverPlaygroundTestCase(
+				["app-misc/some-app-a"],
+				success = True,
+				ambiguous_merge_order = True,
+				mergelist = [("app-misc/circ-runtime-a-1", "app-misc/circ-runtime-b-1", "app-misc/circ-runtime-c-1"), "app-misc/some-app-a-1"]),
+			ResolverPlaygroundTestCase(
+				["app-misc/some-app-a"],
+				success = True,
+				ambiguous_merge_order = True,
+				mergelist = [("app-misc/circ-runtime-c-1", "app-misc/circ-runtime-b-1", "app-misc/circ-runtime-a-1"), "app-misc/some-app-a-1"]),
+			# Test unsolvable circular dep that is RDEPEND in one
+			# direction and DEPEND in the other.
+			ResolverPlaygroundTestCase(
+				["app-misc/circ-buildtime-unsolvable-a"],
+				success = False,
+				circular_dependency_solutions = {}),
+			# Test optimal merge order for a circular dep that is
+			# RDEPEND in one direction and DEPEND in the other.
+			# This requires an installed instance of the DEPEND
+			# package in order to be solvable.
+			ResolverPlaygroundTestCase(
+				["app-misc/some-app-c", "app-misc/circ-buildtime-a"],
+				success = True,
+				ambiguous_merge_order = True,
+				mergelist = [("app-misc/circ-buildtime-b-1", "app-misc/circ-buildtime-c-1"), "app-misc/circ-buildtime-a-1", "app-misc/some-app-c-1"]),
+			# Test optimal merge order for a circular dep that is
+			# RDEPEND in one direction and PDEPEND in the other.
+			ResolverPlaygroundTestCase(
+				["app-misc/some-app-b"],
+				success = True,
+				ambiguous_merge_order = True,
+				mergelist = ["app-misc/circ-post-runtime-a-1", ("app-misc/circ-post-runtime-b-1", "app-misc/circ-post-runtime-c-1"), "app-misc/some-app-b-1"]),
+			# Test optimal merge order for a circular dep that is
+			# RDEPEND in one direction and DEPEND in the other,
+			# with all dependencies initially satisfied. Optimally,
+			# the DEPEND/buildtime dep should be updated before the
+			# package that depends on it, even though it's feasible
+			# to update it later since it is already satisfied.
+			ResolverPlaygroundTestCase(
+				["app-misc/circ-satisfied-a", "app-misc/circ-satisfied-b", "app-misc/circ-satisfied-c"],
+				success = True,
+				all_permutations = True,
+				ambiguous_merge_order = True,
+				merge_order_assertions = (("app-misc/circ-satisfied-a-1", "app-misc/circ-satisfied-c-1"),),
+				mergelist = [("app-misc/circ-satisfied-a-1", "app-misc/circ-satisfied-b-1", "app-misc/circ-satisfied-c-1")]),
+			# In the case of multiple runtime cycles, where some cycles
+			# may depend on smaller independent cycles, it's optimal
+			# to merge smaller independent cycles before other cycles
+			# that depend on them.
+			ResolverPlaygroundTestCase(
+				["app-misc/circ-smallest-a", "app-misc/circ-smallest-c", "app-misc/circ-smallest-f"],
+				success = True,
+				ambiguous_merge_order = True,
+				all_permutations = True,
+				mergelist = [('app-misc/circ-smallest-a-1', 'app-misc/circ-smallest-b-1'),
+				('app-misc/circ-smallest-c-1', 'app-misc/circ-smallest-d-1', 'app-misc/circ-smallest-e-1'),
+				('app-misc/circ-smallest-f-1', 'app-misc/circ-smallest-g-1')]),
+			# installed package has buildtime-only blocker
+			# that should be ignored
+			ResolverPlaygroundTestCase(
+				["app-misc/blocker-buildtime-a"],
+				success = True,
+				mergelist = ["app-misc/blocker-buildtime-a-1"]),
+			# We're installing a package that an old version of
+			# an installed package blocks. However, an update is
+			# available to the old package. The old package should
+			# be updated first, in order to solve the blocker without
+			# any need for blocking packages to temporarily overlap.
+			ResolverPlaygroundTestCase(
+				["app-misc/blocker-update-order-a", "app-misc/installed-old-version-blocks-a"],
+				success = True,
+				all_permutations = True,
+				mergelist = ["app-misc/installed-old-version-blocks-a-2", "app-misc/blocker-update-order-a-1"]),
+			# This is the same as above but with a hard blocker. The hard
+			# blocker is solved automatically since the update makes it
+			# irrelevant.
+			ResolverPlaygroundTestCase(
+				["app-misc/blocker-update-order-hard-a", "app-misc/installed-old-version-blocks-hard-a"],
+				success = True,
+				all_permutations = True,
+				mergelist = ["app-misc/installed-old-version-blocks-hard-a-2", "app-misc/blocker-update-order-hard-a-1"]),
+			# This is similar to the above case except that it's unsolvable
+			# due to merge order, unless bug 250286 is implemented so that
+			# the installed blocker will be unmerged before installation
+			# of the package it blocks (rather than after like a soft blocker
+			# would be handled). The "unmerge before" behavior requested
+			# in bug 250286 must be optional since essential programs or
+			# libraries may be temporarily unavailable during a
+			# non-overlapping update like this.
+			ResolverPlaygroundTestCase(
+				["app-misc/blocker-update-order-hard-unsolvable-a", "app-misc/installed-old-version-blocks-hard-unsolvable-a"],
+				success = False,
+				all_permutations = True,
+				ambiguous_merge_order = True,
+				merge_order_assertions = (('app-misc/blocker-update-order-hard-unsolvable-a-1', 'app-misc/installed-old-version-blocks-hard-unsolvable-a-2'),),
+				mergelist = [('app-misc/blocker-update-order-hard-unsolvable-a-1', 'app-misc/installed-old-version-blocks-hard-unsolvable-a-2', '!!app-misc/blocker-update-order-hard-unsolvable-a')]),
+			# The installed package has runtime blockers that
+			# should cause it to be uninstalled. The uninstall
+			# task is executed only after blocking packages have
+			# been merged.
+			# TODO: distinguish between install/uninstall tasks in mergelist
+			ResolverPlaygroundTestCase(
+				["app-misc/blocker-runtime-a", "app-misc/blocker-runtime-b"],
+				success = True,
+				all_permutations = True,
+				ambiguous_merge_order = True,
+				mergelist = [("app-misc/blocker-runtime-a-1", "app-misc/blocker-runtime-b-1"), "app-misc/installed-blocker-a-1", ("!app-misc/blocker-runtime-a", "!app-misc/blocker-runtime-b")]),
+			# We have a soft buildtime blocker against an installed
+			# package that should cause it to be uninstalled. Note that with
+			# soft blockers, the blocking packages are allowed to temporarily
+			# overlap. This allows any essential programs/libraries provided
+			# by both packages to be available at all times.
+			# TODO: distinguish between install/uninstall tasks in mergelist
+			ResolverPlaygroundTestCase(
+				["app-misc/blocker-buildtime-unbuilt-a"],
+				success = True,
+				mergelist = ["app-misc/blocker-buildtime-unbuilt-a-1", "app-misc/installed-blocker-a-1", "!app-misc/installed-blocker-a"]),
+			# We have a hard buildtime blocker against an installed
+			# package that will not resolve automatically (unless
+			# the option requested in bug 250286 is implemented).
+			ResolverPlaygroundTestCase(
+				["app-misc/blocker-buildtime-unbuilt-hard-a"],
+				success = False,
+				mergelist = ['app-misc/blocker-buildtime-unbuilt-hard-a-1', '!!app-misc/installed-blocker-a']),
+			# An installed package has a hard runtime blocker that
+			# will not resolve automatically (unless the option
+			# requested in bug 250286 is implemented).
+			ResolverPlaygroundTestCase(
+				["app-misc/blocker-runtime-hard-a"],
+				success = False,
+				mergelist = ['app-misc/blocker-runtime-hard-a-1', '!!app-misc/blocker-runtime-hard-a']),
+			# Test swapping of providers for a new-style virtual package,
+			# which relies on delayed evaluation of disjunctive (virtual
+			# and ||) deps as required to solve bug #264434. Note that
+			# this behavior is not supported for old-style PROVIDE virtuals,
+			# as reported in bug #339164.
+			ResolverPlaygroundTestCase(
+				["media-video/libav"],
+				success=True,
+				mergelist = ['media-video/libav-0.7_pre20110327', 'media-video/ffmpeg-0.7_rc1', '!media-video/ffmpeg']),
+			# Test that PORTAGE_PACKAGE_ATOM is merged asap. Optimally,
+			# satisfied deps are always merged after the asap nodes that
+			# depend on them.
+			ResolverPlaygroundTestCase(
+				["dev-lang/python", portage.const.PORTAGE_PACKAGE_ATOM],
+				success = True,
+				all_permutations = True,
+				mergelist = ['app-admin/eselect-python-20100321', 'sys-apps/portage-2.1.9.49', 'dev-lang/python-3.2']),
+			# Test that OS_HEADERS_PACKAGE_ATOM and LIBC_PACKAGE_ATOM
+			# are merged asap, in order to account for implicit
+			# dependencies. See bug #303567. Optimally, satisfied deps
+			# are always merged after the asap nodes that depend on them.
+			ResolverPlaygroundTestCase(
+				["app-arch/xz-utils", "sys-kernel/linux-headers", "sys-devel/binutils", "sys-libs/glibc"],
+				options = {"--complete-graph" : True},
+				success = True,
+				all_permutations = True,
+				ambiguous_merge_order = True,
+				mergelist = ['sys-kernel/linux-headers-2.6.39', 'sys-devel/gcc-4.5.2', 'sys-libs/glibc-2.13', ('app-arch/xz-utils-5.0.2', 'sys-devel/binutils-2.20.1')]),
+			# Test asap install of PDEPEND for bug #180045.
+			ResolverPlaygroundTestCase(
+				["kde-base/kmines", "kde-base/kdnssd", "kde-base/kdelibs", "app-arch/xz-utils"],
+				success = True,
+				all_permutations = True,
+				ambiguous_merge_order = True,
+				merge_order_assertions = (
+					('dev-util/pkgconfig-0.25-r2', 'kde-misc/kdnssd-avahi-0.1.2'),
+					('kde-misc/kdnssd-avahi-0.1.2', 'kde-base/libkdegames-3.5.7'),
+					('kde-misc/kdnssd-avahi-0.1.2', 'kde-base/kdnssd-3.5.7'),
+					('kde-base/libkdegames-3.5.7', 'kde-base/kmines-3.5.7'),
+				),
+				mergelist = [('kde-base/kdelibs-3.5.7', 'dev-util/pkgconfig-0.25-r2', 'kde-misc/kdnssd-avahi-0.1.2', 'app-arch/xz-utils-5.0.2', 'kde-base/libkdegames-3.5.7', 'kde-base/kdnssd-3.5.7', 'kde-base/kmines-3.5.7')]),
+		)
+
+		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

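The circ-smallest-* fixtures above pin down a concrete ordering rule: independent runtime cycles are merged before the cycles that depend on them. One way to see why the expected mergelist groups (a, b), (c, d, e) and then (f, g), offered here as a sketch rather than Portage's actual depgraph code, is to take the strongly connected components of the RDEPEND graph; Tarjan's algorithm already emits SCCs dependencies-first:

    # Illustrative sketch only; the names mirror the circ-smallest-* test
    # packages, and this is not the resolver's real cycle handling.
    def tarjan_sccs(graph):
        num, low, stack, on_stack, out = {}, {}, [], set(), []
        counter = [0]
        def visit(v):
            num[v] = low[v] = counter[0]
            counter[0] += 1
            stack.append(v)
            on_stack.add(v)
            for w in graph.get(v, ()):
                if w not in num:
                    visit(w)
                    low[v] = min(low[v], low[w])
                elif w in on_stack:
                    low[v] = min(low[v], num[w])
            if low[v] == num[v]:
                scc = []
                while True:
                    w = stack.pop()
                    on_stack.discard(w)
                    scc.append(w)
                    if w == v:
                        break
                out.append(scc)
        for v in list(graph):
            if v not in num:
                visit(v)
        return out  # dependencies come out before their dependents

    rdepend = {
        "f": ["g", "a", "c"], "g": ["f"],    # the f/g cycle depends on...
        "a": ["b"], "b": ["a"],              # ...a two-package cycle
        "c": ["d"], "d": ["e"], "e": ["c"],  # ...and a three-package cycle
    }
    print(tarjan_sccs(rdepend))  # e.g. [['b', 'a'], ['e', 'd', 'c'], ['g', 'f']]
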
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py b/portage_with_autodep/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py
new file mode 100644
index 0000000..a860e7b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py
@@ -0,0 +1,31 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MissingIUSEandEvaluatedAtomsTestCase(TestCase):
+
+	def testMissingIUSEandEvaluatedAtoms(self):
+		ebuilds = {
+			"dev-libs/A-1": { "DEPEND": "dev-libs/B[foo?]", "IUSE": "foo bar", "EAPI": 2 }, 
+			"dev-libs/A-2": { "DEPEND": "dev-libs/B[foo?,bar]", "IUSE": "foo bar", "EAPI": 2 }, 
+			"dev-libs/B-1": { "IUSE": "bar" }, 
+			}
+
+		test_cases = (
+			ResolverPlaygroundTestCase(
+				["=dev-libs/A-1"],
+				success = False),
+			ResolverPlaygroundTestCase(
+				["=dev-libs/A-2"],
+				success = False),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

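Both cases above fail because a conditional USE dep like "dev-libs/B[foo?]" is only well-formed when the target lists foo in IUSE, and dev-libs/B advertises only bar. On the parent side, the conditional resolves against the parent's own USE settings. A minimal sketch, assuming the evaluate_conditionals() method on portage.dep.Atom behaves as in this snapshot:

    from portage.dep import Atom

    atom = Atom("dev-libs/B[foo?,bar]")
    # With USE=foo on the parent, "foo?" hardens into "foo"...
    print(atom.evaluate_conditionals(["foo"]))  # e.g. dev-libs/B[foo,bar]
    # ...and with foo unset it drops out of the dep entirely.
    print(atom.evaluate_conditionals([]))       # e.g. dev-libs/B[bar]
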
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_multirepo.py b/portage_with_autodep/pym/portage/tests/resolver/test_multirepo.py
new file mode 100644
index 0000000..34c6d45
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_multirepo.py
@@ -0,0 +1,318 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MultirepoTestCase(TestCase):
+
+	def testMultirepo(self):
+		ebuilds = {
+			#Simple repo selection
+			"dev-libs/A-1": { },
+			"dev-libs/A-1::repo1": { },
+			"dev-libs/A-2::repo1": { },
+			"dev-libs/A-1::repo2": { },
+
+			#Packages in exactly one repo
+			"dev-libs/B-1": { },
+			"dev-libs/C-1::repo1": { },
+
+			#Package in repository 1 and 2, but 2 must be used
+			"dev-libs/D-1::repo1": { },
+			"dev-libs/D-1::repo2": { },
+
+			"dev-libs/E-1": { },
+			"dev-libs/E-1::repo1": { },
+			"dev-libs/E-1::repo2": { "SLOT": "1" },
+
+			"dev-libs/F-1::repo1": { "SLOT": "1" },
+			"dev-libs/F-1::repo2": { "SLOT": "1" },
+
+			"dev-libs/G-1::repo1": { "EAPI" : "4", "IUSE":"+x +y", "REQUIRED_USE" : "" },
+			"dev-libs/G-1::repo2": { "EAPI" : "4", "IUSE":"+x +y", "REQUIRED_USE" : "^^ ( x y )" },
+
+			"dev-libs/H-1": {	"KEYWORDS": "x86", "EAPI" : "3",
+								"RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )" },
+
+			"dev-libs/I-1::repo2": { "SLOT" : "1"},
+			"dev-libs/I-2::repo2": { "SLOT" : "2"},
+			}
+
+		installed = {
+			"dev-libs/H-1": { "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )"},
+			"dev-libs/I-2::repo1": {"SLOT" : "2"},
+			}
+
+		sets = {
+			"multirepotest": 
+				( "dev-libs/A::test_repo", )
+		}
+
+		test_cases = (
+			#Simple repo selection
+			ResolverPlaygroundTestCase(
+				["dev-libs/A"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/A-2::repo1"]),
+			ResolverPlaygroundTestCase(
+				["dev-libs/A::test_repo"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/A-1"]),
+			ResolverPlaygroundTestCase(
+				["dev-libs/A::repo2"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/A-1::repo2"]),
+			ResolverPlaygroundTestCase(
+				["=dev-libs/A-1::repo1"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/A-1::repo1"]),
+			ResolverPlaygroundTestCase(
+				["@multirepotest"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/A-1"]),
+
+			#Packages in exactly one repo
+			ResolverPlaygroundTestCase(
+				["dev-libs/B"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/B-1"]),
+			ResolverPlaygroundTestCase(
+				["dev-libs/C"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/C-1::repo1"]),
+
+			#Package in repository 1 and 2, but 2 must be used
+			ResolverPlaygroundTestCase(
+				["dev-libs/D"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/D-1::repo2"]),
+
+			#Atoms with slots
+			ResolverPlaygroundTestCase(
+				["dev-libs/E"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/E-1::repo2"]),
+			ResolverPlaygroundTestCase(
+				["dev-libs/E:1::repo2"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/E-1::repo2"]),
+			ResolverPlaygroundTestCase(
+				["dev-libs/E:1"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/E-1::repo2"]),
+			ResolverPlaygroundTestCase(
+				["dev-libs/F:1"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/F-1::repo2"]),
+			ResolverPlaygroundTestCase(
+				["=dev-libs/F-1:1"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/F-1::repo2"]),
+			ResolverPlaygroundTestCase(
+				["=dev-libs/F-1:1::repo1"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/F-1::repo1"]),
+
+			# A dependency on installed dev-libs/I-2, for which no ebuild
+			# is available from the same repo, should not unnecessarily
+			# reinstall the same version from a different repo.
+			ResolverPlaygroundTestCase(
+				["dev-libs/H"],
+				options = {"--update": True, "--deep": True},
+				success = True,
+				mergelist = []),
+
+			# Check interaction between repo priority and unsatisfied
+			# REQUIRED_USE, for bug #350254.
+			ResolverPlaygroundTestCase(
+				["=dev-libs/G-1"],
+				check_repo_names = True,
+				success = False),
+
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds,
+			installed=installed, sets=sets)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()
+
+
+	def testMultirepoUserConfig(self):
+		ebuilds = {
+			#package.use test
+			"dev-libs/A-1": { "IUSE": "foo" },
+			"dev-libs/A-2::repo1": { "IUSE": "foo" },
+			"dev-libs/A-3::repo2": { },
+			"dev-libs/B-1": { "DEPEND": "dev-libs/A", "EAPI": 2 },
+			"dev-libs/B-2": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
+			"dev-libs/B-3": { "DEPEND": "dev-libs/A[-foo]", "EAPI": 2 },
+
+			#package.keywords test
+			"dev-libs/C-1": { "KEYWORDS": "~x86" },
+			"dev-libs/C-1::repo1": { "KEYWORDS": "~x86" },
+
+			#package.license
+			"dev-libs/D-1": { "LICENSE": "TEST" },
+			"dev-libs/D-1::repo1": { "LICENSE": "TEST" },
+
+			#package.mask
+			"dev-libs/E-1": { },
+			"dev-libs/E-1::repo1": { },
+			"dev-libs/H-1": { },
+			"dev-libs/H-1::repo1": { },
+			"dev-libs/I-1::repo2": { "SLOT" : "1"},
+			"dev-libs/I-2::repo2": { "SLOT" : "2"},
+			"dev-libs/J-1": {	"KEYWORDS": "x86", "EAPI" : "3",
+								"RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )" },
+
+			#package.properties
+			"dev-libs/F-1": { "PROPERTIES": "bar"},
+			"dev-libs/F-1::repo1": { "PROPERTIES": "bar"},
+
+			#package.unmask
+			"dev-libs/G-1": { },
+			"dev-libs/G-1::repo1": { },
+
+			#package.mask with wildcards
+			"dev-libs/Z-1::repo3": { },
+			}
+
+		installed = {
+			"dev-libs/J-1": { "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )"},
+			"dev-libs/I-2::repo1": {"SLOT" : "2"},
+			}
+
+		user_config = {
+			"package.use":
+				(
+					"dev-libs/A::repo1 foo",
+				),
+			"package.keywords":
+				(
+					"=dev-libs/C-1::test_repo",
+				),
+			"package.license":
+				(
+					"=dev-libs/D-1::test_repo TEST",
+				),
+			"package.mask":
+				(
+					"dev-libs/E::repo1",
+					"dev-libs/H",
+					"dev-libs/I::repo1",
+					#needed for package.unmask test
+					"dev-libs/G",
+					#wildcard test
+					"*/*::repo3",
+				),
+			"package.properties":
+				(
+					"dev-libs/F::repo1 -bar",
+				),
+			"package.unmask":
+				(
+					"dev-libs/G::test_repo",
+				),
+			}
+
+		test_cases = (
+			#package.use test
+			ResolverPlaygroundTestCase(
+				["=dev-libs/B-1"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/A-3::repo2", "dev-libs/B-1"]),
+			ResolverPlaygroundTestCase(
+				["=dev-libs/B-2"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/A-2::repo1", "dev-libs/B-2"]),
+			ResolverPlaygroundTestCase(
+				["=dev-libs/B-3"],
+				options = { "--autounmask": 'n' },
+				success = False,
+				check_repo_names = True),
+
+			#package.keywords test
+			ResolverPlaygroundTestCase(
+				["dev-libs/C"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/C-1"]),
+
+			#package.license test
+			ResolverPlaygroundTestCase(
+				["dev-libs/D"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/D-1"]),
+
+			#package.mask test
+			ResolverPlaygroundTestCase(
+				["dev-libs/E"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/E-1"]),
+
+			# A dependency on installed dev-libs/I-2, whose ebuild is
+			# masked in the same repo, should not unnecessarily pull
+			# in a different slot. It should just pull in the same slot
+			# from a different repo (bug #351828).
+			ResolverPlaygroundTestCase(
+				["dev-libs/J"],
+				options = {"--update": True, "--deep": True},
+				success = True,
+				mergelist = ["dev-libs/I-2"]),
+
+			#package.properties test
+			ResolverPlaygroundTestCase(
+				["dev-libs/F"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/F-1"]),
+
+			#package.mask test
+			ResolverPlaygroundTestCase(
+				["dev-libs/G"],
+				success = True,
+				check_repo_names = True,
+				mergelist = ["dev-libs/G-1"]),
+			ResolverPlaygroundTestCase(
+				["dev-libs/H"],
+				options = { "--autounmask": 'n' },
+				success = False),
+
+			#package.mask with wildcards
+			ResolverPlaygroundTestCase(
+				["dev-libs/Z"],
+				options = { "--autounmask": 'n' },
+				success = False),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds,
+			installed=installed, user_config=user_config)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

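The ::repo suffixes threaded through this file extend plain atom syntax, so ordinary Atom parsing has to be told to accept them. A sketch, assuming an allow_repo keyword and .repo attribute analogous to the allow_wildcard handling shown in testInternalPackageSet.py later in this patch:

    from portage.dep import Atom

    a = Atom("=dev-libs/A-1::repo1", allow_repo=True)
    print(a.cp)    # e.g. dev-libs/A
    print(a.repo)  # e.g. repo1
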
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_multislot.py b/portage_with_autodep/pym/portage/tests/resolver/test_multislot.py
new file mode 100644
index 0000000..8615419
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_multislot.py
@@ -0,0 +1,40 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MultiSlotTestCase(TestCase):
+
+	def testMultiSlotSelective(self):
+		"""
+		Test that a package isn't reinstalled due to SLOT dependency
+		interaction with USE=multislot (bug #220341).
+		"""
+
+		ebuilds = {
+			"sys-devel/gcc-4.4.4": { "SLOT": "4.4" },
+			}
+
+		installed = {
+			"sys-devel/gcc-4.4.4": { "SLOT": "i686-pc-linux-gnu-4.4.4" },
+			}
+
+		options = {'--update' : True, '--deep' : True, '--selective' : True}
+
+		test_cases = (
+				ResolverPlaygroundTestCase(
+					["sys-devel/gcc:4.4"],
+					options = options,
+					mergelist = [],
+					success = True),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

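The selective case above hinges on SLOT matching alone: the installed gcc registered itself under the long multislot SLOT while the ebuild advertises SLOT 4.4, and --selective must not treat that as a reason to reinstall. Slot deps ride after the colon in an atom; a sketch assuming the .slot attribute on portage.dep.Atom:

    from portage.dep import Atom

    a = Atom("sys-devel/gcc:4.4")
    print(a.cp)    # e.g. sys-devel/gcc
    print(a.slot)  # e.g. 4.4
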
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_old_dep_chain_display.py b/portage_with_autodep/pym/portage/tests/resolver/test_old_dep_chain_display.py
new file mode 100644
index 0000000..8aedf59
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_old_dep_chain_display.py
@@ -0,0 +1,35 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class OldDepChainDisplayTestCase(TestCase):
+
+	def testOldDepChainDisplay(self):
+		ebuilds = {
+			"dev-libs/A-1": { "DEPEND": "foo? ( dev-libs/B[-bar] )", "IUSE": "+foo", "EAPI": "2" }, 
+			"dev-libs/A-2": { "DEPEND": "foo? ( dev-libs/C )", "IUSE": "+foo", "EAPI": "1" }, 
+			"dev-libs/B-1": { "IUSE": "bar", "DEPEND": "!bar? ( dev-libs/D[-baz] )", "EAPI": "2" },
+			"dev-libs/C-1": { "KEYWORDS": "~x86" },
+			"dev-libs/D-1": { "IUSE": "+baz", "EAPI": "1" },
+			}
+
+		test_cases = (
+			ResolverPlaygroundTestCase(
+				["=dev-libs/A-1"],
+				options = { "--autounmask": 'n' },
+				success = False),
+			ResolverPlaygroundTestCase(
+				["=dev-libs/A-2"],
+				options = { "--autounmask": 'n' },
+				success = False),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_output.py b/portage_with_autodep/pym/portage/tests/resolver/test_output.py
new file mode 100644
index 0000000..34efe9c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_output.py
@@ -0,0 +1,88 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MergelistOutputTestCase(TestCase):
+
+	def testMergelistOutput(self):
+		"""
+		This test doesn't check whether the output is correct, but makes
+		sure that we don't raise a traceback somewhere in the output code.
+		"""
+		ebuilds = {
+			"dev-libs/A-1": { "DEPEND": "dev-libs/B dev-libs/C", "IUSE": "+foo", "EAPI": 1 },
+			"dev-libs/B-1": { "DEPEND": "dev-libs/D", "IUSE": "foo +bar", "EAPI": 1 },
+			"dev-libs/C-1": { "DEPEND": "dev-libs/E", "IUSE": "foo bar" },
+			"dev-libs/D-1": { "IUSE": "" },
+			"dev-libs/E-1": {},
+
+			#reinstall for flags
+			"dev-libs/Z-1": { "IUSE": "+foo", "EAPI": 1 },
+			"dev-libs/Y-1": { "IUSE": "foo", "EAPI": 1 },
+			"dev-libs/X-1": {},
+			"dev-libs/W-1": { "IUSE": "+foo", "EAPI": 1 },
+			}
+
+		installed = {
+			"dev-libs/Z-1": { "USE": "", "IUSE": "foo" },
+			"dev-libs/Y-1": { "USE": "foo", "IUSE": "+foo", "EAPI": 1 },
+			"dev-libs/X-1": { "USE": "foo", "IUSE": "+foo", "EAPI": 1 },
+			"dev-libs/W-1": { },
+		}
+
+		option_combos = (
+			(),
+			("verbose",),
+			("tree",),
+			("tree", "unordered-display",),
+			("verbose",),
+			("verbose", "tree",),
+			("verbose", "tree", "unordered-display",),
+		)
+
+		test_cases = []
+		for options in option_combos:
+			testcase_opts = {}
+			for opt in options:
+				testcase_opts["--" + opt] = True
+
+			test_cases.append(ResolverPlaygroundTestCase(
+				["dev-libs/A"],
+				options = testcase_opts,
+				success = True,
+				ignore_mergelist_order=True,
+				mergelist = ["dev-libs/D-1", "dev-libs/E-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"]))
+
+			test_cases.append(ResolverPlaygroundTestCase(
+				["dev-libs/Z"],
+				options = testcase_opts,
+				success = True,
+				mergelist = ["dev-libs/Z-1"]))
+
+			test_cases.append(ResolverPlaygroundTestCase(
+				["dev-libs/Y"],
+				options = testcase_opts,
+				success = True,
+				mergelist = ["dev-libs/Y-1"]))
+
+			test_cases.append(ResolverPlaygroundTestCase(
+				["dev-libs/X"],
+				options = testcase_opts,
+				success = True,
+				mergelist = ["dev-libs/X-1"]))
+
+			test_cases.append(ResolverPlaygroundTestCase(
+				["dev-libs/W"],
+				options = testcase_opts,
+				success = True,
+				mergelist = ["dev-libs/W-1"]))
+
+		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_rebuild.py b/portage_with_autodep/pym/portage/tests/resolver/test_rebuild.py
new file mode 100644
index 0000000..b9c4d6d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_rebuild.py
@@ -0,0 +1,138 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+	ResolverPlaygroundTestCase)
+
+class RebuildTestCase(TestCase):
+
+	def testRebuild(self):
+		"""
+		Rebuild packages when dependencies that are used at both build-time and
+		run-time are upgraded.
+		"""
+
+		ebuilds = {
+			"sys-libs/x-1": { },
+			"sys-libs/x-1-r1": { },
+			"sys-libs/x-2": { },
+			"sys-apps/a-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+			"sys-apps/a-2": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+			"sys-apps/b-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+			"sys-apps/b-2": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+			"sys-apps/c-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : ""},
+			"sys-apps/c-2": { "DEPEND"  : "sys-libs/x", "RDEPEND" : ""},
+			"sys-apps/d-1": { "RDEPEND" : "sys-libs/x"},
+			"sys-apps/d-2": { "RDEPEND" : "sys-libs/x"},
+			"sys-apps/e-2": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+			"sys-apps/f-2": { "DEPEND"  : "sys-apps/a", "RDEPEND" : "sys-apps/a"},
+			"sys-apps/g-2": { "DEPEND"  : "sys-apps/b sys-libs/x",
+				"RDEPEND" : "sys-apps/b"},
+			}
+
+		installed = {
+			"sys-libs/x-1": { },
+			"sys-apps/a-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+			"sys-apps/b-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+			"sys-apps/c-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : ""},
+			"sys-apps/d-1": { "RDEPEND" : "sys-libs/x"},
+			"sys-apps/e-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
+			"sys-apps/f-1": { "DEPEND"  : "sys-apps/a", "RDEPEND" : "sys-apps/a"},
+			"sys-apps/g-1": { "DEPEND"  : "sys-apps/b sys-libs/x",
+				"RDEPEND" : "sys-apps/b"},
+			}
+
+		world = ["sys-apps/a", "sys-apps/b", "sys-apps/c", "sys-apps/d",
+			"sys-apps/e", "sys-apps/f", "sys-apps/g"]
+
+		test_cases = (
+				ResolverPlaygroundTestCase(
+					["sys-libs/x"],
+					options = {"--rebuild-if-unbuilt" : True,
+						"--rebuild-exclude" : ["sys-apps/b"]},
+					mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/e-2'],
+					ignore_mergelist_order = True,
+					success = True),
+
+				ResolverPlaygroundTestCase(
+					["sys-libs/x"],
+					options = {"--rebuild-if-unbuilt" : True},
+					mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/b-2',
+						'sys-apps/e-2', 'sys-apps/g-2'],
+					ignore_mergelist_order = True,
+					success = True),
+
+				ResolverPlaygroundTestCase(
+					["sys-libs/x"],
+					options = {"--rebuild-if-unbuilt" : True,
+						"--rebuild-ignore" : ["sys-libs/x"]},
+					mergelist = ['sys-libs/x-2'],
+					ignore_mergelist_order = True,
+					success = True),
+
+				ResolverPlaygroundTestCase(
+					["sys-libs/x"],
+					options = {"--rebuild-if-unbuilt" : True,
+						"--rebuild-ignore" : ["sys-apps/b"]},
+					mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/b-2',
+						'sys-apps/e-2'],
+					ignore_mergelist_order = True,
+					success = True),
+
+				ResolverPlaygroundTestCase(
+					["=sys-libs/x-1-r1"],
+					options = {"--rebuild-if-unbuilt" : True},
+					mergelist = ['sys-libs/x-1-r1', 'sys-apps/a-2',
+						'sys-apps/b-2', 'sys-apps/e-2', 'sys-apps/g-2'],
+					ignore_mergelist_order = True,
+					success = True),
+
+				ResolverPlaygroundTestCase(
+					["=sys-libs/x-1-r1"],
+					options = {"--rebuild-if-new-rev" : True},
+					mergelist = ['sys-libs/x-1-r1', 'sys-apps/a-2',
+						'sys-apps/b-2', 'sys-apps/e-2', 'sys-apps/g-2'],
+					ignore_mergelist_order = True,
+					success = True),
+
+				ResolverPlaygroundTestCase(
+					["=sys-libs/x-1-r1"],
+					options = {"--rebuild-if-new-ver" : True},
+					mergelist = ['sys-libs/x-1-r1'],
+					ignore_mergelist_order = True,
+					success = True),
+
+				ResolverPlaygroundTestCase(
+					["sys-libs/x"],
+					options = {"--rebuild-if-new-ver" : True},
+					mergelist = ['sys-libs/x-2', 'sys-apps/a-2',
+						'sys-apps/b-2', 'sys-apps/e-2', 'sys-apps/g-2'],
+					ignore_mergelist_order = True,
+					success = True),
+
+				ResolverPlaygroundTestCase(
+					["=sys-libs/x-1"],
+					options = {"--rebuild-if-new-rev" : True},
+					mergelist = ['sys-libs/x-1'],
+					ignore_mergelist_order = True,
+					success = True),
+
+				ResolverPlaygroundTestCase(
+					["=sys-libs/x-1"],
+					options = {"--rebuild-if-unbuilt" : True},
+					mergelist = ['sys-libs/x-1', 'sys-apps/a-2',
+						'sys-apps/b-2', 'sys-apps/e-2', 'sys-apps/g-2'],
+					ignore_mergelist_order = True,
+					success = True),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds,
+			installed=installed, world=world)
+
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

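The cases above separate three rebuild triggers that look similar on the command line. Restated as a pure-Python sketch (not emerge's implementation) over the old and new (version, revision) of a built dependency:

    def needs_rebuild(option, old, new):
        # old/new: (version, revision) of the DEPEND+RDEPEND dependency
        if option == "--rebuild-if-new-ver":
            return new[0] != old[0]          # version change only
        if option == "--rebuild-if-new-rev":
            return new != old                # version or revision change
        if option == "--rebuild-if-unbuilt":
            return True                      # any from-source (re)build
        return False

    # x-1 -> x-1-r1 is a new revision but not a new version:
    print(needs_rebuild("--rebuild-if-new-rev", ("1", "r0"), ("1", "r1")))  # True
    print(needs_rebuild("--rebuild-if-new-ver", ("1", "r0"), ("1", "r1")))  # False
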
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_required_use.py b/portage_with_autodep/pym/portage/tests/resolver/test_required_use.py
new file mode 100644
index 0000000..c8810fa
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_required_use.py
@@ -0,0 +1,114 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class RequiredUSETestCase(TestCase):
+
+	def testRequiredUSE(self):
+		"""
+		Only simple REQUIRED_USE values here. The parser is tested in dep/testCheckRequiredUse.
+		"""
+
+		ebuilds = {
+			"dev-libs/A-1" : {"EAPI": "4", "IUSE": "foo bar",   "REQUIRED_USE": "|| ( foo bar )"},
+			"dev-libs/A-2" : {"EAPI": "4", "IUSE": "foo +bar",  "REQUIRED_USE": "|| ( foo bar )"},
+			"dev-libs/A-3" : {"EAPI": "4", "IUSE": "+foo bar",  "REQUIRED_USE": "|| ( foo bar )"},
+			"dev-libs/A-4" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "|| ( foo bar )"},
+			"dev-libs/A-5" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "|| ( )"},
+
+			"dev-libs/B-1" : {"EAPI": "4", "IUSE": "foo bar",   "REQUIRED_USE": "^^ ( foo bar )"},
+			"dev-libs/B-2" : {"EAPI": "4", "IUSE": "foo +bar",  "REQUIRED_USE": "^^ ( foo bar )"},
+			"dev-libs/B-3" : {"EAPI": "4", "IUSE": "+foo bar",  "REQUIRED_USE": "^^ ( foo bar )"},
+			"dev-libs/B-4" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( foo bar )"},
+			"dev-libs/B-5" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( )"},
+
+			"dev-libs/C-1" : {"EAPI": "4", "IUSE": "+foo bar",  "REQUIRED_USE": "foo? ( !bar )"},
+			"dev-libs/C-2" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "foo? ( !bar )"},
+			"dev-libs/C-3" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "foo? ( bar )"},
+			"dev-libs/C-4" : {"EAPI": "4", "IUSE": "+foo bar",  "REQUIRED_USE": "foo? ( bar )"},
+			"dev-libs/C-5" : {"EAPI": "4", "IUSE": "foo bar",   "REQUIRED_USE": "foo? ( bar )"},
+			"dev-libs/C-6" : {"EAPI": "4", "IUSE": "foo +bar",  "REQUIRED_USE": "foo? ( bar )"},
+			"dev-libs/C-7" : {"EAPI": "4", "IUSE": "foo +bar",  "REQUIRED_USE": "!foo? ( bar )"},
+			"dev-libs/C-8" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "!foo? ( bar )"},
+			"dev-libs/C-9" : {"EAPI": "4", "IUSE": "+foo bar",  "REQUIRED_USE": "!foo? ( bar )"},
+			"dev-libs/C-10": {"EAPI": "4", "IUSE": "foo bar",   "REQUIRED_USE": "!foo? ( bar )"},
+			"dev-libs/C-11": {"EAPI": "4", "IUSE": "foo bar",   "REQUIRED_USE": "!foo? ( !bar )"},
+			"dev-libs/C-12": {"EAPI": "4", "IUSE": "foo +bar",  "REQUIRED_USE": "!foo? ( !bar )"},
+			"dev-libs/C-13": {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "!foo? ( !bar )"},
+			"dev-libs/C-14": {"EAPI": "4", "IUSE": "+foo bar",  "REQUIRED_USE": "!foo? ( !bar )"},
+
+			"dev-libs/D-1" : {"EAPI": "4", "IUSE": "+w +x +y z",    "REQUIRED_USE": "w? ( x || ( y z ) )"},
+			"dev-libs/D-2" : {"EAPI": "4", "IUSE": "+w +x +y +z",   "REQUIRED_USE": "w? ( x || ( y z ) )"},
+			"dev-libs/D-3" : {"EAPI": "4", "IUSE": "+w +x y z",     "REQUIRED_USE": "w? ( x || ( y z ) )"},
+			"dev-libs/D-4" : {"EAPI": "4", "IUSE": "+w x +y +z",    "REQUIRED_USE": "w? ( x || ( y z ) )"},
+			"dev-libs/D-5" : {"EAPI": "4", "IUSE": "w x y z",       "REQUIRED_USE": "w? ( x || ( y z ) )"},
+			}
+
+		test_cases = (
+			ResolverPlaygroundTestCase(["=dev-libs/A-1"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/A-2"], success = True, mergelist=["dev-libs/A-2"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-3"], success = True, mergelist=["dev-libs/A-3"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-4"], success = True, mergelist=["dev-libs/A-4"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-5"], success = True, mergelist=["dev-libs/A-5"]),
+
+			ResolverPlaygroundTestCase(["=dev-libs/B-1"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/B-2"], success = True, mergelist=["dev-libs/B-2"]),
+			ResolverPlaygroundTestCase(["=dev-libs/B-3"], success = True, mergelist=["dev-libs/B-3"]),
+			ResolverPlaygroundTestCase(["=dev-libs/B-4"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/B-5"], success = True, mergelist=["dev-libs/B-5"]),
+
+			ResolverPlaygroundTestCase(["=dev-libs/C-1"],  success = True, mergelist=["dev-libs/C-1"]),
+			ResolverPlaygroundTestCase(["=dev-libs/C-2"],  success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/C-3"],  success = True, mergelist=["dev-libs/C-3"]),
+			ResolverPlaygroundTestCase(["=dev-libs/C-4"],  success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/C-5"],  success = True, mergelist=["dev-libs/C-5"]),
+			ResolverPlaygroundTestCase(["=dev-libs/C-6"],  success = True, mergelist=["dev-libs/C-6"]),
+			ResolverPlaygroundTestCase(["=dev-libs/C-7"],  success = True, mergelist=["dev-libs/C-7"]),
+			ResolverPlaygroundTestCase(["=dev-libs/C-8"],  success = True, mergelist=["dev-libs/C-8"]),
+			ResolverPlaygroundTestCase(["=dev-libs/C-9"],  success = True, mergelist=["dev-libs/C-9"]),
+			ResolverPlaygroundTestCase(["=dev-libs/C-10"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/C-11"], success = True, mergelist=["dev-libs/C-11"]),
+			ResolverPlaygroundTestCase(["=dev-libs/C-12"], success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/C-13"], success = True, mergelist=["dev-libs/C-13"]),
+			ResolverPlaygroundTestCase(["=dev-libs/C-14"], success = True, mergelist=["dev-libs/C-14"]),
+
+			ResolverPlaygroundTestCase(["=dev-libs/D-1"],  success = True, mergelist=["dev-libs/D-1"]),
+			ResolverPlaygroundTestCase(["=dev-libs/D-2"],  success = True, mergelist=["dev-libs/D-2"]),
+			ResolverPlaygroundTestCase(["=dev-libs/D-3"],  success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/D-4"],  success = False),
+			ResolverPlaygroundTestCase(["=dev-libs/D-5"],  success = True, mergelist=["dev-libs/D-5"]),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()
+
+	def testRequiredUseOrDeps(self):
+
+		ebuilds = {
+			"dev-libs/A-1": { "IUSE": "+x +y", "REQUIRED_USE": "^^ ( x y )", "EAPI": "4" },
+			"dev-libs/B-1": { "IUSE": "+x +y", "REQUIRED_USE": "",           "EAPI": "4" },
+			"app-misc/p-1": { "RDEPEND": "|| ( =dev-libs/A-1 =dev-libs/B-1 )" },
+			}
+
+		test_cases = (
+				# This should fail and show a REQUIRED_USE error for
+				# dev-libs/A-1, since this choice is preferred.
+				ResolverPlaygroundTestCase(
+					["=app-misc/p-1"],
+					success = False),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

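The docstring at the top of this file points at dep/testCheckRequiredUse for the parser itself. A sketch of that entry point, assuming portage.dep.check_required_use(required_use, use, iuse_match) with a callable IUSE membership test; ^^ is the exactly-one-of constraint the B-* cases exercise:

    from portage.dep import check_required_use

    iuse = frozenset(["foo", "bar"])
    for use in ([], ["foo"], ["foo", "bar"]):
        ok = check_required_use("^^ ( foo bar )", use, iuse.__contains__)
        print(use, bool(ok))  # e.g. [] False, ['foo'] True, ['foo', 'bar'] False
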
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_simple.py b/portage_with_autodep/pym/portage/tests/resolver/test_simple.py
new file mode 100644
index 0000000..0bcfc4b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_simple.py
@@ -0,0 +1,57 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SimpleResolverTestCase(TestCase):
+
+	def testSimple(self):
+		ebuilds = {
+			"dev-libs/A-1": { "KEYWORDS": "x86" }, 
+			"dev-libs/A-2": { "KEYWORDS": "~x86" },
+			"dev-libs/B-1.2": {},
+
+			"app-misc/Z-1": { "DEPEND": "|| ( app-misc/Y ( app-misc/X app-misc/W ) )", "RDEPEND": "" },
+			"app-misc/Y-1": { "KEYWORDS": "~x86" },
+			"app-misc/X-1": {},
+			"app-misc/W-1": {},
+			}
+		installed = {
+			"dev-libs/A-1": {},
+			"dev-libs/B-1.1": {},
+			}
+
+		test_cases = (
+			ResolverPlaygroundTestCase(["dev-libs/A"], success = True, mergelist = ["dev-libs/A-1"]),
+			ResolverPlaygroundTestCase(["=dev-libs/A-2"], options = { "--autounmask": 'n' }, success = False),
+
+			ResolverPlaygroundTestCase(
+				["dev-libs/A"],
+				options = {"--noreplace": True},
+				success = True,
+				mergelist = []),
+			ResolverPlaygroundTestCase(
+				["dev-libs/B"],
+				options = {"--noreplace": True},
+				success = True,
+				mergelist = []),
+			ResolverPlaygroundTestCase(
+				["dev-libs/B"],
+				options = {"--update": True},
+				success = True,
+				mergelist = ["dev-libs/B-1.2"]),
+
+			ResolverPlaygroundTestCase(
+				["app-misc/Z"],
+				success = True,
+				mergelist = ["app-misc/W-1", "app-misc/X-1", "app-misc/Z-1"]),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

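The app-misc/Z case exercises a nested any-of group: app-misc/Y-1 is only ~x86, so the parenthesized alternative ( app-misc/X app-misc/W ) is the one that gets merged. A sketch of how such a dependency string parses, assuming portage.dep.use_reduce() and its opconvert flag, which moves each operator to the head of its list:

    from portage.dep import use_reduce

    dep = "|| ( app-misc/Y ( app-misc/X app-misc/W ) )"
    print(use_reduce(dep, opconvert=True))
    # e.g. [['||', 'app-misc/Y', ['app-misc/X', 'app-misc/W']]]
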
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_slot_collisions.py b/portage_with_autodep/pym/portage/tests/resolver/test_slot_collisions.py
new file mode 100644
index 0000000..4867cea
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_slot_collisions.py
@@ -0,0 +1,143 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SlotCollisionTestCase(TestCase):
+
+	def testSlotCollision(self):
+
+		ebuilds = {
+			"dev-libs/A-1": { "PDEPEND": "foo? ( dev-libs/B )", "IUSE": "foo" }, 
+			"dev-libs/B-1": { "IUSE": "foo" },
+			"dev-libs/C-1": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
+			"dev-libs/D-1": { "DEPEND": "dev-libs/A[foo=] dev-libs/B[foo=]", "IUSE": "foo", "EAPI": 2 },
+			"dev-libs/E-1": {  },
+			"dev-libs/E-2": { "IUSE": "foo" },
+
+			"app-misc/Z-1": { },
+			"app-misc/Z-2": { },
+			"app-misc/Y-1": { "DEPEND": "=app-misc/Z-1" },
+			"app-misc/Y-2": { "DEPEND": ">app-misc/Z-1" },
+			"app-misc/X-1": { "DEPEND": "=app-misc/Z-2" },
+			"app-misc/X-2": { "DEPEND": "<app-misc/Z-2" },
+
+			"sci-libs/K-1": { "IUSE": "+foo", "EAPI": 1 },
+			"sci-libs/L-1": { "DEPEND": "sci-libs/K[-foo]", "EAPI": 2 },
+			"sci-libs/M-1": { "DEPEND": "sci-libs/K[foo=]", "IUSE": "+foo", "EAPI": 2 },
+
+			"sci-libs/Q-1": { "SLOT": "1", "IUSE": "+bar foo", "EAPI": 1 },
+			"sci-libs/Q-2": { "SLOT": "2", "IUSE": "+bar +foo", "EAPI": 2, "PDEPEND": "sci-libs/Q:1[bar?,foo?]" },
+			"sci-libs/P-1": { "DEPEND": "sci-libs/Q:1[foo=]", "IUSE": "foo", "EAPI": 2 },
+
+			"sys-libs/A-1": { "RDEPEND": "foo? ( sys-libs/J[foo=] )", "IUSE": "+foo", "EAPI": "4" },
+			"sys-libs/B-1": { "RDEPEND": "bar? ( sys-libs/J[bar=] )", "IUSE": "+bar", "EAPI": "4" },
+			"sys-libs/C-1": { "RDEPEND": "sys-libs/J[bar]", "EAPI": "4" },
+			"sys-libs/D-1": { "RDEPEND": "sys-libs/J[bar?]", "IUSE": "bar", "EAPI": "4" },
+			"sys-libs/E-1": { "RDEPEND": "sys-libs/J[foo(+)?]", "IUSE": "+foo", "EAPI": "4" },
+			"sys-libs/F-1": { "RDEPEND": "sys-libs/J[foo(+)]", "EAPI": "4" },
+			"sys-libs/J-1": { "IUSE": "+foo", "EAPI": "4" },
+			"sys-libs/J-2": { "IUSE": "+bar", "EAPI": "4" },
+
+			"app-misc/A-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+			"app-misc/B-1": { "DEPEND": "=app-misc/A-1[foo=]", "IUSE": "foo", "EAPI": 2 },
+			"app-misc/C-1": { "DEPEND": "=app-misc/A-1[foo]", "EAPI": 2 },
+			"app-misc/E-1": { "RDEPEND": "dev-libs/E[foo?]", "IUSE": "foo", "EAPI": "2" },
+			"app-misc/F-1": { "RDEPEND": "=dev-libs/E-1", "IUSE": "foo", "EAPI": "2" },
+			}
+		installed = {
+			"dev-libs/A-1": { "PDEPEND": "foo? ( dev-libs/B )", "IUSE": "foo", "USE": "foo" }, 
+			"dev-libs/B-1": { "IUSE": "foo", "USE": "foo" },
+			"dev-libs/C-1": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
+			"dev-libs/D-1": { "DEPEND": "dev-libs/A[foo=] dev-libs/B[foo=]", "IUSE": "foo", "USE": "foo", "EAPI": 2 },
+
+			"sci-libs/K-1": { "IUSE": "foo", "USE": "" },
+			"sci-libs/L-1": { "DEPEND": "sci-libs/K[-foo]" },
+
+			"sci-libs/Q-1": { "SLOT": "1", "IUSE": "+bar +foo", "USE": "bar foo", "EAPI": 1 },
+			"sci-libs/Q-2": { "SLOT": "2", "IUSE": "+bar +foo", "USE": "bar foo", "EAPI": 2, "PDEPEND": "sci-libs/Q:1[bar?,foo?]" },
+
+			"app-misc/A-1": { "IUSE": "+foo bar", "USE": "foo", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+			}
+
+		test_cases = (
+			#A qt-*[qt3support] like mess.
+			ResolverPlaygroundTestCase(
+				["dev-libs/A", "dev-libs/B", "dev-libs/C", "dev-libs/D"],
+				options = { "--autounmask": 'n' },
+				success = False,
+				mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+				ignore_mergelist_order = True,
+				slot_collision_solutions = [ {"dev-libs/A-1": {"foo": True}, "dev-libs/D-1": {"foo": True}} ]),
+
+			ResolverPlaygroundTestCase(
+				["sys-libs/A", "sys-libs/B", "sys-libs/C", "sys-libs/D", "sys-libs/E", "sys-libs/F"],
+				options = { "--autounmask": 'n' },
+				success = False,
+				ignore_mergelist_order = True,
+				slot_collision_solutions = [],
+				mergelist = ['sys-libs/J-2', 'sys-libs/J-1', 'sys-libs/A-1', 'sys-libs/B-1', 'sys-libs/C-1', 'sys-libs/D-1', 'sys-libs/E-1', 'sys-libs/F-1'],
+				),
+
+			#Version-based conflicts, nothing we can do.
+			ResolverPlaygroundTestCase(
+				["=app-misc/X-1", "=app-misc/Y-1"],
+				success = False,
+				mergelist = ["app-misc/Z-1", "app-misc/Z-2", "app-misc/X-1", "app-misc/Y-1"],
+				ignore_mergelist_order = True,
+				slot_collision_solutions = []
+				),
+			ResolverPlaygroundTestCase(
+				["=app-misc/X-2", "=app-misc/Y-2"],
+				success = False,
+				mergelist = ["app-misc/Z-1", "app-misc/Z-2", "app-misc/X-2", "app-misc/Y-2"],
+				ignore_mergelist_order = True,
+				slot_collision_solutions = []
+				),
+
+			ResolverPlaygroundTestCase(
+				["=app-misc/E-1", "=app-misc/F-1"],
+				success = False,
+				mergelist = ["dev-libs/E-1", "dev-libs/E-2", "app-misc/E-1", "app-misc/F-1"],
+				ignore_mergelist_order = True,
+				slot_collision_solutions = []
+				),
+
+			#Simple cases.
+			ResolverPlaygroundTestCase(
+				["sci-libs/L", "sci-libs/M"],
+				success = False,
+				mergelist = ["sci-libs/L-1", "sci-libs/M-1", "sci-libs/K-1"],
+				ignore_mergelist_order = True,
+				slot_collision_solutions = [{"sci-libs/K-1": {"foo": False}, "sci-libs/M-1": {"foo": False}}]
+				),
+
+			#Avoid duplicates.
+			ResolverPlaygroundTestCase(
+				["sci-libs/P", "sci-libs/Q:2"],
+				success = False,
+				options = { "--update": True, "--complete-graph": True, "--autounmask": 'n' },
+				mergelist = ["sci-libs/P-1", "sci-libs/Q-1"],
+				ignore_mergelist_order = True,
+				all_permutations=True,
+				slot_collision_solutions = [{"sci-libs/Q-1": {"foo": True}, "sci-libs/P-1": {"foo": True}}]
+				),
+
+			#Conflict with REQUIRED_USE
+			ResolverPlaygroundTestCase(
+				["=app-misc/C-1", "=app-misc/B-1"],
+				all_permutations = True,
+				slot_collision_solutions = [],
+				mergelist = ["app-misc/A-1", "app-misc/C-1", "app-misc/B-1"],
+				ignore_mergelist_order = True,
+				success = False),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

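Every failure above boils down to the same invariant: two distinct versions of one package cannot occupy the same SLOT at once. A minimal detector, offered as a sketch rather than the resolver's conflict handler, just groups the candidate mergelist by (package, SLOT):

    def find_slot_collisions(pkgs):
        # pkgs: iterable of (cpv, cp, slot) tuples from a candidate mergelist
        seen = {}
        collisions = []
        for cpv, cp, slot in pkgs:
            key = (cp, slot)
            if key in seen and seen[key] != cpv:
                collisions.append((seen[key], cpv))
            seen.setdefault(key, cpv)
        return collisions

    print(find_slot_collisions([
        ("app-misc/Z-1", "app-misc/Z", "0"),
        ("app-misc/Z-2", "app-misc/Z", "0"),  # same SLOT: collision
    ]))  # e.g. [('app-misc/Z-1', 'app-misc/Z-2')]
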
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_use_dep_defaults.py b/portage_with_autodep/pym/portage/tests/resolver/test_use_dep_defaults.py
new file mode 100644
index 0000000..7d17106
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/resolver/test_use_dep_defaults.py
@@ -0,0 +1,40 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UseDepDefaultsTestCase(TestCase):
+
+	def testUseDepDefaults(self):
+
+		ebuilds = {
+			"dev-libs/A-1": { "DEPEND": "dev-libs/B[foo]", "RDEPEND": "dev-libs/B[foo]", "EAPI": "2" },
+			"dev-libs/A-2": { "DEPEND": "dev-libs/B[foo(+)]", "RDEPEND": "dev-libs/B[foo(+)]", "EAPI": "4" },
+			"dev-libs/A-3": { "DEPEND": "dev-libs/B[foo(-)]", "RDEPEND": "dev-libs/B[foo(-)]", "EAPI": "4" },
+			"dev-libs/B-1": { "IUSE": "+foo", "EAPI": "1" },
+			"dev-libs/B-2": {},
+			}
+
+		test_cases = (
+			ResolverPlaygroundTestCase(
+				["=dev-libs/A-1"],
+				success = True,
+				mergelist = ["dev-libs/B-1", "dev-libs/A-1"]),
+			ResolverPlaygroundTestCase(
+				["=dev-libs/A-2"],
+				success = True,
+				mergelist = ["dev-libs/B-2", "dev-libs/A-2"]),
+			ResolverPlaygroundTestCase(
+				["=dev-libs/A-3"],
+				success = True,
+				mergelist = ["dev-libs/B-1", "dev-libs/A-3"]),
+			)
+
+		playground = ResolverPlayground(ebuilds=ebuilds)
+		try:
+			for test_case in test_cases:
+				playground.run_TestCase(test_case)
+				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+		finally:
+			playground.cleanup()

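The three cases above pin down the (+)/(-) fallback rule for USE deps: dev-libs/B-2 has an empty IUSE, so a bare [foo] dep can never apply to it, [foo(+)] treats the missing flag as enabled, and [foo(-)] treats it as disabled. A pure-Python sketch of the effective flag state under that reading:

    def effective_use_state(flag, default, iuse, use):
        # Returns the flag state a USE dep would see on the target package.
        if flag in iuse:
            return flag in use
        if default == "+":
            return True   # "(+)": assume enabled when missing from IUSE
        if default == "-":
            return False  # "(-)": assume disabled when missing from IUSE
        return None       # no default: the dep cannot apply (EAPI 2 case)

    # Against dev-libs/B-2 (empty IUSE), only the "(+)" variant matches [foo]:
    print(effective_use_state("foo", "+", iuse=set(), use=set()))   # True
    print(effective_use_state("foo", "-", iuse=set(), use=set()))   # False
    print(effective_use_state("foo", None, iuse=set(), use=set()))  # None
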
diff --git a/portage_with_autodep/pym/portage/tests/runTests b/portage_with_autodep/pym/portage/tests/runTests
new file mode 100755
index 0000000..6b3311d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/runTests
@@ -0,0 +1,46 @@
+#!/usr/bin/python
+# runTests.py -- Portage Unit Test Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os, sys
+import os.path as osp
+import grp
+import pwd
+import signal
+
+def debug_signal(signum, frame):
+	import pdb
+	pdb.set_trace()
+signal.signal(signal.SIGUSR1, debug_signal)
+
+# Pretend that the current user's uid/gid are the 'portage' uid/gid,
+# so things go smoothly regardless of the current user and global
+# user/group configuration.
+os.environ["PORTAGE_USERNAME"] = pwd.getpwuid(os.getuid()).pw_name
+os.environ["PORTAGE_GRPNAME"] = grp.getgrgid(os.getgid()).gr_name
+
+# Insert our parent dir so we can do shiny import "tests"
+# This line courtesy of Marienz and Pkgcore ;)
+sys.path.insert(0, osp.dirname(osp.dirname(osp.dirname(osp.abspath(__file__)))))
+
+import portage
+
+# Ensure that we don't instantiate portage.settings, so that tests
+# work the same regardless of global configuration file state/existence.
+portage._disable_legacy_globals()
+
+import portage.tests as tests
+from portage.const import PORTAGE_BIN_PATH
+path = os.environ.get("PATH", "").split(":")
+path = [x for x in path if x]
+if not path or not os.path.samefile(path[0], PORTAGE_BIN_PATH):
+	path.insert(0, PORTAGE_BIN_PATH)
+	os.environ["PATH"] = ":".join(path)
+del path
+
+
+if __name__ == "__main__":
+	result = tests.main()
+	if not result.wasSuccessful():
+		sys.exit(1)

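The debug_signal handler at the top of runTests means a hung test run can be dropped into pdb from outside. A companion snippet with a hypothetical PID (use whatever ps reports for the runTests process):

    import os, signal

    os.kill(12345, signal.SIGUSR1)  # 12345: hypothetical runTests PID
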
diff --git a/portage_with_autodep/pym/portage/tests/sets/__init__.py b/portage_with_autodep/pym/portage/tests/sets/__init__.py
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/sets/base/__init__.py b/portage_with_autodep/pym/portage/tests/sets/base/__init__.py
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/sets/base/__test__ b/portage_with_autodep/pym/portage/tests/sets/base/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/sets/base/testInternalPackageSet.py b/portage_with_autodep/pym/portage/tests/sets/base/testInternalPackageSet.py
new file mode 100644
index 0000000..e0a3478
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/base/testInternalPackageSet.py
@@ -0,0 +1,61 @@
+# testInternalPackageSet.py -- Portage Unit Testing Functionality
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.dep import Atom
+from portage.exception import InvalidAtom
+from portage.tests import TestCase
+from portage._sets.base import InternalPackageSet
+
+class InternalPackageSetTestCase(TestCase):
+	"""Simple Test Case for InternalPackageSet"""
+
+	def testInternalPackageSet(self):
+		i1_atoms = set(("dev-libs/A", ">=dev-libs/A-1", "dev-libs/B"))
+		i2_atoms = set(("dev-libs/A", "dev-libs/*", "dev-libs/C"))
+
+		i1 = InternalPackageSet(initial_atoms=i1_atoms)
+		i2 = InternalPackageSet(initial_atoms=i2_atoms, allow_wildcard=True)
+		self.assertRaises(InvalidAtom, InternalPackageSet, initial_atoms=i2_atoms)
+
+		self.assertEqual(i1.getAtoms(), i1_atoms)
+		self.assertEqual(i2.getAtoms(), i2_atoms)
+
+		new_atom = Atom("*/*", allow_wildcard=True)
+		self.assertRaises(InvalidAtom, i1.add, new_atom)
+		i2.add(new_atom)
+
+		i2_atoms.add(new_atom)
+
+		self.assertEqual(i1.getAtoms(), i1_atoms)
+		self.assertEqual(i2.getAtoms(), i2_atoms)
+
+		removed_atom = Atom("dev-libs/A")
+
+		i1.remove(removed_atom)
+		i2.remove(removed_atom)
+
+		i1_atoms.remove(removed_atom)
+		i2_atoms.remove(removed_atom)
+
+		self.assertEqual(i1.getAtoms(), i1_atoms)
+		self.assertEqual(i2.getAtoms(), i2_atoms)
+
+		update_atoms = [Atom("dev-libs/C"), Atom("dev-*/C", allow_wildcard=True)]
+
+		self.assertRaises(InvalidAtom, i1.update, update_atoms)
+		i2.update(update_atoms)
+
+		i2_atoms.update(update_atoms)
+
+		self.assertEqual(i1.getAtoms(), i1_atoms)
+		self.assertEqual(i2.getAtoms(), i2_atoms)
+
+		replace_atoms = [Atom("dev-libs/D"), Atom("*-libs/C", allow_wildcard=True)]
+
+		self.assertRaises(InvalidAtom, i1.replace, replace_atoms)
+		i2.replace(replace_atoms)
+
+		i2_atoms = set(replace_atoms)
+
+		self.assertEqual(i2.getAtoms(), i2_atoms)

diff --git a/portage_with_autodep/pym/portage/tests/sets/files/__init__.py b/portage_with_autodep/pym/portage/tests/sets/files/__init__.py
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/sets/files/__test__ b/portage_with_autodep/pym/portage/tests/sets/files/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/sets/files/testConfigFileSet.py b/portage_with_autodep/pym/portage/tests/sets/files/testConfigFileSet.py
new file mode 100644
index 0000000..3ec26a0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/files/testConfigFileSet.py
@@ -0,0 +1,32 @@
+# testConfigFileSet.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage.tests import TestCase, test_cps
+from portage._sets.files import ConfigFileSet
+
+class ConfigFileSetTestCase(TestCase):
+	"""Simple Test Case for ConfigFileSet"""
+
+	def setUp(self):
+		fd, self.testfile = tempfile.mkstemp(suffix=".testdata", prefix=self.__class__.__name__, text=True)
+		f = os.fdopen(fd, 'w')
+		for i, atom in enumerate(test_cps):
+			if i % 2 == 0:
+				f.write(atom + ' abc def\n')
+			else:
+				f.write(atom + '\n')
+		f.close()
+
+	def tearDown(self):
+		os.unlink(self.testfile)
+
+	def testConfigStaticFileSet(self):
+		s = ConfigFileSet(self.testfile)
+		s.load()
+		self.assertEqual(set(test_cps), s.getAtoms())
+

diff --git a/portage_with_autodep/pym/portage/tests/sets/files/testStaticFileSet.py b/portage_with_autodep/pym/portage/tests/sets/files/testStaticFileSet.py
new file mode 100644
index 0000000..d515a67
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/files/testStaticFileSet.py
@@ -0,0 +1,27 @@
+# testStaticFileSet.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage.tests import TestCase, test_cps
+from portage._sets.files import StaticFileSet
+
+class StaticFileSetTestCase(TestCase):
+	"""Simple Test Case for StaticFileSet"""
+
+	def setUp(self):
+		fd, self.testfile = tempfile.mkstemp(suffix=".testdata", prefix=self.__class__.__name__, text=True)
+		f = os.fdopen(fd, 'w')
+		f.write("\n".join(test_cps))
+		f.close()
+
+	def tearDown(self):
+		os.unlink(self.testfile)
+
+	def testSampleStaticFileSet(self):
+		s = StaticFileSet(self.testfile)
+		s.load()
+		self.assertEqual(set(test_cps), s.getAtoms())
+

diff --git a/portage_with_autodep/pym/portage/tests/sets/shell/__init__.py b/portage_with_autodep/pym/portage/tests/sets/shell/__init__.py
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/sets/shell/__test__ b/portage_with_autodep/pym/portage/tests/sets/shell/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/sets/shell/testShell.py b/portage_with_autodep/pym/portage/tests/sets/shell/testShell.py
new file mode 100644
index 0000000..2cdd833
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/sets/shell/testShell.py
@@ -0,0 +1,28 @@
+# testCommandOutputSet.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.process import find_binary
+from portage.tests import TestCase, test_cps
+from portage._sets.shell import CommandOutputSet
+
+class CommandOutputSetTestCase(TestCase):
+	"""Simple Test Case for CommandOutputSet"""
+
+	def setUp(self):
+		pass
+
+	def tearDown(self):
+		pass
+
+	def testCommand(self):
+		# "expected" avoids shadowing the builtin input(); the bash
+		# command echoes each atom on its own line.
+		expected = set(test_cps)
+		command = find_binary("bash")
+		command += " -c '"
+		for a in expected:
+			command += " echo -e \"%s\" ; " % a
+		command += "'"
+		s = CommandOutputSet(command)
+		atoms = s.getAtoms()
+		self.assertEqual(atoms, expected)

diff --git a/portage_with_autodep/pym/portage/tests/unicode/__init__.py b/portage_with_autodep/pym/portage/tests/unicode/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/unicode/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/unicode/__test__ b/portage_with_autodep/pym/portage/tests/unicode/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/unicode/test_string_format.py b/portage_with_autodep/pym/portage/tests/unicode/test_string_format.py
new file mode 100644
index 0000000..fb6e8e0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/unicode/test_string_format.py
@@ -0,0 +1,108 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+from portage import _encodings, _unicode_decode
+from portage.exception import PortageException
+from portage.tests import TestCase
+from _emerge.DependencyArg import DependencyArg
+from _emerge.UseFlagDisplay import UseFlagDisplay
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+STR_IS_UNICODE = sys.hexversion >= 0x3000000
+
+class StringFormatTestCase(TestCase):
+	"""
+	Test that string formatting works correctly in the current interpreter,
+	which may be either python2 or python3.
+	"""
+
+	# In order to get some unicode test strings in a way that works in
+	# both python2 and python3, write them here as byte strings and
+	# decode them before use. This assumes _encodings['content'] is
+	# utf_8.
+
+	unicode_strings = (
+		b'\xE2\x80\x98',
+		b'\xE2\x80\x99',
+	)
+
+	def testDependencyArg(self):
+
+		self.assertEqual(_encodings['content'], 'utf_8')
+
+		for arg_bytes in self.unicode_strings:
+			arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
+			dependency_arg = DependencyArg(arg=arg_unicode)
+
+			# Force unicode format string so that __unicode__() is
+			# called in python2.
+			formatted_str = _unicode_decode("%s") % (dependency_arg,)
+			self.assertEqual(formatted_str, arg_unicode)
+
+			if STR_IS_UNICODE:
+
+				# Test the __str__ method which returns unicode in python3
+				formatted_str = "%s" % (dependency_arg,)
+				self.assertEqual(formatted_str, arg_unicode)
+
+			else:
+
+				# Test the __str__ method which returns encoded bytes in python2
+				formatted_bytes = "%s" % (dependency_arg,)
+				self.assertEqual(formatted_bytes, arg_bytes)
+
+	def testPortageException(self):
+
+		self.assertEqual(_encodings['content'], 'utf_8')
+
+		for arg_bytes in self.unicode_strings:
+			arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
+			e = PortageException(arg_unicode)
+
+			# Force unicode format string so that __unicode__() is
+			# called in python2.
+			formatted_str = _unicode_decode("%s") % (e,)
+			self.assertEqual(formatted_str, arg_unicode)
+
+			if STR_IS_UNICODE:
+
+				# Test the __str__ method which returns unicode in python3
+				formatted_str = "%s" % (e,)
+				self.assertEqual(formatted_str, arg_unicode)
+
+			else:
+
+				# Test the __str__ method which returns encoded bytes in python2
+				formatted_bytes = "%s" % (e,)
+				self.assertEqual(formatted_bytes, arg_bytes)
+
+	def testUseFlagDisplay(self):
+
+		self.assertEqual(_encodings['content'], 'utf_8')
+
+		for enabled in (True, False):
+			for forced in (True, False):
+				for arg_bytes in self.unicode_strings:
+					arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
+					e = UseFlagDisplay(arg_unicode, enabled, forced)
+
+					# Force unicode format string so that __unicode__() is
+					# called in python2.
+					formatted_str = _unicode_decode("%s") % (e,)
+					self.assertEqual(isinstance(formatted_str, basestring), True)
+
+					if STR_IS_UNICODE:
+
+						# Test the __str__ method which returns unicode in python3
+						formatted_str = "%s" % (e,)
+						self.assertEqual(isinstance(formatted_str, str), True)
+
+					else:
+
+						# Test the __str__ method which returns encoded bytes in python2
+						formatted_bytes = "%s" % (e,)
+						self.assertEqual(isinstance(formatted_bytes, bytes), True)

diff --git a/portage_with_autodep/pym/portage/tests/util/__init__.py b/portage_with_autodep/pym/portage/tests/util/__init__.py
new file mode 100644
index 0000000..69ce189
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/__init__.py
@@ -0,0 +1,4 @@
+# tests/portage.util/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+

diff --git a/portage_with_autodep/pym/portage/tests/util/__test__ b/portage_with_autodep/pym/portage/tests/util/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/util/test_digraph.py b/portage_with_autodep/pym/portage/tests/util/test_digraph.py
new file mode 100644
index 0000000..b65c0b1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_digraph.py
@@ -0,0 +1,201 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util.digraph import digraph
+#~ from portage.util import noiselimit
+import portage.util
+
+class DigraphTest(TestCase):
+
+	def testBackwardCompatibility(self):
+		g = digraph()
+		f = g.copy()
+		g.addnode("A", None)
+		self.assertEqual("A" in g, True)
+		self.assertEqual(bool(g), True)
+		self.assertEqual(g.allnodes(), ["A"])
+		self.assertEqual(g.allzeros(), ["A"])
+		self.assertEqual(g.hasnode("A"), True)
+
+	def testDigraphEmptyGraph(self):
+		g = digraph()
+		f = g.clone()
+		for x in g, f:
+			self.assertEqual(bool(x), False)
+			self.assertEqual(x.contains("A"), False)
+			self.assertEqual(x.firstzero(), None)
+			self.assertRaises(KeyError, x.remove, "A")
+			x.delnode("A")
+			self.assertEqual(list(x), [])
+			self.assertEqual(x.get("A"), None)
+			self.assertEqual(x.get("A", "default"), "default")
+			self.assertEqual(x.all_nodes(), [])
+			self.assertEqual(x.leaf_nodes(), [])
+			self.assertEqual(x.root_nodes(), [])
+			self.assertRaises(KeyError, x.child_nodes, "A")
+			self.assertRaises(KeyError, x.parent_nodes, "A")
+			self.assertEqual(x.hasallzeros(), True)
+			self.assertRaises(KeyError, list, x.bfs("A"))
+			self.assertRaises(KeyError, x.shortest_path, "A", "B")
+			self.assertRaises(KeyError, x.remove_edge, "A", "B")
+			self.assertEqual(x.get_cycles(), [])
+			x.difference_update("A")
+			portage.util.noiselimit = -2
+			x.debug_print()
+			portage.util.noiselimit = 0
+
+	def testDigraphCircle(self):
+		g = digraph()
+		g.add("A", "B", -1)
+		g.add("B", "C", 0)
+		g.add("C", "D", 1)
+		g.add("D", "A", 2)
+
+		f = g.clone()
+		for x in g, f:
+			self.assertEqual(bool(x), True)
+			self.assertEqual(x.contains("A"), True)
+			self.assertEqual(x.firstzero(), None)
+			self.assertRaises(KeyError, x.remove, "Z")
+			x.delnode("Z")
+			self.assertEqual(list(x), ["A", "B", "C", "D"])
+			self.assertEqual(x.get("A"), "A")
+			self.assertEqual(x.get("A", "default"), "A")
+			self.assertEqual(x.all_nodes(), ["A", "B", "C", "D"])
+			self.assertEqual(x.leaf_nodes(), [])
+			self.assertEqual(x.root_nodes(), [])
+			self.assertEqual(x.child_nodes("A"), ["D"])
+			self.assertEqual(x.child_nodes("A", ignore_priority=2), [])
+			self.assertEqual(x.parent_nodes("A"), ["B"])
+			self.assertEqual(x.parent_nodes("A", ignore_priority=-2), ["B"])
+			self.assertEqual(x.parent_nodes("A", ignore_priority=-1), [])
+			self.assertEqual(x.hasallzeros(), False)
+			self.assertEqual(list(x.bfs("A")), [(None, "A"), ("A", "D"), ("D", "C"), ("C", "B")])
+			self.assertEqual(x.shortest_path("A", "D"), ["A", "D"])
+			self.assertEqual(x.shortest_path("D", "A"), ["D", "C", "B", "A"])
+			self.assertEqual(x.shortest_path("A", "D", ignore_priority=2), None)
+			self.assertEqual(x.shortest_path("D", "A", ignore_priority=-2), ["D", "C", "B", "A"])
+			cycles = set(tuple(y) for y in x.get_cycles())
+			self.assertEqual(cycles, set([("D", "C", "B", "A"), ("C", "B", "A", "D"), ("B", "A", "D", "C"), \
+				("A", "D", "C", "B")]))
+			x.remove_edge("A", "B")
+			self.assertEqual(x.get_cycles(), [])
+			x.difference_update(["D"])
+			self.assertEqual(x.all_nodes(), ["A", "B", "C"])
+			portage.util.noiselimit = -2
+			x.debug_print()
+			portage.util.noiselimit = 0
+
+	def testDigraphTree(self):
+		g = digraph()
+		g.add("B", "A", -1)
+		g.add("C", "A", 0)
+		g.add("D", "C", 1)
+		g.add("E", "C", 2)
+
+		f = g.clone()
+		for x in g, f:
+			self.assertEqual(bool(x), True)
+			self.assertEqual(x.contains("A"), True)
+			self.assertEqual(x.firstzero(), "B")
+			self.assertRaises(KeyError, x.remove, "Z")
+			x.delnode("Z")
+			self.assertEqual(set(x), set(["A", "B", "C", "D", "E"]))
+			self.assertEqual(x.get("A"), "A")
+			self.assertEqual(x.get("A", "default"), "A")
+			self.assertEqual(set(x.all_nodes()), set(["A", "B", "C", "D", "E"]))
+			self.assertEqual(set(x.leaf_nodes()), set(["B", "D", "E"]))
+			self.assertEqual(set(x.leaf_nodes(ignore_priority=0)), set(["A", "B", "D", "E"]))
+			self.assertEqual(x.root_nodes(), ["A"])
+			self.assertEqual(set(x.root_nodes(ignore_priority=0)), set(["A", "B", "C"]))
+			self.assertEqual(set(x.child_nodes("A")), set(["B", "C"]))
+			self.assertEqual(x.child_nodes("A", ignore_priority=2), [])
+			self.assertEqual(x.parent_nodes("B"), ["A"])
+			self.assertEqual(x.parent_nodes("B", ignore_priority=-2), ["A"])
+			self.assertEqual(x.parent_nodes("B", ignore_priority=-1), [])
+			self.assertEqual(x.hasallzeros(), False)
+			self.assertEqual(list(x.bfs("A")), [(None, "A"), ("A", "C"), ("A", "B"), ("C", "E"), ("C", "D")])
+			self.assertEqual(x.shortest_path("A", "D"), ["A", "C", "D"])
+			self.assertEqual(x.shortest_path("D", "A"), None)
+			self.assertEqual(x.shortest_path("A", "D", ignore_priority=2), None)
+			cycles = set(tuple(y) for y in x.get_cycles())
+			self.assertEqual(cycles, set())
+			x.remove("D")
+			self.assertEqual(set(x.all_nodes()), set(["A", "B", "C", "E"]))
+			x.remove("C")
+			self.assertEqual(set(x.all_nodes()), set(["A", "B", "E"]))
+			portage.util.noiselimit = -2
+			x.debug_print()
+			portage.util.noiselimit = 0
+			self.assertRaises(KeyError, x.remove_edge, "A", "E")
+
+	def testDigraphCompleteGraph(self):
+		g = digraph()
+		g.add("A", "B", -1)
+		g.add("B", "A", 1)
+		g.add("A", "C", 1)
+		g.add("C", "A", -1)
+		g.add("C", "B", 1)
+		g.add("B", "C", 1)
+
+		f = g.clone()
+		for x in g, f:
+			self.assertEqual(bool(x), True)
+			self.assertEqual(x.contains("A"), True)
+			self.assertEqual(x.firstzero(), None)
+			self.assertRaises(KeyError, x.remove, "Z")
+			x.delnode("Z")
+			self.assertEqual(list(x), ["A", "B", "C"])
+			self.assertEqual(x.get("A"), "A")
+			self.assertEqual(x.get("A", "default"), "A")
+			self.assertEqual(x.all_nodes(), ["A", "B", "C"])
+			self.assertEqual(x.leaf_nodes(), [])
+			self.assertEqual(x.root_nodes(), [])
+			self.assertEqual(set(x.child_nodes("A")), set(["B", "C"]))
+			self.assertEqual(x.child_nodes("A", ignore_priority=0), ["B"])
+			self.assertEqual(set(x.parent_nodes("A")), set(["B", "C"]))
+			self.assertEqual(x.parent_nodes("A", ignore_priority=0), ["C"])
+			self.assertEqual(x.parent_nodes("A", ignore_priority=1), [])
+			self.assertEqual(x.hasallzeros(), False)
+			self.assertEqual(list(x.bfs("A")), [(None, "A"), ("A", "C"), ("A", "B")])
+			self.assertEqual(x.shortest_path("A", "C"), ["A", "C"])
+			self.assertEqual(x.shortest_path("C", "A"), ["C", "A"])
+			self.assertEqual(x.shortest_path("A", "C", ignore_priority=0), ["A", "B", "C"])
+			self.assertEqual(x.shortest_path("C", "A", ignore_priority=0), ["C", "A"])
+			cycles = set(tuple(y) for y in x.get_cycles())
+			self.assertEqual(cycles, set([("C", "A"), ("A", "B"), ("A", "C")]))
+			x.remove_edge("A", "B")
+			self.assertEqual(x.get_cycles(), [["C", "A"], ["A", "C"], ["C", "B"]])
+			x.difference_update(["C"])
+			self.assertEqual(x.all_nodes(), ["A", "B"])
+			portage.util.noiselimit = -2
+			x.debug_print()
+			portage.util.noiselimit = 0
+
+	def testDigraphIgnorePriority(self):
+
+		def always_true(dummy):
+			return True
+
+		def always_false(dummy):
+			return False
+
+		g = digraph()
+		g.add("A", "B")
+
+		self.assertEqual(g.parent_nodes("A"), ["B"])
+		self.assertEqual(g.parent_nodes("A", ignore_priority=always_false), ["B"])
+		self.assertEqual(g.parent_nodes("A", ignore_priority=always_true), [])
+
+		self.assertEqual(g.child_nodes("B"), ["A"])
+		self.assertEqual(g.child_nodes("B", ignore_priority=always_false), ["A"])
+		self.assertEqual(g.child_nodes("B", ignore_priority=always_true), [])
+
+		self.assertEqual(g.leaf_nodes(), ["A"])
+		self.assertEqual(g.leaf_nodes(ignore_priority=always_false), ["A"])
+		self.assertEqual(g.leaf_nodes(ignore_priority=always_true), ["A", "B"])
+
+		self.assertEqual(g.root_nodes(), ["B"])
+		self.assertEqual(g.root_nodes(ignore_priority=always_false), ["B"])
+		self.assertEqual(g.root_nodes(ignore_priority=always_true), ["A", "B"])

diff --git a/portage_with_autodep/pym/portage/tests/util/test_getconfig.py b/portage_with_autodep/pym/portage/tests/util/test_getconfig.py
new file mode 100644
index 0000000..22e0bfc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_getconfig.py
@@ -0,0 +1,29 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.const import PORTAGE_BASE_PATH
+from portage.tests import TestCase
+from portage.util import getconfig
+
+class GetConfigTestCase(TestCase):
+	"""
+	Test that getconfig() produces the same result as bash would when
+	sourcing the same input.
+	"""
+
+	_cases = {
+		'FETCHCOMMAND'             : '/usr/bin/wget -t 3 -T 60 --passive-ftp -O "${DISTDIR}/${FILE}" "${URI}"',
+		'FETCHCOMMAND_RSYNC'       : 'rsync -avP "${URI}" "${DISTDIR}/${FILE}"',
+		'FETCHCOMMAND_SFTP'        : 'bash -c "x=\\${2#sftp://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; exec sftp -P \\${port} \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" sftp "${DISTDIR}/${FILE}" "${URI}"',
+		'FETCHCOMMAND_SSH'         : 'bash -c "x=\\${2#ssh://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; exec rsync --rsh=\\"ssh -p\\${port}\\" -avP \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" rsync "${DISTDIR}/${FILE}" "${URI}"',
+		'PORTAGE_ELOG_MAILSUBJECT' : '[portage] ebuild log for ${PACKAGE} on ${HOST}'
+	}
+
+	def testGetConfig(self):
+
+		make_globals_file = os.path.join(PORTAGE_BASE_PATH,
+			'cnf', 'make.globals')
+		d = getconfig(make_globals_file)
+		for k, v in self._cases.items():
+			self.assertEqual(d[k], v)

diff --git a/portage_with_autodep/pym/portage/tests/util/test_grabdict.py b/portage_with_autodep/pym/portage/tests/util/test_grabdict.py
new file mode 100644
index 0000000..e62a75d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_grabdict.py
@@ -0,0 +1,11 @@
+# test_grabdict.py -- Portage Unit Testing Functionality
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+#from portage.util import grabdict
+
+class GrabDictTestCase(TestCase):
+	
+	def testGrabDictPass(self):
+		pass

diff --git a/portage_with_autodep/pym/portage/tests/util/test_normalizedPath.py b/portage_with_autodep/pym/portage/tests/util/test_normalizedPath.py
new file mode 100644
index 0000000..f993886
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_normalizedPath.py
@@ -0,0 +1,14 @@
+# test_normalizedPath.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+
+class NormalizePathTestCase(TestCase):
+	
+	def testNormalizePath(self):
+		
+		from portage.util import normalize_path
+		path = "///foo/bar/baz"
+		good = "/foo/bar/baz"
+		self.assertEqual(normalize_path(path), good)

diff --git a/portage_with_autodep/pym/portage/tests/util/test_stackDictList.py b/portage_with_autodep/pym/portage/tests/util/test_stackDictList.py
new file mode 100644
index 0000000..678001c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_stackDictList.py
@@ -0,0 +1,17 @@
+# test_stackDictList.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+
+class StackDictListTestCase(TestCase):
+
+	def testStackDictList(self):
+		from portage.util import stack_dictlist
+		
+		tests = [ ({'a':'b'},{'x':'y'},False,{'a':['b'],'x':['y']}) ]
+		tests.append(( {'KEYWORDS':['alpha','x86']},{'KEYWORDS':['-*']},True,{} ))
+		tests.append(( {'KEYWORDS':['alpha','x86']},{'KEYWORDS':['-x86']},True,{'KEYWORDS':['alpha']} ))
+		for test in tests:
+			self.assertEqual(
+			stack_dictlist([test[0],test[1]],incremental=test[2]), test[3] )

diff --git a/portage_with_autodep/pym/portage/tests/util/test_stackDicts.py b/portage_with_autodep/pym/portage/tests/util/test_stackDicts.py
new file mode 100644
index 0000000..0d2cadd
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_stackDicts.py
@@ -0,0 +1,36 @@
+# test_stackDicts.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util import stack_dicts
+
+
+class StackDictsTestCase(TestCase):
+	
+	def testStackDictsPass(self):
+		
+		tests = [ ( [ { "a":"b" }, { "b":"c" } ], { "a":"b", "b":"c" },
+			  False, [], False ),
+			  ( [ { "a":"b" }, { "a":"c" } ], { "a":"b c" },
+			  True, [], False ),
+			  ( [ { "a":"b" }, { "a":"c" } ], { "a":"b c" },
+			  False, ["a"], False ),
+			  ( [ { "a":"b" }, None ], { "a":"b" },
+			  False, [], True ),
+			  ( [ None ], {}, False, [], False ),
+			  ( [ None, {}], {}, False, [], True ) ]
+
+
+		for test in tests:
+			result = stack_dicts( test[0], test[2], test[3], test[4] )
+			self.assertEqual( result, test[1] )
+	
+	def testStackDictsFail(self):
+		
+		tests = [ ( [ None, {} ], None, False, [], True ),
+			  ( [ { "a":"b"}, {"a":"c" } ], { "a":"b c" },
+				False, [], False ) ]
+		for test in tests:
+			result = stack_dicts( test[0], test[2], test[3], test[4] )
+			self.assertNotEqual( result , test[1] )

diff --git a/portage_with_autodep/pym/portage/tests/util/test_stackLists.py b/portage_with_autodep/pym/portage/tests/util/test_stackLists.py
new file mode 100644
index 0000000..8d01ea5
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_stackLists.py
@@ -0,0 +1,19 @@
+# test_stackLists.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util import stack_lists
+
+class StackListsTestCase(TestCase):
+	
+	def testStackLists(self):
+		
+		tests = [ ( [ ['a','b','c'], ['d','e','f'] ], ['a','c','b','e','d','f'], False ),
+			  ( [ ['a','x'], ['b','x'] ], ['a','x','b'], False ),
+			  ( [ ['a','b','c'], ['-*'] ], [], True ),
+			  ( [ ['a'], ['-a'] ], [], True ) ]
+
+		for test in tests:
+			result = stack_lists( test[0], test[2] )
+			self.assertEqual( result , test[1] )

diff --git a/portage_with_autodep/pym/portage/tests/util/test_uniqueArray.py b/portage_with_autodep/pym/portage/tests/util/test_uniqueArray.py
new file mode 100644
index 0000000..2a1a209
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_uniqueArray.py
@@ -0,0 +1,24 @@
+# test_uniqueArray.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.util import unique_array
+
+class UniqueArrayTestCase(TestCase):
+	
+	def testUniqueArrayPass(self):
+		"""
+		test portage.util.unique_array()
+		"""
+
+		tests = [ ( ["a","a","a",os,os,[],[],[]], ['a',os,[]] ), 
+			  ( [1,1,1,2,3,4,4] , [1,2,3,4]) ]
+
+		for test in tests:
+			result = unique_array( test[0] )
+			for item in test[1]:
+				number = result.count(item)
+				self.assertFalse( number != 1, msg="%s contains %s of %s, \
+					should be only 1" % (result, number, item) )

diff --git a/portage_with_autodep/pym/portage/tests/util/test_varExpand.py b/portage_with_autodep/pym/portage/tests/util/test_varExpand.py
new file mode 100644
index 0000000..7b528d6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/util/test_varExpand.py
@@ -0,0 +1,92 @@
+# test_varExpand.py -- Portage Unit Testing Functionality
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util import varexpand
+
+class VarExpandTestCase(TestCase):
+	
+	def testVarExpandPass(self):
+
+		varDict = { "a":"5", "b":"7", "c":"-5" }
+		for key in varDict:
+			result = varexpand( "$%s" % key, varDict )
+			
+			self.assertFalse( result != varDict[key],
+				msg="Got %s != %s, from varexpand( %s, %s )" % \
+					( result, varDict[key], "$%s" % key, varDict ) )
+			result = varexpand( "${%s}" % key, varDict )
+			self.assertFalse( result != varDict[key],
+				msg="Got %s != %s, from varexpand( %s, %s )" % \
+					( result, varDict[key], "${%s}" % key, varDict ) )
+
+	def testVarExpandBackslashes(self):
+		"""
+		We want to behave like bash does when expanding a variable
+		assignment in a sourced file, in which case it performs
+		backslash removal for \\ and \$ but nothing more. It also
+		removes escaped newline characters. Note that we don't
+		handle escaped quotes here, since getconfig() uses shlex
+		to handle that earlier.
+		"""
+
+		varDict = {}
+		tests = [
+			("\\", "\\"),
+			("\\\\", "\\"),
+			("\\\\\\", "\\\\"),
+			("\\\\\\\\", "\\\\"),
+			("\\$", "$"),
+			("\\\\$", "\\$"),
+			("\\a", "\\a"),
+			("\\b", "\\b"),
+			("\\n", "\\n"),
+			("\\r", "\\r"),
+			("\\t", "\\t"),
+			("\\\n", ""),
+			("\\\"", "\\\""),
+			("\\'", "\\'"),
+		]
+		for test in tests:
+			result = varexpand( test[0], varDict )
+			self.assertFalse( result != test[1],
+				msg="Got %s != %s from varexpand( %s, %s )" \
+				% ( result, test[1], test[0], varDict ) )
+
+	def testVarExpandDoubleQuotes(self):
+		
+		varDict = { "a":"5" }
+		tests = [ ("\"${a}\"", "\"5\"") ]
+		for test in tests:
+			result = varexpand( test[0], varDict )
+			self.assertFalse( result != test[1],
+				msg="Got %s != %s from varexpand( %s, %s )" \
+				% ( result, test[1], test[0], varDict ) )
+
+	def testVarExpandSingleQuotes(self):
+		
+		varDict = { "a":"5" }
+		tests = [ ("\'${a}\'", "\'${a}\'") ]
+		for test in tests:
+			result = varexpand( test[0], varDict )
+			self.assertFalse( result != test[1],
+				msg="Got %s != %s from varexpand( %s, %s )" \
+				% ( result, test[1], test[0], varDict ) )
+
+	def testVarExpandFail(self):
+
+		varDict = { "a":"5", "b":"7", "c":"15" }
+
+		testVars = [ "fail" ]
+
+		for var in testVars:
+			result = varexpand( "$%s" % var, varDict )
+			self.assertFalse( len(result),
+				msg="Got %s == %s, from varexpand( %s, %s )" \
+					% ( result, var, "$%s" % var, varDict ) )
+
+			result = varexpand( "${%s}" % var, varDict )
+			self.assertFalse( len(result),
+				msg="Got %s == %s, from varexpand( %s, %s )" \
+					% ( result, var, "${%s}" % var, varDict ) )

diff --git a/portage_with_autodep/pym/portage/tests/versions/__init__.py b/portage_with_autodep/pym/portage/tests/versions/__init__.py
new file mode 100644
index 0000000..2b14180
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/versions/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.versions/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/versions/__test__ b/portage_with_autodep/pym/portage/tests/versions/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/versions/test_cpv_sort_key.py b/portage_with_autodep/pym/portage/tests/versions/test_cpv_sort_key.py
new file mode 100644
index 0000000..a223d78
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/versions/test_cpv_sort_key.py
@@ -0,0 +1,16 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.versions import cpv_sort_key
+
+class CpvSortKeyTestCase(TestCase):
+
+	def testCpvSortKey(self):
+
+		tests = [ (("a/b-2_alpha", "a", "b", "a/b-2", "a/a-1", "a/b-1"),
+			( "a", "a/a-1", "a/b-1", "a/b-2_alpha", "a/b-2", "b")),
+		]
+
+		for test in tests:
+			self.assertEqual( tuple(sorted(test[0], key=cpv_sort_key())), test[1] )

diff --git a/portage_with_autodep/pym/portage/tests/versions/test_vercmp.py b/portage_with_autodep/pym/portage/tests/versions/test_vercmp.py
new file mode 100644
index 0000000..aa7969c
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/versions/test_vercmp.py
@@ -0,0 +1,80 @@
+# test_vercmp.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.versions import vercmp
+
+class VerCmpTestCase(TestCase):
+	""" A simple testCase for portage.versions.vercmp()
+	"""
+	
+	def testVerCmpGreater(self):
+		
+		tests = [ ( "6.0", "5.0"), ("5.0","5"),
+			("1.0-r1", "1.0-r0"),
+			("1.0-r1", "1.0"),
+			("cvs.9999", "9999"),
+			("999999999999999999999999999999", "999999999999999999999999999998"),
+			("1.0.0", "1.0"),
+			("1.0.0", "1.0b"),
+			("1b", "1"),
+			("1b_p1", "1_p1"),
+			("1.1b", "1.1"),
+			("12.2.5", "12.2b"),
+		]
+		for test in tests:
+			self.assertFalse( vercmp( test[0], test[1] ) <= 0, msg="%s < %s? Wrong!" % (test[0],test[1]) )
+
+	def testVerCmpLess(self):
+		"""
+		pre < alpha < beta < rc < p -> test each of these, they are inductive (or should be..)
+		"""
+		tests = [ ( "4.0", "5.0"), ("5", "5.0"), ("1.0_pre2","1.0_p2"),
+			("1.0_alpha2", "1.0_p2"),("1.0_alpha1", "1.0_beta1"),("1.0_beta3","1.0_rc3"),
+			("1.001000000000000000001", "1.001000000000000000002"),
+			("1.00100000000", "1.0010000000000000001"),
+			("9999", "cvs.9999"),
+			("999999999999999999999999999998", "999999999999999999999999999999"),
+			("1.01", "1.1"),
+			("1.0-r0", "1.0-r1"),
+			("1.0", "1.0-r1"),
+			("1.0", "1.0.0"),
+			("1.0b", "1.0.0"),
+			("1_p1", "1b_p1"),
+			("1", "1b"),
+			("1.1", "1.1b"),
+			("12.2b", "12.2.5"),
+		]
+		for test in tests:
+			self.assertFalse( vercmp( test[0], test[1]) >= 0, msg="%s > %s? Wrong!" % (test[0],test[1]))
+	
+	
+	def testVerCmpEqual(self):
+		
+		tests = [ ("4.0", "4.0"),
+			("1.0", "1.0"),
+			("1.0-r0", "1.0"),
+			("1.0", "1.0-r0"),
+			("1.0-r0", "1.0-r0"),
+			("1.0-r1", "1.0-r1")]
+		for test in tests:
+			self.assertFalse( vercmp( test[0], test[1]) != 0, msg="%s != %s? Wrong!" % (test[0],test[1]))
+			
+	def testVerNotEqual(self):
+		
+		tests = [ ("1","2"),("1.0_alpha","1.0_pre"),("1.0_beta","1.0_alpha"),
+			("0", "0.0"),
+			("cvs.9999", "9999"),
+			("1.0-r0", "1.0-r1"),
+			("1.0-r1", "1.0-r0"),
+			("1.0", "1.0-r1"),
+			("1.0-r1", "1.0"),
+			("1.0", "1.0.0"),
+			("1_p1", "1b_p1"),
+			("1b", "1"),
+			("1.1b", "1.1"),
+			("12.2b", "12.2"),
+		]
+		for test in tests:
+			self.assertFalse( vercmp( test[0], test[1]) == 0, msg="%s == %s? Wrong!" % (test[0],test[1]))

diff --git a/portage_with_autodep/pym/portage/tests/xpak/__init__.py b/portage_with_autodep/pym/portage/tests/xpak/__init__.py
new file mode 100644
index 0000000..9c3f524
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/xpak/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.xpak/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/xpak/__test__ b/portage_with_autodep/pym/portage/tests/xpak/__test__
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/portage/tests/xpak/test_decodeint.py b/portage_with_autodep/pym/portage/tests/xpak/test_decodeint.py
new file mode 100644
index 0000000..2da5735
--- /dev/null
+++ b/portage_with_autodep/pym/portage/tests/xpak/test_decodeint.py
@@ -0,0 +1,16 @@
+# xpak/test_decodeint.py
+# Copyright 2006 Gentoo Foundation
+# Portage Unit Testing Functionality
+
+from portage.tests import TestCase
+from portage.xpak import decodeint, encodeint
+
+class testDecodeIntTestCase(TestCase):
+
+	def testDecodeInt(self):
+		
+		for n in range(1000):
+			self.assertEqual(decodeint(encodeint(n)), n)
+
+		for n in (2 ** 32 - 1,):
+			self.assertEqual(decodeint(encodeint(n)), n)

diff --git a/portage_with_autodep/pym/portage/update.py b/portage_with_autodep/pym/portage/update.py
new file mode 100644
index 0000000..52ab506
--- /dev/null
+++ b/portage_with_autodep/pym/portage/update.py
@@ -0,0 +1,320 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import re
+import stat
+import sys
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.dep:Atom,dep_getkey,isvalidatom,' + \
+	'remove_slot',
+	'portage.util:ConfigProtect,new_protect_filename,' + \
+		'normalize_path,write_atomic,writemsg',
+	'portage.util.listdir:_ignorecvs_dirs',
+	'portage.versions:ververify'
+)
+
+from portage.const import USER_CONFIG_PATH
+from portage.exception import DirectoryNotFound, InvalidAtom, PortageException
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+ignored_dbentries = ("CONTENTS", "environment.bz2")
+
+def update_dbentry(update_cmd, mycontent):
+	if update_cmd[0] == "move":
+		old_value = str(update_cmd[1])
+		if old_value in mycontent:
+			new_value = str(update_cmd[2])
+			old_value = re.escape(old_value)
+			mycontent = re.sub(old_value+"(:|$|\\s)", new_value+"\\1", mycontent)
+			def myreplace(matchobj):
+				# Strip slot and * operator if necessary
+				# so that ververify works.
+				ver = remove_slot(matchobj.group(2))
+				ver = ver.rstrip("*")
+				if ververify(ver):
+					return "%s-%s" % (new_value, matchobj.group(2))
+				else:
+					return "".join(matchobj.groups())
+			mycontent = re.sub("(%s-)(\\S*)" % old_value, myreplace, mycontent)
+	elif update_cmd[0] == "slotmove" and update_cmd[1].operator is None:
+		pkg, origslot, newslot = update_cmd[1:]
+		old_value = "%s:%s" % (pkg, origslot)
+		if old_value in mycontent:
+			old_value = re.escape(old_value)
+			new_value = "%s:%s" % (pkg, newslot)
+			mycontent = re.sub(old_value+"($|\\s)", new_value+"\\1", mycontent)
+	return mycontent
+
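+# A minimal illustration with hypothetical atoms: given the command
+# ["move", Atom("dev-libs/old"), Atom("dev-libs/new")], update_dbentry()
+# rewrites "dev-libs/old-1.0" to "dev-libs/new-1.0", while suffixes that
+# fail ververify() are left untouched.
+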
+def update_dbentries(update_iter, mydata):
+	"""Performs update commands and returns a
+	dict containing only the updated items."""
+	updated_items = {}
+	for k, mycontent in mydata.items():
+		k_unicode = _unicode_decode(k,
+			encoding=_encodings['repo.content'], errors='replace')
+		if k_unicode not in ignored_dbentries:
+			orig_content = mycontent
+			mycontent = _unicode_decode(mycontent,
+				encoding=_encodings['repo.content'], errors='replace')
+			is_encoded = mycontent is not orig_content
+			orig_content = mycontent
+			for update_cmd in update_iter:
+				mycontent = update_dbentry(update_cmd, mycontent)
+			if mycontent != orig_content:
+				if is_encoded:
+					mycontent = _unicode_encode(mycontent,
+						encoding=_encodings['repo.content'],
+						errors='backslashreplace')
+				updated_items[k] = mycontent
+	return updated_items
+
+def fixdbentries(update_iter, dbdir):
+	"""Performs update commands which result in search and replace operations
+	for each of the files in dbdir (excluding CONTENTS and environment.bz2).
+	Returns True when modifications were actually made and False otherwise."""
+	mydata = {}
+	for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
+		file_path = os.path.join(dbdir, myfile)
+		mydata[myfile] = io.open(_unicode_encode(file_path,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['repo.content'],
+			errors='replace').read()
+	updated_items = update_dbentries(update_iter, mydata)
+	for myfile, mycontent in updated_items.items():
+		file_path = os.path.join(dbdir, myfile)
+		write_atomic(file_path, mycontent, encoding=_encodings['repo.content'])
+	return len(updated_items) > 0
+
+def grab_updates(updpath, prev_mtimes=None):
+	"""Returns all the updates from the given directory as a sorted list of
+	tuples, each containing (file_path, statobj, content).  If prev_mtimes is
+	given then updates are only returned if one or more files have different
+	mtimes. When a change is detected for a given file, updates will be
+	returned for that file and any files that come after it in the entire
+	sequence. This ensures that all relevant updates are returned for cases
+	in which the destination package of an earlier move corresponds to
+	the source package of a move that comes somewhere later in the entire
+	sequence of files.
+	"""
+	try:
+		mylist = os.listdir(updpath)
+	except OSError as oe:
+		if oe.errno == errno.ENOENT:
+			raise DirectoryNotFound(updpath)
+		raise
+	if prev_mtimes is None:
+		prev_mtimes = {}
+	# validate the file name (filter out CVS directory, etc...)
+	mylist = [myfile for myfile in mylist if len(myfile) == 7 and myfile[1:3] == "Q-"]
+	if len(mylist) == 0:
+		return []
+	
+	# update names are mangled to make them sort properly
+	mylist = [myfile[3:]+"-"+myfile[:2] for myfile in mylist]
+	mylist.sort()
+	mylist = [myfile[5:]+"-"+myfile[:4] for myfile in mylist]
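+	# For illustration: "1Q-2010" first becomes "2010-1Q" so that entries
+	# sort by year before quarter, then is mapped back to "1Q-2010".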
+
+	update_data = []
+	for myfile in mylist:
+		file_path = os.path.join(updpath, myfile)
+		mystat = os.stat(file_path)
+		if update_data or \
+			file_path not in prev_mtimes or \
+			long(prev_mtimes[file_path]) != mystat[stat.ST_MTIME]:
+			content = io.open(_unicode_encode(file_path,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['repo.content'], errors='replace'
+				).read()
+			update_data.append((file_path, mystat, content))
+	return update_data
+
+def parse_updates(mycontent):
+	"""Valid updates are returned as a list of split update commands."""
+	myupd = []
+	errors = []
+	mylines = mycontent.splitlines()
+	for myline in mylines:
+		mysplit = myline.split()
+		if len(mysplit) == 0:
+			continue
+		if mysplit[0] not in ("move", "slotmove"):
+			errors.append(_("ERROR: Update type not recognized '%s'") % myline)
+			continue
+		if mysplit[0] == "move":
+			if len(mysplit) != 3:
+				errors.append(_("ERROR: Update command invalid '%s'") % myline)
+				continue
+			# Track validity explicitly: a bare "break" would otherwise let
+			# the malformed entry reach myupd.append() below.
+			valid = True
+			for i in (1, 2):
+				try:
+					atom = Atom(mysplit[i])
+				except InvalidAtom:
+					atom = None
+				else:
+					if atom.blocker or atom != atom.cp:
+						atom = None
+				if atom is not None:
+					mysplit[i] = atom
+				else:
+					errors.append(
+						_("ERROR: Malformed update entry '%s'") % myline)
+					valid = False
+					break
+			if not valid:
+				continue
+		if mysplit[0] == "slotmove":
+			if len(mysplit)!=4:
+				errors.append(_("ERROR: Update command invalid '%s'") % myline)
+				continue
+			pkg, origslot, newslot = mysplit[1], mysplit[2], mysplit[3]
+			try:
+				atom = Atom(pkg)
+			except InvalidAtom:
+				atom = None
+			else:
+				if atom.blocker:
+					atom = None
+			if atom is not None:
+				mysplit[1] = atom
+			else:
+				errors.append(_("ERROR: Malformed update entry '%s'") % myline)
+				continue
+
+		# The list of valid updates is filtered by continue statements above.
+		myupd.append(mysplit)
+	return myupd, errors
+
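+# For example, parse_updates("move dev-libs/old dev-libs/new") returns
+# ([["move", Atom("dev-libs/old"), Atom("dev-libs/new")]], []); the atom
+# names here are hypothetical, and malformed lines are reported through the
+# errors list instead of myupd.
+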
+def update_config_files(config_root, protect, protect_mask, update_iter, match_callback = None):
+	"""Perform global updates on /etc/portage/package.*.
+	config_root - location of files to update
+	protect - list of paths from CONFIG_PROTECT
+	protect_mask - list of paths from CONFIG_PROTECT_MASK
+	update_iter - list of update commands as returned from parse_updates(),
+		or dict of {repo_name: list}
+	match_callback - a callback which will be called with three arguments:
+		match_callback(repo_name, old_atom, new_atom)
+	and should return a boolean value determining whether to perform the update"""
+
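+	# For example, a package.mask line "dev-libs/old" affected by a
+	# "move dev-libs/old dev-libs/new" command (hypothetical atoms) is
+	# rewritten in place as two lines:
+	#   # move dev-libs/old dev-libs/new
+	#   dev-libs/new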
+	repo_dict = None
+	if isinstance(update_iter, dict):
+		repo_dict = update_iter
+	if match_callback is None:
+		def match_callback(repo_name, atoma, atomb):
+			return True
+	config_root = normalize_path(config_root)
+	update_files = {}
+	file_contents = {}
+	myxfiles = [
+		"package.accept_keywords", "package.env",
+		"package.keywords", "package.license",
+		"package.mask", "package.properties",
+		"package.unmask", "package.use"
+	]
+	myxfiles += [os.path.join("profile", x) for x in myxfiles]
+	abs_user_config = os.path.join(config_root, USER_CONFIG_PATH)
+	recursivefiles = []
+	for x in myxfiles:
+		config_file = os.path.join(abs_user_config, x)
+		if os.path.isdir(config_file):
+			for parent, dirs, files in os.walk(config_file):
+				try:
+					parent = _unicode_decode(parent,
+						encoding=_encodings['fs'], errors='strict')
+				except UnicodeDecodeError:
+					continue
+				for y_enc in list(dirs):
+					try:
+						y = _unicode_decode(y_enc,
+							encoding=_encodings['fs'], errors='strict')
+					except UnicodeDecodeError:
+						dirs.remove(y_enc)
+						continue
+					if y.startswith(".") or y in _ignorecvs_dirs:
+						dirs.remove(y_enc)
+				for y in files:
+					try:
+						y = _unicode_decode(y,
+							encoding=_encodings['fs'], errors='strict')
+					except UnicodeDecodeError:
+						continue
+					if y.startswith("."):
+						continue
+					recursivefiles.append(
+						os.path.join(parent, y)[len(abs_user_config) + 1:])
+		else:
+			recursivefiles.append(x)
+	myxfiles = recursivefiles
+	for x in myxfiles:
+		try:
+			file_contents[x] = io.open(
+				_unicode_encode(os.path.join(abs_user_config, x),
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['content'],
+				errors='replace').readlines()
+		except IOError:
+			continue
+
+	# update /etc/portage/package.*
+	ignore_line_re = re.compile(r'^#|^\s*$')
+	if repo_dict is None:
+		update_items = [(None, update_iter)]
+	else:
+		update_items = [x for x in repo_dict.items() if x[0] != 'DEFAULT']
+	for repo_name, update_iter in update_items:
+		for update_cmd in update_iter:
+			for x, contents in file_contents.items():
+				skip_next = False
+				for pos, line in enumerate(contents):
+					if skip_next:
+						skip_next = False
+						continue
+					if ignore_line_re.match(line):
+						continue
+					atom = line.split()[0]
+					if atom[:1] == "-":
+						# package.mask supports incrementals
+						atom = atom[1:]
+					if not isvalidatom(atom):
+						continue
+					new_atom = update_dbentry(update_cmd, atom)
+					if atom != new_atom:
+						if match_callback(repo_name, atom, new_atom):
+							# add a comment with the update command, so
+							# the user can clearly see what happened
+							contents[pos] = "# %s\n" % \
+								" ".join("%s" % (x,) for x in update_cmd)
+							contents.insert(pos + 1,
+								line.replace("%s" % (atom,),
+								"%s" % (new_atom,), 1))
+							# we've inserted an additional line, so we need to
+							# skip it when it's reached in the next iteration
+							skip_next = True
+							update_files[x] = 1
+							sys.stdout.write("p")
+							sys.stdout.flush()
+
+	protect_obj = ConfigProtect(
+		config_root, protect, protect_mask)
+	for x in update_files:
+		updating_file = os.path.join(abs_user_config, x)
+		if protect_obj.isprotected(updating_file):
+			updating_file = new_protect_filename(updating_file)
+		try:
+			write_atomic(updating_file, "".join(file_contents[x]))
+		except PortageException as e:
+			writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
+			writemsg(_("!!! An error occurred while updating a config file:") + \
+				" '%s'\n" % updating_file, noiselevel=-1)
+			continue
+
+def dep_transform(mydep, oldkey, newkey):
+	if dep_getkey(mydep) == oldkey:
+		return mydep.replace(oldkey, newkey, 1)
+	return mydep

diff --git a/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py b/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py
new file mode 100644
index 0000000..5cb9747
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py
@@ -0,0 +1,76 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['ExtractKernelVersion']
+
+import io
+
+from portage import os, _encodings, _unicode_encode
+from portage.util import getconfig, grabfile
+
+def ExtractKernelVersion(base_dir):
+	"""
+	Try to figure out what kernel version we are running
+	@param base_dir: Path to sources (usually /usr/src/linux)
+	@type base_dir: string
+	@rtype: tuple( version[string], error[string])
+	@returns: tuple( version[string], error[string]);
+		either version or error is populated (but never both)
+
+	"""
+	lines = []
+	pathname = os.path.join(base_dir, 'Makefile')
+	try:
+		f = io.open(_unicode_encode(pathname,
+			encoding=_encodings['fs'], errors='strict'), mode='r',
+			encoding=_encodings['content'], errors='replace')
+	except OSError as details:
+		return (None, str(details))
+	except IOError as details:
+		return (None, str(details))
+
+	try:
+		for i in range(4):
+			lines.append(f.readline())
+	except OSError as details:
+		return (None, str(details))
+	except IOError as details:
+		return (None, str(details))
+
+	lines = [l.strip() for l in lines]
+
+	version = ''
+
+	#XXX: The following code relies on the ordering of vars within the Makefile
+	for line in lines:
+		# split on the '=' then remove annoying whitespace
+		items = line.split("=")
+		items = [i.strip() for i in items]
+		if items[0] == 'VERSION' or \
+			items[0] == 'PATCHLEVEL':
+			version += items[1]
+			version += "."
+		elif items[0] == 'SUBLEVEL':
+			version += items[1]
+		elif items[0] == 'EXTRAVERSION' and \
+			items[-1] != items[0]:
+			version += items[1]
+
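+	# For illustration: a Makefile with VERSION=2, PATCHLEVEL=6, SUBLEVEL=39
+	# and EXTRAVERSION=-gentoo yields version == "2.6.39-gentoo" at this point.
+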
+	# Grab a list of files named localversion* and sort them
+	localversions = os.listdir(base_dir)
+	for x in range(len(localversions)-1,-1,-1):
+		if localversions[x][:12] != "localversion":
+			del localversions[x]
+	localversions.sort()
+
+	# Append the contents of each to the version string, stripping ALL whitespace
+	for lv in localversions:
+		version += "".join( " ".join( grabfile( base_dir+ "/" + lv ) ).split() )
+
+	# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
+	kernelconfig = getconfig(base_dir+"/.config")
+	if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
+		version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
+
+	return (version,None)

diff --git a/portage_with_autodep/pym/portage/util/__init__.py b/portage_with_autodep/pym/portage/util/__init__.py
new file mode 100644
index 0000000..4aa63d5
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/__init__.py
@@ -0,0 +1,1602 @@
+# Copyright 2004-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['apply_permissions', 'apply_recursive_permissions',
+	'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream',
+	'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs',
+	'find_updated_config_files', 'getconfig', 'getlibpaths', 'grabdict',
+	'grabdict_package', 'grabfile', 'grabfile_package', 'grablines',
+	'initialize_logger', 'LazyItemsDict', 'map_dictlist_vals',
+	'new_protect_filename', 'normalize_path', 'pickle_read', 'stack_dictlist',
+	'stack_dicts', 'stack_lists', 'unique_array', 'unique_everseen', 'varexpand',
+	'write_atomic', 'writedict', 'writemsg', 'writemsg_level', 'writemsg_stdout']
+
+from copy import deepcopy
+import errno
+import io
+try:
+	from itertools import filterfalse
+except ImportError:
+	from itertools import ifilterfalse as filterfalse
+import logging
+import re
+import shlex
+import stat
+import string
+import sys
+import traceback
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'pickle',
+	'portage.dep:Atom',
+	'portage.util.listdir:_ignorecvs_dirs'
+)
+
+from portage import os
+from portage import subprocess_getstatusoutput
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_encode
+from portage import _unicode_decode
+from portage.exception import InvalidAtom, PortageException, FileNotFound, \
+       OperationNotPermitted, PermissionDenied, ReadOnlyFileSystem
+from portage.localization import _
+from portage.proxy.objectproxy import ObjectProxy
+from portage.cache.mappings import UserDict
+
+noiselimit = 0
+
+def initialize_logger(level=logging.WARN):
+	"""Sets up basic logging of portage activities
+	Args:
+		level: the minimum logging level to emit messages at
+			(e.g. logging.INFO, logging.WARNING)
+	Returns:
+		None
+	"""
+	logging.basicConfig(level=level, format='[%(levelname)-4s] %(message)s')
+
+def writemsg(mystr,noiselevel=0,fd=None):
+	"""Prints out warning and debug messages based on the noiselimit setting"""
+	global noiselimit
+	if fd is None:
+		fd = sys.stderr
+	if noiselevel <= noiselimit:
+		# avoid potential UnicodeEncodeError
+		if isinstance(fd, io.StringIO):
+			mystr = _unicode_decode(mystr,
+				encoding=_encodings['content'], errors='replace')
+		else:
+			mystr = _unicode_encode(mystr,
+				encoding=_encodings['stdio'], errors='backslashreplace')
+			if sys.hexversion >= 0x3000000 and fd in (sys.stdout, sys.stderr):
+				fd = fd.buffer
+		fd.write(mystr)
+		fd.flush()
+
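+# For example, writemsg("debug\n", noiselevel=1) is suppressed under the
+# default noiselimit of 0, since a message prints only when
+# noiselevel <= noiselimit.
+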
+def writemsg_stdout(mystr,noiselevel=0):
+	"""Prints messages to stdout based on the noiselimit setting"""
+	writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
+
+def writemsg_level(msg, level=0, noiselevel=0):
+	"""
+	Show a message for the given level as defined by the logging module
+	(default is 0). When level >= logging.WARNING then the message is
+	sent to stderr, otherwise it is sent to stdout. The noiselevel is
+	passed directly to writemsg().
+
+	@type msg: str
+	@param msg: a message string, including newline if appropriate
+	@type level: int
+	@param level: a numeric logging level (see the logging module)
+	@type noiselevel: int
+	@param noiselevel: passed directly to writemsg
+	"""
+	if level >= logging.WARNING:
+		fd = sys.stderr
+	else:
+		fd = sys.stdout
+	writemsg(msg, noiselevel=noiselevel, fd=fd)
+
+def normalize_path(mypath):
+	""" 
+	os.path.normpath("//foo") returns "//foo" instead of "/foo"
+	We dislike this behavior so we create our own normpath func
+	to fix it.
+	"""
+	if sys.hexversion >= 0x3000000 and isinstance(mypath, bytes):
+		path_sep = os.path.sep.encode()
+	else:
+		path_sep = os.path.sep
+
+	if mypath.startswith(path_sep):
+		# posixpath.normpath collapses 3 or more leading slashes to just 1.
+		return os.path.normpath(2*path_sep + mypath)
+	else:
+		return os.path.normpath(mypath)
+
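+# For example, normalize_path("//foo") returns "/foo", whereas
+# os.path.normpath("//foo") returns "//foo" (POSIX gives exactly two
+# leading slashes an implementation-defined meaning).
+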
+def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False):
+	"""This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
+	begins with a #, it is ignored, as are empty lines"""
+
+	mylines=grablines(myfilename, recursive, remember_source_file=True)
+	newlines=[]
+
+	for x, source_file in mylines:
+		#the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+		#into single spaces.
+		myline = x.split()
+		if x and x[0] != "#":
+			mylinetemp = []
+			for item in myline:
+				if item[:1] != "#":
+					mylinetemp.append(item)
+				else:
+					break
+			myline = mylinetemp
+
+		myline = " ".join(myline)
+		if not myline:
+			continue
+		if myline[0]=="#":
+			# Check if we have a compat-level string. BC-integration data.
+			# '##COMPAT==>N<==' 'some string attached to it'
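+			# For example, "##COMPAT==>2<== sys-apps/foo" contributes
+			# " sys-apps/foo" when compat_level >= 2.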
+			mylinetest = myline.split("<==",1)
+			if len(mylinetest) == 2:
+				myline_potential = mylinetest[1]
+				mylinetest = mylinetest[0].split("##COMPAT==>")
+				if len(mylinetest) == 2:
+					if compat_level >= int(mylinetest[1]):
+						# It's a compat line, and the key matches.
+						newlines.append(myline_potential)
+				continue
+			else:
+				continue
+		if remember_source_file:
+			newlines.append((myline, source_file))
+		else:
+			newlines.append(myline)
+	return newlines
+
+def map_dictlist_vals(func,myDict):
+	"""Performs a function on each value of each key in a dictlist.
+	Returns a new dictlist."""
+	new_dl = {}
+	for key in myDict:
+		new_dl[key] = []
+		new_dl[key] = [func(x) for x in myDict[key]]
+	return new_dl
+
+def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0):
+	"""
+	Stacks an array of dict-types into one array. Optionally merging or
+	overwriting matching key/value pairs for the dict[key]->list.
+	Returns a single dict. Higher index in lists takes precedence.
+	
+	Example usage:
+		>>> from portage.util import stack_dictlist
+		>>> print stack_dictlist( [{'a':'b'},{'x':'y'}] )
+		{'a': ['b'], 'x': ['y']}
+		>>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True )
+		{'a': ['b', 'c']}
+		>>> a = {'KEYWORDS':['x86','alpha']}
+		>>> b = {'KEYWORDS':['-x86']}
+		>>> print stack_dictlist( [a,b] )
+		{'KEYWORDS': ['x86', 'alpha', '-x86']}
+		>>> print stack_dictlist( [a,b], incremental=True )
+		{'KEYWORDS': ['alpha']}
+		>>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'] )
+		{'KEYWORDS': ['alpha']}
+	
+	@param original_dicts a list of (dictionary objects or None)
+	@type list
+	@param incremental True or false depending on whether new keys should overwrite
+	   keys which already exist.
+	@type boolean
+	@param incrementals A list of items that should be incremental (-foo removes foo from
+	   the returned dict).
+	@type list
+	@param ignore_none Appears to be ignored, but was probably used long ago.
+	@type boolean
+	
+	"""
+	final_dict = {}
+	for mydict in original_dicts:
+		if mydict is None:
+			continue
+		for y in mydict:
+			if not y in final_dict:
+				final_dict[y] = []
+			
+			for thing in mydict[y]:
+				if thing:
+					if incremental or y in incrementals:
+						if thing == "-*":
+							final_dict[y] = []
+							continue
+						elif thing[:1] == '-':
+							try:
+								final_dict[y].remove(thing[1:])
+							except ValueError:
+								pass
+							continue
+					if thing not in final_dict[y]:
+						final_dict[y].append(thing)
+			if y in final_dict and not final_dict[y]:
+				del final_dict[y]
+	return final_dict
+
+def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
+	"""Stacks an array of dict-types into one array. Optionally merging or
+	overwriting matching key/value pairs for the dict[key]->string.
+	Returns a single dict."""
+	final_dict = {}
+	for mydict in dicts:
+		if not mydict:
+			continue
+		for k, v in mydict.items():
+			if k in final_dict and (incremental or (k in incrementals)):
+				final_dict[k] += " " + v
+			else:
+				final_dict[k]  = v
+	return final_dict
+
+def append_repo(atom_list, repo_name, remember_source_file=False):
+	"""
+	Takes a list of valid atoms without repo spec and appends ::repo_name.
+	"""
+	if remember_source_file:
+		return [(Atom(atom + "::" + repo_name, allow_wildcard=True, allow_repo=True), source) \
+			for atom, source in atom_list]
+	else:
+		return [Atom(atom + "::" + repo_name, allow_wildcard=True, allow_repo=True) \
+			for atom in atom_list]
+
+def stack_lists(lists, incremental=1, remember_source_file=False,
+	warn_for_unmatched_removal=False, strict_warn_for_unmatched_removal=False, ignore_repo=False):
+	"""Stacks an array of list-types into one array. Optionally removing
+	distinct values using '-value' notation. Higher index takes precedence.
+
+	All elements must be hashable."""
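+	# For example (result ordering may vary, since a plain dict is used):
+	#   stack_lists([["a", "b"], ["-b", "c"]]) returns ["a", "c"]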
+	matched_removals = set()
+	unmatched_removals = {}
+	new_list = {}
+	for sub_list in lists:
+		for token in sub_list:
+			token_key = token
+			if remember_source_file:
+				token, source_file = token
+			else:
+				source_file = False
+
+			if token is None:
+				continue
+
+			if incremental:
+				if token == "-*":
+					new_list.clear()
+				elif token[:1] == '-':
+					matched = False
+					if ignore_repo and not "::" in token:
+						#Let -cat/pkg remove cat/pkg::repo.
+						to_be_removed = []
+						token_slice = token[1:]
+						for atom in new_list:
+							atom_without_repo = atom
+							if atom.repo is not None:
+								# Atom.without_repo instantiates a new Atom,
+								# which is unnecessary here, so use string
+								# replacement instead.
+								atom_without_repo = \
+									atom.replace("::" + atom.repo, "", 1)
+							if atom_without_repo == token_slice:
+								to_be_removed.append(atom)
+						if to_be_removed:
+							matched = True
+							for atom in to_be_removed:
+								new_list.pop(atom)
+					else:
+						try:
+							new_list.pop(token[1:])
+							matched = True
+						except KeyError:
+							pass
+
+					if not matched:
+						if source_file and \
+							(strict_warn_for_unmatched_removal or \
+							token_key not in matched_removals):
+							unmatched_removals.setdefault(source_file, set()).add(token)
+					else:
+						matched_removals.add(token_key)
+				else:
+					new_list[token] = source_file
+			else:
+				new_list[token] = source_file
+
+	if warn_for_unmatched_removal:
+		for source_file, tokens in unmatched_removals.items():
+			if len(tokens) > 3:
+				selected = [tokens.pop(), tokens.pop(), tokens.pop()]
+				writemsg(_("--- Unmatch removal atoms in %s: %s and %s more\n") % \
+					(source_file, ", ".join(selected), len(tokens)),
+					noiselevel=-1)
+			else:
+				writemsg(_("--- Unmatch removal atom(s) in %s: %s\n") % (source_file, ", ".join(tokens)),
+					noiselevel=-1)
+
+	if remember_source_file:
+		return list(new_list.items())
+	else:
+		return list(new_list)
+
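+# Illustrative usage of stack_lists (a sketch; element order reflects dict
+# iteration order and may vary):
+#
+#     stack_lists([["a", "b"], ["-a", "c"]], incremental=1)
+#     => ['b', 'c']
+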
+def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
+	"""
+	This function grabs the lines in a file, normalizes whitespace and returns the lines in a dictionary.
+	
+	@param myfilename: file to process
+	@type myfilename: string (path)
+	@param juststrings: return the values as space-joined strings rather than lists
+	@type juststrings: Boolean (integer)
+	@param empty: allow lines that contain a key with no values
+	@type empty: Boolean (integer)
+	@param recursive: Recursively grab (support for /etc/portage/package.keywords/* and friends)
+	@type recursive: Boolean (integer)
+	@param incremental: Append to the return list, don't overwrite
+	@type incremental: Boolean (integer)
+	@rtype: Dictionary
+	@return:
+	Returns the lines in a file in a dictionary, for example:
+		'sys-apps/portage x86 amd64 ppc'
+		would return
+		{ "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ] }
+	The line syntax is: key followed by a list of values.
+	"""
+	newdict={}
+	for x in grablines(myfilename, recursive):
+		#split() drops leading and trailing whitespace and collapses internal
+		#whitespace; anything from the first '#' token onward is discarded as a comment.
+		if x[0] == "#":
+			continue
+		myline=x.split()
+		mylinetemp = []
+		for item in myline:
+			if item[:1] != "#":
+				mylinetemp.append(item)
+			else:
+				break
+		myline = mylinetemp
+		if len(myline) < 2 and empty == 0:
+			continue
+		if len(myline) < 1 and empty == 1:
+			continue
+		if incremental:
+			newdict.setdefault(myline[0], []).extend(myline[1:])
+		else:
+			newdict[myline[0]] = myline[1:]
+	if juststrings:
+		for k, v in newdict.items():
+			newdict[k] = " ".join(v)
+	return newdict
+
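+# Illustrative usage of grabdict (a sketch; '/etc/portage/package.keywords' is
+# a hypothetical file containing the line 'sys-apps/portage x86 amd64'):
+#
+#     grabdict('/etc/portage/package.keywords')
+#     => {'sys-apps/portage': ['x86', 'amd64']}
+#     grabdict('/etc/portage/package.keywords', juststrings=1)
+#     => {'sys-apps/portage': 'x86 amd64'}
+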
+def read_corresponding_eapi_file(filename):
+	"""
+	Read the 'eapi' file from the directory 'filename' is in.
+	Returns "0" if the file is not present or invalid.
+	"""
+	default = "0"
+	eapi_file = os.path.join(os.path.dirname(filename), "eapi")
+	try:
+		f = open(eapi_file, "r")
+		lines = f.readlines()
+		if len(lines) == 1:
+			eapi = lines[0].rstrip("\n")
+		else:
+			writemsg(_("--- Invalid 'eapi' file (doesn't contain exactly one line): %s\n") % (eapi_file),
+				noiselevel=-1)
+			eapi = default
+		f.close()
+	except IOError:
+		eapi = default
+
+	return eapi
+
+def grabdict_package(myfilename, juststrings=0, recursive=0, allow_wildcard=False, allow_repo=False,
+	verify_eapi=False, eapi=None):
+	""" Does the same thing as grabdict except it validates keys
+	    by constructing Atom instances; invalid atoms are reported and dropped"""
+	pkgs=grabdict(myfilename, juststrings, empty=1, recursive=recursive)
+	if not pkgs:
+		return pkgs
+	if verify_eapi and eapi is None:
+		eapi = read_corresponding_eapi_file(myfilename)
+
+	# Build a new dict with validated Atom instances as keys. Invalid
+	# atoms are reported and skipped, which also avoids mutating the
+	# original dict while it is being iterated.
+	atoms = {}
+	for k, v in pkgs.items():
+		try:
+			k = Atom(k, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
+		except InvalidAtom as e:
+			writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, e),
+				noiselevel=-1)
+		else:
+			atoms[k] = v
+	return atoms
+
+def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=False, allow_repo=False,
+	remember_source_file=False, verify_eapi=False, eapi=None):
+
+	pkgs=grabfile(myfilename, compatlevel, recursive=recursive, remember_source_file=True)
+	if not pkgs:
+		return pkgs
+	if verify_eapi and eapi is None:
+		eapi = read_corresponding_eapi_file(myfilename)
+	mybasename = os.path.basename(myfilename)
+	atoms = []
+	for pkg, source_file in pkgs:
+		pkg_orig = pkg
+		# for packages and package.mask files
+		if pkg[:1] == "-":
+			pkg = pkg[1:]
+		if pkg[:1] == '*' and mybasename == 'packages':
+			pkg = pkg[1:]
+		try:
+			pkg = Atom(pkg, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
+		except InvalidAtom as e:
+			writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, e),
+				noiselevel=-1)
+		else:
+			if pkg_orig == str(pkg):
+				# normal atom, so return as Atom instance
+				if remember_source_file:
+					atoms.append((pkg, source_file))
+				else:
+					atoms.append(pkg)
+			else:
+				# atom has special prefix, so return as string
+				if remember_source_file:
+					atoms.append((pkg_orig, source_file))
+				else:
+					atoms.append(pkg_orig)
+	return atoms
+
+def grablines(myfilename, recursive=0, remember_source_file=False):
+	mylines=[]
+	if recursive and os.path.isdir(myfilename):
+		if os.path.basename(myfilename) in _ignorecvs_dirs:
+			return mylines
+		dirlist = os.listdir(myfilename)
+		dirlist.sort()
+		for f in dirlist:
+			if not f.startswith(".") and not f.endswith("~"):
+				mylines.extend(grablines(
+					os.path.join(myfilename, f), recursive, remember_source_file))
+	else:
+		try:
+			myfile = io.open(_unicode_encode(myfilename,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['content'], errors='replace')
+			if remember_source_file:
+				mylines = [(line, myfilename) for line in myfile.readlines()]
+			else:
+				mylines = myfile.readlines()
+			myfile.close()
+		except IOError as e:
+			if e.errno == PermissionDenied.errno:
+				raise PermissionDenied(myfilename)
+			pass
+	return mylines
+
+def writedict(mydict,myfilename,writekey=True):
+	"""Writes out a dict to a file; writekey=0 mode doesn't write out
+	the key and assumes all values are strings, not lists."""
+	lines = []
+	if not writekey:
+		for v in mydict.values():
+			lines.append(v + "\n")
+	else:
+		for k, v in mydict.items():
+			lines.append("%s %s\n" % (k, " ".join(v)))
+	write_atomic(myfilename, "".join(lines))
+
+def shlex_split(s):
+	"""
+	This is equivalent to shlex.split but it temporarily encodes unicode
+	strings to bytes since shlex.split() doesn't handle unicode strings.
+	"""
+	is_unicode = sys.hexversion < 0x3000000 and isinstance(s, unicode)
+	if is_unicode:
+		s = _unicode_encode(s)
+	rval = shlex.split(s)
+	if is_unicode:
+		rval = [_unicode_decode(x) for x in rval]
+	return rval
+
+class _tolerant_shlex(shlex.shlex):
+	def sourcehook(self, newfile):
+		try:
+			return shlex.shlex.sourcehook(self, newfile)
+		except EnvironmentError as e:
+			writemsg(_("!!! Parse error in '%s': source command failed: %s\n") % \
+				(self.infile, str(e)), noiselevel=-1)
+			return (newfile, io.StringIO())
+
+_invalid_var_name_re = re.compile(r'^\d|\W')
+
+def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
+	if isinstance(expand, dict):
+		# Some existing variable definitions have been
+		# passed in, for use in substitutions.
+		expand_map = expand
+		expand = True
+	else:
+		expand_map = {}
+	mykeys = {}
+	try:
+		# NOTE: shlex doesn't support unicode objects with Python 2
+		# (produces spurious \0 characters).
+		if sys.hexversion < 0x3000000:
+			content = open(_unicode_encode(mycfg,
+				encoding=_encodings['fs'], errors='strict'), 'rb').read()
+		else:
+			content = open(_unicode_encode(mycfg,
+				encoding=_encodings['fs'], errors='strict'), mode='r',
+				encoding=_encodings['content'], errors='replace').read()
+	except IOError as e:
+		if e.errno == PermissionDenied.errno:
+			raise PermissionDenied(mycfg)
+		if e.errno != errno.ENOENT:
+			writemsg("open('%s', 'r'): %s\n" % (mycfg, e), noiselevel=-1)
+			if e.errno not in (errno.EISDIR,):
+				raise
+		return None
+
+	# Workaround for avoiding a silent error in shlex that is
+	# triggered by a source statement at the end of the file
+	# without a trailing newline after the source statement.
+	if content and content[-1] != '\n':
+		content += '\n'
+
+	# Warn about dos-style line endings since that prevents
+	# people from being able to source them with bash.
+	if '\r' in content:
+		writemsg(("!!! " + _("Please use dos2unix to convert line endings " + \
+			"in config file: '%s'") + "\n") % mycfg, noiselevel=-1)
+
+	try:
+		if tolerant:
+			shlex_class = _tolerant_shlex
+		else:
+			shlex_class = shlex.shlex
+		# The default shlex.sourcehook() implementation
+		# only joins relative paths when the infile
+		# attribute is properly set.
+		lex = shlex_class(content, infile=mycfg, posix=True)
+		lex.wordchars = string.digits + string.ascii_letters + \
+			"~!@#$%*_\:;?,./-+{}"
+		lex.quotes="\"'"
+		if allow_sourcing:
+			lex.source="source"
+		while 1:
+			key=lex.get_token()
+			if key == "export":
+				key = lex.get_token()
+			if key is None:
+				#normal end of file
+				break
+			equ=lex.get_token()
+			if (equ==''):
+				#unexpected end of file
+				#lex.error_leader(self.filename,lex.lineno)
+				if not tolerant:
+					writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
+						noiselevel=-1)
+					raise Exception(_("ParseError: Unexpected EOF: %s: on/before line %s") % (mycfg, lex.lineno))
+				else:
+					return mykeys
+			elif (equ!='='):
+				#invalid token
+				#lex.error_leader(self.filename,lex.lineno)
+				if not tolerant:
+					raise Exception(_("ParseError: Invalid token "
+						"'%s' (not '='): %s: line %s") % \
+						(equ, mycfg, lex.lineno))
+				else:
+					return mykeys
+			val=lex.get_token()
+			if val is None:
+				#unexpected end of file
+				#lex.error_leader(self.filename,lex.lineno)
+				if not tolerant:
+					writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
+						noiselevel=-1)
+					raise portage.exception.CorruptionError(_("ParseError: Unexpected EOF: %s: line %s") % (mycfg, lex.lineno))
+				else:
+					return mykeys
+			key = _unicode_decode(key)
+			val = _unicode_decode(val)
+
+			if _invalid_var_name_re.search(key) is not None:
+				if not tolerant:
+					raise Exception(_(
+						"ParseError: Invalid variable name '%s': line %s") % \
+						(key, lex.lineno - 1))
+				writemsg(_("!!! Invalid variable name '%s': line %s in %s\n") \
+					% (key, lex.lineno - 1, mycfg), noiselevel=-1)
+				continue
+
+			if expand:
+				mykeys[key] = varexpand(val, expand_map)
+				expand_map[key] = mykeys[key]
+			else:
+				mykeys[key] = val
+	except SystemExit:
+		raise
+	except Exception as e:
+		raise portage.exception.ParseError(str(e)+" in "+mycfg)
+	return mykeys
+	
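+# Illustrative usage of getconfig (a sketch; '/etc/make.conf' is assumed to
+# contain the two lines  FOO="bar"  and  BAZ="${FOO} qux"):
+#
+#     getconfig('/etc/make.conf')
+#     => {'FOO': 'bar', 'BAZ': 'bar qux'}
+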
+#cache expansions of constant strings
+cexpand={}
+def varexpand(mystring, mydict=None):
+	if mydict is None:
+		mydict = {}
+	newstring = cexpand.get(" "+mystring, None)
+	if newstring is not None:
+		return newstring
+
+	"""
+	new variable expansion code.  Preserves quotes, handles \n, etc.
+	This code is used by the configfile code, as well as others (parser)
+	This would be a good bunch of code to port to C.
+	"""
+	numvars=0
+	mystring=" "+mystring
+	#in single, double quotes
+	insing=0
+	indoub=0
+	pos=1
+	newstring=" "
+	while (pos<len(mystring)):
+		if (mystring[pos]=="'") and (mystring[pos-1]!="\\"):
+			if (indoub):
+				newstring=newstring+"'"
+			else:
+				newstring += "'" # Quote removal is handled by shlex.
+				insing=not insing
+			pos=pos+1
+			continue
+		elif (mystring[pos]=='"') and (mystring[pos-1]!="\\"):
+			if (insing):
+				newstring=newstring+'"'
+			else:
+				newstring += '"' # Quote removal is handled by shlex.
+				indoub=not indoub
+			pos=pos+1
+			continue
+		if (not insing): 
+			#expansion time
+			if (mystring[pos]=="\n"):
+				#convert newlines to spaces
+				newstring=newstring+" "
+				pos=pos+1
+			elif (mystring[pos]=="\\"):
+				# For backslash expansion, this function used to behave like
+				# echo -e, but that's not needed for our purposes. We want to
+				# behave like bash does when expanding a variable assignment
+				# in a sourced file, in which case it performs backslash
+				# removal for \\ and \$ but nothing more. It also removes
+				# escaped newline characters. Note that we don't handle
+				# escaped quotes here, since getconfig() uses shlex
+				# to handle that earlier.
+				if (pos+1>=len(mystring)):
+					newstring=newstring+mystring[pos]
+					break
+				else:
+					a = mystring[pos + 1]
+					pos = pos + 2
+					if a in ("\\", "$"):
+						newstring = newstring + a
+					elif a == "\n":
+						pass
+					else:
+						newstring = newstring + mystring[pos-2:pos]
+					continue
+			elif (mystring[pos]=="$") and (mystring[pos-1]!="\\"):
+				pos=pos+1
+				if mystring[pos]=="{":
+					pos=pos+1
+					braced=True
+				else:
+					braced=False
+				myvstart=pos
+				validchars=string.ascii_letters+string.digits+"_"
+				while mystring[pos] in validchars:
+					if (pos+1)>=len(mystring):
+						if braced:
+							cexpand[mystring]=""
+							return ""
+						else:
+							pos=pos+1
+							break
+					pos=pos+1
+				myvarname=mystring[myvstart:pos]
+				if braced:
+					if mystring[pos]!="}":
+						cexpand[mystring]=""
+						return ""
+					else:
+						pos=pos+1
+				if len(myvarname)==0:
+					cexpand[mystring]=""
+					return ""
+				numvars=numvars+1
+				if myvarname in mydict:
+					newstring=newstring+mydict[myvarname] 
+			else:
+				newstring=newstring+mystring[pos]
+				pos=pos+1
+		else:
+			newstring=newstring+mystring[pos]
+			pos=pos+1
+	if numvars==0:
+		cexpand[mystring]=newstring[1:]
+	return newstring[1:]	
+
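+# Illustrative usage of varexpand (a sketch):
+#
+#     varexpand("${FOO}/bar", {"FOO": "/usr"})
+#     => '/usr/bar'
+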
+# broken and removed, but can still be imported
+pickle_write = None
+
+def pickle_read(filename,default=None,debug=0):
+	if not os.access(filename, os.R_OK):
+		writemsg(_("pickle_read(): File not readable. '")+filename+"'\n",1)
+		return default
+	data = None
+	try:
+		myf = open(_unicode_encode(filename,
+			encoding=_encodings['fs'], errors='strict'), 'rb')
+		mypickle = pickle.Unpickler(myf)
+		data = mypickle.load()
+		myf.close()
+		del mypickle,myf
+		writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n",1)
+	except SystemExit:
+		raise
+	except Exception as e:
+		writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n",1)
+		data = default
+	return data
+
+def dump_traceback(msg, noiselevel=1):
+	info = sys.exc_info()
+	if not info[2]:
+		stack = traceback.extract_stack()[:-1]
+		error = None
+	else:
+		stack = traceback.extract_tb(info[2])
+		error = str(info[1])
+	writemsg("\n====================================\n", noiselevel=noiselevel)
+	writemsg("%s\n\n" % msg, noiselevel=noiselevel)
+	for line in traceback.format_list(stack):
+		writemsg(line, noiselevel=noiselevel)
+	if error:
+		writemsg(error+"\n", noiselevel=noiselevel)
+	writemsg("====================================\n\n", noiselevel=noiselevel)
+
+class cmp_sort_key(object):
+	"""
+	In python-3.0 the list.sort() method no longer has a "cmp" keyword
+	argument. This class acts as an adapter which converts a cmp function
+	into one that's suitable for use as the "key" keyword argument to
+	list.sort(), making it easier to port code for python-3.0 compatibility.
+	It works by generating key objects which use the given cmp function to
+	implement their __lt__ method.
+	"""
+	__slots__ = ("_cmp_func",)
+
+	def __init__(self, cmp_func):
+		"""
+		@type cmp_func: callable which takes 2 positional arguments
+		@param cmp_func: A cmp function.
+		"""
+		self._cmp_func = cmp_func
+
+	def __call__(self, lhs):
+		return self._cmp_key(self._cmp_func, lhs)
+
+	class _cmp_key(object):
+		__slots__ = ("_cmp_func", "_obj")
+
+		def __init__(self, cmp_func, obj):
+			self._cmp_func = cmp_func
+			self._obj = obj
+
+		def __lt__(self, other):
+			if other.__class__ is not self.__class__:
+				raise TypeError("Expected type %s, got %s" % \
+					(self.__class__, other.__class__))
+			return self._cmp_func(self._obj, other._obj) < 0
+
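+# Illustrative usage of cmp_sort_key (a sketch; the lambda plays the role of a
+# python-2.x style cmp function):
+#
+#     mylist = [3, 1, 2]
+#     mylist.sort(key=cmp_sort_key(lambda a, b: a - b))
+#     => mylist is now [1, 2, 3]
+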
+def unique_array(s):
+	"""lifted from python cookbook, credit: Tim Peters
+	Return a list of the elements in s in arbitrary order, sans duplicates"""
+	n = len(s)
+	# assume all elements are hashable, if so, it's linear
+	try:
+		return list(set(s))
+	except TypeError:
+		pass
+
+	# so much for linear.  abuse sort.
+	try:
+		t = list(s)
+		t.sort()
+	except TypeError:
+		pass
+	else:
+		assert n > 0
+		last = t[0]
+		lasti = i = 1
+		while i < n:
+			if t[i] != last:
+				t[lasti] = last = t[i]
+				lasti += 1
+			i += 1
+		return t[:lasti]
+
+	# Fall back to the original portage.unique_array approach.
+	u = []
+	for x in s:
+		if x not in u:
+			u.append(x)
+	return u
+
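+# Illustrative usage of unique_array (a sketch; result order is arbitrary for
+# hashable elements):
+#
+#     unique_array([1, 2, 2, 3])     => e.g. [1, 2, 3]
+#     unique_array([[1], [1], [2]])  => [[1], [2]]  (unhashable, sort path)
+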
+def unique_everseen(iterable, key=None):
+	"""
+	List unique elements, preserving order. Remember all elements ever seen.
+	Taken from itertools documentation.
+	"""
+	# unique_everseen('AAAABBBCCDAABBB') --> A B C D
+	# unique_everseen('ABBCcAD', str.lower) --> A B C D
+	seen = set()
+	seen_add = seen.add
+	if key is None:
+		for element in filterfalse(seen.__contains__, iterable):
+			seen_add(element)
+			yield element
+	else:
+		for element in iterable:
+			k = key(element)
+			if k not in seen:
+				seen_add(k)
+				yield element
+
+def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
+	stat_cached=None, follow_links=True):
+	"""Apply user, group, and mode bits to a file if the existing bits do not
+	already match.  The default behavior is to force an exact match of mode
+	bits.  When mask=0 is specified, mode bits on the target file are allowed
+	to be a superset of the mode argument (via logical OR).  When mask>0, the
+	mode bits that the target file is allowed to have are restricted via
+	logical XOR.
+	Returns True if the permissions were modified and False otherwise."""
+
+	modified = False
+
+	if stat_cached is None:
+		try:
+			if follow_links:
+				stat_cached = os.stat(filename)
+			else:
+				stat_cached = os.lstat(filename)
+		except OSError as oe:
+			func_call = "stat('%s')" % filename
+			if oe.errno == errno.EPERM:
+				raise OperationNotPermitted(func_call)
+			elif oe.errno == errno.EACCES:
+				raise PermissionDenied(func_call)
+			elif oe.errno == errno.ENOENT:
+				raise FileNotFound(filename)
+			else:
+				raise
+
+	if	(uid != -1 and uid != stat_cached.st_uid) or \
+		(gid != -1 and gid != stat_cached.st_gid):
+		try:
+			if follow_links:
+				os.chown(filename, uid, gid)
+			else:
+				portage.data.lchown(filename, uid, gid)
+			modified = True
+		except OSError as oe:
+			func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
+			if oe.errno == errno.EPERM:
+				raise OperationNotPermitted(func_call)
+			elif oe.errno == errno.EACCES:
+				raise PermissionDenied(func_call)
+			elif oe.errno == errno.EROFS:
+				raise ReadOnlyFileSystem(func_call)
+			elif oe.errno == errno.ENOENT:
+				raise FileNotFound(filename)
+			else:
+				raise
+
+	new_mode = -1
+	st_mode = stat_cached.st_mode & 0o7777 # protect from unwanted bits
+	if mask >= 0:
+		if mode == -1:
+			mode = 0 # Don't add any mode bits when mode is unspecified.
+		else:
+			mode = mode & 0o7777
+		if	(mode & st_mode != mode) or \
+			((mask ^ st_mode) & st_mode != st_mode):
+			new_mode = mode | st_mode
+			new_mode = (mask ^ new_mode) & new_mode
+	elif mode != -1:
+		mode = mode & 0o7777 # protect from unwanted bits
+		if mode != st_mode:
+			new_mode = mode
+
+	# The chown system call may clear S_ISUID and S_ISGID
+	# bits, so those bits are restored if necessary.
+	if modified and new_mode == -1 and \
+		(st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
+		if mode == -1:
+			new_mode = st_mode
+		else:
+			mode = mode & 0o7777
+			if mask >= 0:
+				new_mode = mode | st_mode
+				new_mode = (mask ^ new_mode) & new_mode
+			else:
+				new_mode = mode
+			if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
+				new_mode = -1
+
+	if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
+		# Mode doesn't matter for symlinks.
+		new_mode = -1
+
+	if new_mode != -1:
+		try:
+			os.chmod(filename, new_mode)
+			modified = True
+		except OSError as oe:
+			func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
+			if oe.errno == errno.EPERM:
+				raise OperationNotPermitted(func_call)
+			elif oe.errno == errno.EACCES:
+				raise PermissionDenied(func_call)
+			elif oe.errno == errno.EROFS:
+				raise ReadOnlyFileSystem(func_call)
+			elif oe.errno == errno.ENOENT:
+				raise FileNotFound(filename)
+			raise
+	return modified
+
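+# Illustrative usage of apply_permissions (a sketch; '/tmp/example' is a
+# hypothetical path, and the return value depends on its current bits):
+#
+#     apply_permissions('/tmp/example', mode=0o644)
+#     => True if the mode was changed, False if it already matched
+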
+def apply_stat_permissions(filename, newstat, **kwargs):
+	"""A wrapper around apply_secpass_permissions that gets
+	uid, gid, and mode from a stat object"""
+	return apply_secpass_permissions(filename, uid=newstat.st_uid, gid=newstat.st_gid,
+	mode=newstat.st_mode, **kwargs)
+
+def apply_recursive_permissions(top, uid=-1, gid=-1,
+	dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
+	"""A wrapper around apply_secpass_permissions that applies permissions
+	recursively.  If optional argument onerror is specified, it should be a
+	function; it will be called with one argument, a PortageException instance.
+	Returns True if all permissions are applied and False if some are left
+	unapplied."""
+
+	# Avoid issues with circular symbolic links, as in bug #339670.
+	follow_links = False
+
+	if onerror is None:
+		# Default behavior is to dump errors to stderr so they won't
+		# go unnoticed.  Callers can pass in a quiet instance.
+		def onerror(e):
+			if isinstance(e, OperationNotPermitted):
+				writemsg(_("Operation Not Permitted: %s\n") % str(e),
+					noiselevel=-1)
+			elif isinstance(e, FileNotFound):
+				writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
+			else:
+				raise
+
+	all_applied = True
+	for dirpath, dirnames, filenames in os.walk(top):
+		try:
+			applied = apply_secpass_permissions(dirpath,
+				uid=uid, gid=gid, mode=dirmode, mask=dirmask,
+				follow_links=follow_links)
+			if not applied:
+				all_applied = False
+		except PortageException as e:
+			all_applied = False
+			onerror(e)
+
+		for name in filenames:
+			try:
+				applied = apply_secpass_permissions(os.path.join(dirpath, name),
+					uid=uid, gid=gid, mode=filemode, mask=filemask,
+					follow_links=follow_links)
+				if not applied:
+					all_applied = False
+			except PortageException as e:
+				# Ignore InvalidLocation exceptions such as FileNotFound
+				# and DirectoryNotFound since sometimes things disappear,
+				# like when adjusting permissions on DISTCC_DIR.
+				if not isinstance(e, portage.exception.InvalidLocation):
+					all_applied = False
+					onerror(e)
+	return all_applied
+
+def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
+	stat_cached=None, follow_links=True):
+	"""A wrapper around apply_permissions that uses secpass and simple
+	logic to apply as much of the permissions as possible without
+	generating an obviously avoidable permission exception. Despite
+	attempts to avoid an exception, it's possible that one will be raised
+	anyway, so be prepared.
+	Returns True if all permissions are applied and False if some are left
+	unapplied."""
+
+	if stat_cached is None:
+		try:
+			if follow_links:
+				stat_cached = os.stat(filename)
+			else:
+				stat_cached = os.lstat(filename)
+		except OSError as oe:
+			func_call = "stat('%s')" % filename
+			if oe.errno == errno.EPERM:
+				raise OperationNotPermitted(func_call)
+			elif oe.errno == errno.EACCES:
+				raise PermissionDenied(func_call)
+			elif oe.errno == errno.ENOENT:
+				raise FileNotFound(filename)
+			else:
+				raise
+
+	all_applied = True
+
+	if portage.data.secpass < 2:
+
+		if uid != -1 and \
+		uid != stat_cached.st_uid:
+			all_applied = False
+			uid = -1
+
+		if gid != -1 and \
+		gid != stat_cached.st_gid and \
+		gid not in os.getgroups():
+			all_applied = False
+			gid = -1
+
+	apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
+		stat_cached=stat_cached, follow_links=follow_links)
+	return all_applied
+
+class atomic_ofstream(ObjectProxy):
+	"""Write a file atomically via os.rename().  Atomic replacement prevents
+	interprocess interference and prevents corruption of the target
+	file when the write is interrupted (for example, when an 'out of space'
+	error occurs)."""
+
+	def __init__(self, filename, mode='w', follow_links=True, **kargs):
+		"""Opens a temporary filename.pid in the same directory as filename."""
+		ObjectProxy.__init__(self)
+		object.__setattr__(self, '_aborted', False)
+		if 'b' in mode:
+			open_func = open
+		else:
+			open_func = io.open
+			kargs.setdefault('encoding', _encodings['content'])
+			kargs.setdefault('errors', 'backslashreplace')
+
+		if follow_links:
+			canonical_path = os.path.realpath(filename)
+			object.__setattr__(self, '_real_name', canonical_path)
+			tmp_name = "%s.%i" % (canonical_path, os.getpid())
+			try:
+				object.__setattr__(self, '_file',
+					open_func(_unicode_encode(tmp_name,
+						encoding=_encodings['fs'], errors='strict'),
+						mode=mode, **kargs))
+				return
+			except IOError as e:
+				if canonical_path == filename:
+					raise
+				# Ignore this error, since it's irrelevant
+				# and the below open call will produce a
+				# new error if necessary.
+
+		object.__setattr__(self, '_real_name', filename)
+		tmp_name = "%s.%i" % (filename, os.getpid())
+		object.__setattr__(self, '_file',
+			open_func(_unicode_encode(tmp_name,
+				encoding=_encodings['fs'], errors='strict'),
+				mode=mode, **kargs))
+
+	def _get_target(self):
+		return object.__getattribute__(self, '_file')
+
+	if sys.hexversion >= 0x3000000:
+
+		def __getattribute__(self, attr):
+			if attr in ('close', 'abort', '__del__'):
+				return object.__getattribute__(self, attr)
+			return getattr(object.__getattribute__(self, '_file'), attr)
+
+	else:
+
+		# For TextIOWrapper, automatically coerce write calls to
+		# unicode, in order to avoid TypeError when writing raw
+		# bytes with python2.
+
+		def __getattribute__(self, attr):
+			if attr in ('close', 'abort', 'write', '__del__'):
+				return object.__getattribute__(self, attr)
+			return getattr(object.__getattribute__(self, '_file'), attr)
+
+		def write(self, s):
+			f = object.__getattribute__(self, '_file')
+			if isinstance(f, io.TextIOWrapper):
+				s = _unicode_decode(s)
+			return f.write(s)
+
+	def close(self):
+		"""Closes the temporary file, copies permissions (if possible),
+		and performs the atomic replacement via os.rename().  If the abort()
+		method has been called, then the temp file is closed and removed."""
+		f = object.__getattribute__(self, '_file')
+		real_name = object.__getattribute__(self, '_real_name')
+		if not f.closed:
+			try:
+				f.close()
+				if not object.__getattribute__(self, '_aborted'):
+					try:
+						apply_stat_permissions(f.name, os.stat(real_name))
+					except OperationNotPermitted:
+						pass
+					except FileNotFound:
+						pass
+					except OSError as oe: # from the above os.stat call
+						if oe.errno in (errno.ENOENT, errno.EPERM):
+							pass
+						else:
+							raise
+					os.rename(f.name, real_name)
+			finally:
+				# Make sure we cleanup the temp file
+				# even if an exception is raised.
+				try:
+					os.unlink(f.name)
+				except OSError:
+					pass
+
+	def abort(self):
+		"""If an error occurs while writing the file, the user should
+		call this method in order to leave the target file unchanged.
+		This will call close() automatically."""
+		if not object.__getattribute__(self, '_aborted'):
+			object.__setattr__(self, '_aborted', True)
+			self.close()
+
+	def __del__(self):
+		"""If the user does not explicitely call close(), it is
+		assumed that an error has occurred, so we abort()."""
+		try:
+			f = object.__getattribute__(self, '_file')
+		except AttributeError:
+			pass
+		else:
+			if not f.closed:
+				self.abort()
+		# ensure destructor from the base class is called
+		base_destructor = getattr(ObjectProxy, '__del__', None)
+		if base_destructor is not None:
+			base_destructor(self)
+
+def write_atomic(file_path, content, **kwargs):
+	f = None
+	try:
+		f = atomic_ofstream(file_path, **kwargs)
+		f.write(content)
+		f.close()
+	except (IOError, OSError) as e:
+		if f:
+			f.abort()
+		func_call = "write_atomic('%s')" % file_path
+		if e.errno == errno.EPERM:
+			raise OperationNotPermitted(func_call)
+		elif e.errno == errno.EACCES:
+			raise PermissionDenied(func_call)
+		elif e.errno == errno.EROFS:
+			raise ReadOnlyFileSystem(func_call)
+		elif e.errno == errno.ENOENT:
+			raise FileNotFound(file_path)
+		else:
+			raise
+
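+# Illustrative usage of write_atomic (a sketch; '/etc/example.conf' is a
+# hypothetical path):
+#
+#     write_atomic('/etc/example.conf', 'KEY value\n')
+#     # writes '/etc/example.conf.<pid>' and then renames it over the target
+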
+def ensure_dirs(dir_path, **kwargs):
+	"""Create a directory and call apply_permissions.
+	Returns True if a directory is created or the permissions needed to be
+	modified, and False otherwise.
+
+	This function's handling of EEXIST errors makes it useful for atomic
+	directory creation, in which multiple processes may be competing to
+	create the same directory.
+	"""
+
+	created_dir = False
+
+	try:
+		os.makedirs(dir_path)
+		created_dir = True
+	except OSError as oe:
+		func_call = "makedirs('%s')" % dir_path
+		if oe.errno in (errno.EEXIST,):
+			pass
+		else:
+			if os.path.isdir(dir_path):
+				# NOTE: DragonFly raises EPERM for makedir('/')
+				# and that is supposed to be ignored here.
+				# Also, sometimes mkdir raises EISDIR on FreeBSD
+				# and we want to ignore that too (bug #187518).
+				pass
+			elif oe.errno == errno.EPERM:
+				raise OperationNotPermitted(func_call)
+			elif oe.errno == errno.EACCES:
+				raise PermissionDenied(func_call)
+			elif oe.errno == errno.EROFS:
+				raise ReadOnlyFileSystem(func_call)
+			else:
+				raise
+	if kwargs:
+		perms_modified = apply_permissions(dir_path, **kwargs)
+	else:
+		perms_modified = False
+	return created_dir or perms_modified
+
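+# Illustrative usage of ensure_dirs (a sketch; '/var/tmp/example' is a
+# hypothetical path):
+#
+#     ensure_dirs('/var/tmp/example', mode=0o755)
+#     => True if the directory was created or its permissions were modified
+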
+class LazyItemsDict(UserDict):
+	"""A mapping object that behaves like a standard dict except that it allows
+	for lazy initialization of values via callable objects.  Lazy items can be
+	overwritten and deleted just as normal items."""
+
+	__slots__ = ('lazy_items',)
+
+	def __init__(self, *args, **kwargs):
+
+		self.lazy_items = {}
+		UserDict.__init__(self, *args, **kwargs)
+
+	def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
+		"""Add a lazy item for the given key.  When the item is requested,
+		value_callable will be called with *pargs and **kwargs arguments."""
+		self.lazy_items[item_key] = \
+			self._LazyItem(value_callable, pargs, kwargs, False)
+		# make it show up in self.keys(), etc...
+		UserDict.__setitem__(self, item_key, None)
+
+	def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
+		"""This is like addLazyItem except value_callable will only be called
+		a maximum of 1 time and the result will be cached for future requests."""
+		self.lazy_items[item_key] = \
+			self._LazyItem(value_callable, pargs, kwargs, True)
+		# make it show up in self.keys(), etc...
+		UserDict.__setitem__(self, item_key, None)
+
+	def update(self, *args, **kwargs):
+		if len(args) > 1:
+			raise TypeError(
+				"expected at most 1 positional argument, got " + \
+				repr(len(args)))
+		if args:
+			map_obj = args[0]
+		else:
+			map_obj = None
+		if map_obj is None:
+			pass
+		elif isinstance(map_obj, LazyItemsDict):
+			for k in map_obj:
+				if k in map_obj.lazy_items:
+					UserDict.__setitem__(self, k, None)
+				else:
+					UserDict.__setitem__(self, k, map_obj[k])
+			self.lazy_items.update(map_obj.lazy_items)
+		else:
+			UserDict.update(self, map_obj)
+		if kwargs:
+			UserDict.update(self, kwargs)
+
+	def __getitem__(self, item_key):
+		if item_key in self.lazy_items:
+			lazy_item = self.lazy_items[item_key]
+			pargs = lazy_item.pargs
+			if pargs is None:
+				pargs = ()
+			kwargs = lazy_item.kwargs
+			if kwargs is None:
+				kwargs = {}
+			result = lazy_item.func(*pargs, **kwargs)
+			if lazy_item.singleton:
+				self[item_key] = result
+			return result
+
+		else:
+			return UserDict.__getitem__(self, item_key)
+
+	def __setitem__(self, item_key, value):
+		if item_key in self.lazy_items:
+			del self.lazy_items[item_key]
+		UserDict.__setitem__(self, item_key, value)
+
+	def __delitem__(self, item_key):
+		if item_key in self.lazy_items:
+			del self.lazy_items[item_key]
+		UserDict.__delitem__(self, item_key)
+
+	def clear(self):
+		self.lazy_items.clear()
+		UserDict.clear(self)
+
+	def copy(self):
+		return self.__copy__()
+
+	def __copy__(self):
+		return self.__class__(self)
+
+	def __deepcopy__(self, memo=None):
+		"""
+		This forces evaluation of each contained lazy item, and deepcopy of
+		the result. A TypeError is raised if any contained lazy item is not
+		a singleton, since it is not necessarily possible for the behavior
+		of this type of item to be safely preserved.
+		"""
+		if memo is None:
+			memo = {}
+		result = self.__class__()
+		memo[id(self)] = result
+		for k in self:
+			k_copy = deepcopy(k, memo)
+			lazy_item = self.lazy_items.get(k)
+			if lazy_item is not None:
+				if not lazy_item.singleton:
+					raise TypeError(_unicode_decode("LazyItemsDict " + \
+						"deepcopy is unsafe with lazy items that are " + \
+						"not singletons: key=%s value=%s") % (k, lazy_item,))
+			UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
+		return result
+
+	class _LazyItem(object):
+
+		__slots__ = ('func', 'pargs', 'kwargs', 'singleton')
+
+		def __init__(self, func, pargs, kwargs, singleton):
+
+			if not pargs:
+				pargs = None
+			if not kwargs:
+				kwargs = None
+
+			self.func = func
+			self.pargs = pargs
+			self.kwargs = kwargs
+			self.singleton = singleton
+
+		def __copy__(self):
+			return self.__class__(self.func, self.pargs,
+				self.kwargs, self.singleton)
+
+		def __deepcopy__(self, memo=None):
+			"""
+			Override this since the default implementation can fail silently,
+			leaving some attributes unset.
+			"""
+			if memo is None:
+				memo = {}
+			result = self.__copy__()
+			memo[id(self)] = result
+			result.func = deepcopy(self.func, memo)
+			result.pargs = deepcopy(self.pargs, memo)
+			result.kwargs = deepcopy(self.kwargs, memo)
+			result.singleton = deepcopy(self.singleton, memo)
+			return result
+
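+# Illustrative usage of LazyItemsDict (a sketch):
+#
+#     d = LazyItemsDict()
+#     d.addLazySingleton('HOME', os.environ.get, 'HOME')
+#     d['HOME']  # calls os.environ.get('HOME') once and caches the result
+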
+class ConfigProtect(object):
+	def __init__(self, myroot, protect_list, mask_list):
+		self.myroot = myroot
+		self.protect_list = protect_list
+		self.mask_list = mask_list
+		self.updateprotect()
+
+	def updateprotect(self):
+		"""Update internal state for isprotected() calls.  Nonexistent paths
+		are ignored."""
+
+		os = _os_merge
+
+		self.protect = []
+		self._dirs = set()
+		for x in self.protect_list:
+			ppath = normalize_path(
+				os.path.join(self.myroot, x.lstrip(os.path.sep)))
+			try:
+				if stat.S_ISDIR(os.stat(ppath).st_mode):
+					self._dirs.add(ppath)
+				self.protect.append(ppath)
+			except OSError:
+				# If it doesn't exist, there's no need to protect it.
+				pass
+
+		self.protectmask = []
+		for x in self.mask_list:
+			ppath = normalize_path(
+				os.path.join(self.myroot, x.lstrip(os.path.sep)))
+			try:
+				"""Use lstat so that anything, even a broken symlink can be
+				protected."""
+				if stat.S_ISDIR(os.lstat(ppath).st_mode):
+					self._dirs.add(ppath)
+				self.protectmask.append(ppath)
+				"""Now use stat in case this is a symlink to a directory."""
+				if stat.S_ISDIR(os.stat(ppath).st_mode):
+					self._dirs.add(ppath)
+			except OSError:
+				# If it doesn't exist, there's no need to mask it.
+				pass
+
+	def isprotected(self, obj):
+		"""Returns True if obj is protected, False otherwise.  The caller must
+		ensure that obj is normalized with a single leading slash.  A trailing
+		slash is optional for directories."""
+		masked = 0
+		protected = 0
+		sep = os.path.sep
+		for ppath in self.protect:
+			if len(ppath) > masked and obj.startswith(ppath):
+				if ppath in self._dirs:
+					if obj != ppath and not obj.startswith(ppath + sep):
+						# /etc/foo does not match /etc/foobaz
+						continue
+				elif obj != ppath:
+					# force exact match when CONFIG_PROTECT lists a
+					# non-directory
+					continue
+				protected = len(ppath)
+				#config file management
+				for pmpath in self.protectmask:
+					if len(pmpath) >= protected and obj.startswith(pmpath):
+						if pmpath in self._dirs:
+							if obj != pmpath and \
+								not obj.startswith(pmpath + sep):
+								# /etc/foo does not match /etc/foobaz
+								continue
+						elif obj != pmpath:
+							# force exact match when CONFIG_PROTECT_MASK lists
+							# a non-directory
+							continue
+						#skip, it's in the mask
+						masked = len(pmpath)
+		return protected > masked
+
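+# Illustrative usage of ConfigProtect (a sketch; the paths are hypothetical and
+# isprotected() results depend on which of them exist on the live filesystem):
+#
+#     cp = ConfigProtect('/', ['/etc'], ['/etc/env.d'])
+#     cp.isprotected('/etc/fstab')          => True
+#     cp.isprotected('/etc/env.d/00basic')  => False (masked)
+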
+def new_protect_filename(mydest, newmd5=None, force=False):
+	"""Resolves a config-protect filename for merging, optionally
+	using the last filename if the md5 matches. If force is True,
+	then a new filename will be generated even if mydest does not
+	exist yet.
+	(dest,md5) ==> 'string'            --- path_to_target_filename
+	(dest)     ==> ('next', 'highest') --- next_target and most-recent_target
+	"""
+
+	# config protection filename format:
+	# ._cfg0000_foo
+	# 0123456789012
+
+	os = _os_merge
+
+	prot_num = -1
+	last_pfile = ""
+
+	if not force and \
+		not os.path.exists(mydest):
+		return mydest
+
+	real_filename = os.path.basename(mydest)
+	real_dirname  = os.path.dirname(mydest)
+	for pfile in os.listdir(real_dirname):
+		if pfile[0:5] != "._cfg":
+			continue
+		if pfile[10:] != real_filename:
+			continue
+		try:
+			new_prot_num = int(pfile[5:9])
+			if new_prot_num > prot_num:
+				prot_num = new_prot_num
+				last_pfile = pfile
+		except ValueError:
+			continue
+	prot_num = prot_num + 1
+
+	new_pfile = normalize_path(os.path.join(real_dirname,
+		"._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
+	old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
+	if last_pfile and newmd5:
+		try:
+			last_pfile_md5 = portage.checksum._perform_md5_merge(old_pfile)
+		except FileNotFound:
+			# The file suddenly disappeared or it's a broken symlink.
+			pass
+		else:
+			if last_pfile_md5 == newmd5:
+				return old_pfile
+	return new_pfile
+
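+# Illustrative usage of new_protect_filename (a sketch; '/etc/foo.conf' is a
+# hypothetical existing file with no ._cfg* siblings yet):
+#
+#     new_protect_filename('/etc/foo.conf')
+#     => '/etc/._cfg0000_foo.conf'
+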
+def find_updated_config_files(target_root, config_protect):
+	"""
+	Yield tuples of configuration files that need to be updated.
+	Each tuple is organized like this:
+	[ protected_dir, file_list ]
+	If the protected config isn't a protected_dir but a protected_file, the tuple is:
+	[ protected_file, None ]
+	If no configuration files need to be updated, nothing is yielded.
+	"""
+
+	os = _os_merge
+
+	if config_protect:
+		# directories with some protect files in them
+		for x in config_protect:
+			files = []
+
+			x = os.path.join(target_root, x.lstrip(os.path.sep))
+			if not os.access(x, os.W_OK):
+				continue
+			try:
+				mymode = os.lstat(x).st_mode
+			except OSError:
+				continue
+
+			if stat.S_ISLNK(mymode):
+				# We want to treat it like a directory if it
+				# is a symlink to an existing directory.
+				try:
+					real_mode = os.stat(x).st_mode
+					if stat.S_ISDIR(real_mode):
+						mymode = real_mode
+				except OSError:
+					pass
+
+			if stat.S_ISDIR(mymode):
+				mycommand = \
+					"find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
+			else:
+				mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
+						os.path.split(x.rstrip(os.path.sep))
+			mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
+			a = subprocess_getstatusoutput(mycommand)
+
+			if a[0] == 0:
+				files = a[1].split('\0')
+				# split always produces an empty string as the last element
+				if files and not files[-1]:
+					del files[-1]
+				if files:
+					if stat.S_ISDIR(mymode):
+						yield (x, files)
+					else:
+						yield (x, None)
+
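+# Illustrative usage of find_updated_config_files (a sketch; results depend on
+# the live filesystem and the external 'find' command):
+#
+#     for path, files in find_updated_config_files('/', ['/etc']):
+#         ...  # files is a list for protected dirs, None for single files
+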
+def getlibpaths(root, env=None):
+	""" Return a list of paths that are used for library lookups """
+	if env is None:
+		env = os.environ
+	# the following is based on the information from ld.so(8)
+	rval = env.get("LD_LIBRARY_PATH", "").split(":")
+	rval.extend(grabfile(os.path.join(root, "etc", "ld.so.conf")))
+	rval.append("/usr/lib")
+	rval.append("/lib")
+
+	return [normalize_path(x) for x in rval if x]
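+
+# Illustrative usage of getlibpaths (a sketch; the result depends on
+# LD_LIBRARY_PATH and the contents of <root>/etc/ld.so.conf):
+#
+#     getlibpaths('/')
+#     => e.g. ['/usr/local/lib', '/usr/lib', '/lib']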

diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py b/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py
new file mode 100644
index 0000000..52670d9
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py
@@ -0,0 +1,805 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import subprocess
+
+import portage
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.exception import CommandNotFound
+from portage.localization import _
+from portage.util import getlibpaths
+from portage.util import grabfile
+from portage.util import normalize_path
+from portage.util import writemsg_level
+
+class LinkageMapELF(object):
+
+	"""Models dynamic linker dependencies."""
+
+	_needed_aux_key = "NEEDED.ELF.2"
+	_soname_map_class = slot_dict_class(
+		("consumers", "providers"), prefix="")
+
+	class _obj_properties_class(object):
+
+		__slots__ = ("arch", "needed", "runpaths", "soname", "alt_paths",
+			"owner",)
+
+		def __init__(self, arch, needed, runpaths, soname, alt_paths, owner):
+			self.arch = arch
+			self.needed = needed
+			self.runpaths = runpaths
+			self.soname = soname
+			self.alt_paths = alt_paths
+			self.owner = owner
+
+	def __init__(self, vardbapi):
+		self._dbapi = vardbapi
+		self._root = self._dbapi.settings['ROOT']
+		self._libs = {}
+		self._obj_properties = {}
+		self._obj_key_cache = {}
+		self._defpath = set()
+		self._path_key_cache = {}
+
+	def _clear_cache(self):
+		self._libs.clear()
+		self._obj_properties.clear()
+		self._obj_key_cache.clear()
+		self._defpath.clear()
+		self._path_key_cache.clear()
+
+	def _path_key(self, path):
+		key = self._path_key_cache.get(path)
+		if key is None:
+			key = self._ObjectKey(path, self._root)
+			self._path_key_cache[path] = key
+		return key
+
+	def _obj_key(self, path):
+		key = self._obj_key_cache.get(path)
+		if key is None:
+			key = self._ObjectKey(path, self._root)
+			self._obj_key_cache[path] = key
+		return key
+
+	class _ObjectKey(object):
+
+		"""Helper class used as _obj_properties keys for objects."""
+
+		__slots__ = ("_key",)
+
+		def __init__(self, obj, root):
+			"""
+			This takes a path to an object.
+
+			@param obj: path to a file
+			@type obj: string (example: '/usr/bin/bar')
+
+			"""
+			self._key = self._generate_object_key(obj, root)
+
+		def __hash__(self):
+			return hash(self._key)
+
+		def __eq__(self, other):
+			return self._key == other._key
+
+		def _generate_object_key(self, obj, root):
+			"""
+			Generate object key for a given object.
+
+			@param obj: path to a file
+			@type obj: string (example: '/usr/bin/bar')
+			@rtype: 2-tuple of types (long, int) if object exists. string if
+				object does not exist.
+			@return:
+				1. 2-tuple of object's inode and device from a stat call, if object
+					exists.
+				2. realpath of object if object does not exist.
+
+			"""
+
+			os = _os_merge
+
+			try:
+				_unicode_encode(obj,
+					encoding=_encodings['merge'], errors='strict')
+			except UnicodeEncodeError:
+				# The package appears to have been merged with a 
+				# different value of sys.getfilesystemencoding(),
+				# so fall back to utf_8 if appropriate.
+				try:
+					_unicode_encode(obj,
+						encoding=_encodings['fs'], errors='strict')
+				except UnicodeEncodeError:
+					pass
+				else:
+					os = portage.os
+
+			abs_path = os.path.join(root, obj.lstrip(os.sep))
+			try:
+				object_stat = os.stat(abs_path)
+			except OSError:
+				# Use the realpath as the key if the file does not exist on the
+				# filesystem.
+				return os.path.realpath(abs_path)
+			# Return a tuple of the device and inode.
+			return (object_stat.st_dev, object_stat.st_ino)
+
+		def file_exists(self):
+			"""
+			Determine if the file for this key exists on the filesystem.
+
+			@rtype: Boolean
+			@return:
+				1. True if the file exists.
+				2. False if the file does not exist or is a broken symlink.
+
+			"""
+			return isinstance(self._key, tuple)
+
+	class _LibGraphNode(_ObjectKey):
+		__slots__ = ("alt_paths",)
+
+		def __init__(self, key):
+			"""
+			Create a _LibGraphNode from an existing _ObjectKey.
+			This re-uses the _key attribute in order to avoid repeating
+			any previous stat calls, which helps to avoid potential race
+			conditions due to inconsistent stat results when the
+			file system is being modified concurrently.
+			"""
+			self._key = key._key
+			self.alt_paths = set()
+
+		def __str__(self):
+			return str(sorted(self.alt_paths))
+
+	def rebuild(self, exclude_pkgs=None, include_file=None,
+		preserve_paths=None):
+		"""
+		Raises CommandNotFound if there are preserved libs
+		and the scanelf binary is not available.
+
+		@param exclude_pkgs: A set of packages that should be excluded from
+			the LinkageMap, since they are being unmerged and their NEEDED
+			entries are therefore irrelevant and would only serve to corrupt
+			the LinkageMap.
+		@type exclude_pkgs: set
+		@param include_file: The path of a file containing NEEDED entries for
+			a package which does not exist in the vardbapi yet because it is
+			currently being merged.
+		@type include_file: String
+		@param preserve_paths: Libraries preserved by a package instance that
+			is currently being merged. They need to be explicitly passed to the
+			LinkageMap, since they are not registered in the
+			PreservedLibsRegistry yet.
+		@type preserve_paths: set
+		"""
+
+		os = _os_merge
+		root = self._root
+		root_len = len(root) - 1
+		self._clear_cache()
+		self._defpath.update(getlibpaths(self._root, env=self._dbapi.settings))
+		libs = self._libs
+		obj_properties = self._obj_properties
+
+		lines = []
+
+		# Data from include_file is processed first so that it
+		# overrides any data from previously installed files.
+		if include_file is not None:
+			for line in grabfile(include_file):
+				lines.append((None, include_file, line))
+
+		aux_keys = [self._needed_aux_key]
+		can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK)
+		if can_lock:
+			self._dbapi.lock()
+		try:
+			for cpv in self._dbapi.cpv_all():
+				if exclude_pkgs is not None and cpv in exclude_pkgs:
+					continue
+				needed_file = self._dbapi.getpath(cpv,
+					filename=self._needed_aux_key)
+				for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
+					lines.append((cpv, needed_file, line))
+		finally:
+			if can_lock:
+				self._dbapi.unlock()
+
+		# have to call scanelf for preserved libs here as they aren't 
+		# registered in NEEDED.ELF.2 files
+		plibs = {}
+		if preserve_paths is not None:
+			plibs.update((x, None) for x in preserve_paths)
+		if self._dbapi._plib_registry and \
+			self._dbapi._plib_registry.hasEntries():
+			for cpv, items in \
+				self._dbapi._plib_registry.getPreservedLibs().items():
+				if exclude_pkgs is not None and cpv in exclude_pkgs:
+					# These preserved libs will either be unmerged,
+					# rendering them irrelevant, or they will be
+					# preserved in the replacement package and are
+					# already represented via the preserve_paths
+					# parameter.
+					continue
+				plibs.update((x, cpv) for x in items)
+		if plibs:
+			args = ["/usr/bin/scanelf", "-qF", "%a;%F;%S;%r;%n"]
+			args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
+				for x in plibs)
+			try:
+				proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+			except EnvironmentError as e:
+				if e.errno != errno.ENOENT:
+					raise
+				raise CommandNotFound(args[0])
+			else:
+				for l in proc.stdout:
+					try:
+						l = _unicode_decode(l,
+							encoding=_encodings['content'], errors='strict')
+					except UnicodeDecodeError:
+						l = _unicode_decode(l,
+							encoding=_encodings['content'], errors='replace')
+						writemsg_level(_("\nError decoding characters " \
+							"returned from scanelf: %s\n\n") % (l,),
+							level=logging.ERROR, noiselevel=-1)
+					l = l[3:].rstrip("\n")
+					if not l:
+						continue
+					fields = l.split(";")
+					if len(fields) < 5:
+						writemsg_level(_("\nWrong number of fields " \
+							"returned from scanelf: %s\n\n") % (l,),
+							level=logging.ERROR, noiselevel=-1)
+						continue
+					fields[1] = fields[1][root_len:]
+					owner = plibs.pop(fields[1], None)
+					lines.append((owner, "scanelf", ";".join(fields)))
+				proc.wait()
+
+		if plibs:
+			# Preserved libraries that did not appear in the scanelf output.
+			# This is known to happen with statically linked libraries.
+			# Generate dummy lines for these, so we can assume that every
+			# preserved library has an entry in self._obj_properties. This
+			# is important in order to prevent findConsumers from raising
+			# an unwanted KeyError.
+			for x, cpv in plibs.items():
+				lines.append((cpv, "plibs", ";".join(['', x, '', '', ''])))
+
+		# Share identical frozenset instances when available,
+		# in order to conserve memory.
+		frozensets = {}
+
+		for owner, location, l in lines:
+			l = l.rstrip("\n")
+			if not l:
+				continue
+			fields = l.split(";")
+			if len(fields) < 5:
+				writemsg_level(_("\nWrong number of fields " \
+					"in %s: %s\n\n") % (location, l),
+					level=logging.ERROR, noiselevel=-1)
+				continue
+			arch = fields[0]
+			obj = fields[1]
+			soname = fields[2]
+			path = frozenset(normalize_path(x) \
+				for x in filter(None, fields[3].replace(
+				"${ORIGIN}", os.path.dirname(obj)).replace(
+				"$ORIGIN", os.path.dirname(obj)).split(":")))
+			path = frozensets.setdefault(path, path)
+			needed = frozenset(x for x in fields[4].split(",") if x)
+			needed = frozensets.setdefault(needed, needed)
+
+			obj_key = self._obj_key(obj)
+			indexed = True
+			myprops = obj_properties.get(obj_key)
+			if myprops is None:
+				indexed = False
+				myprops = self._obj_properties_class(
+					arch, needed, path, soname, [], owner)
+				obj_properties[obj_key] = myprops
+			# All object paths are added into the obj_properties tuple.
+			myprops.alt_paths.append(obj)
+
+			# Don't index the same file more than once since only one
+			# set of data can be correct and therefore mixing data
+			# may corrupt the index (include_file overrides previously
+			# installed).
+			if indexed:
+				continue
+
+			arch_map = libs.get(arch)
+			if arch_map is None:
+				arch_map = {}
+				libs[arch] = arch_map
+			if soname:
+				soname_map = arch_map.get(soname)
+				if soname_map is None:
+					soname_map = self._soname_map_class(
+						providers=[], consumers=[])
+					arch_map[soname] = soname_map
+				soname_map.providers.append(obj_key)
+			for needed_soname in needed:
+				soname_map = arch_map.get(needed_soname)
+				if soname_map is None:
+					soname_map = self._soname_map_class(
+						providers=[], consumers=[])
+					arch_map[needed_soname] = soname_map
+				soname_map.consumers.append(obj_key)
+
+		for arch, sonames in libs.items():
+			for soname_node in sonames.values():
+				soname_node.providers = tuple(set(soname_node.providers))
+				soname_node.consumers = tuple(set(soname_node.consumers))
+
+	def listBrokenBinaries(self, debug=False):
+		"""
+		Find binaries and their needed sonames, which have no providers.
+
+		@param debug: Boolean to enable debug output
+		@type debug: Boolean
+		@rtype: dict (example: {'/usr/bin/foo': set(['libbar.so'])})
+		@return: The return value is an object -> set-of-sonames mapping, where
+			object is a broken binary and the set consists of sonames needed by
+			object that have no corresponding libraries to fulfill the dependency.
+
+		"""
+
+		os = _os_merge
+
+		class _LibraryCache(object):
+
+			"""
+			Caches properties associated with paths.
+
+			The purpose of this class is to prevent multiple instances of
+			_ObjectKey for the same paths.
+
+			"""
+
+			def __init__(cache_self):
+				cache_self.cache = {}
+
+			def get(cache_self, obj):
+				"""
+				Caches and returns properties associated with an object.
+
+				@param obj: absolute path (can be symlink)
+				@type obj: string (example: '/usr/lib/libfoo.so')
+				@rtype: 4-tuple with types
+					(string or None, string or None, 2-tuple, Boolean)
+				@return: 4-tuple with the following components:
+					1. arch as a string or None if it does not exist,
+					2. soname as a string or None if it does not exist,
+					3. obj_key as 2-tuple,
+					4. Boolean representing whether the object exists.
+					(example: ('x86', 'libfoo.so.1', (123L, 456L), True))
+
+				"""
+				if obj in cache_self.cache:
+					return cache_self.cache[obj]
+				else:
+					obj_key = self._obj_key(obj)
+					# Check that the library exists on the filesystem.
+					if obj_key.file_exists():
+						# Get the arch and soname from LinkageMap._obj_properties if
+						# it exists. Otherwise, None.
+						obj_props = self._obj_properties.get(obj_key)
+						if obj_props is None:
+							arch = None
+							soname = None
+						else:
+							arch = obj_props.arch
+							soname = obj_props.soname
+						return cache_self.cache.setdefault(obj, \
+								(arch, soname, obj_key, True))
+					else:
+						return cache_self.cache.setdefault(obj, \
+								(None, None, obj_key, False))
+
+		rValue = {}
+		cache = _LibraryCache()
+		providers = self.listProviders()
+
+		# Iterate over all obj_keys and their providers.
+		for obj_key, sonames in providers.items():
+			obj_props = self._obj_properties[obj_key]
+			arch = obj_props.arch
+			path = obj_props.runpaths
+			objs = obj_props.alt_paths
+			path = path.union(self._defpath)
+			# Iterate over each needed soname and the set of library paths that
+			# fulfill the soname to determine if the dependency is broken.
+			for soname, libraries in sonames.items():
+				# validLibraries is used to store libraries, which satisfy soname,
+				# so if no valid libraries are found, the soname is not satisfied
+				# for obj_key.  If unsatisfied, objects associated with obj_key
+				# must be emerged.
+				validLibraries = set()
+				# It could be the case that the library to satisfy the soname is
+				# not in the obj's runpath, but a symlink to the library is (eg
+				# libnvidia-tls.so.1 in nvidia-drivers).  Also, since LinkageMap
+				# does not catalog symlinks, broken or missing symlinks may go
+				# unnoticed.  As a result of these cases, check that a file with
+				# the same name as the soname exists in obj's runpath.
+				# XXX If we catalog symlinks in LinkageMap, this could be improved.
+				for directory in path:
+					cachedArch, cachedSoname, cachedKey, cachedExists = \
+							cache.get(os.path.join(directory, soname))
+					# Check that this library provides the needed soname.  Doing
+					# this, however, will cause consumers of libraries missing
+					# sonames to be unnecessarily emerged. (eg libmix.so)
+					if cachedSoname == soname and cachedArch == arch:
+						validLibraries.add(cachedKey)
+						if debug and cachedKey not in \
+								set(map(self._obj_key_cache.get, libraries)):
+							# XXX This is most often due to soname symlinks not in
+							# a library's directory.  We could catalog symlinks in
+							# LinkageMap to avoid checking for this edge case here.
+							writemsg_level(
+								_("Found provider outside of findProviders:") + \
+								(" %s -> %s %s\n" % (os.path.join(directory, soname),
+								self._obj_properties[cachedKey].alt_paths, libraries)),
+								level=logging.DEBUG,
+								noiselevel=-1)
+						# A valid library has been found, so there is no need to
+						# continue.
+						break
+					if debug and cachedArch == arch and \
+							cachedKey in self._obj_properties:
+						writemsg_level((_("Broken symlink or missing/bad soname: " + \
+							"%(dir_soname)s -> %(cachedKey)s " + \
+							"with soname %(cachedSoname)s but expecting %(soname)s") % \
+							{"dir_soname":os.path.join(directory, soname),
+							"cachedKey": self._obj_properties[cachedKey],
+							"cachedSoname": cachedSoname, "soname":soname}) + "\n",
+							level=logging.DEBUG,
+							noiselevel=-1)
+				# This conditional checks if there are no libraries to satisfy the
+				# soname (empty set).
+				if not validLibraries:
+					for obj in objs:
+						rValue.setdefault(obj, set()).add(soname)
+					# If no valid libraries have been found by this point, then
+					# there are no files named with the soname within obj's runpath,
+					# but if there are libraries (from the providers mapping), it is
+					# likely that soname symlinks or the actual libraries are
+					# missing or broken.  Thus those libraries are added to rValue
+					# in order to emerge corrupt library packages.
+					for lib in libraries:
+						rValue.setdefault(lib, set()).add(soname)
+						if debug:
+							if not os.path.isfile(lib):
+								writemsg_level(_("Missing library:") + " %s\n" % (lib,),
+									level=logging.DEBUG,
+									noiselevel=-1)
+							else:
+								writemsg_level(_("Possibly missing symlink:") + \
+									"%s\n" % (os.path.join(os.path.dirname(lib), soname)),
+									level=logging.DEBUG,
+									noiselevel=-1)
+		return rValue
+
+	def listProviders(self):
+		"""
+		Find the providers for all object keys in LinkageMap.
+
+		@rtype: dict (example:
+			{(123L, 456L): {'libbar.so': set(['/lib/libbar.so.1.5'])}})
+		@return: The return value is an object key -> providers mapping, where
+			providers is a mapping of soname -> set-of-library-paths returned
+			from the findProviders method.
+
+		"""
+		rValue = {}
+		if not self._libs:
+			self.rebuild()
+		# Iterate over all object keys within LinkageMap.
+		for obj_key in self._obj_properties:
+			rValue.setdefault(obj_key, self.findProviders(obj_key))
+		return rValue
+
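+	# A hedged usage sketch (`linkmap` is an assumed, already-rebuilt
+	# LinkageMap instance): the nested mapping can be walked like this:
+	#
+	#     for obj_key, sonames in linkmap.listProviders().items():
+	#         for soname, paths in sonames.items():
+	#             print(soname, sorted(paths))
+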
+	def isMasterLink(self, obj):
+		"""
+		Determine whether an object is a "master" symlink, which means
+		that its basename is the same as the beginning part of the
+		soname and it lacks the soname's version component.
+
+		Examples:
+
+		soname                 | master symlink name
+		--------------------------------------------
+		libarchive.so.2.8.4    | libarchive.so
+		libproc-3.2.8.so       | libproc.so
+
+		@param obj: absolute path to an object
+		@type obj: string (example: '/usr/bin/foo')
+		@rtype: Boolean
+		@return:
+			1. True if obj is a master link
+			2. False if obj is not a master link
+
+		"""
+		os = _os_merge
+		obj_key = self._obj_key(obj)
+		if obj_key not in self._obj_properties:
+			raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+		basename = os.path.basename(obj)
+		soname = self._obj_properties[obj_key].soname
+		return len(basename) < len(soname) and \
+			basename.endswith(".so") and \
+			soname.startswith(basename[:-3])
+
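+	# Hypothetical example: for a LinkageMap that has catalogued
+	# /usr/lib/libarchive.so with soname libarchive.so.2.8.4,
+	#
+	#     linkmap.isMasterLink('/usr/lib/libarchive.so')    # -> True
+	#     linkmap.isMasterLink('/usr/lib/libarchive.so.2')  # -> False
+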
+	def listLibraryObjects(self):
+		"""
+		Return a list of library objects.
+
+		Known limitation: library objects lacking an soname are not included.
+
+		@rtype: list of strings
+		@return: list of paths to all providers
+
+		"""
+		rValue = []
+		if not self._libs:
+			self.rebuild()
+		for arch_map in self._libs.values():
+			for soname_map in arch_map.values():
+				for obj_key in soname_map.providers:
+					rValue.extend(self._obj_properties[obj_key].alt_paths)
+		return rValue
+
+	def getOwners(self, obj):
+		"""
+		Return the package(s) associated with an object. Raises KeyError
+		if the object is unknown. Returns an empty tuple if the owner(s)
+		are unknown.
+
+		NOTE: For preserved libraries, the owner(s) may have been
+		previously uninstalled, but these uninstalled owners can be
+		returned by this method since they are registered in the
+		PreservedLibsRegistry.
+
+		@param obj: absolute path to an object
+		@type obj: string (example: '/usr/bin/bar')
+		@rtype: tuple
+		@return: a tuple of cpv
+		"""
+		if not self._libs:
+			self.rebuild()
+		if isinstance(obj, self._ObjectKey):
+			obj_key = obj
+		else:
+			obj_key = self._obj_key_cache.get(obj)
+			if obj_key is None:
+				raise KeyError("%s not in object list" % obj)
+		obj_props = self._obj_properties.get(obj_key)
+		if obj_props is None:
+			raise KeyError("%s not in object list" % obj_key)
+		if obj_props.owner is None:
+			return ()
+		return (obj_props.owner,)
+
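+	# A hedged sketch (path and cpv values are illustrative): look up the
+	# owning package(s) of an installed object, tolerating unknown paths:
+	#
+	#     try:
+	#         owners = linkmap.getOwners('/usr/lib/libfoo.so.1')
+	#     except KeyError:
+	#         owners = ()  # object not catalogued at all
+	#     # owners is () or a 1-tuple such as ('dev-libs/foo-1.0',)
+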
+	def getSoname(self, obj):
+		"""
+		Return the soname associated with an object.
+
+		@param obj: absolute path to an object
+		@type obj: string (example: '/usr/bin/bar')
+		@rtype: string
+		@return: soname as a string
+
+		"""
+		if not self._libs:
+			self.rebuild()
+		if isinstance(obj, self._ObjectKey):
+			obj_key = obj
+			if obj_key not in self._obj_properties:
+				raise KeyError("%s not in object list" % obj_key)
+			return self._obj_properties[obj_key].soname
+		if obj not in self._obj_key_cache:
+			raise KeyError("%s not in object list" % obj)
+		return self._obj_properties[self._obj_key_cache[obj]].soname
+
+	def findProviders(self, obj):
+		"""
+		Find providers for an object or object key.
+
+		This method may be called with a key from _obj_properties.
+
+		In some cases, not all valid libraries are returned.  This may occur when
+		an soname symlink referencing a library is in an object's runpath while
+		the actual library is not.  We should consider cataloging symlinks within
+		LinkageMap as this would avoid those cases and would be a better model of
+		library dependencies (since the dynamic linker actually searches for
+		files named with the soname in the runpaths).
+
+		@param obj: absolute path to an object or a key from _obj_properties
+		@type obj: string (example: '/usr/bin/bar') or _ObjectKey
+		@rtype: dict (example: {'libbar.so': set(['/lib/libbar.so.1.5'])})
+		@return: The return value is a soname -> set-of-library-paths mapping,
+		where each set of library paths satisfies the corresponding soname.
+
+		"""
+
+		os = _os_merge
+
+		rValue = {}
+
+		if not self._libs:
+			self.rebuild()
+
+		# Determine the obj_key from the arguments.
+		if isinstance(obj, self._ObjectKey):
+			obj_key = obj
+			if obj_key not in self._obj_properties:
+				raise KeyError("%s not in object list" % obj_key)
+		else:
+			obj_key = self._obj_key(obj)
+			if obj_key not in self._obj_properties:
+				raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+
+		obj_props = self._obj_properties[obj_key]
+		arch = obj_props.arch
+		needed = obj_props.needed
+		path = obj_props.runpaths
+		path_keys = set(self._path_key(x) for x in path.union(self._defpath))
+		for soname in needed:
+			rValue[soname] = set()
+			if arch not in self._libs or soname not in self._libs[arch]:
+				continue
+			# For each potential provider of the soname, add it to rValue if it
+			# resides in the obj's runpath.
+			for provider_key in self._libs[arch][soname].providers:
+				providers = self._obj_properties[provider_key].alt_paths
+				for provider in providers:
+					if self._path_key(os.path.dirname(provider)) in path_keys:
+						rValue[soname].add(provider)
+		return rValue
+
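+	# Illustrative call, echoing the docstring example (paths assumed):
+	#
+	#     linkmap.findProviders('/usr/bin/bar')
+	#     # -> {'libbar.so': set(['/lib/libbar.so.1.5'])}
+	#
+	# An empty set for a soname means that nothing in the object's runpath
+	# (plus the default ld.so path) provides that soname.
+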
+	def findConsumers(self, obj, exclude_providers=None):
+		"""
+		Find consumers of an object or object key.
+
+		This method may be called with a key from _obj_properties.  However,
+		the shadowed-library check below is only performed when obj is passed
+		as a string, so pass the obj as a string rather than a newly created
+		_ObjectKey instance whenever shadowed libraries must be detected.
+
+		In some cases, not all consumers are returned.  This may occur when
+		an soname symlink referencing a library is in an object's runpath while
+		the actual library is not. For example, this problem is noticeable for
+		binutils since its libraries are added to the path via symlinks that
+		are generated in the /usr/$CHOST/lib/ directory by binutils-config.
+		Failure to recognize consumers of these symlinks makes preserve-libs
+		fail to preserve binutils libs that are needed by these unrecognized
+		consumers.
+
+		Note that library consumption via dlopen (common for kde plugins) is
+		currently undetected. However, it is possible to use the
+		corresponding libtool archive (*.la) files to detect such consumers
+		(revdep-rebuild is able to detect them).
+
+		The exclude_providers argument is useful for determining whether
+		removal of one or more packages will create unsatisfied consumers. When
+		this option is given, consumers are excluded from the results if there
+		is an alternative provider (which is not excluded) of the required
+		soname such that the consumers will remain satisfied if the files
+		owned by exclude_providers are removed.
+
+		@param obj: absolute path to an object or a key from _obj_properties
+		@type obj: string (example: '/usr/bin/bar') or _ObjectKey
+		@param exclude_providers: A collection of callables that each take a
+			single argument referring to the path of a library (example:
+			'/usr/lib/libssl.so.0.9.8'), and return True if the library is
+			owned by a provider which is planned for removal.
+		@type exclude_providers: collection
+		@rtype: set of strings (example: set(['/bin/foo', '/usr/bin/bar']))
+		@return: The return value is a set of paths to consumer objects that
+		have the given object in their runpath.
+
+		"""
+
+		os = _os_merge
+
+		if not self._libs:
+			self.rebuild()
+
+		# Determine the obj_key and the set of objects matching the arguments.
+		if isinstance(obj, self._ObjectKey):
+			obj_key = obj
+			if obj_key not in self._obj_properties:
+				raise KeyError("%s not in object list" % obj_key)
+			objs = self._obj_properties[obj_key].alt_paths
+		else:
+			objs = set([obj])
+			obj_key = self._obj_key(obj)
+			if obj_key not in self._obj_properties:
+				raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+
+		# If there is another version of this lib with the
+		# same soname and the soname symlink points to that
+		# other version, this lib will be shadowed and won't
+		# have any consumers.
+		if not isinstance(obj, self._ObjectKey):
+			soname = self._obj_properties[obj_key].soname
+			soname_link = os.path.join(self._root,
+				os.path.dirname(obj).lstrip(os.path.sep), soname)
+			obj_path = os.path.join(self._root, obj.lstrip(os.sep))
+			try:
+				soname_st = os.stat(soname_link)
+				obj_st = os.stat(obj_path)
+			except OSError:
+				pass
+			else:
+				if (obj_st.st_dev, obj_st.st_ino) != \
+					(soname_st.st_dev, soname_st.st_ino):
+					return set()
+
+		obj_props = self._obj_properties[obj_key]
+		arch = obj_props.arch
+		soname = obj_props.soname
+
+		soname_node = None
+		arch_map = self._libs.get(arch)
+		if arch_map is not None:
+			soname_node = arch_map.get(soname)
+
+		defpath_keys = set(self._path_key(x) for x in self._defpath)
+		satisfied_consumer_keys = set()
+		if soname_node is not None:
+			if exclude_providers is not None:
+				relevant_dir_keys = set()
+				for provider_key in soname_node.providers:
+					provider_objs = self._obj_properties[provider_key].alt_paths
+					for p in provider_objs:
+						provider_excluded = False
+						for excluded_provider_isowner in exclude_providers:
+							if excluded_provider_isowner(p):
+								provider_excluded = True
+								break
+						if not provider_excluded:
+							# This provider is not excluded. It will
+							# satisfy a consumer of this soname if it
+							# is in the default ld.so path or the
+							# consumer's runpath.
+							relevant_dir_keys.add(
+								self._path_key(os.path.dirname(p)))
+
+				if relevant_dir_keys:
+					for consumer_key in soname_node.consumers:
+						path = self._obj_properties[consumer_key].runpaths
+						path_keys = defpath_keys.copy()
+						path_keys.update(self._path_key(x) for x in path)
+						if relevant_dir_keys.intersection(path_keys):
+							satisfied_consumer_keys.add(consumer_key)
+
+		rValue = set()
+		if soname_node is not None:
+			# For each potential consumer, add it to rValue if an object from the
+			# arguments resides in the consumer's runpath.
+			objs_dir_keys = set(self._path_key(os.path.dirname(x))
+				for x in objs)
+			for consumer_key in soname_node.consumers:
+				if consumer_key in satisfied_consumer_keys:
+					continue
+				consumer_props = self._obj_properties[consumer_key]
+				path = consumer_props.runpaths
+				consumer_objs = consumer_props.alt_paths
+				path_keys = defpath_keys.union(self._path_key(x) for x in path)
+				if objs_dir_keys.intersection(path_keys):
+					rValue.update(consumer_objs)
+		return rValue
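+
+	# A hedged sketch of the exclude_providers contract (paths are made
+	# up): treat every file owned by a package slated for removal as
+	# excluded, and collect the consumers that would be left without any
+	# surviving provider of the soname.
+	#
+	#     doomed = lambda p: p.startswith('/usr/lib/doomed-pkg/')
+	#     unsatisfied = linkmap.findConsumers(
+	#         '/usr/lib/doomed-pkg/libfoo.so.1',
+	#         exclude_providers=[doomed])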

diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py b/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
new file mode 100644
index 0000000..602cf87
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
@@ -0,0 +1,172 @@
+# Copyright 1998-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import sys
+
+try:
+	import cPickle as pickle
+except ImportError:
+	import pickle
+
+from portage import os
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.exception import PermissionDenied
+from portage.localization import _
+from portage.util import atomic_ofstream
+from portage.util import writemsg_level
+from portage.versions import cpv_getkey
+from portage.locks import lockfile, unlockfile
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+class PreservedLibsRegistry(object):
+	""" This class handles the tracking of preserved library objects """
+	def __init__(self, root, filename):
+		"""
+			@param root: root used to check existence of paths in pruneNonExisting
+			@type root: String
+			@param filename: absolute path for saving the preserved libs records
+			@type filename: String
+		"""
+		self._root = root
+		self._filename = filename
+		self._data = None
+		self._lock = None
+
+	def lock(self):
+		"""Grab an exclusive lock on the preserved libs registry."""
+		if self._lock is not None:
+			raise AssertionError("already locked")
+		self._lock = lockfile(self._filename)
+
+	def unlock(self):
+		"""Release our exclusive lock on the preserved libs registry."""
+		if self._lock is None:
+			raise AssertionError("not locked")
+		unlockfile(self._lock)
+		self._lock = None
+
+	def load(self):
+		""" Reload the registry data from file """
+		self._data = None
+		try:
+			self._data = pickle.load(
+				open(_unicode_encode(self._filename,
+					encoding=_encodings['fs'], errors='strict'), 'rb'))
+		except (ValueError, pickle.UnpicklingError) as e:
+			writemsg_level(_("!!! Error loading '%s': %s\n") % \
+				(self._filename, e), level=logging.ERROR, noiselevel=-1)
+		except (EOFError, IOError) as e:
+			if isinstance(e, EOFError) or e.errno == errno.ENOENT:
+				pass
+			elif e.errno == PermissionDenied.errno:
+				raise PermissionDenied(self._filename)
+			else:
+				raise
+		if self._data is None:
+			self._data = {}
+		self._data_orig = self._data.copy()
+		self.pruneNonExisting()
+
+	def store(self):
+		"""
+		Store the registry data to the file. The existing inode will be
+		replaced atomically, so if that inode is currently being used
+		for a lock then that lock will be rendered useless. Therefore,
+		it is important not to call this method until the current lock
+		is ready to be immediately released.
+		"""
+		if os.environ.get("SANDBOX_ON") == "1" or \
+			self._data == self._data_orig:
+			return
+		try:
+			f = atomic_ofstream(self._filename, 'wb')
+			pickle.dump(self._data, f, protocol=2)
+			f.close()
+		except EnvironmentError as e:
+			if e.errno != PermissionDenied.errno:
+				writemsg_level("!!! %s %s\n" % (e, self._filename),
+					level=logging.ERROR, noiselevel=-1)
+		else:
+			self._data_orig = self._data.copy()
+
+	def _normalize_counter(self, counter):
+		"""
+		For simplicity, normalize as a unicode string
+		and strip whitespace. This avoids the need for
+		int conversion and a possible ValueError resulting
+		from vardb corruption.
+		"""
+		if not isinstance(counter, basestring):
+			counter = str(counter)
+		return _unicode_decode(counter).strip()
+
+	def register(self, cpv, slot, counter, paths):
+		""" Register new objects in the registry. If there is a record with the
+			same packagename (internally derived from cpv) and slot, it is
+			overwritten with the new data.
+			@param cpv: package instance that owns the objects
+			@type cpv: CPV (as String)
+			@param slot: the value of SLOT of the given package instance
+			@type slot: String
+			@param counter: vdb counter value for the package instance
+			@type counter: String
+			@param paths: absolute paths of objects that got preserved during an update
+			@type paths: List
+		"""
+		cp = cpv_getkey(cpv)
+		cps = cp+":"+slot
+		counter = self._normalize_counter(counter)
+		if len(paths) == 0 and cps in self._data \
+				and self._data[cps][0] == cpv and \
+				self._normalize_counter(self._data[cps][1]) == counter:
+			del self._data[cps]
+		elif len(paths) > 0:
+			self._data[cps] = (cpv, counter, paths)
+
+	def unregister(self, cpv, slot, counter):
+		""" Remove a previous registration of preserved objects for the given package.
+			@param cpv: package instance whose records should be removed
+			@type cpv: CPV (as String)
+			@param slot: the value of SLOT of the given package instance
+			@type slot: String
+			@param counter: vdb counter value for the package instance
+			@type counter: String
+		"""
+		self.register(cpv, slot, counter, [])
+	
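+	# A hedged end-to-end sketch (paths and values are illustrative):
+	#
+	#     registry = PreservedLibsRegistry(
+	#         '/', '/var/lib/portage/preserved_libs_registry')
+	#     registry.lock()
+	#     try:
+	#         registry.load()
+	#         registry.register('dev-libs/foo-1.0', '0', '42',
+	#             ['/usr/lib/libfoo.so.0'])
+	#         registry.store()
+	#     finally:
+	#         registry.unlock()
+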
+	def pruneNonExisting(self):
+		""" Remove all records for objects that no longer exist on the filesystem. """
+
+		os = _os_merge
+
+		for cps in list(self._data):
+			cpv, counter, paths = self._data[cps]
+			paths = [f for f in paths \
+				if os.path.exists(os.path.join(self._root, f.lstrip(os.sep)))]
+			if len(paths) > 0:
+				self._data[cps] = (cpv, counter, paths)
+			else:
+				del self._data[cps]
+	
+	def hasEntries(self):
+		""" Check if this registry contains any records. """
+		if self._data is None:
+			self.load()
+		return len(self._data) > 0
+	
+	def getPreservedLibs(self):
+		""" Return a mapping of packages->preserved objects.
+			@returns: mapping of package instances to preserved objects
+			@rtype: Dict cpv->list-of-paths
+		"""
+		if self._data is None:
+			self.load()
+		rValue = {}
+		for cps in self._data:
+			rValue[self._data[cps][0]] = self._data[cps][2]
+		return rValue

diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/__init__.py b/portage_with_autodep/pym/portage/util/_dyn_libs/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_dyn_libs/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/util/_pty.py b/portage_with_autodep/pym/portage/util/_pty.py
new file mode 100644
index 0000000..f45ff0a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_pty.py
@@ -0,0 +1,212 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import array
+import fcntl
+import platform
+import pty
+import select
+import sys
+import termios
+
+from portage import os, _unicode_decode, _unicode_encode
+from portage.output import get_term_size, set_term_size
+from portage.process import spawn_bash
+from portage.util import writemsg
+
+def _can_test_pty_eof():
+	"""
+	The _test_pty_eof() function seems to hang on most
+	kernels other than Linux.
+	This was reported for the following kernels which used to work fine
+	without this EOF test: Darwin, AIX, FreeBSD.  They seem to hang on
+	the slave_file.close() call.  Note that Python's implementation of
+	openpty on Solaris already caused random hangs without this EOF test
+	and hence is globally disabled.
+	@rtype: bool
+	@returns: True if _test_pty_eof() won't hang, False otherwise.
+	"""
+	return platform.system() in ("Linux",)
+
+def _test_pty_eof(fdopen_buffered=False):
+	"""
+	Returns True if this issue is fixed for the currently
+	running version of python: http://bugs.python.org/issue5380
+	Raises an EnvironmentError from openpty() if it fails.
+
+	NOTE: This issue is only problematic when array.fromfile()
+	is used, rather than os.read(). However, array.fromfile()
+	is preferred since it is approximately 10% faster.
+
+	New development: It appears that array.fromfile() is usable
+	with python3 as long as fdopen is called with a bufsize
+	argument of 0.
+	"""
+
+	use_fork = False
+
+	test_string = 2 * "blah blah blah\n"
+	test_string = _unicode_decode(test_string,
+		encoding='utf_8', errors='strict')
+
+	# may raise EnvironmentError
+	master_fd, slave_fd = pty.openpty()
+
+	# Non-blocking mode is required for Darwin kernel.
+	fcntl.fcntl(master_fd, fcntl.F_SETFL,
+		fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+	# Disable post-processing of output since otherwise weird
+	# things like \n -> \r\n transformations may occur.
+	mode = termios.tcgetattr(slave_fd)
+	mode[1] &= ~termios.OPOST
+	termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
+
+	# Simulate a subprocess writing some data to the
+	# slave end of the pipe, and then exiting.
+	pid = None
+	if use_fork:
+		pids = spawn_bash(_unicode_encode("echo -n '%s'" % test_string,
+			encoding='utf_8', errors='strict'), env=os.environ,
+			fd_pipes={0:sys.stdin.fileno(), 1:slave_fd, 2:slave_fd},
+			returnpid=True)
+		if isinstance(pids, int):
+			os.close(master_fd)
+			os.close(slave_fd)
+			raise EnvironmentError('spawn failed')
+		pid = pids[0]
+	else:
+		os.write(slave_fd, _unicode_encode(test_string,
+			encoding='utf_8', errors='strict'))
+	os.close(slave_fd)
+
+	# If using a fork, we must wait for the child here,
+	# in order to avoid a race condition that would
+	# lead to inconsistent results.
+	if pid is not None:
+		os.waitpid(pid, 0)
+
+	if fdopen_buffered:
+		master_file = os.fdopen(master_fd, 'rb')
+	else:
+		master_file = os.fdopen(master_fd, 'rb', 0)
+	eof = False
+	data = []
+	iwtd = [master_file]
+	owtd = []
+	ewtd = []
+
+	while not eof:
+
+		events = select.select(iwtd, owtd, ewtd)
+		if not events[0]:
+			eof = True
+			break
+
+		buf = array.array('B')
+		try:
+			buf.fromfile(master_file, 1024)
+		except (EOFError, IOError):
+			eof = True
+
+		if not buf:
+			eof = True
+		else:
+			data.append(_unicode_decode(buf.tostring(),
+				encoding='utf_8', errors='strict'))
+
+	master_file.close()
+
+	return test_string == ''.join(data)
+
+# If _test_pty_eof() can't be used for runtime detection of
+# http://bugs.python.org/issue5380, openpty can't safely be used
+# unless we can guarantee that the current version of python has
+# been fixed (affects all current versions of python3). When
+# this issue is fixed in python3, we can add another sys.hexversion
+# conditional to enable openpty support in the fixed versions.
+if sys.hexversion >= 0x3000000 and not _can_test_pty_eof():
+	_disable_openpty = True
+else:
+	# Disable the use of openpty on Solaris as it seems Python's openpty
+	# implementation doesn't play nice on Solaris with Portage's
+	# behaviour causing hangs/deadlocks.
+	# Additional note for the future: on Interix, pipes do NOT work, so
+	# _disable_openpty on Interix must *never* be True
+	_disable_openpty = platform.system() in ("SunOS",)
+_tested_pty = False
+
+if not _can_test_pty_eof():
+	# Skip _test_pty_eof() on systems where it hangs.
+	_tested_pty = True
+
+_fbsd_test_pty = platform.system() == 'FreeBSD'
+
+def _create_pty_or_pipe(copy_term_size=None):
+	"""
+	Try to create a pty, and if that fails, create a normal
+	pipe instead.
+
+	@param copy_term_size: If a tty file descriptor is given
+		then the term size will be copied to the pty.
+	@type copy_term_size: int
+	@rtype: tuple
+	@returns: A tuple of (is_pty, master_fd, slave_fd) where
+		is_pty is True if a pty was successfully allocated, and
+		False if a normal pipe was allocated.
+	"""
+
+	got_pty = False
+
+	global _disable_openpty, _fbsd_test_pty, _tested_pty
+	if not (_tested_pty or _disable_openpty):
+		try:
+			if not _test_pty_eof():
+				_disable_openpty = True
+		except EnvironmentError as e:
+			_disable_openpty = True
+			writemsg("openpty failed: '%s'\n" % str(e),
+				noiselevel=-1)
+			del e
+		_tested_pty = True
+
+	if _fbsd_test_pty and not _disable_openpty:
+		# Test for python openpty breakage after freebsd7 to freebsd8
+		# upgrade, which results in a 'Function not implemented' error
+		# and the process being killed.
+		pid = os.fork()
+		if pid == 0:
+			pty.openpty()
+			os._exit(os.EX_OK)
+		pid, status = os.waitpid(pid, 0)
+		if (status & 0xff) == 140:
+			_disable_openpty = True
+		_fbsd_test_pty = False
+
+	if _disable_openpty:
+		master_fd, slave_fd = os.pipe()
+	else:
+		try:
+			master_fd, slave_fd = pty.openpty()
+			got_pty = True
+		except EnvironmentError as e:
+			_disable_openpty = True
+			writemsg("openpty failed: '%s'\n" % str(e),
+				noiselevel=-1)
+			del e
+			master_fd, slave_fd = os.pipe()
+
+	if got_pty:
+		# Disable post-processing of output since otherwise weird
+		# things like \n -> \r\n transformations may occur.
+		mode = termios.tcgetattr(slave_fd)
+		mode[1] &= ~termios.OPOST
+		termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
+
+	if got_pty and \
+		copy_term_size is not None and \
+		os.isatty(copy_term_size):
+		rows, columns = get_term_size()
+		set_term_size(rows, columns, slave_fd)
+
+	return (got_pty, master_fd, slave_fd)
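+
+# A hedged usage sketch: callers typically request a pty sized like the
+# current terminal and fall back transparently to a pipe (descriptor
+# handling is shown only in outline):
+#
+#     got_pty, master_fd, slave_fd = _create_pty_or_pipe(
+#         copy_term_size=sys.stdout.fileno())
+#     # ... hand slave_fd to a child process, read its output from
+#     # master_fd, then os.close() both descriptors ...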

diff --git a/portage_with_autodep/pym/portage/util/digraph.py b/portage_with_autodep/pym/portage/util/digraph.py
new file mode 100644
index 0000000..1bbe10f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/digraph.py
@@ -0,0 +1,342 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['digraph']
+
+from collections import deque
+import sys
+
+from portage import _unicode_decode
+from portage.util import writemsg
+
+class digraph(object):
+	"""
+	A directed graph object.
+	"""
+
+	def __init__(self):
+		"""Create an empty digraph"""
+		
+		# { node : ( { child : priority } , { parent : priority } ) }
+		# { node : ( { child : priorities }, { parent : priorities }, node ) }
+		self.order = []
+
+	def add(self, node, parent, priority=0):
+		"""Adds the specified node with the specified parent.
+		
+		If the dep is a soft-dep and the node already has a hard
+		relationship to the parent, the relationship is left as hard."""
+		
+		if node not in self.nodes:
+			self.nodes[node] = ({}, {}, node)
+			self.order.append(node)
+		
+		if not parent:
+			return
+		
+		if parent not in self.nodes:
+			self.nodes[parent] = ({}, {}, parent)
+			self.order.append(parent)
+
+		priorities = self.nodes[node][1].get(parent)
+		if priorities is None:
+			priorities = []
+			self.nodes[node][1][parent] = priorities
+			self.nodes[parent][0][node] = priorities
+		priorities.append(priority)
+		priorities.sort()
+
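+	# Illustrative semantics of add() (node names are arbitrary):
+	# add('a', 'b') records that node 'a' has parent 'b', so 'a' shows up
+	# among b's children:
+	#
+	#     g = digraph()
+	#     g.add('a', 'b')
+	#     g.child_nodes('b')   # -> ['a']
+	#     g.parent_nodes('a')  # -> ['b']
+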
+	def remove(self, node):
+		"""Removes the specified node from the digraph, also removing
+		any ties to other nodes in the digraph. Raises KeyError if the
+		node doesn't exist."""
+		
+		if node not in self.nodes:
+			raise KeyError(node)
+		
+		for parent in self.nodes[node][1]:
+			del self.nodes[parent][0][node]
+		for child in self.nodes[node][0]:
+			del self.nodes[child][1][node]
+		
+		del self.nodes[node]
+		self.order.remove(node)
+
+	def difference_update(self, t):
+		"""
+		Remove all given nodes from the graph. This is more efficient
+		than multiple calls to the remove() method.
+		"""
+		if isinstance(t, (list, tuple)) or \
+			not hasattr(t, "__contains__"):
+			t = frozenset(t)
+		order = []
+		for node in self.order:
+			if node not in t:
+				order.append(node)
+				continue
+			for parent in self.nodes[node][1]:
+				del self.nodes[parent][0][node]
+			for child in self.nodes[node][0]:
+				del self.nodes[child][1][node]
+			del self.nodes[node]
+		self.order = order
+
+	def remove_edge(self, child, parent):
+		"""
+		Remove edge in the direction from child to parent. Note that it is
+		possible for a remaining edge to exist in the opposite direction.
+		Any endpoint vertices that become isolated will remain in the graph.
+		"""
+
+		# Nothing should be modified when a KeyError is raised.
+		for k in parent, child:
+			if k not in self.nodes:
+				raise KeyError(k)
+
+		# Make sure the edge exists.
+		if child not in self.nodes[parent][0]:
+			raise KeyError(child)
+		if parent not in self.nodes[child][1]:
+			raise KeyError(parent)
+
+		# Remove the edge.
+		del self.nodes[child][1][parent]
+		del self.nodes[parent][0][child]
+
+	def __iter__(self):
+		return iter(self.order)
+
+	def contains(self, node):
+		"""Checks if the digraph contains the given node"""
+		return node in self.nodes
+
+	def get(self, key, default=None):
+		node_data = self.nodes.get(key, self)
+		if node_data is self:
+			return default
+		return node_data[2]
+
+	def all_nodes(self):
+		"""Return a list of all nodes in the graph"""
+		return self.order[:]
+
+	def child_nodes(self, node, ignore_priority=None):
+		"""Return all children of the specified node"""
+		if ignore_priority is None:
+			return list(self.nodes[node][0])
+		children = []
+		if hasattr(ignore_priority, '__call__'):
+			for child, priorities in self.nodes[node][0].items():
+				for priority in priorities:
+					if not ignore_priority(priority):
+						children.append(child)
+						break
+		else:
+			for child, priorities in self.nodes[node][0].items():
+				if ignore_priority < priorities[-1]:
+					children.append(child)
+		return children
+
+	def parent_nodes(self, node, ignore_priority=None):
+		"""Return all parents of the specified node"""
+		if ignore_priority is None:
+			return list(self.nodes[node][1])
+		parents = []
+		if hasattr(ignore_priority, '__call__'):
+			for parent, priorities in self.nodes[node][1].items():
+				for priority in priorities:
+					if not ignore_priority(priority):
+						parents.append(parent)
+						break
+		else:
+			for parent, priorities in self.nodes[node][1].items():
+				if ignore_priority < priorities[-1]:
+					parents.append(parent)
+		return parents
+
+	def leaf_nodes(self, ignore_priority=None):
+		"""Return all nodes that have no children
+
+		If ignore_priority is given, edges matching it are not counted
+		as children in calculations."""
+		
+		leaf_nodes = []
+		if ignore_priority is None:
+			for node in self.order:
+				if not self.nodes[node][0]:
+					leaf_nodes.append(node)
+		elif hasattr(ignore_priority, '__call__'):
+			for node in self.order:
+				is_leaf_node = True
+				for child, priorities in self.nodes[node][0].items():
+					for priority in priorities:
+						if not ignore_priority(priority):
+							is_leaf_node = False
+							break
+					if not is_leaf_node:
+						break
+				if is_leaf_node:
+					leaf_nodes.append(node)
+		else:
+			for node in self.order:
+				is_leaf_node = True
+				for child, priorities in self.nodes[node][0].items():
+					if ignore_priority < priorities[-1]:
+						is_leaf_node = False
+						break
+				if is_leaf_node:
+					leaf_nodes.append(node)
+		return leaf_nodes
+
+	def root_nodes(self, ignore_priority=None):
+		"""Return all nodes that have no parents.
+
+		If ignore_priority is given, edges matching it are not counted
+		as parents in calculations."""
+		
+		root_nodes = []
+		if ignore_priority is None:
+			for node in self.order:
+				if not self.nodes[node][1]:
+					root_nodes.append(node)
+		elif hasattr(ignore_priority, '__call__'):
+			for node in self.order:
+				is_root_node = True
+				for parent, priorities in self.nodes[node][1].items():
+					for priority in priorities:
+						if not ignore_priority(priority):
+							is_root_node = False
+							break
+					if not is_root_node:
+						break
+				if is_root_node:
+					root_nodes.append(node)
+		else:
+			for node in self.order:
+				is_root_node = True
+				for parent, priorities in self.nodes[node][1].items():
+					if ignore_priority < priorities[-1]:
+						is_root_node = False
+						break
+				if is_root_node:
+					root_nodes.append(node)
+		return root_nodes
+
+	def __bool__(self):
+		return bool(self.nodes)
+
+	def is_empty(self):
+		"""Checks if the digraph is empty"""
+		return len(self.nodes) == 0
+
+	def clone(self):
+		clone = digraph()
+		clone.nodes = {}
+		memo = {}
+		for children, parents, node in self.nodes.values():
+			children_clone = {}
+			for child, priorities in children.items():
+				priorities_clone = memo.get(id(priorities))
+				if priorities_clone is None:
+					priorities_clone = priorities[:]
+					memo[id(priorities)] = priorities_clone
+				children_clone[child] = priorities_clone
+			parents_clone = {}
+			for parent, priorities in parents.items():
+				priorities_clone = memo.get(id(priorities))
+				if priorities_clone is None:
+					priorities_clone = priorities[:]
+					memo[id(priorities)] = priorities_clone
+				parents_clone[parent] = priorities_clone
+			clone.nodes[node] = (children_clone, parents_clone, node)
+		clone.order = self.order[:]
+		return clone
+
+	def delnode(self, node):
+		try:
+			self.remove(node)
+		except KeyError:
+			pass
+
+	def firstzero(self):
+		leaf_nodes = self.leaf_nodes()
+		if leaf_nodes:
+			return leaf_nodes[0]
+		return None
+
+	def hasallzeros(self, ignore_priority=None):
+		return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
+			len(self.order)
+
+	def debug_print(self):
+		def output(s):
+			writemsg(s, noiselevel=-1)
+		# Use _unicode_decode() to force unicode format
+		# strings for python-2.x safety, ensuring that
+		# node.__unicode__() is used when necessary.
+		for node in self.nodes:
+			output(_unicode_decode("%s ") % (node,))
+			if self.nodes[node][0]:
+				output("depends on\n")
+			else:
+				output("(no children)\n")
+			for child, priorities in self.nodes[node][0].items():
+				output(_unicode_decode("  %s (%s)\n") % \
+					(child, priorities[-1],))
+
+	def bfs(self, start, ignore_priority=None):
+		if start not in self:
+			raise KeyError(start)
+
+		queue, enqueued = deque([(None, start)]), set([start])
+		while queue:
+			parent, n = queue.popleft()
+			yield parent, n
+			new = set(self.child_nodes(n, ignore_priority)) - enqueued
+			enqueued |= new
+			queue.extend([(n, child) for child in new])
+
+	def shortest_path(self, start, end, ignore_priority=None):
+		if start not in self:
+			raise KeyError(start)
+		elif end not in self:
+			raise KeyError(end)
+
+		paths = {None: []}
+		for parent, child in self.bfs(start, ignore_priority):
+			paths[child] = paths[parent] + [child]
+			if child == end:
+				return paths[child]
+		return None
+
+	def get_cycles(self, ignore_priority=None, max_length=None):
+		"""
+		Returns all cycles that have at most length 'max_length'.
+		If 'max_length' is 'None', all cycles are returned.
+		"""
+		all_cycles = []
+		for node in self.nodes:
+			shortest_path = None
+			for child in self.child_nodes(node, ignore_priority):
+				path = self.shortest_path(child, node, ignore_priority)
+				if path is None:
+					continue
+				if not shortest_path or len(shortest_path) > len(path):
+					shortest_path = path
+			if shortest_path:
+				if not max_length or len(shortest_path) <= max_length:
+					all_cycles.append(shortest_path)
+		return all_cycles
+
+	# Backward compatibility
+	addnode = add
+	allnodes = all_nodes
+	allzeros = leaf_nodes
+	hasnode = contains
+	__contains__ = contains
+	empty = is_empty
+	copy = clone
+
+	if sys.hexversion < 0x3000000:
+		__nonzero__ = __bool__
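+
+# A hedged traversal sketch (nodes are illustrative strings):
+#
+#     g = digraph()
+#     g.add('b', 'a')              # edge a -> b
+#     g.add('c', 'b')              # edge b -> c
+#     g.root_nodes()               # -> ['a']
+#     g.leaf_nodes()               # -> ['c']
+#     g.shortest_path('a', 'c')    # -> ['a', 'b', 'c']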

diff --git a/portage_with_autodep/pym/portage/util/env_update.py b/portage_with_autodep/pym/portage/util/env_update.py
new file mode 100644
index 0000000..eb8a0d9
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/env_update.py
@@ -0,0 +1,293 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['env_update']
+
+import errno
+import io
+import stat
+import sys
+import time
+
+import portage
+from portage import os, _encodings, _unicode_encode
+from portage.checksum import prelink_capable
+from portage.data import ostype
+from portage.exception import ParseError
+from portage.localization import _
+from portage.process import find_binary
+from portage.util import atomic_ofstream, ensure_dirs, getconfig, \
+	normalize_path, writemsg
+from portage.util.listdir import listdir
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
+	env=None, writemsg_level=None):
+	"""
+	Parse /etc/env.d and use it to generate /etc/profile.env, csh.env,
+	ld.so.conf, and prelink.conf. Finally, run ldconfig. When ldconfig is
+	called, its -X option will be used in order to avoid potential
+	interference with installed soname symlinks that are required for
+	correct operation of FEATURES=preserve-libs for downgrade operations.
+	It's not necessary for ldconfig to create soname symlinks, since
+	portage will use NEEDED.ELF.2 data to automatically create them
+	after src_install if they happen to be missing.
+	@param makelinks: True if ldconfig should be called, False otherwise
+	@param target_root: root that is passed to the ldconfig -r option,
+		defaults to portage.settings["ROOT"].
+	@type target_root: String (Path)
+	"""
+	if writemsg_level is None:
+		writemsg_level = portage.util.writemsg_level
+	if target_root is None:
+		target_root = portage.settings["ROOT"]
+	if prev_mtimes is None:
+		prev_mtimes = portage.mtimedb["ldpath"]
+	if env is None:
+		env = os.environ
+	envd_dir = os.path.join(target_root, "etc", "env.d")
+	ensure_dirs(envd_dir, mode=0o755)
+	fns = listdir(envd_dir, EmptyOnError=1)
+	fns.sort()
+	templist = []
+	for x in fns:
+		if len(x) < 3:
+			continue
+		if not x[0].isdigit() or not x[1].isdigit():
+			continue
+		if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
+			continue
+		templist.append(x)
+	fns = templist
+	del templist
+
+	space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
+	colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
+		"CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
+		  "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
+		  "PYTHONPATH", "ROOTPATH"])
+
+	config_list = []
+
+	for x in fns:
+		file_path = os.path.join(envd_dir, x)
+		try:
+			myconfig = getconfig(file_path, expand=False)
+		except ParseError as e:
+			writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
+			del e
+			continue
+		if myconfig is None:
+			# broken symlink or file removed by a concurrent process
+			writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
+			continue
+
+		config_list.append(myconfig)
+		if "SPACE_SEPARATED" in myconfig:
+			space_separated.update(myconfig["SPACE_SEPARATED"].split())
+			del myconfig["SPACE_SEPARATED"]
+		if "COLON_SEPARATED" in myconfig:
+			colon_separated.update(myconfig["COLON_SEPARATED"].split())
+			del myconfig["COLON_SEPARATED"]
+
+	env = {}
+	specials = {}
+	for var in space_separated:
+		mylist = []
+		for myconfig in config_list:
+			if var in myconfig:
+				for item in myconfig[var].split():
+					if item and not item in mylist:
+						mylist.append(item)
+				del myconfig[var] # prepare for env.update(myconfig)
+		if mylist:
+			env[var] = " ".join(mylist)
+		specials[var] = mylist
+
+	for var in colon_separated:
+		mylist = []
+		for myconfig in config_list:
+			if var in myconfig:
+				for item in myconfig[var].split(":"):
+					if item and not item in mylist:
+						mylist.append(item)
+				del myconfig[var] # prepare for env.update(myconfig)
+		if mylist:
+			env[var] = ":".join(mylist)
+		specials[var] = mylist
+
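+	# A hedged sketch of the merge just performed (file names and values
+	# are hypothetical): given two env.d files that both set the
+	# colon-separated LDPATH variable,
+	#
+	#     20foo: LDPATH="/usr/lib/foo"
+	#     50bar: LDPATH="/usr/lib/foo:/usr/lib/bar"
+	#
+	# the loop above deduplicates while preserving order, so
+	# env["LDPATH"] becomes "/usr/lib/foo:/usr/lib/bar".
+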
+	for myconfig in config_list:
+		# Cumulative variables have already been deleted from myconfig so that
+		# they won't be overwritten by this dict.update call.
+		env.update(myconfig)
+
+	ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
+	try:
+		myld = io.open(_unicode_encode(ldsoconf_path,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['content'], errors='replace')
+		myldlines=myld.readlines()
+		myld.close()
+		oldld=[]
+		for x in myldlines:
+			#each line has at least one char (a newline)
+			if x[:1] == "#":
+				continue
+			oldld.append(x[:-1])
+	except (IOError, OSError) as e:
+		if e.errno != errno.ENOENT:
+			raise
+		oldld = None
+
+	ld_cache_update=False
+
+	newld = specials["LDPATH"]
+	if (oldld != newld):
+		#ld.so.conf needs updating and ldconfig needs to be run
+		myfd = atomic_ofstream(ldsoconf_path)
+		myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
+		myfd.write("# contents of /etc/env.d directory\n")
+		for x in specials["LDPATH"]:
+			myfd.write(x + "\n")
+		myfd.close()
+		ld_cache_update=True
+
+	# Update prelink.conf if we are prelink-enabled
+	if prelink_capable:
+		newprelink = atomic_ofstream(
+			os.path.join(target_root, "etc", "prelink.conf"))
+		newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
+		newprelink.write("# contents of /etc/env.d directory\n")
+
+		for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
+			newprelink.write("-l %s\n" % (x,));
+		prelink_paths = []
+		prelink_paths += specials.get("LDPATH", [])
+		prelink_paths += specials.get("PATH", [])
+		prelink_paths += specials.get("PRELINK_PATH", [])
+		prelink_path_mask = specials.get("PRELINK_PATH_MASK", [])
+		for x in prelink_paths:
+			if not x:
+				continue
+			if x[-1:] != '/':
+				x += "/"
+			plmasked = 0
+			for y in prelink_path_mask:
+				if not y:
+					continue
+				if y[-1] != '/':
+					y += "/"
+				if y == x[0:len(y)]:
+					plmasked = 1
+					break
+			if not plmasked:
+				newprelink.write("-h %s\n" % (x,))
+		for x in prelink_path_mask:
+			newprelink.write("-b %s\n" % (x,))
+		newprelink.close()
+
+	current_time = long(time.time())
+	mtime_changed = False
+	lib_dirs = set()
+	for lib_dir in set(specials["LDPATH"] + \
+		['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
+		x = os.path.join(target_root, lib_dir.lstrip(os.sep))
+		try:
+			newldpathtime = os.stat(x)[stat.ST_MTIME]
+			lib_dirs.add(normalize_path(x))
+		except OSError as oe:
+			if oe.errno == errno.ENOENT:
+				try:
+					del prev_mtimes[x]
+				except KeyError:
+					pass
+				# ignore this path because it doesn't exist
+				continue
+			raise
+		if newldpathtime == current_time:
+			# Reset mtime to avoid the potential ambiguity of times that
+			# differ by less than 1 second.
+			newldpathtime -= 1
+			os.utime(x, (newldpathtime, newldpathtime))
+			prev_mtimes[x] = newldpathtime
+			mtime_changed = True
+		elif x in prev_mtimes:
+			if prev_mtimes[x] == newldpathtime:
+				pass
+			else:
+				prev_mtimes[x] = newldpathtime
+				mtime_changed = True
+		else:
+			prev_mtimes[x] = newldpathtime
+			mtime_changed = True
+
+	if mtime_changed:
+		ld_cache_update = True
+
+	if makelinks and \
+		not ld_cache_update and \
+		contents is not None:
+		libdir_contents_changed = False
+		for mypath, mydata in contents.items():
+			if mydata[0] not in ("obj", "sym"):
+				continue
+			head, tail = os.path.split(mypath)
+			if head in lib_dirs:
+				libdir_contents_changed = True
+				break
+		if not libdir_contents_changed:
+			makelinks = False
+
+	ldconfig = "/sbin/ldconfig"
+	if "CHOST" in env and "CBUILD" in env and \
+		env["CHOST"] != env["CBUILD"]:
+		ldconfig = find_binary("%s-ldconfig" % env["CHOST"])
+
+	# Only run ldconfig as needed
+	if (ld_cache_update or makelinks) and ldconfig:
+		# ldconfig has very different behaviour between FreeBSD and Linux
+		if ostype == "Linux" or ostype.lower().endswith("gnu"):
+			# We can't update links if we haven't cleaned other versions first, as
+			# an older package installed ON TOP of a newer version will cause ldconfig
+			# to overwrite the symlinks we just made. -X means no links. After 'clean'
+			# we can safely create links.
+			writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
+				(target_root,))
+			os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
+		elif ostype in ("FreeBSD","DragonFly"):
+			writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
+				target_root)
+			os.system(("cd / ; %s -elf -i " + \
+				"-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
+				(ldconfig, target_root, target_root))
+
+	del specials["LDPATH"]
+
+	penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
+	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
+	cenvnotice  = penvnotice[:]
+	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
+	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
+
+	#create /etc/profile.env for bash support
+	outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
+	outfile.write(penvnotice)
+
+	env_keys = [ x for x in env if x != "LDPATH" ]
+	env_keys.sort()
+	for k in env_keys:
+		v = env[k]
+		if v.startswith('$') and not v.startswith('${'):
+			outfile.write("export %s=$'%s'\n" % (k, v[1:]))
+		else:
+			outfile.write("export %s='%s'\n" % (k, v))
+	outfile.close()
+
+	#create /etc/csh.env for (t)csh support
+	outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
+	outfile.write(cenvnotice)
+	for x in env_keys:
+		outfile.write("setenv %s '%s'\n" % (x, env[x]))
+	outfile.close()

diff --git a/portage_with_autodep/pym/portage/util/lafilefixer.py b/portage_with_autodep/pym/portage/util/lafilefixer.py
new file mode 100644
index 0000000..2b093d8
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/lafilefixer.py
@@ -0,0 +1,185 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os as _os
+import re
+
+from portage import _unicode_decode
+from portage.exception import InvalidData
+
+#########################################################
+#	This is a re-implementation of dev-util/lafilefixer-0.5.
+#	rewrite_lafile() takes the contents of an lafile as a string.
+#	It then parses the dependency_libs and inherited_linker_flags
+#	entries.
+#	We insist on dependency_libs being present. inherited_linker_flags
+#	is optional.
+#	There are strict rules about the syntax imposed by libtool's libltdl.
+#	See 'parse_dotla_file' and 'trim' functions in libltdl/ltdl.c.
+#	Note that duplicated entries of dependency_libs and inherited_linker_flags
+#	are ignored by libtool (last one wins), but we treat them as an error (like
+#	lafilefixer does).
+#	What it does:
+#		* Replaces all .la files with absolute paths in dependency_libs with
+#		  corresponding -l* and -L* entries
+#		  (/usr/lib64/libfoo.la -> -L/usr/lib64 -lfoo)
+#		* Moves various flags (see flag_re below) to inherited_linker_flags,
+#		  if such an entry was present.
+#		* Reorders dependency_libs such that all -R* entries precede -L* entries
+#		  and these precede all other entries.
+#		* Removes duplicated entries from dependency_libs.
+#		* Takes care that no entry is added to inherited_linker_flags that is
+#		  already there.
+#########################################################
+
+#These regexes are used to parse the interesting entries in the la file
+dep_libs_re = re.compile(b"dependency_libs='(?P<value>[^']*)'$")
+inh_link_flags_re = re.compile(b"inherited_linker_flags='(?P<value>[^']*)'$")
+
+#regexes for replacing stuff in -L entries. 
+#replace 'X11R6/lib' and 'local/lib' with 'lib', no idea what this is about.
+X11_local_sub = re.compile(b"X11R6/lib|local/lib")
+#get rid of the '..'
+pkgconfig_sub1 = re.compile(b"usr/lib[^/]*/pkgconfig/\.\./\.\.")
+pkgconfig_sub2 = re.compile(b"(?P<usrlib>usr/lib[^/]*)/pkgconfig/\.\.")
+
+#detect flags that should go into inherited_linker_flags instead of dependency_libs
+flag_re = re.compile(b"-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads")
+
+def _parse_lafile_contents(contents):
+	"""
+	Parses 'dependency_libs' and 'inherited_linker_flags' lines.
+	"""
+
+	dep_libs = None
+	inh_link_flags = None
+
+	for line in contents.split(b"\n"):
+		m = dep_libs_re.match(line)
+		if m:
+			if dep_libs is not None:
+				raise InvalidData("duplicated dependency_libs entry")
+			dep_libs = m.group("value")
+			continue
+
+		m = inh_link_flags_re.match(line)
+		if m:
+			if inh_link_flags is not None:
+				raise InvalidData("duplicated inherited_linker_flags entry")
+			inh_link_flags = m.group("value")
+			continue
+
+	return dep_libs, inh_link_flags
+
+def rewrite_lafile(contents):
+	"""
+	Given the contents of an .la file, parse and fix it.
+	This operates with strings of raw bytes (assumed to contain some ascii
+	characters), in order to avoid any potential character encoding issues.
+	Raises 'InvalidData' if the .la file is invalid.
+	@param contents: the contents of a libtool archive file
+	@type contents: bytes
+	@rtype: tuple
+	@returns: (True, fixed_contents) if something needed to be
+		fixed, (False, None) otherwise.
+	"""
+	#Parse the 'dependency_libs' and 'inherited_linker_flags' lines.
+	dep_libs, inh_link_flags = \
+		_parse_lafile_contents(contents)
+
+	if dep_libs is None:
+		raise InvalidData("missing or invalid dependency_libs")
+
+	new_dep_libs = []
+	new_inh_link_flags = []
+	librpath = []
+	libladir = []
+
+	if inh_link_flags is not None:
+		new_inh_link_flags = inh_link_flags.split()
+
+	#Check entries in 'dependency_libs'.
+	for dep_libs_entry in dep_libs.split():
+		if dep_libs_entry.startswith(b"-l"):
+			#-lfoo, keep it
+			if dep_libs_entry not in new_dep_libs:
+				new_dep_libs.append(dep_libs_entry)
+
+		elif dep_libs_entry.endswith(b".la"):
+			#Two cases:
+			#1) /usr/lib64/libfoo.la, turn it into -lfoo and append -L/usr/lib64 to libladir
+			#2) libfoo.la, keep it
+			dir, file = _os.path.split(dep_libs_entry)
+
+			if not dir or not file.startswith(b"lib"):
+				if dep_libs_entry not in new_dep_libs:
+					new_dep_libs.append(dep_libs_entry)
+			else:
+				#/usr/lib64/libfoo.la -> -lfoo
+				lib = b"-l" + file[3:-3]
+				if lib not in new_dep_libs:
+					new_dep_libs.append(lib)
+				#/usr/lib64/libfoo.la -> -L/usr/lib64
+				ladir = b"-L" + dir
+				if ladir not in libladir:
+					libladir.append(ladir)
+
+		elif dep_libs_entry.startswith(b"-L"):
+			#Do some replacement magic and store them in 'libladir'.
+			#This allows us to place all -L entries at the beginning
+			#of 'dependency_libs'.
+			ladir = dep_libs_entry
+			
+			ladir = X11_local_sub.sub(b"lib", ladir)
+			ladir = pkgconfig_sub1.sub(b"usr", ladir)
+			ladir = pkgconfig_sub2.sub(b"\g<usrlib>", ladir)
+			
+			if ladir not in libladir:
+				libladir.append(ladir)
+
+		elif dep_libs_entry.startswith(b"-R"):
+			if dep_libs_entry not in librpath:
+				librpath.append(dep_libs_entry)
+
+		elif flag_re.match(dep_libs_entry):
+			#All this stuff goes into inh_link_flags, if the la file has such an entry.
+			#If it doesn't, they stay in 'dependency_libs'.
+			if inh_link_flags is not None:
+				if dep_libs_entry not in new_inh_link_flags:
+					new_inh_link_flags.append(dep_libs_entry)
+			else:
+				if dep_libs_entry not in new_dep_libs:
+					new_dep_libs.append(dep_libs_entry)
+
+		else:
+			raise InvalidData("Error: Unexpected entry '%s' in 'dependency_libs'" \
+				% _unicode_decode(dep_libs_entry))
+
+	#What should 'dependency_libs' and 'inherited_linker_flags' look like?
+	expected_dep_libs = b""
+	for x in (librpath, libladir, new_dep_libs):
+		if x:
+			expected_dep_libs += b" " + b" ".join(x)
+
+	expected_inh_link_flags = b""
+	if new_inh_link_flags:
+		expected_inh_link_flags += b" " + b" ".join(new_inh_link_flags)
+
+	#Don't touch the file if we don't need to, otherwise put the expected values into
+	#'contents' and write it into the la file.
+
+	changed = False
+	if dep_libs != expected_dep_libs:
+		contents = contents.replace(b"dependency_libs='" + dep_libs + b"'", \
+			b"dependency_libs='" + expected_dep_libs + b"'")
+		changed = True
+
+	if inh_link_flags is not None and expected_inh_link_flags != inh_link_flags:
+		contents = contents.replace(b"inherited_linker_flags='" + inh_link_flags + b"'", \
+			b"inherited_linker_flags='" + expected_inh_link_flags + b"'")
+		changed = True
+
+	if changed:
+		return True, contents
+	else:
+		return False, None
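+
+#A hedged worked example (the byte strings are illustrative):
+#
+#	contents = b"dependency_libs=' /usr/lib64/libbar.la -pthread'\n"
+#	changed, fixed = rewrite_lafile(contents)
+#	# changed is True and fixed contains:
+#	#	dependency_libs=' -L/usr/lib64 -lbar -pthread'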

diff --git a/portage_with_autodep/pym/portage/util/listdir.py b/portage_with_autodep/pym/portage/util/listdir.py
new file mode 100644
index 0000000..5753d2f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/listdir.py
@@ -0,0 +1,151 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['cacheddir', 'listdir']
+
+import errno
+import stat
+import time
+
+from portage import os
+from portage.exception import DirectoryNotFound, PermissionDenied, PortageException
+from portage.util import normalize_path, writemsg
+
+_ignorecvs_dirs = ('CVS', 'RCS', 'SCCS', '.svn', '.git')
+dircache = {}
+cacheHit = 0
+cacheMiss = 0
+cacheStale = 0
+
+def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
+	global cacheHit,cacheMiss,cacheStale
+	mypath = normalize_path(my_original_path)
+	if mypath in dircache:
+		cacheHit += 1
+		cached_mtime, list, ftype = dircache[mypath]
+	else:
+		cacheMiss += 1
+		cached_mtime, list, ftype = -1, [], []
+	try:
+		pathstat = os.stat(mypath)
+		if stat.S_ISDIR(pathstat[stat.ST_MODE]):
+			mtime = pathstat.st_mtime
+		else:
+			raise DirectoryNotFound(mypath)
+	except EnvironmentError as e:
+		if e.errno == PermissionDenied.errno:
+			raise PermissionDenied(mypath)
+		del e
+		return [], []
+	except PortageException:
+		return [], []
+	# Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
+	if mtime != cached_mtime or time.time() - mtime < 4:
+		if mypath in dircache:
+			cacheStale += 1
+		try:
+			list = os.listdir(mypath)
+		except EnvironmentError as e:
+			if e.errno != errno.EACCES:
+				raise
+			del e
+			raise PermissionDenied(mypath)
+		ftype = []
+		for x in list:
+			try:
+				if followSymlinks:
+					pathstat = os.stat(mypath+"/"+x)
+				else:
+					pathstat = os.lstat(mypath+"/"+x)
+
+				if stat.S_ISREG(pathstat[stat.ST_MODE]):
+					ftype.append(0)
+				elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
+					ftype.append(1)
+				elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
+					ftype.append(2)
+				else:
+					ftype.append(3)
+			except (IOError, OSError):
+				ftype.append(3)
+		dircache[mypath] = mtime, list, ftype
+
+	ret_list = []
+	ret_ftype = []
+	for x in range(0, len(list)):
+		if list[x] in ignorelist:
+			pass
+		elif ignorecvs:
+			if list[x][:2] != ".#" and \
+				not (ftype[x] == 1 and list[x] in _ignorecvs_dirs):
+				ret_list.append(list[x])
+				ret_ftype.append(ftype[x])
+		else:
+			ret_list.append(list[x])
+			ret_ftype.append(ftype[x])
+
+	writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
+	return ret_list, ret_ftype
+
+def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
+	EmptyOnError=False, dirsonly=False):
+	"""
+	Portage-specific implementation of os.listdir
+
+	@param mypath: Path whose contents you wish to list
+	@type mypath: String
+	@param recursive: Recursively scan directories contained within mypath
+	@type recursive: Boolean
+	@param filesonly: Only return files, not directories
+	@type filesonly: Boolean
+	@param ignorecvs: Ignore CVS directories ('CVS','SCCS','.svn','.git')
+	@type ignorecvs: Boolean
+	@param ignorelist: List of filenames/directories to exclude
+	@type ignorelist: List
+	@param followSymlinks: Follow Symlink'd files and directories
+	@type followSymlinks: Boolean
+	@param EmptyOnError: Return [] if an error occurs (deprecated, always True)
+	@type EmptyOnError: Boolean
+	@param dirsonly: Only return directories.
+	@type dirsonly: Boolean
+	@rtype: List
+	@returns: A list of files and directories (or just files or just directories) or an empty list.
+	"""
+
+	list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
+
+	if list is None:
+		list=[]
+	if ftype is None:
+		ftype=[]
+
+	if not (filesonly or dirsonly or recursive):
+		return list
+
+	if recursive:
+		x=0
+		while x<len(ftype):
+			if ftype[x] == 1:
+				l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
+					followSymlinks)
+
+				l=l[:]
+				for y in range(0,len(l)):
+					l[y]=list[x]+"/"+l[y]
+				list=list+l
+				ftype=ftype+f
+			x+=1
+	if filesonly:
+		rlist=[]
+		for x in range(0,len(ftype)):
+			if ftype[x]==0:
+				rlist=rlist+[list[x]]
+	elif dirsonly:
+		rlist = []
+		for x in range(0, len(ftype)):
+			if ftype[x] == 1:
+				rlist = rlist + [list[x]]	
+	else:
+		rlist=list
+
+	return rlist
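+
+# A hedged usage sketch (paths are illustrative):
+#
+#     listdir('/etc/env.d')
+#     # -> top-level entries of /etc/env.d
+#     listdir('/etc/portage', recursive=True, filesonly=True)
+#     # -> every regular file beneath /etc/portage, as relative names
+#     #    such as 'package.use/foo'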

diff --git a/portage_with_autodep/pym/portage/util/movefile.py b/portage_with_autodep/pym/portage/util/movefile.py
new file mode 100644
index 0000000..30cb6f1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/movefile.py
@@ -0,0 +1,242 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['movefile']
+
+import errno
+import os as _os
+import shutil as _shutil
+import stat
+
+import portage
+from portage import bsd_chflags, _encodings, _os_overrides, _selinux, \
+	_unicode_decode, _unicode_func_wrapper, _unicode_module_wrapper
+from portage.const import MOVE_BINARY
+from portage.localization import _
+from portage.process import spawn
+from portage.util import writemsg
+
+def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
+		hardlink_candidates=None, encoding=_encodings['fs']):
+	"""moves a file from src to dest, preserving all permissions and attributes; mtime will
+	be preserved even when moving across filesystems.  Returns true on success and false on
+	failure.  Move is atomic."""
+	#print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
+
+	if mysettings is None:
+		mysettings = portage.settings
+
+	selinux_enabled = mysettings.selinux_enabled()
+	if selinux_enabled:
+		selinux = _unicode_module_wrapper(_selinux, encoding=encoding)
+
+	lchown = _unicode_func_wrapper(portage.data.lchown, encoding=encoding)
+	os = _unicode_module_wrapper(_os,
+		encoding=encoding, overrides=_os_overrides)
+	shutil = _unicode_module_wrapper(_shutil, encoding=encoding)
+
+	try:
+		if not sstat:
+			sstat=os.lstat(src)
+
+	except SystemExit as e:
+		raise
+	except Exception as e:
+		print(_("!!! Stating source file failed... movefile()"))
+		print("!!!",e)
+		return None
+
+	destexists=1
+	try:
+		dstat=os.lstat(dest)
+	except (OSError, IOError):
+		dstat=os.lstat(os.path.dirname(dest))
+		destexists=0
+
+	if bsd_chflags:
+		if destexists and dstat.st_flags != 0:
+			bsd_chflags.lchflags(dest, 0)
+		# Use normal stat/chflags for the parent since we want to
+		# follow any symlinks to the real parent directory.
+		pflags = os.stat(os.path.dirname(dest)).st_flags
+		if pflags != 0:
+			bsd_chflags.chflags(os.path.dirname(dest), 0)
+
+	if destexists:
+		if stat.S_ISLNK(dstat[stat.ST_MODE]):
+			try:
+				os.unlink(dest)
+				destexists=0
+			except SystemExit as e:
+				raise
+			except Exception as e:
+				pass
+
+	if stat.S_ISLNK(sstat[stat.ST_MODE]):
+		try:
+			target=os.readlink(src)
+			if mysettings and mysettings["D"]:
+				if target.find(mysettings["D"])==0:
+					target=target[len(mysettings["D"]):]
+			if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
+				os.unlink(dest)
+			try:
+				if selinux_enabled:
+					selinux.symlink(target, dest, src)
+				else:
+					os.symlink(target, dest)
+			except OSError as e:
+				# Some programs will create symlinks automatically, so we have
+				# to tolerate these links being recreated during the merge
+				# process. In any case, if the link is pointing at the right
+				# place, we're in good shape.
+				if e.errno not in (errno.ENOENT, errno.EEXIST) or \
+					target != os.readlink(dest):
+					raise
+			lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+			# utime() only works on the target of a symlink, so it's not
+			# possible to preserve mtime on symlinks.
+			return os.lstat(dest)[stat.ST_MTIME]
+		except SystemExit as e:
+			raise
+		except Exception as e:
+			print(_("!!! failed to properly create symlink:"))
+			print("!!!",dest,"->",target)
+			print("!!!",e)
+			return None
+
+	hardlinked = False
+	# Since identical files might be merged to multiple filesystems,
+	# os.link() calls might fail for some paths, so try them all.
+	# For atomic replacement, first create the link as a temp file
+	# and then use os.rename() to replace the destination.
+	if hardlink_candidates:
+		head, tail = os.path.split(dest)
+		hardlink_tmp = os.path.join(head, ".%s._portage_merge_.%s" % \
+			(tail, os.getpid()))
+		try:
+			os.unlink(hardlink_tmp)
+		except OSError as e:
+			if e.errno != errno.ENOENT:
+				writemsg(_("!!! Failed to remove hardlink temp file: %s\n") % \
+					(hardlink_tmp,), noiselevel=-1)
+				writemsg("!!! %s\n" % (e,), noiselevel=-1)
+				return None
+			del e
+		for hardlink_src in hardlink_candidates:
+			try:
+				os.link(hardlink_src, hardlink_tmp)
+			except OSError:
+				continue
+			else:
+				try:
+					os.rename(hardlink_tmp, dest)
+				except OSError as e:
+					writemsg(_("!!! Failed to rename %s to %s\n") % \
+						(hardlink_tmp, dest), noiselevel=-1)
+					writemsg("!!! %s\n" % (e,), noiselevel=-1)
+					return None
+				hardlinked = True
+				break
+
+	renamefailed=1
+	if hardlinked:
+		renamefailed = False
+	if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
+		try:
+			if selinux_enabled:
+				selinux.rename(src, dest)
+			else:
+				os.rename(src,dest)
+			renamefailed=0
+		except OSError as e:
+			if e.errno != errno.EXDEV:
+				# Some random error.
+				print(_("!!! Failed to move %(src)s to %(dest)s") % {"src": src, "dest": dest})
+				print("!!!",e)
+				return None
+			# Invalid cross-device link: either 'bind' mounted or actually cross-device.
+	if renamefailed:
+		didcopy=0
+		if stat.S_ISREG(sstat[stat.ST_MODE]):
+			try: # For safety copy then move it over.
+				if selinux_enabled:
+					selinux.copyfile(src, dest + "#new")
+					selinux.rename(dest + "#new", dest)
+				else:
+					shutil.copyfile(src,dest+"#new")
+					os.rename(dest+"#new",dest)
+				didcopy=1
+			except SystemExit as e:
+				raise
+			except Exception as e:
+				print(_('!!! copy %(src)s -> %(dest)s failed.') % {"src": src, "dest": dest})
+				print("!!!",e)
+				return None
+		else:
+			#we don't yet handle special, so we need to fall back to /bin/mv
+			a = spawn([MOVE_BINARY, '-f', src, dest], env=os.environ)
+			if a != os.EX_OK:
+				writemsg(_("!!! Failed to move special file:\n"), noiselevel=-1)
+				writemsg(_("!!! '%(src)s' to '%(dest)s'\n") % \
+					{"src": _unicode_decode(src, encoding=encoding),
+					"dest": _unicode_decode(dest, encoding=encoding)}, noiselevel=-1)
+				writemsg("!!! %s\n" % a, noiselevel=-1)
+				return None # failure
+		try:
+			if didcopy:
+				if stat.S_ISLNK(sstat[stat.ST_MODE]):
+					lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+				else:
+					os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
+				os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
+				os.unlink(src)
+		except SystemExit as e:
+			raise
+		except Exception as e:
+			print(_("!!! Failed to chown/chmod/unlink in movefile()"))
+			print("!!!",dest)
+			print("!!!",e)
+			return None
+
+	# Always use stat_obj[stat.ST_MTIME] for the integral timestamp which
+	# is returned, since the stat_obj.st_mtime float attribute rounds *up*
+	# if the nanosecond part of the timestamp is 999999881 ns or greater.
+	try:
+		if hardlinked:
+			newmtime = os.stat(dest)[stat.ST_MTIME]
+		else:
+			# Note: It is not possible to preserve nanosecond precision
+			# (supported in POSIX.1-2008 via utimensat) with the IEEE 754
+			# double precision float which only has a 53 bit significand.
+			if newmtime is not None:
+				os.utime(dest, (newmtime, newmtime))
+			else:
+				newmtime = sstat[stat.ST_MTIME]
+				if renamefailed:
+					# If rename succeeded then timestamps are automatically
+					# preserved with complete precision because the source
+					# and destination inode are the same. Otherwise, round
+					# down to the nearest whole second since python's float
+					# st_mtime cannot be used to preserve the st_mtim.tv_nsec
+					# field with complete precision. Note that we have to use
+					# stat_obj[stat.ST_MTIME] here because the float
+					# stat_obj.st_mtime rounds *up* sometimes.
+					os.utime(dest, (newmtime, newmtime))
+	except OSError:
+		# The utime can fail here with EPERM even though the move succeeded.
+		# Instead of failing, use stat to return the mtime if possible.
+		try:
+			newmtime = os.stat(dest)[stat.ST_MTIME]
+		except OSError as e:
+			writemsg(_("!!! Failed to stat in movefile()\n"), noiselevel=-1)
+			writemsg("!!! %s\n" % dest, noiselevel=-1)
+			writemsg("!!! %s\n" % str(e), noiselevel=-1)
+			return None
+
+	if bsd_chflags:
+		# Restore the flags we saved before moving
+		if pflags:
+			bsd_chflags.chflags(os.path.dirname(dest), pflags)
+
+	return newmtime

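A hedged sketch of the movefile() API above (paths are hypothetical, and a
usable portage configuration is required since mysettings defaults to
portage.settings). The function returns the preserved integer mtime on
success and None on failure:

    from portage.util.movefile import movefile

    new_mtime = movefile('/tmp/example.src', '/tmp/example.dst')
    if new_mtime is None:
        print('move failed')
    else:
        print('moved; mtime preserved as %d' % new_mtime)
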
diff --git a/portage_with_autodep/pym/portage/util/mtimedb.py b/portage_with_autodep/pym/portage/util/mtimedb.py
new file mode 100644
index 0000000..67f93e8
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/mtimedb.py
@@ -0,0 +1,81 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['MtimeDB']
+
+import copy
+try:
+	import cPickle as pickle
+except ImportError:
+	import pickle
+
+import portage
+from portage import _unicode_encode
+from portage.data import portage_gid, uid
+from portage.localization import _
+from portage.util import apply_secpass_permissions, atomic_ofstream, writemsg
+
+class MtimeDB(dict):
+	def __init__(self, filename):
+		dict.__init__(self)
+		self.filename = filename
+		self._load(filename)
+
+	def _load(self, filename):
+		try:
+			f = open(_unicode_encode(filename), 'rb')
+			mypickle = pickle.Unpickler(f)
+			try:
+				mypickle.find_global = None
+			except AttributeError:
+				# TODO: If py3k, override Unpickler.find_class().
+				pass
+			d = mypickle.load()
+			f.close()
+			del f
+		except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
+			if isinstance(e, pickle.UnpicklingError):
+				writemsg(_("!!! Error loading '%s': %s\n") % \
+					(filename, str(e)), noiselevel=-1)
+			del e
+			d = {}
+
+		if "old" in d:
+			d["updates"] = d["old"]
+			del d["old"]
+		if "cur" in d:
+			del d["cur"]
+
+		d.setdefault("starttime", 0)
+		d.setdefault("version", "")
+		for k in ("info", "ldpath", "updates"):
+			d.setdefault(k, {})
+
+		mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
+			"starttime", "updates", "version"))
+
+		for k in list(d):
+			if k not in mtimedbkeys:
+				writemsg(_("Deleting invalid mtimedb key: %s\n") % str(k))
+				del d[k]
+		self.update(d)
+		self._clean_data = copy.deepcopy(d)
+
+	def commit(self):
+		if not self.filename:
+			return
+		d = {}
+		d.update(self)
+		# Only commit if the internal state has changed.
+		if d != self._clean_data:
+			d["version"] = str(portage.VERSION)
+			try:
+				f = atomic_ofstream(self.filename, mode='wb')
+			except EnvironmentError:
+				pass
+			else:
+				pickle.dump(d, f, protocol=2)
+				f.close()
+				apply_secpass_permissions(self.filename,
+					uid=uid, gid=portage_gid, mode=0o644)
+				self._clean_data = copy.deepcopy(d)

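MtimeDB above behaves like a dict backed by a pickle file; commit() rewrites
the file only when the in-memory state differs from what was loaded. A small
sketch (the path is the conventional location, and writing normally requires
root):

    from portage.util.mtimedb import MtimeDB

    db = MtimeDB('/var/cache/edb/mtimedb')
    db['info']['example-key'] = 12345   # hypothetical key; plain dict access
    db.commit()                         # no-op if nothing actually changed
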
diff --git a/portage_with_autodep/pym/portage/versions.py b/portage_with_autodep/pym/portage/versions.py
new file mode 100644
index 0000000..f8691d1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/versions.py
@@ -0,0 +1,403 @@
+# versions.py -- core Portage functionality
+# Copyright 1998-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = [
+	'best', 'catpkgsplit', 'catsplit',
+	'cpv_getkey', 'cpv_getversion', 'cpv_sort_key', 'pkgcmp',  'pkgsplit',
+	'ververify', 'vercmp'
+]
+
+import re
+import warnings
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.util:cmp_sort_key'
+)
+from portage.localization import _
+
+# \w is [a-zA-Z0-9_]
+
+# 2.1.1 A category name may contain any of the characters [A-Za-z0-9+_.-].
+# It must not begin with a hyphen or a dot.
+_cat = r'[\w+][\w+.-]*'
+
+# 2.1.2 A package name may contain any of the characters [A-Za-z0-9+_-].
+# It must not begin with a hyphen,
+# and must not end in a hyphen followed by one or more digits.
+_pkg = r'[\w+][\w+-]*?'
+
+_v = r'(cvs\.)?(\d+)((\.\d+)*)([a-z]?)((_(pre|p|beta|alpha|rc)\d*)*)'
+_rev = r'\d+'
+_vr = _v + '(-r(' + _rev + '))?'
+
+_cp = '(' + _cat + '/' + _pkg + '(-' + _vr + ')?)'
+_cpv = '(' + _cp + '-' + _vr + ')'
+_pv = '(?P<pn>' + _pkg + '(?P<pn_inval>-' + _vr + ')?)' + '-(?P<ver>' + _v + ')(-r(?P<rev>' + _rev + '))?'
+
+ver_regexp = re.compile("^" + _vr + "$")
+suffix_regexp = re.compile("^(alpha|beta|rc|pre|p)(\\d*)$")
+suffix_value = {"pre": -2, "p": 0, "alpha": -4, "beta": -3, "rc": -1}
+endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
+
+def ververify(myver, silent=1):
+	if ver_regexp.match(myver):
+		return 1
+	else:
+		if not silent:
+			print(_("!!! syntax error in version: %s") % myver)
+		return 0
+
+vercmp_cache = {}
+def vercmp(ver1, ver2, silent=1):
+	"""
+	Compare two versions
+	Example usage:
+		>>> from portage.versions import vercmp
+		>>> vercmp('1.0-r1','1.2-r3')
+		negative number
+		>>> vercmp('1.3','1.2-r3')
+		positive number
+		>>> vercmp('1.0_p3','1.0_p3')
+		0
+	
+	@param ver1: version to compare with (see ver_regexp in portage.versions.py)
+	@type ver1: string (example: "2.1.2-r3")
+	@param ver2: version to compare against (see ver_regexp in portage.versions.py)
+	@type ver2: string (example: "2.1.2_rc5")
+	@rtype: None or integer
+	@return:
+	1. positive if ver1 is greater than ver2
+	2. negative if ver1 is less than ver2 
+	3. 0 if ver1 equals ver2
+	4. None if ver1 or ver2 are invalid (see ver_regexp in portage.versions.py)
+	"""
+
+	if ver1 == ver2:
+		return 0
+	mykey=ver1+":"+ver2
+	try:
+		return vercmp_cache[mykey]
+	except KeyError:
+		pass
+	match1 = ver_regexp.match(ver1)
+	match2 = ver_regexp.match(ver2)
+	
+	# checking that the versions are valid
+	if not match1 or not match1.groups():
+		if not silent:
+			print(_("!!! syntax error in version: %s") % ver1)
+		return None
+	if not match2 or not match2.groups():
+		if not silent:
+			print(_("!!! syntax error in version: %s") % ver2)
+		return None
+
+	# shortcut for cvs ebuilds (new style)
+	if match1.group(1) and not match2.group(1):
+		vercmp_cache[mykey] = 1
+		return 1
+	elif match2.group(1) and not match1.group(1):
+		vercmp_cache[mykey] = -1
+		return -1
+	
+	# building lists of the version parts before the suffix
+	# first part is simple
+	list1 = [int(match1.group(2))]
+	list2 = [int(match2.group(2))]
+
+	# this part would greatly benefit from a fixed-length version pattern
+	if match1.group(3) or match2.group(3):
+		vlist1 = match1.group(3)[1:].split(".")
+		vlist2 = match2.group(3)[1:].split(".")
+
+		for i in range(0, max(len(vlist1), len(vlist2))):
+			# Implicit .0 is given a value of -1, so that 1.0.0 > 1.0, since it
+			# would be ambiguous if two versions that aren't literally equal
+			# are given the same value (in sorting, for example).
+			if len(vlist1) <= i or len(vlist1[i]) == 0:
+				list1.append(-1)
+				list2.append(int(vlist2[i]))
+			elif len(vlist2) <= i or len(vlist2[i]) == 0:
+				list1.append(int(vlist1[i]))
+				list2.append(-1)
+			# Let's make life easy and use integers unless we're forced to use floats
+			elif (vlist1[i][0] != "0" and vlist2[i][0] != "0"):
+				list1.append(int(vlist1[i]))
+				list2.append(int(vlist2[i]))
+			# now we have to use floats so 1.02 compares correctly against 1.1
+			else:
+				# list1.append(float("0."+vlist1[i]))
+				# list2.append(float("0."+vlist2[i]))
+				# Since python floats have limited range, we multiply both
+				# floating point representations by a constant so that they are
+				# transformed into whole numbers. This allows the practically
+				# infinite range of a python int to be exploited. The
+				# multiplication is done by padding both literal strings with
+				# zeros as necessary to ensure equal length.
+				max_len = max(len(vlist1[i]), len(vlist2[i]))
+				list1.append(int(vlist1[i].ljust(max_len, "0")))
+				list2.append(int(vlist2[i].ljust(max_len, "0")))
+
+	# and now the final letter
+	# NOTE: Behavior changed in r2309 (between portage-2.0.x and portage-2.1).
+	# The new behavior is 12.2.5 > 12.2b which, depending on how you look at,
+	# may seem counter-intuitive. However, if you really think about it, it
+	# seems like it's probably safe to assume that this is the behavior that
+	# is intended by anyone who would use versions such as these.
+	if len(match1.group(5)):
+		list1.append(ord(match1.group(5)))
+	if len(match2.group(5)):
+		list2.append(ord(match2.group(5)))
+
+	for i in range(0, max(len(list1), len(list2))):
+		if len(list1) <= i:
+			vercmp_cache[mykey] = -1
+			return -1
+		elif len(list2) <= i:
+			vercmp_cache[mykey] = 1
+			return 1
+		elif list1[i] != list2[i]:
+			a = list1[i]
+			b = list2[i]
+			rval = (a > b) - (a < b)
+			vercmp_cache[mykey] = rval
+			return rval
+
+	# main version is equal, so now compare the _suffix part
+	list1 = match1.group(6).split("_")[1:]
+	list2 = match2.group(6).split("_")[1:]
+	
+	for i in range(0, max(len(list1), len(list2))):
+		# Implicit _p0 is given a value of -1, so that 1 < 1_p0
+		if len(list1) <= i:
+			s1 = ("p","-1")
+		else:
+			s1 = suffix_regexp.match(list1[i]).groups()
+		if len(list2) <= i:
+			s2 = ("p","-1")
+		else:
+			s2 = suffix_regexp.match(list2[i]).groups()
+		if s1[0] != s2[0]:
+			a = suffix_value[s1[0]]
+			b = suffix_value[s2[0]]
+			rval = (a > b) - (a < b)
+			vercmp_cache[mykey] = rval
+			return rval
+		if s1[1] != s2[1]:
+			# it's possible that the s(1|2)[1] == ''
+			# in such a case, fudge it.
+			try:
+				r1 = int(s1[1])
+			except ValueError:
+				r1 = 0
+			try:
+				r2 = int(s2[1])
+			except ValueError:
+				r2 = 0
+			rval = (r1 > r2) - (r1 < r2)
+			if rval:
+				vercmp_cache[mykey] = rval
+				return rval
+
+	# the suffix part is equal, so finally check the revision
+	if match1.group(10):
+		r1 = int(match1.group(10))
+	else:
+		r1 = 0
+	if match2.group(10):
+		r2 = int(match2.group(10))
+	else:
+		r2 = 0
+	rval = (r1 > r2) - (r1 < r2)
+	vercmp_cache[mykey] = rval
+	return rval
+	
+def pkgcmp(pkg1, pkg2):
+	"""
+	Compare 2 package versions created in pkgsplit format.
+
+	Example usage:
+		>>> from portage.versions import *
+		>>> pkgcmp(pkgsplit('test-1.0-r1'),pkgsplit('test-1.2-r3'))
+		-1
+		>>> pkgcmp(pkgsplit('test-1.3'),pkgsplit('test-1.2-r3'))
+		1
+
+	@param pkg1: package to compare with
+	@type pkg1: list (example: ['test', '1.0', 'r1'])
+	@param pkg2: package to compare against
+	@type pkg2: list (example: ['test', '1.0', 'r1'])
+	@rtype: None or integer
+	@return: 
+		1. None if package names are not the same
+		2. 1 if pkg1 is greater than pkg2
+		3. -1 if pkg1 is less than pkg2 
+		4. 0 if pkg1 equals pkg2
+	"""
+	if pkg1[0] != pkg2[0]:
+		return None
+	return vercmp("-".join(pkg1[1:]), "-".join(pkg2[1:]))
+
+_pv_re = re.compile('^' + _pv + '$', re.VERBOSE)
+
+def _pkgsplit(mypkg):
+	"""
+	@param mypkg: pv
+	@return:
+	1. None if input is invalid.
+	2. (pn, ver, rev) if input is pv
+	"""
+	m = _pv_re.match(mypkg)
+	if m is None:
+		return None
+
+	if m.group('pn_inval') is not None:
+		# package name appears to have a version-like suffix
+		return None
+
+	rev = m.group('rev')
+	if rev is None:
+		rev = '0'
+	rev = 'r' + rev
+
+	return  (m.group('pn'), m.group('ver'), rev) 
+
+_cat_re = re.compile('^%s$' % _cat)
+_missing_cat = 'null'
+catcache={}
+def catpkgsplit(mydata,silent=1):
+	"""
+	Takes a Category/Package-Version-Rev and returns a list of each.
+	
+	@param mydata: Data to split
+	@type mydata: string 
+	@param silent: suppress error messages
+	@type silent: Boolean (integer)
+	@rtype: list
+	@return:
+	1.  If each exists, it returns [cat, pkgname, version, rev]
+	2.  If cat is not specified in mydata, cat will be "null"
+	3.  if rev does not exist it will be 'r0'
+	"""
+
+	try:
+		return catcache[mydata]
+	except KeyError:
+		pass
+	mysplit = mydata.split('/', 1)
+	p_split=None
+	if len(mysplit)==1:
+		cat = _missing_cat
+		p_split = _pkgsplit(mydata)
+	elif len(mysplit)==2:
+		cat = mysplit[0]
+		if _cat_re.match(cat) is not None:
+			p_split = _pkgsplit(mysplit[1])
+	if not p_split:
+		catcache[mydata]=None
+		return None
+	retval = (cat, p_split[0], p_split[1], p_split[2])
+	catcache[mydata]=retval
+	return retval
+
+def pkgsplit(mypkg, silent=1):
+	"""
+	@param mypkg: either a pv or cpv
+	@return:
+	1. None if input is invalid.
+	2. (pn, ver, rev) if input is pv
+	3. (cp, ver, rev) if input is a cpv
+	"""
+	catpsplit = catpkgsplit(mypkg)
+	if catpsplit is None:
+		return None
+	cat, pn, ver, rev = catpsplit
+	if cat is _missing_cat and '/' not in mypkg:
+		return (pn, ver, rev)
+	else:
+		return (cat + '/' + pn, ver, rev)
+
+def cpv_getkey(mycpv):
+	"""Calls catpkgsplit on a cpv and returns only the cp."""
+	mysplit = catpkgsplit(mycpv)
+	if mysplit is not None:
+		return mysplit[0] + '/' + mysplit[1]
+
+	warnings.warn("portage.versions.cpv_getkey() " + \
+		"called with invalid cpv: '%s'" % (mycpv,),
+		DeprecationWarning, stacklevel=2)
+
+	myslash = mycpv.split("/", 1)
+	mysplit = _pkgsplit(myslash[-1])
+	if mysplit is None:
+		return None
+	mylen = len(myslash)
+	if mylen == 2:
+		return myslash[0] + "/" + mysplit[0]
+	else:
+		return mysplit[0]
+
+def cpv_getversion(mycpv):
+	"""Returns the v (including revision) from an cpv."""
+	cp = cpv_getkey(mycpv)
+	if cp is None:
+		return None
+	return mycpv[len(cp+"-"):]
+
+def cpv_sort_key():
+	"""
+	Create an object for sorting cpvs, to be used as the 'key' parameter
+	in places like list.sort() or sorted(). This calls catpkgsplit() once for
+	each cpv and caches the result. If a given cpv is invalid or two cpvs
+	have different category/package names, then plain string (> and <)
+	comparison is used.
+
+	@rtype: key object for sorting
+	@return: object for use as the 'key' parameter in places like
+		list.sort() or sorted()
+	"""
+
+	split_cache = {}
+
+	def cmp_cpv(cpv1, cpv2):
+
+		split1 = split_cache.get(cpv1, False)
+		if split1 is False:
+			split1 = catpkgsplit(cpv1)
+			if split1 is not None:
+				split1 = (split1[:2], '-'.join(split1[2:]))
+			split_cache[cpv1] = split1
+
+		split2 = split_cache.get(cpv2, False)
+		if split2 is False:
+			split2 = catpkgsplit(cpv2)
+			if split2 is not None:
+				split2 = (split2[:2], '-'.join(split2[2:]))
+			split_cache[cpv2] = split2
+
+		if split1 is None or split2 is None or split1[0] != split2[0]:
+			return (cpv1 > cpv2) - (cpv1 < cpv2)
+
+		return vercmp(split1[1], split2[1])
+
+	return cmp_sort_key(cmp_cpv)
+
+def catsplit(mydep):
+	return mydep.split("/", 1)
+
+def best(mymatches):
+	"""Accepts None arguments; assumes matches are valid."""
+	if not mymatches:
+		return ""
+	if len(mymatches) == 1:
+		return mymatches[0]
+	bestmatch = mymatches[0]
+	p2 = catpkgsplit(bestmatch)[1:]
+	for x in mymatches[1:]:
+		p1 = catpkgsplit(x)[1:]
+		if pkgcmp(p1, p2) > 0:
+			bestmatch = x
+			p2 = catpkgsplit(bestmatch)[1:]
+	return bestmatch

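The version helpers above are pure functions and can be spot-checked directly
(assuming the patched portage is importable); the expected results below
follow the docstrings:

    from portage.versions import vercmp, pkgsplit, catpkgsplit, best

    assert vercmp('1.0_p3', '1.0_p3') == 0
    assert vercmp('1.3', '1.2-r3') > 0            # 1.3 is newer
    assert pkgsplit('test-1.0-r1') == ('test', '1.0', 'r1')
    assert catpkgsplit('app-misc/foo-1.2.3') == \
        ('app-misc', 'foo', '1.2.3', 'r0')        # missing rev becomes 'r0'
    print(best(['app-misc/foo-1.0', 'app-misc/foo-1.2']))
    # -> app-misc/foo-1.2
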
diff --git a/portage_with_autodep/pym/portage/xml/__init__.py b/portage_with_autodep/pym/portage/xml/__init__.py
new file mode 100644
index 0000000..21a391a
--- /dev/null
+++ b/portage_with_autodep/pym/portage/xml/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/xml/metadata.py b/portage_with_autodep/pym/portage/xml/metadata.py
new file mode 100644
index 0000000..7acc1f3
--- /dev/null
+++ b/portage_with_autodep/pym/portage/xml/metadata.py
@@ -0,0 +1,376 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Provides an easy-to-use python interface to Gentoo's metadata.xml file.
+
+	Example usage:
+		>>> from portage.xml.metadata import MetaDataXML
+		>>> pkg_md = MetaDataXML('/usr/portage/app-misc/gourmet/metadata.xml')
+		>>> pkg_md
+		<MetaDataXML '/usr/portage/app-misc/gourmet/metadata.xml'>
+		>>> pkg_md.herds()
+		['no-herd']
+		>>> for maint in pkg_md.maintainers():
+		...     print "{0} ({1})".format(maint.email, maint.name)
+		...
+		nixphoeni@gentoo.org (Joe Sapp)
+		>>> for flag in pkg_md.use():
+		...     print flag.name, "->", flag.description
+		...
+		rtf -> Enable export to RTF
+		gnome-print -> Enable printing support using gnome-print
+		>>> upstream = pkg_md.upstream()
+		>>> upstream
+		[<_Upstream {'docs': [], 'remoteid': [], 'maintainer':
+		 [<_Maintainer 'Thomas_Hinkle@alumni.brown.edu'>], 'bugtracker': [],
+		 'changelog': []}>]
+		>>> upstream[0].maintainer[0].name
+		'Thomas Mills Hinkle'
+"""
+
+__all__ = ('MetaDataXML',)
+
+try:
+	import xml.etree.cElementTree as etree
+except ImportError:
+	import xml.etree.ElementTree as etree
+
+import re
+import portage
+from portage import os
+from portage.util import unique_everseen
+
+class _Maintainer(object):
+	"""An object for representing one maintainer.
+
+	@type email: str or None
+	@ivar email: Maintainer's email address. Used for both Gentoo and upstream.
+	@type name: str or None
+	@ivar name: Maintainer's name. Used for both Gentoo and upstream.
+	@type description: str or None
+	@ivar description: Description of what a maintainer does. Gentoo only.
+	@type restrict: str or None
+	@ivar restrict: e.g. &gt;=portage-2.2 means only maintains versions
+		of Portage greater than 2.2. Should be DEPEND string with < and >
+		converted to &lt; and &gt; respectively. 
+	@type status: str or None
+	@ivar status: If set, either 'active' or 'inactive'. Upstream only.
+	"""
+
+	def __init__(self, node):
+		self.email = None
+		self.name = None
+		self.description = None
+		self.restrict = node.get('restrict')
+		self.status = node.get('status')
+		maint_attrs = node.getchildren()
+		for attr in maint_attrs:
+			setattr(self, attr.tag, attr.text)
+
+	def __repr__(self):
+		return "<%s %r>" % (self.__class__.__name__, self.email)
+
+
+class _Useflag(object):
+	"""An object for representing one USE flag.
+
+	@todo: Is there any way to have a keyword option to leave in
+		<pkg> and <cat> for later processing?
+	@type name: str or None
+	@ivar name: USE flag
+	@type restrict: str or None
+	@ivar restrict: e.g. &gt;=portage-2.2 means flag is only available in
+		versions greater than 2.2
+	@type description: str
+	@ivar description: description of the USE flag
+	"""
+
+	def __init__(self, node):
+		self.name = node.get('name')
+		self.restrict = node.get('restrict')
+		_desc = ''
+		if node.text:
+			_desc = node.text
+		for child in node.getchildren():
+			_desc += child.text if child.text else ''
+			_desc += child.tail if child.tail else ''
+		# This takes care of tabs and newlines left from the file
+		self.description = re.sub('\s+', ' ', _desc)
+
+	def __repr__(self):
+		return "<%s %r>" % (self.__class__.__name__, self.name)
+
+
+class _Upstream(object):
+	"""An object for representing one package's upstream.
+
+	@type maintainers: list
+	@ivar maintainers: L{_Maintainer} objects for each upstream maintainer
+	@type changelogs: list
+	@ivar changelogs: URLs to upstream's ChangeLog file in str format
+	@type docs: list
+	@ivar docs: Sequence of tuples containing URLs to upstream documentation
+		in the first slot and 'lang' attribute in the second, e.g.,
+		[('http.../docs/en/tut.html', None), ('http.../doc/fr/tut.html', 'fr')]
+	@type bugtrackers: list
+	@ivar bugtrackers: URLs to upstream's bugtracker. May also contain an email
+		address if prepended with 'mailto:'
+	@type remoteids: list
+	@ivar remoteids: Sequence of tuples containing the project's hosting site
+		name in the first slot and the project's ID name or number for that
+		site in the second, e.g., [('sourceforge', 'systemrescuecd')]
+	"""
+
+	def __init__(self, node):
+		self.node = node
+		self.maintainers = self.upstream_maintainers()
+		self.changelogs = self.upstream_changelogs()
+		self.docs = self.upstream_documentation()
+		self.bugtrackers = self.upstream_bugtrackers()
+		self.remoteids = self.upstream_remoteids()
+
+	def __repr__(self):
+		return "<%s %r>" % (self.__class__.__name__, self.__dict__)
+
+	def upstream_bugtrackers(self):
+		"""Retrieve upstream bugtracker location from xml node."""
+		return [e.text for e in self.node.findall('bugs-to')]
+
+	def upstream_changelogs(self):
+		"""Retrieve upstream changelog location from xml node."""
+		return [e.text for e in self.node.findall('changelog')]
+
+	def upstream_documentation(self):
+		"""Retrieve upstream documentation location from xml node."""
+		result = []
+		for elem in self.node.findall('doc'):
+			lang = elem.get('lang')
+			result.append((elem.text, lang))
+		return result
+	
+	def upstream_maintainers(self):
+		"""Retrieve upstream maintainer information from xml node."""
+		return [_Maintainer(m) for m in self.node.findall('maintainer')]
+
+	def upstream_remoteids(self):
+		"""Retrieve upstream remote ID from xml node."""
+		return [(e.text, e.get('type')) for e in self.node.findall('remote-id')]
+
+
+class MetaDataXML(object):
+	"""Access metadata.xml"""
+
+	def __init__(self, metadata_xml_path, herds):
+		"""Parse a valid metadata.xml file.
+
+		@type metadata_xml_path: str
+		@param metadata_xml_path: path to a valid metadata.xml file
+		@type herds: str or ElementTree
+		@param herds: path to a herds.xml, or a pre-parsed ElementTree
+		@raise IOError: if C{metadata_xml_path} can not be read
+		"""
+
+		self.metadata_xml_path = metadata_xml_path
+		self._xml_tree = None
+
+		try:
+			self._xml_tree = etree.parse(metadata_xml_path)
+		except ImportError:
+			pass
+
+		if isinstance(herds, etree.ElementTree):
+			herds_etree = herds
+			herds_path = None
+		else:
+			herds_etree = None
+			herds_path = herds
+
+		# Used for caching
+		self._herdstree = herds_etree
+		self._herds_path = herds_path
+		self._descriptions = None
+		self._maintainers = None
+		self._herds = None
+		self._useflags = None
+		self._upstream = None
+
+	def __repr__(self):
+		return "<%s %r>" % (self.__class__.__name__, self.metadata_xml_path)
+
+	def _get_herd_email(self, herd):
+		"""Get a herd's email address.
+
+		@type herd: str
+		@param herd: herd whose email you want
+		@rtype: str or None
+		@return: email address or None if herd is not in herds.xml
+		@raise IOError: if $PORTDIR/metadata/herds.xml can not be read
+		"""
+
+		if self._herdstree is None:
+			try:
+				self._herdstree = etree.parse(self._herds_path)
+			except (ImportError, IOError, SyntaxError):
+				return None
+
+		# Some special herds are not listed in herds.xml
+		if herd in ('no-herd', 'maintainer-wanted', 'maintainer-needed'):
+			return None
+
+		for node in self._herdstree.getiterator('herd'):
+			if node.findtext('name') == herd:
+				return node.findtext('email')
+
+	def herds(self, include_email=False):
+		"""Return a list of text nodes for <herd>.
+
+		@type include_email: bool
+		@keyword include_email: if True, also look up the herd's email
+		@rtype: tuple
+		@return: if include_email is False, return a list of strings;
+		         if include_email is True, return a list of tuples containing:
+		         [('herd1', 'herd1@gentoo.org'), ('no-herd', None)]
+		"""
+		if self._herds is None:
+			if self._xml_tree is None:
+				self._herds = tuple()
+			else:
+				herds = []
+				for elem in self._xml_tree.findall('herd'):
+					text = elem.text
+					if text is None:
+						text = ''
+					if include_email:
+						herd_mail = self._get_herd_email(text)
+						herds.append((text, herd_mail))
+					else:
+						herds.append(text)
+				self._herds = tuple(herds)
+
+		return self._herds
+
+	def descriptions(self):
+		"""Return a list of text nodes for <longdescription>.
+
+		@rtype: list
+		@return: package description in string format
+		@todo: Support the C{lang} attribute
+		"""
+		if self._descriptions is None:
+			if self._xml_tree is None:
+				self._descriptions = tuple()
+			else:
+				self._descriptions = tuple(e.text \
+					for e in self._xml_tree.findall("longdescription"))
+
+		return self._descriptions
+
+	def maintainers(self):
+		"""Get maintainers' name, email and description.
+
+		@rtype: list
+		@return: a sequence of L{_Maintainer} objects in document order.
+		"""
+
+		if self._maintainers is None:
+			if self._xml_tree is None:
+				self._maintainers = tuple()
+			else:
+				self._maintainers = tuple(_Maintainer(node) \
+					for node in self._xml_tree.findall('maintainer'))
+
+		return self._maintainers
+
+	def use(self):
+		"""Get names and descriptions for USE flags defined in metadata.
+
+		@rtype: list
+		@return: a sequence of L{_Useflag} objects in document order.
+		"""
+
+		if self._useflags is None:
+			if self._xml_tree is None:
+				self._useflags = tuple()
+			else:
+				self._useflags = tuple(_Useflag(node) \
+					for node in self._xml_tree.getiterator('flag'))
+
+		return self._useflags
+
+	def upstream(self):
+		"""Get upstream contact information.
+
+		@rtype: list
+		@return: a sequence of L{_Upstream} objects in document order.
+		"""
+
+		if self._upstream is None:
+			if self._xml_tree is None:
+				self._upstream = tuple()
+			else:
+				self._upstream = tuple(_Upstream(node) \
+					for node in self._xml_tree.findall('upstream'))
+
+		return self._upstream
+
+	def format_maintainer_string(self):
+		"""Format string containing maintainers and herds (emails if possible).
+		Used by emerge to display maintainer information.
+		Entries are sorted according to the rules stated on the bug wranglers page.
+
+		@rtype: String
+		@return: a string containing maintainers and herds
+		"""
+		maintainers = []
+		for maintainer in self.maintainers():
+			if maintainer.email is None or not maintainer.email.strip():
+				if maintainer.name and maintainer.name.strip():
+					maintainers.append(maintainer.name)
+			else:
+				maintainers.append(maintainer.email)
+
+		for herd, email in self.herds(include_email=True):
+			if herd == "no-herd":
+				continue
+			if email is None or not email.strip():
+				if herd and herd.strip():
+					maintainers.append(herd)
+			else:
+				maintainers.append(email)
+
+		maintainers = list(unique_everseen(maintainers))
+
+		maint_str = ""
+		if maintainers:
+			maint_str = maintainers[0]
+			maintainers = maintainers[1:]
+		if maintainers:
+			maint_str += " " + ",".join(maintainers)
+
+		return maint_str
+
+	def format_upstream_string(self):
+		"""Format string containing upstream maintainers and bugtrackers.
+		Used by emerge to display upstream information.
+
+		@rtype: String
+		@return: a string containing upstream maintainers and bugtrackers
+		"""
+		maintainers = []
+		for upstream in self.upstream():
+			for maintainer in upstream.maintainers:
+				if maintainer.email is None or not maintainer.email.strip():
+					if maintainer.name and maintainer.name.strip():
+						maintainers.append(maintainer.name)
+				else:
+					maintainers.append(maintainer.email)
+
+			for bugtracker in upstream.bugtrackers:
+				if bugtracker.startswith("mailto:"):
+					bugtracker = bugtracker[7:]
+				maintainers.append(bugtracker)
+
+		maintainers = list(unique_everseen(maintainers))
+		maint_str = " ".join(maintainers)
+		return maint_str

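A short sketch of the MetaDataXML accessors above (paths are illustrative for
a 2011-era tree; the constructor takes metadata.xml plus either the path to
herds.xml or a pre-parsed ElementTree):

    from portage.xml.metadata import MetaDataXML

    md = MetaDataXML('/usr/portage/app-misc/gourmet/metadata.xml',
        '/usr/portage/metadata/herds.xml')
    print(md.format_maintainer_string())     # maintainer emails, then herds
    for flag in md.use():
        print(flag.name, '->', flag.description)
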
diff --git a/portage_with_autodep/pym/portage/xpak.py b/portage_with_autodep/pym/portage/xpak.py
new file mode 100644
index 0000000..7487d67
--- /dev/null
+++ b/portage_with_autodep/pym/portage/xpak.py
@@ -0,0 +1,497 @@
+# Copyright 2001-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+# The format for a tbz2/xpak:
+#
+#  tbz2: tar.bz2 + xpak + (xpak_offset) + "STOP"
+#  xpak: "XPAKPACK" + (index_len) + (data_len) + index + data + "XPAKSTOP"
+# index: (pathname_len) + pathname + (data_offset) + (data_len)
+#        index entries are concatenated end-to-end.
+#  data: concatenated data chunks, end-to-end.
+#
+# [tarball]XPAKPACKIIIIDDDD[index][data]XPAKSTOPOOOOSTOP
+#
+# (integer) == encodeint(integer)  ===> 4 characters (big-endian copy)
+# '+' means concatenate the fields ===> All chunks are strings
+
+__all__ = ['addtolist', 'decodeint', 'encodeint', 'getboth',
+	'getindex', 'getindex_mem', 'getitem', 'listindex',
+	'searchindex', 'tbz2', 'xpak_mem', 'xpak', 'xpand',
+	'xsplit', 'xsplit_mem']
+
+import array
+import errno
+import shutil
+import sys
+
+import portage
+from portage import os
+from portage import normalize_path
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+def addtolist(mylist, curdir):
+	"""(list, dir) --- Takes an array(list) and appends all files from dir down
+	the directory tree. Returns nothing. list is modified."""
+	curdir = normalize_path(_unicode_decode(curdir,
+		encoding=_encodings['fs'], errors='strict'))
+	for parent, dirs, files in os.walk(curdir):
+
+		parent = _unicode_decode(parent,
+			encoding=_encodings['fs'], errors='strict')
+		if parent != curdir:
+			mylist.append(parent[len(curdir) + 1:] + os.sep)
+
+		for x in dirs:
+			try:
+				_unicode_decode(x, encoding=_encodings['fs'], errors='strict')
+			except UnicodeDecodeError:
+				dirs.remove(x)
+
+		for x in files:
+			try:
+				x = _unicode_decode(x,
+					encoding=_encodings['fs'], errors='strict')
+			except UnicodeDecodeError:
+				continue
+			mylist.append(os.path.join(parent, x)[len(curdir) + 1:])
+
+def encodeint(myint):
+	"""Takes a 4 byte integer and converts it into a string of 4 characters.
+	Returns the characters in a string."""
+	a = array.array('B')
+	a.append((myint >> 24 ) & 0xff)
+	a.append((myint >> 16 ) & 0xff)
+	a.append((myint >> 8 ) & 0xff)
+	a.append(myint & 0xff)
+	return a.tostring()
+
+def decodeint(mystring):
+	"""Takes a 4 byte string and converts it into a 4 byte integer.
+	Returns an integer."""
+	if sys.hexversion < 0x3000000:
+		mystring = [ord(x) for x in mystring]
+	myint = 0
+	myint += mystring[3]
+	myint += mystring[2] << 8
+	myint += mystring[1] << 16
+	myint += mystring[0] << 24
+	return myint
+
+def xpak(rootdir,outfile=None):
+	"""(rootdir,outfile) -- creates an xpak segment of the directory 'rootdir'
+	and under the name 'outfile' if it is specified. Otherwise it returns the
+	xpak segment."""
+
+	mylist=[]
+
+	addtolist(mylist, rootdir)
+	mylist.sort()
+	mydata = {}
+	for x in mylist:
+		if x == 'CONTENTS':
+			# CONTENTS is generated during the merge process.
+			continue
+		x = _unicode_encode(x, encoding=_encodings['fs'], errors='strict')
+		mydata[x] = open(os.path.join(rootdir, x), 'rb').read()
+
+	xpak_segment = xpak_mem(mydata)
+	if outfile:
+		outf = open(_unicode_encode(outfile,
+			encoding=_encodings['fs'], errors='strict'), 'wb')
+		outf.write(xpak_segment)
+		outf.close()
+	else:
+		return xpak_segment
+
+def xpak_mem(mydata):
+	"""Create an xpack segement from a map object."""
+
+	mydata_encoded = {}
+	for k, v in mydata.items():
+		k = _unicode_encode(k,
+			encoding=_encodings['repo.content'], errors='backslashreplace')
+		v = _unicode_encode(v,
+			encoding=_encodings['repo.content'], errors='backslashreplace')
+		mydata_encoded[k] = v
+	mydata = mydata_encoded
+	del mydata_encoded
+
+	indexglob = b''
+	indexpos=0
+	dataglob = b''
+	datapos=0
+	for x, newglob in mydata.items():
+		mydatasize=len(newglob)
+		indexglob=indexglob+encodeint(len(x))+x+encodeint(datapos)+encodeint(mydatasize)
+		indexpos=indexpos+4+len(x)+4+4
+		dataglob=dataglob+newglob
+		datapos=datapos+mydatasize
+	return b'XPAKPACK' \
+	+ encodeint(len(indexglob)) \
+	+ encodeint(len(dataglob)) \
+	+ indexglob \
+	+ dataglob \
+	+ b'XPAKSTOP'
+
+def xsplit(infile):
+	"""(infile) -- Splits the infile into two files.
+	'infile.index' contains the index segment.
+	'infile.dat' contains the data segment."""
+	infile = _unicode_decode(infile,
+		encoding=_encodings['fs'], errors='strict')
+	myfile = open(_unicode_encode(infile,
+		encoding=_encodings['fs'], errors='strict'), 'rb')
+	mydat=myfile.read()
+	myfile.close()
+	
+	splits = xsplit_mem(mydat)
+	if not splits:
+		return False
+	
+	myfile = open(_unicode_encode(infile + '.index',
+		encoding=_encodings['fs'], errors='strict'), 'wb')
+	myfile.write(splits[0])
+	myfile.close()
+	myfile = open(_unicode_encode(infile + '.dat',
+		encoding=_encodings['fs'], errors='strict'), 'wb')
+	myfile.write(splits[1])
+	myfile.close()
+	return True
+
+def xsplit_mem(mydat):
+	if mydat[0:8] != b'XPAKPACK':
+		return None
+	if mydat[-8:] != b'XPAKSTOP':
+		return None
+	indexsize=decodeint(mydat[8:12])
+	return (mydat[16:indexsize+16], mydat[indexsize+16:-8])
+
+def getindex(infile):
+	"""(infile) -- grabs the index segment from the infile and returns it."""
+	myfile = open(_unicode_encode(infile,
+		encoding=_encodings['fs'], errors='strict'), 'rb')
+	myheader=myfile.read(16)
+	if myheader[0:8] != b'XPAKPACK':
+		myfile.close()
+		return
+	indexsize=decodeint(myheader[8:12])
+	myindex=myfile.read(indexsize)
+	myfile.close()
+	return myindex
+
+def getboth(infile):
+	"""(infile) -- grabs the index and data segments from the infile.
+	Returns an array [indexSegment,dataSegment]"""
+	myfile = open(_unicode_encode(infile,
+		encoding=_encodings['fs'], errors='strict'), 'rb')
+	myheader=myfile.read(16)
+	if myheader[0:8] != b'XPAKPACK':
+		myfile.close()
+		return
+	indexsize=decodeint(myheader[8:12])
+	datasize=decodeint(myheader[12:16])
+	myindex=myfile.read(indexsize)
+	mydata=myfile.read(datasize)
+	myfile.close()
+	return myindex, mydata
+
+def listindex(myindex):
+	"""Print to the terminal the filenames listed in the indexglob passed in."""
+	for x in getindex_mem(myindex):
+		print(x)
+
+def getindex_mem(myindex):
+	"""Returns the filenames listed in the indexglob passed in."""
+	myindexlen=len(myindex)
+	startpos=0
+	myret=[]
+	while ((startpos+8)<myindexlen):
+		mytestlen=decodeint(myindex[startpos:startpos+4])
+		myret=myret+[myindex[startpos+4:startpos+4+mytestlen]]
+		startpos=startpos+mytestlen+12
+	return myret
+
+def searchindex(myindex,myitem):
+	"""(index,item) -- Finds the offset and length of the file 'item' in the
+	datasegment via the index 'index' provided."""
+	myitem = _unicode_encode(myitem,
+		encoding=_encodings['repo.content'], errors='backslashreplace')
+	mylen=len(myitem)
+	myindexlen=len(myindex)
+	startpos=0
+	while ((startpos+8)<myindexlen):
+		mytestlen=decodeint(myindex[startpos:startpos+4])
+		if mytestlen==mylen:
+			if myitem==myindex[startpos+4:startpos+4+mytestlen]:
+				#found
+				datapos=decodeint(myindex[startpos+4+mytestlen:startpos+8+mytestlen])
+				datalen=decodeint(myindex[startpos+8+mytestlen:startpos+12+mytestlen])
+				return datapos, datalen
+		startpos=startpos+mytestlen+12
+		
+def getitem(myid,myitem):
+	myindex=myid[0]
+	mydata=myid[1]
+	myloc=searchindex(myindex,myitem)
+	if not myloc:
+		return None
+	return mydata[myloc[0]:myloc[0]+myloc[1]]
+
+def xpand(myid,mydest):
+	myindex=myid[0]
+	mydata=myid[1]
+	try:
+		origdir=os.getcwd()
+	except SystemExit as e:
+		raise
+	except:
+		os.chdir("/")
+		origdir="/"
+	os.chdir(mydest)
+	myindexlen=len(myindex)
+	startpos=0
+	while ((startpos+8)<myindexlen):
+		namelen=decodeint(myindex[startpos:startpos+4])
+		datapos=decodeint(myindex[startpos+4+namelen:startpos+8+namelen])
+		datalen=decodeint(myindex[startpos+8+namelen:startpos+12+namelen])
+		myname=myindex[startpos+4:startpos+4+namelen]
+		dirname=os.path.dirname(myname)
+		if dirname:
+			if not os.path.exists(dirname):
+				os.makedirs(dirname)
+		mydat = open(_unicode_encode(myname,
+			encoding=_encodings['fs'], errors='strict'), 'wb')
+		mydat.write(mydata[datapos:datapos+datalen])
+		mydat.close()
+		startpos=startpos+namelen+12
+	os.chdir(origdir)
+
+class tbz2(object):
+	def __init__(self,myfile):
+		self.file=myfile
+		self.filestat=None
+		self.index = b''
+		self.infosize=0
+		self.xpaksize=0
+		self.indexsize=None
+		self.datasize=None
+		self.indexpos=None
+		self.datapos=None
+
+	def decompose(self,datadir,cleanup=1):
+		"""Alias for unpackinfo() --- Complement to recompose() but optionally
+		deletes the destination directory. Extracts the xpak from the tbz2 into
+		the directory provided. Raises IOError if scan() fails.
+		Returns result of unpackinfo()."""
+		if not self.scan():
+			raise IOError
+		if cleanup:
+			self.cleanup(datadir)
+		if not os.path.exists(datadir):
+			os.makedirs(datadir)
+		return self.unpackinfo(datadir)
+	def compose(self,datadir,cleanup=0):
+		"""Alias for recompose()."""
+		return self.recompose(datadir,cleanup)
+
+	def recompose(self, datadir, cleanup=0, break_hardlinks=True):
+		"""Creates an xpak segment from the datadir provided, truncates the tbz2
+		to the end of regular data if an xpak segment already exists, and adds
+		the new segment to the file with terminating info."""
+		xpdata = xpak(datadir)
+		self.recompose_mem(xpdata, break_hardlinks=break_hardlinks)
+		if cleanup:
+			self.cleanup(datadir)
+
+	def recompose_mem(self, xpdata, break_hardlinks=True):
+		"""
+		Update the xpak segment.
+		@param xpdata: A new xpak segment to be written, like that returned
+			from the xpak_mem() function.
+		@param break_hardlinks: If hardlinks exist, create a copy in order
+			to break them. This makes it safe to use hardlinks to create
+			cheap snapshots of the repository, which is useful for solving
+			race conditions on binhosts as described here:
+			http://code.google.com/p/chromium-os/issues/detail?id=3225.
+			Default is True.
+		"""
+		self.scan() # Don't care about condition... We'll rewrite the data anyway.
+
+		if break_hardlinks and self.filestat.st_nlink > 1:
+			tmp_fname = "%s.%d" % (self.file, os.getpid())
+			shutil.copyfile(self.file, tmp_fname)
+			try:
+				portage.util.apply_stat_permissions(self.file, self.filestat)
+			except portage.exception.OperationNotPermitted:
+				pass
+			os.rename(tmp_fname, self.file)
+
+		myfile = open(_unicode_encode(self.file,
+			encoding=_encodings['fs'], errors='strict'), 'ab+')
+		if not myfile:
+			raise IOError
+		myfile.seek(-self.xpaksize,2) # 0,2 or -0,2 just mean EOF.
+		myfile.truncate()
+		myfile.write(xpdata+encodeint(len(xpdata)) + b'STOP')
+		myfile.flush()
+		myfile.close()
+		return 1
+
+	def cleanup(self, datadir):
+		datadir_split = os.path.split(datadir)
+		if len(datadir_split) >= 2 and len(datadir_split[1]) > 0:
+			# This is potentially dangerous,
+			# thus the above sanity check.
+			try:
+				shutil.rmtree(datadir)
+			except OSError as oe:
+				if oe.errno == errno.ENOENT:
+					pass
+				else:
+					raise oe
+
+	def scan(self):
+		"""Scans the tbz2 to locate the xpak segment and setup internal values.
+		This function is called by relevant functions already."""
+		try:
+			mystat=os.stat(self.file)
+			if self.filestat:
+				changed=0
+				if mystat.st_size != self.filestat.st_size \
+					or mystat.st_mtime != self.filestat.st_mtime \
+					or mystat.st_ctime != self.filestat.st_ctime:
+					changed = True
+				if not changed:
+					return 1
+			self.filestat=mystat
+			a = open(_unicode_encode(self.file,
+				encoding=_encodings['fs'], errors='strict'), 'rb')
+			a.seek(-16,2)
+			trailer=a.read()
+			self.infosize=0
+			self.xpaksize=0
+			if trailer[-4:] != b'STOP':
+				a.close()
+				return 0
+			if trailer[0:8] != b'XPAKSTOP':
+				a.close()
+				return 0
+			self.infosize=decodeint(trailer[8:12])
+			self.xpaksize=self.infosize+8
+			a.seek(-(self.xpaksize),2)
+			header=a.read(16)
+			if header[0:8] != b'XPAKPACK':
+				a.close()
+				return 0
+			self.indexsize=decodeint(header[8:12])
+			self.datasize=decodeint(header[12:16])
+			self.indexpos=a.tell()
+			self.index=a.read(self.indexsize)
+			self.datapos=a.tell()
+			a.close()
+			return 2
+		except SystemExit as e:
+			raise
+		except:
+			return 0
+
+	def filelist(self):
+		"""Return an array of each file listed in the index."""
+		if not self.scan():
+			return None
+		return getindex_mem(self.index)
+
+	def getfile(self,myfile,mydefault=None):
+		"""Finds 'myfile' in the data segment and returns it."""
+		if not self.scan():
+			return None
+		myresult=searchindex(self.index,myfile)
+		if not myresult:
+			return mydefault
+		a = open(_unicode_encode(self.file,
+			encoding=_encodings['fs'], errors='strict'), 'rb')
+		a.seek(self.datapos+myresult[0],0)
+		myreturn=a.read(myresult[1])
+		a.close()
+		return myreturn
+
+	def getelements(self,myfile):
+		"""A split/array representation of tbz2.getfile()"""
+		mydat=self.getfile(myfile)
+		if not mydat:
+			return []
+		return mydat.split()
+
+	def unpackinfo(self,mydest):
+		"""Unpacks all the files from the dataSegment into 'mydest'."""
+		if not self.scan():
+			return 0
+		try:
+			origdir=os.getcwd()
+		except SystemExit as e:
+			raise
+		except:
+			os.chdir("/")
+			origdir="/"
+		a = open(_unicode_encode(self.file,
+			encoding=_encodings['fs'], errors='strict'), 'rb')
+		if not os.path.exists(mydest):
+			os.makedirs(mydest)
+		os.chdir(mydest)
+		startpos=0
+		while ((startpos+8)<self.indexsize):
+			namelen=decodeint(self.index[startpos:startpos+4])
+			datapos=decodeint(self.index[startpos+4+namelen:startpos+8+namelen])
+			datalen=decodeint(self.index[startpos+8+namelen:startpos+12+namelen])
+			myname=self.index[startpos+4:startpos+4+namelen]
+			myname = _unicode_decode(myname,
+				encoding=_encodings['repo.content'], errors='replace')
+			dirname=os.path.dirname(myname)
+			if dirname:
+				if not os.path.exists(dirname):
+					os.makedirs(dirname)
+			mydat = open(_unicode_encode(myname,
+				encoding=_encodings['fs'], errors='strict'), 'wb')
+			a.seek(self.datapos+datapos)
+			mydat.write(a.read(datalen))
+			mydat.close()
+			startpos=startpos+namelen+12
+		a.close()
+		os.chdir(origdir)
+		return 1
+
+	def get_data(self):
+		"""Returns all the files from the dataSegment as a map object."""
+		if not self.scan():
+			return {}
+		a = open(_unicode_encode(self.file,
+			encoding=_encodings['fs'], errors='strict'), 'rb')
+		mydata = {}
+		startpos=0
+		while ((startpos+8)<self.indexsize):
+			namelen=decodeint(self.index[startpos:startpos+4])
+			datapos=decodeint(self.index[startpos+4+namelen:startpos+8+namelen])
+			datalen=decodeint(self.index[startpos+8+namelen:startpos+12+namelen])
+			myname=self.index[startpos+4:startpos+4+namelen]
+			a.seek(self.datapos+datapos)
+			mydata[myname] = a.read(datalen)
+			startpos=startpos+namelen+12
+		a.close()
+		return mydata
+
+	def getboth(self):
+		"""Returns an array [indexSegment,dataSegment]"""
+		if not self.scan():
+			return None
+
+		a = open(_unicode_encode(self.file,
+			encoding=_encodings['fs'], errors='strict'), 'rb')
+		a.seek(self.datapos)
+		mydata =a.read(self.datasize)
+		a.close()
+
+		return self.index, mydata
+

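The xpak container format documented at the top of xpak.py can be exercised
round-trip in memory; a minimal sketch (byte-string keys and values, matching
what the on-disk format stores):

    from portage.xpak import (encodeint, decodeint, xpak_mem,
        xsplit_mem, getindex_mem)

    assert decodeint(encodeint(1024)) == 1024   # 4-byte big-endian integers

    segment = xpak_mem({b'CATEGORY': b'app-misc\n', b'PF': b'foo-1.0\n'})
    assert segment.startswith(b'XPAKPACK') and segment.endswith(b'XPAKSTOP')

    index, data = xsplit_mem(segment)
    print(getindex_mem(index))   # e.g. [b'CATEGORY', b'PF'] (dict order)
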
diff --git a/portage_with_autodep/pym/repoman/__init__.py b/portage_with_autodep/pym/repoman/__init__.py
new file mode 100644
index 0000000..e69de29

diff --git a/portage_with_autodep/pym/repoman/checks.py b/portage_with_autodep/pym/repoman/checks.py
new file mode 100644
index 0000000..e7472df
--- /dev/null
+++ b/portage_with_autodep/pym/repoman/checks.py
@@ -0,0 +1,707 @@
+# repoman: Checks
+# Copyright 2007, 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""This module contains functions used in Repoman to ascertain the quality
+and correctness of an ebuild."""
+
+import re
+import time
+import repoman.errors as errors
+from portage.eapi import eapi_supports_prefix, eapi_has_implicit_rdepend, \
+	eapi_has_src_prepare_and_src_configure, eapi_has_dosed_dohard, \
+	eapi_exports_AA, eapi_exports_KV
+
+class LineCheck(object):
+	"""Run a check on a line of an ebuild."""
+	"""A regular expression to determine whether to ignore the line"""
+	ignore_line = False
+	"""True if lines containing nothing more than comments with optional
+	leading whitespace should be ignored"""
+	ignore_comment = True
+
+	def new(self, pkg):
+		pass
+
+	def check_eapi(self, eapi):
+		""" returns if the check should be run in the given EAPI (default is True) """
+		return True
+
+	def check(self, num, line):
+		"""Run the check on line and return error if there is one"""
+		if self.re.match(line):
+			return self.error
+
+	def end(self):
+		pass
+
+class PhaseCheck(LineCheck):
+	""" basic class for function detection """
+
+	func_end_re = re.compile(r'^\}$')
+	phases_re = re.compile('(%s)' % '|'.join((
+		'pkg_pretend', 'pkg_setup', 'src_unpack', 'src_prepare',
+		'src_configure', 'src_compile', 'src_test', 'src_install',
+		'pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm',
+		'pkg_config')))
+	in_phase = ''
+
+	def check(self, num, line):
+		m = self.phases_re.match(line)
+		if m is not None:
+			self.in_phase = m.group(1)
+		if self.in_phase != '' and \
+				self.func_end_re.match(line) is not None:
+			self.in_phase = ''
+
+		return self.phase_check(num, line)
+
+	def phase_check(self, num, line):
+		""" override this function for your checks """
+		pass
+
+class EbuildHeader(LineCheck):
+	"""Ensure ebuilds have proper headers
+		Copyright header errors
+		CVS header errors
+		License header errors
+	
+	Args:
+		modification_year - Year the ebuild was last modified
+	"""
+
+	repoman_check_name = 'ebuild.badheader'
+
+	gentoo_copyright = r'^# Copyright ((1999|2\d\d\d)-)?%s Gentoo Foundation$'
+	# Why a regex here, use a string match
+	# gentoo_license = re.compile(r'^# Distributed under the terms of the GNU General Public License v2$')
+	gentoo_license = '# Distributed under the terms of the GNU General Public License v2'
+	cvs_header = re.compile(r'^# \$Header: .*\$$')
+
+	def new(self, pkg):
+		if pkg.mtime is None:
+			self.modification_year = r'2\d\d\d'
+		else:
+			self.modification_year = str(time.gmtime(pkg.mtime)[0])
+		self.gentoo_copyright_re = re.compile(
+			self.gentoo_copyright % self.modification_year)
+
+	def check(self, num, line):
+		if num > 2:
+			return
+		elif num == 0:
+			if not self.gentoo_copyright_re.match(line):
+				return errors.COPYRIGHT_ERROR
+		elif num == 1 and line.rstrip('\n') != self.gentoo_license:
+			return errors.LICENSE_ERROR
+		elif num == 2:
+			if not self.cvs_header.match(line):
+				return errors.CVS_HEADER_ERROR
+
+
+class EbuildWhitespace(LineCheck):
+	"""Ensure ebuilds have proper whitespacing"""
+
+	repoman_check_name = 'ebuild.minorsyn'
+
+	ignore_line = re.compile(r'(^$)|(^(\t)*#)')
+	ignore_comment = False
+	leading_spaces = re.compile(r'^[\S\t]')
+	trailing_whitespace = re.compile(r'.*([\S]$)')	
+
+	def check(self, num, line):
+		if self.leading_spaces.match(line) is None:
+			return errors.LEADING_SPACES_ERROR
+		if self.trailing_whitespace.match(line) is None:
+			return errors.TRAILING_WHITESPACE_ERROR
+
+class EbuildBlankLine(LineCheck):
+	repoman_check_name = 'ebuild.minorsyn'
+	ignore_comment = False
+	blank_line = re.compile(r'^$')
+
+	def new(self, pkg):
+		self.line_is_blank = False
+
+	def check(self, num, line):
+		if self.line_is_blank and self.blank_line.match(line):
+			return 'Useless blank line on line: %d'
+		if self.blank_line.match(line):
+			self.line_is_blank = True
+		else:
+			self.line_is_blank = False
+
+	def end(self):
+		if self.line_is_blank:
+			yield 'Useless blank line on last line'
+
+class EbuildQuote(LineCheck):
+	"""Ensure ebuilds have valid quoting around things like D,FILESDIR, etc..."""
+
+	repoman_check_name = 'ebuild.minorsyn'
+	_message_commands = ["die", "echo", "eerror",
+		"einfo", "elog", "eqawarn", "ewarn"]
+	_message_re = re.compile(r'\s(' + "|".join(_message_commands) + \
+		r')\s+"[^"]*"\s*$')
+	_ignored_commands = ["local", "export"] + _message_commands
+	ignore_line = re.compile(r'(^$)|(^\s*#.*)|(^\s*\w+=.*)' + \
+		r'|(^\s*(' + "|".join(_ignored_commands) + r')\s+)')
+	ignore_comment = False
+	var_names = ["D", "DISTDIR", "FILESDIR", "S", "T", "ROOT", "WORKDIR"]
+
+	# EAPI=3/Prefix vars
+	var_names += ["ED", "EPREFIX", "EROOT"]
+
+	# variables for games.eclass
+	var_names += ["Ddir", "GAMES_PREFIX_OPT", "GAMES_DATADIR",
+		"GAMES_DATADIR_BASE", "GAMES_SYSCONFDIR", "GAMES_STATEDIR",
+		"GAMES_LOGDIR", "GAMES_BINDIR"]
+
+	var_names = "(%s)" % "|".join(var_names)
+	var_reference = re.compile(r'\$(\{'+var_names+'\}|' + \
+		var_names + '\W)')
+	missing_quotes = re.compile(r'(\s|^)[^"\'\s]*\$\{?' + var_names + \
+		r'\}?[^"\'\s]*(\s|$)')
+	cond_begin =  re.compile(r'(^|\s+)\[\[($|\\$|\s+)')
+	cond_end =  re.compile(r'(^|\s+)\]\]($|\\$|\s+)')
+	
+	def check(self, num, line):
+		if self.var_reference.search(line) is None:
+			return
+		# There can be multiple matches / violations on a single line. We
+		# have to make sure none of the matches are violators. Once we've
+		# found one violator, any remaining matches on the same line can
+		# be ignored.
+		pos = 0
+		while pos <= len(line) - 1:
+			missing_quotes = self.missing_quotes.search(line, pos)
+			if not missing_quotes:
+				break
+			# If the last character of the previous match is a whitespace
+			# character, that character may be needed for the next
+			# missing_quotes match, so search overlaps by 1 character.
+			group = missing_quotes.group()
+			pos = missing_quotes.end() - 1
+
+			# Filter out some false positives that can
+			# get through the missing_quotes regex.
+			if self.var_reference.search(group) is None:
+				continue
+
+			# Filter matches that appear to be an
+			# argument to a message command.
+			# For example: false || ewarn "foo $WORKDIR/bar baz"
+			message_match = self._message_re.search(line)
+			if message_match is not None and \
+				message_match.start() < pos and \
+				message_match.end() > pos:
+				break
+
+			# This is an attempt to avoid false positives without getting
+			# too complex, while possibly allowing some (hopefully
+			# unlikely) violations to slip through. We just assume
+			# everything is correct if there is a ' [[ ' or a ' ]] '
+			# anywhere in the whole line (possibly continued over one
+			# line).
+			if self.cond_begin.search(line) is not None:
+				continue
+			if self.cond_end.search(line) is not None:
+				continue
+
+			# Any remaining matches on the same line can be ignored.
+			return errors.MISSING_QUOTES_ERROR
+
+
+class EbuildAssignment(LineCheck):
+	"""Ensure ebuilds don't assign to readonly variables."""
+
+	repoman_check_name = 'variable.readonly'
+
+	readonly_assignment = re.compile(r'^\s*(export\s+)?(A|CATEGORY|P|PV|PN|PR|PVR|PF|D|WORKDIR|FILESDIR|FEATURES|USE)=')
+	line_continuation = re.compile(r'([^#]*\S)(\s+|\t)\\$')
+	ignore_line = re.compile(r'(^$)|(^(\t)*#)')
+	ignore_comment = False
+
+	def __init__(self):
+		self.previous_line = None
+
+	def check(self, num, line):
+		match = self.readonly_assignment.match(line)
+		e = None
+		if match and (not self.previous_line or not self.line_continuation.match(self.previous_line)):
+			e = errors.READONLY_ASSIGNMENT_ERROR
+		self.previous_line = line
+		return e
+
+class Eapi3EbuildAssignment(EbuildAssignment):
+	"""Ensure ebuilds don't assign to readonly EAPI 3-introduced variables."""
+
+	readonly_assignment = re.compile(r'\s*(export\s+)?(ED|EPREFIX|EROOT)=')
+
+	def check_eapi(self, eapi):
+		return eapi_supports_prefix(eapi)
+
+class EbuildNestedDie(LineCheck):
+	"""Check ebuild for nested die statements (die statements in subshells)"""
+	
+	repoman_check_name = 'ebuild.nesteddie'
+	nesteddie_re = re.compile(r'^[^#]*\s\(\s[^)]*\bdie\b')
+	
+	def check(self, num, line):
+		if self.nesteddie_re.match(line):
+			return errors.NESTED_DIE_ERROR
+
+
+class EbuildUselessDodoc(LineCheck):
+	"""Check ebuild for useless files in dodoc arguments."""
+	repoman_check_name = 'ebuild.minorsyn'
+	uselessdodoc_re = re.compile(
+		r'^\s*dodoc(\s+|\s+.*\s+)(ABOUT-NLS|COPYING|LICENCE|LICENSE)($|\s)')
+
+	def check(self, num, line):
+		match = self.uselessdodoc_re.match(line)
+		if match:
+			return "Useless dodoc '%s'" % (match.group(2), ) + " on line: %d"
+
+
+class EbuildUselessCdS(LineCheck):
+	"""Check for redundant cd ${S} statements"""
+	repoman_check_name = 'ebuild.minorsyn'
+	method_re = re.compile(r'^\s*src_(prepare|configure|compile|install|test)\s*\(\)')
+	cds_re = re.compile(r'^\s*cd\s+("\$(\{S\}|S)"|\$(\{S\}|S))\s')
+
+	def __init__(self):
+		self.check_next_line = False
+
+	def check(self, num, line):
+		if self.check_next_line:
+			self.check_next_line = False
+			if self.cds_re.match(line):
+				return errors.REDUNDANT_CD_S_ERROR
+		elif self.method_re.match(line):
+			self.check_next_line = True
+
+class EapiDefinition(LineCheck):
+	""" Check that EAPI is defined before any inherit"""
+	repoman_check_name = 'EAPI.definition'
+
+	eapi_re = re.compile(r'^EAPI=')
+	inherit_re = re.compile(r'^\s*inherit\s')
+
+	def new(self, pkg):
+		self.inherit_line = None
+
+	def check(self, num, line):
+		if self.eapi_re.match(line) is not None:
+			if self.inherit_line is not None:
+				return errors.EAPI_DEFINED_AFTER_INHERIT
+		elif self.inherit_re.match(line) is not None:
+			self.inherit_line = line
+
+class EbuildPatches(LineCheck):
+	"""Ensure ebuilds use bash arrays for PATCHES so that whitespace is handled safely"""
+	repoman_check_name = 'ebuild.patches'
+	re = re.compile(r'^\s*PATCHES=[^\(]')
+	error = errors.PATCHES_ERROR
+
+class EbuildQuotedA(LineCheck):
+	"""Ensure ebuilds have no quoting around ${A}"""
+
+	repoman_check_name = 'ebuild.minorsyn'
+	a_quoted = re.compile(r'.*\"\$(\{A\}|A)\"')
+
+	def check(self, num, line):
+		match = self.a_quoted.match(line)
+		if match:
+			return "Quoted \"${A}\" on line: %d"
+
+class EprefixifyDefined(LineCheck):
+	""" Check that prefix.eclass is inherited if needed"""
+
+	repoman_check_name = 'eprefixify.defined'
+
+	_eprefixify_re = re.compile(r'\beprefixify\b')
+	_inherit_prefix_re = re.compile(r'^\s*inherit\s(.*\s)?prefix\b')
+
+	def new(self, pkg):
+		self._prefix_inherited = False
+
+	def check(self, num, line):
+		if self._eprefixify_re.search(line) is not None:
+			if not self._prefix_inherited:
+				return errors.EPREFIXIFY_MISSING_INHERIT
+		elif self._inherit_prefix_re.search(line) is not None:
+			self._prefix_inherited = True
+
+class NoOffsetWithHelpers(LineCheck):
+	""" Check that the image location, the alternate root offset, and the
+	offset prefix (D, ROOT, ED, EROOT and EPREFIX) are not used with
+	helpers """
+
+	repoman_check_name = 'variable.usedwithhelpers'
+	# Ignore matches in quoted strings like this:
+	# elog "installed into ${ROOT}usr/share/php5/apc/."
+	re = re.compile(r'^[^#"\']*\b(dodir|dohard|exeinto|insinto|into)\s+"?\$\{?(D|ROOT|ED|EROOT|EPREFIX)\b.*')
+	error = errors.NO_OFFSET_WITH_HELPERS
+
+class ImplicitRuntimeDeps(LineCheck):
+	"""
+	Detect the case where DEPEND is set and RDEPEND is unset in the ebuild,
+	since this triggers implicit RDEPEND=$DEPEND assignment (prior to EAPI 4).
+	"""
+
+	repoman_check_name = 'RDEPEND.implicit'
+	_assignment_re = re.compile(r'^\s*(R?DEPEND)\+?=')
+
+	def new(self, pkg):
+		self._rdepend = False
+		self._depend = False
+
+	def check_eapi(self, eapi):
+		# Beginning with EAPI 4, there is no
+		# implicit RDEPEND=$DEPEND assignment
+		# to be concerned with.
+		return eapi_has_implicit_rdepend(eapi)
+
+	def check(self, num, line):
+		if not self._rdepend:
+			m = self._assignment_re.match(line)
+			if m is None:
+				pass
+			elif m.group(1) == "RDEPEND":
+				self._rdepend = True
+			elif m.group(1) == "DEPEND":
+				self._depend = True
+
+	def end(self):
+		if self._depend and not self._rdepend:
+			yield 'RDEPEND is not explicitly assigned'
+
+class InheritDeprecated(LineCheck):
+	"""Check if ebuild directly or indirectly inherits a deprecated eclass."""
+
+	repoman_check_name = 'inherit.deprecated'
+
+	# deprecated eclass : new eclass (False if no new eclass)
+	deprecated_classes = {
+		"gems": "ruby-fakegem",
+		"git": "git-2",
+		"mozconfig-2": "mozconfig-3",
+		"mozcoreconf": "mozcoreconf-2",
+		"php-ext-pecl-r1": "php-ext-pecl-r2",
+		"php-ext-source-r1": "php-ext-source-r2",
+		"php-pear": "php-pear-r1",
+		"qt3": False,
+		"qt4": "qt4-r2",
+		"ruby": "ruby-ng",
+		"ruby-gnome2": "ruby-ng-gnome2",
+		"x-modular": "xorg-2",
+		}
+
+	_inherit_re = re.compile(r'^\s*inherit\s(.*)$')
+
+	def new(self, pkg):
+		self._errors = []
+		self._indirect_deprecated = set(eclass for eclass in \
+			self.deprecated_classes if eclass in pkg.inherited)
+
+	def check(self, num, line):
+
+		direct_inherits = None
+		m = self._inherit_re.match(line)
+		if m is not None:
+			direct_inherits = m.group(1)
+			if direct_inherits:
+				direct_inherits = direct_inherits.split()
+
+		if not direct_inherits:
+			return
+
+		for eclass in direct_inherits:
+			replacement = self.deprecated_classes.get(eclass)
+			if replacement is None:
+				pass
+			elif replacement is False:
+				self._indirect_deprecated.discard(eclass)
+				self._errors.append("please migrate from " + \
+					"'%s' (no replacement) on line: %d" % (eclass, num + 1))
+			else:
+				self._indirect_deprecated.discard(eclass)
+				self._errors.append("please migrate from " + \
+					"'%s' to '%s' on line: %d" % \
+					(eclass, replacement, num + 1))
+
+	def end(self):
+		for error in self._errors:
+			yield error
+		del self._errors
+
+		for eclass in self._indirect_deprecated:
+			replacement = self.deprecated_classes[eclass]
+			if replacement is False:
+				yield "please migrate from indirect " + \
+					"inherit of '%s' (no replacement)" % (eclass,)
+			else:
+				yield "please migrate from indirect " + \
+					"inherit of '%s' to '%s'" % \
+					(eclass, replacement)
+		del self._indirect_deprecated
+
+class InheritAutotools(LineCheck):
+	"""
+	Make sure appropriate functions are called in
+	ebuilds that inherit autotools.eclass.
+	"""
+
+	repoman_check_name = 'inherit.autotools'
+	_inherit_autotools_re = re.compile(r'^\s*inherit\s(.*\s)?autotools(\s|$)')
+	_autotools_funcs = (
+		"eaclocal", "eautoconf", "eautoheader",
+		"eautomake", "eautoreconf", "_elibtoolize")
+	_autotools_func_re = re.compile(r'\b(' + \
+		"|".join(_autotools_funcs) + r')\b')
+	# Exempt eclasses:
+	# git - An EGIT_BOOTSTRAP variable may be used to call one of
+	#       the autotools functions.
+	# subversion - An ESVN_BOOTSTRAP variable may be used to call one of
+	#       the autotools functions.
+	_exempt_eclasses = frozenset(["git", "subversion"])
+
+	def new(self, pkg):
+		self._inherit_autotools = None
+		self._autotools_func_call = None
+		self._disabled = self._exempt_eclasses.intersection(pkg.inherited)
+
+	def check(self, num, line):
+		if self._disabled:
+			return
+		if self._inherit_autotools is None:
+			self._inherit_autotools = self._inherit_autotools_re.match(line)
+		if self._inherit_autotools is not None and \
+			self._autotools_func_call is None:
+			self._autotools_func_call = self._autotools_func_re.search(line)
+
+	def end(self):
+		if self._inherit_autotools and self._autotools_func_call is None:
+			yield 'no eauto* function called'
+
+class IUseUndefined(LineCheck):
+	"""
+	Make sure the ebuild defines IUSE (style guideline
+	says to define IUSE even when empty).
+	"""
+
+	repoman_check_name = 'IUSE.undefined'
+	_iuse_def_re = re.compile(r'^IUSE=.*')
+
+	def new(self, pkg):
+		self._iuse_def = None
+
+	def check(self, num, line):
+		if self._iuse_def is None:
+			self._iuse_def = self._iuse_def_re.match(line)
+
+	def end(self):
+		if self._iuse_def is None:
+			yield 'IUSE is not defined'
+
+class EMakeParallelDisabled(PhaseCheck):
+	"""Check for emake -j1 calls which disable parallelization."""
+	repoman_check_name = 'upstream.workaround'
+	re = re.compile(r'^\s*emake\s+.*-j\s*1\b')
+	error = errors.EMAKE_PARALLEL_DISABLED
+
+	def phase_check(self, num, line):
+		if self.in_phase == 'src_compile' or self.in_phase == 'src_install':
+			if self.re.match(line):
+				return self.error
+
+class EMakeParallelDisabledViaMAKEOPTS(LineCheck):
+	"""Check for MAKEOPTS=-j1 that disables parallelization."""
+	repoman_check_name = 'upstream.workaround'
+	re = re.compile(r'^\s*MAKEOPTS=(\'|")?.*-j\s*1\b')
+	error = errors.EMAKE_PARALLEL_DISABLED_VIA_MAKEOPTS
+
+class NoAsNeeded(LineCheck):
+	"""Check for calls to the no-as-needed function."""
+	repoman_check_name = 'upstream.workaround'
+	re = re.compile(r'.*\$\(no-as-needed\)')
+	error = errors.NO_AS_NEEDED
+
+class PreserveOldLib(LineCheck):
+	"""Check for calls to the preserve_old_lib function."""
+	repoman_check_name = 'upstream.workaround'
+	re = re.compile(r'.*preserve_old_lib')
+	error = errors.PRESERVE_OLD_LIB
+
+class SandboxAddpredict(LineCheck):
+	"""Check for calls to the addpredict function."""
+	repoman_check_name = 'upstream.workaround'
+	re = re.compile(r'(^|\s)addpredict\b')
+	error = errors.SANDBOX_ADDPREDICT
+
+class DeprecatedBindnowFlags(LineCheck):
+	"""Check for calls to the deprecated bindnow-flags function."""
+	repoman_check_name = 'ebuild.minorsyn'
+	re = re.compile(r'.*\$\(bindnow-flags\)')
+	error = errors.DEPRECATED_BINDNOW_FLAGS
+
+class WantAutoDefaultValue(LineCheck):
+	"""Check setting WANT_AUTO* to latest (default value)."""
+	repoman_check_name = 'ebuild.minorsyn'
+	_re = re.compile(r'^WANT_AUTO(CONF|MAKE)=(\'|")?latest')
+
+	def check(self, num, line):
+		m = self._re.match(line)
+		if m is not None:
+			return 'WANT_AUTO' + m.group(1) + \
+				' redundantly set to default value "latest" on line: %d'
+
+class SrcCompileEconf(PhaseCheck):
+	repoman_check_name = 'ebuild.minorsyn'
+	configure_re = re.compile(r'\s(econf|./configure)')
+
+	def check_eapi(self, eapi):
+		return eapi_has_src_prepare_and_src_configure(eapi)
+
+	def phase_check(self, num, line):
+		if self.in_phase == 'src_compile':
+			m = self.configure_re.match(line)
+			if m is not None:
+				return ("'%s'" % m.group(1)) + \
+					" call should be moved to src_configure from line: %d"
+
+class SrcUnpackPatches(PhaseCheck):
+	repoman_check_name = 'ebuild.minorsyn'
+	src_prepare_tools_re = re.compile(r'\s(e?patch|sed)\s')
+
+	def check_eapi(self, eapi):
+		return eapi_has_src_prepare_and_src_configure(eapi)
+
+	def phase_check(self, num, line):
+		if self.in_phase == 'src_unpack':
+			m = self.src_prepare_tools_re.search(line)
+			if m is not None:
+				return ("'%s'" % m.group(1)) + \
+					" call should be moved to src_prepare from line: %d"
+
+class BuiltWithUse(LineCheck):
+	repoman_check_name = 'ebuild.minorsyn'
+	re = re.compile(r'(^|.*\b)built_with_use\b')
+	error = errors.BUILT_WITH_USE
+
+class DeprecatedUseq(LineCheck):
+	"""Checks for use of the deprecated useq function"""
+	repoman_check_name = 'ebuild.minorsyn'
+	re = re.compile(r'(^|.*\b)useq\b')
+	error = errors.USEQ_ERROR
+
+class DeprecatedHasq(LineCheck):
+	"""Checks for use of the deprecated hasq function"""
+	repoman_check_name = 'ebuild.minorsyn'
+	re = re.compile(r'(^|.*\b)hasq\b')
+	error = errors.HASQ_ERROR
+
+# EAPI-3 checks
+class Eapi3DeprecatedFuncs(LineCheck):
+	repoman_check_name = 'EAPI.deprecated'
+	deprecated_commands_re = re.compile(r'^\s*(check_license)\b')
+
+	def check_eapi(self, eapi):
+		return eapi not in ('0', '1', '2')
+
+	def check(self, num, line):
+		m = self.deprecated_commands_re.match(line)
+		if m is not None:
+			return ("'%s'" % m.group(1)) + \
+				" has been deprecated in EAPI=3 on line: %d"
+
+# EAPI-4 checks
+class Eapi4IncompatibleFuncs(LineCheck):
+	repoman_check_name = 'EAPI.incompatible'
+	banned_commands_re = re.compile(r'^\s*(dosed|dohard)')
+
+	def check_eapi(self, eapi):
+		return not eapi_has_dosed_dohard(eapi)
+
+	def check(self, num, line):
+		m = self.banned_commands_re.match(line)
+		if m is not None:
+			return ("'%s'" % m.group(1)) + \
+				" has been banned in EAPI=4 on line: %d"
+
+class Eapi4GoneVars(LineCheck):
+	repoman_check_name = 'EAPI.incompatible'
+	undefined_vars_re = re.compile(r'.*\$(\{(AA|KV|EMERGE_FROM)\}|(AA|KV|EMERGE_FROM))')
+
+	def check_eapi(self, eapi):
+		# AA, KV, and EMERGE_FROM should not be referenced in EAPI 4 or later.
+		return not eapi_exports_AA(eapi)
+
+	def check(self, num, line):
+		m = self.undefined_vars_re.match(line)
+		if m is not None:
+			return ("variable '$%s'" % m.group(1)) + \
+				" is gone in EAPI=4 on line: %d"
+
+class PortageInternal(LineCheck):
+	repoman_check_name = 'portage.internal'
+	re = re.compile(r'[^#]*\b(ecompress|ecompressdir|prepall|prepalldocs|preplib)\b')
+
+	def check(self, num, line):
+		"""Run the check on line and return error if there is one"""
+		m = self.re.match(line)
+		if m is not None:
+			return ("'%s'" % m.group(1)) + " called on line: %d"
+
+_constant_checks = tuple((c() for c in (
+	EbuildHeader, EbuildWhitespace, EbuildBlankLine, EbuildQuote,
+	EbuildAssignment, Eapi3EbuildAssignment, EbuildUselessDodoc,
+	EbuildUselessCdS, EbuildNestedDie,
+	EbuildPatches, EbuildQuotedA, EapiDefinition, EprefixifyDefined,
+	ImplicitRuntimeDeps, InheritAutotools, InheritDeprecated, IUseUndefined,
+	EMakeParallelDisabled, EMakeParallelDisabledViaMAKEOPTS, NoAsNeeded,
+	DeprecatedBindnowFlags, SrcUnpackPatches, WantAutoDefaultValue,
+	SrcCompileEconf, Eapi3DeprecatedFuncs, NoOffsetWithHelpers,
+	Eapi4IncompatibleFuncs, Eapi4GoneVars, BuiltWithUse,
+	PreserveOldLib, SandboxAddpredict, PortageInternal,
+	DeprecatedUseq, DeprecatedHasq)))
+
+_here_doc_re = re.compile(r'.*\s<<[-]?(\w+)$')
+_ignore_comment_re = re.compile(r'^\s*#')
+
+def run_checks(contents, pkg):
+	checks = _constant_checks
+	here_doc_delim = None
+
+	for lc in checks:
+		lc.new(pkg)
+	for num, line in enumerate(contents):
+
+		# Check if we're inside a here-document.
+		if here_doc_delim is not None:
+			if here_doc_delim.match(line):
+				here_doc_delim = None
+		if here_doc_delim is None:
+			here_doc = _here_doc_re.match(line)
+			if here_doc is not None:
+				here_doc_delim = re.compile(r'^\s*%s$' % here_doc.group(1))
+
+		if here_doc_delim is None:
+			# We're not in a here-document.
+			is_comment = _ignore_comment_re.match(line) is not None
+			for lc in checks:
+				if is_comment and lc.ignore_comment:
+					continue
+				if lc.check_eapi(pkg.metadata['EAPI']):
+					ignore = lc.ignore_line
+					if not ignore or not ignore.match(line):
+						e = lc.check(num, line)
+						if e:
+							yield lc.repoman_check_name, e % (num + 1)
+
+	for lc in checks:
+		i = lc.end()
+		if i is not None:
+			for e in i:
+				yield lc.repoman_check_name, e

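As a quick orientation to the protocol above: every check is a class with new(pkg), check(num, line) and end() methods, and run_checks() feeds each ebuild line through every enabled check, filling any returned template with the 1-based line number. A minimal self-contained sketch (the check class and the sample lines are made up for illustration, not part of this commit):

import re

class TrailingDotCheck:
	"""Hypothetical check written in the style of the LineCheck classes above."""
	repoman_check_name = 'ebuild.minorsyn'
	_re = re.compile(r'\.$')

	def new(self, pkg):
		pass  # per-package state would be reset here

	def check(self, num, line):
		if self._re.search(line):
			return 'Line ends with a dot on line: %d'

	def end(self):
		return iter(())  # nothing to report at end of file

check = TrailingDotCheck()
check.new(None)
for num, line in enumerate(['inherit eutils.', 'DESCRIPTION="demo"']):
	e = check.check(num, line)
	if e:
		print('%s: %s' % (check.repoman_check_name, e % (num + 1)))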
diff --git a/portage_with_autodep/pym/repoman/errors.py b/portage_with_autodep/pym/repoman/errors.py
new file mode 100644
index 0000000..3209243
--- /dev/null
+++ b/portage_with_autodep/pym/repoman/errors.py
@@ -0,0 +1,26 @@
+# repoman: Error Messages
+# Copyright 2007-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+COPYRIGHT_ERROR = 'Invalid Gentoo Copyright on line: %d'
+LICENSE_ERROR = 'Invalid Gentoo/GPL License on line: %d'
+CVS_HEADER_ERROR = 'Malformed CVS Header on line: %d'
+LEADING_SPACES_ERROR = 'Ebuild contains leading spaces on line: %d'
+TRAILING_WHITESPACE_ERROR = 'Trailing whitespace error on line: %d'
+READONLY_ASSIGNMENT_ERROR = 'Ebuild contains assignment to read-only variable on line: %d'
+MISSING_QUOTES_ERROR = 'Unquoted Variable on line: %d'
+NESTED_DIE_ERROR = 'Ebuild calls die in a subshell on line: %d'
+PATCHES_ERROR = 'PATCHES is not a bash array on line: %d'
+REDUNDANT_CD_S_ERROR = 'Ebuild has redundant cd ${S} statement on line: %d'
+EMAKE_PARALLEL_DISABLED = 'Upstream parallel compilation bug (ebuild calls emake -j1 on line: %d)'
+EMAKE_PARALLEL_DISABLED_VIA_MAKEOPTS = 'Upstream parallel compilation bug (MAKEOPTS=-j1 on line: %d)'
+DEPRECATED_BINDNOW_FLAGS = 'Deprecated bindnow-flags call on line: %d'
+EAPI_DEFINED_AFTER_INHERIT = 'EAPI defined after inherit on line: %d'
+NO_AS_NEEDED = 'Upstream asneeded linking bug (no-as-needed on line: %d)'
+PRESERVE_OLD_LIB = 'Upstream ABI change workaround on line: %d'
+BUILT_WITH_USE = 'built_with_use on line: %d'
+EPREFIXIFY_MISSING_INHERIT = "prefix.eclass is not inherited, but eprefixify is used on line: %d"
+NO_OFFSET_WITH_HELPERS = "Helper function is used with D, ROOT, ED, EROOT or EPREFIX on line: %d"
+SANDBOX_ADDPREDICT = 'Ebuild calls addpredict on line: %d'
+USEQ_ERROR = 'Ebuild calls deprecated useq function on line: %d'
+HASQ_ERROR = 'Ebuild calls deprecated hasq function on line: %d'

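Each constant above is a plain format string with a single %d slot, which run_checks() in checks.py fills with the 1-based line number. A one-line sketch, assuming the module is importable as repoman.errors (the line number here is made up):

from repoman import errors

print(errors.MISSING_QUOTES_ERROR % 42)  # -> Unquoted Variable on line: 42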
diff --git a/portage_with_autodep/pym/repoman/herdbase.py b/portage_with_autodep/pym/repoman/herdbase.py
new file mode 100644
index 0000000..64b59e6
--- /dev/null
+++ b/portage_with_autodep/pym/repoman/herdbase.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+# repoman: Herd database analysis
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2 or later
+
+import errno
+import xml.etree.ElementTree
+try:
+	from xml.parsers.expat import ExpatError
+except ImportError:
+	# This means that python is built without xml support.
+	# We tolerate global scope import failures for optional
+	# modules, so that ImportModulesTestCase can succeed (or
+	# possibly alert us about unexpected import failures).
+	pass
+from portage.exception import FileNotFound, ParseError, PermissionDenied
+
+__all__ = [
+	"make_herd_base"
+]
+
+_SPECIAL_HERDS = set(('no-herd',))
+
+def _make_email(nick_name):
+	if not nick_name.endswith('@gentoo.org'):
+		nick_name = nick_name + '@gentoo.org'
+	return nick_name
+
+
+class HerdBase(object):
+	def __init__(self, herd_to_emails, all_emails):
+		self.herd_to_emails = herd_to_emails
+		self.all_emails = all_emails
+
+	def known_herd(self, herd_name):
+		if herd_name in _SPECIAL_HERDS:
+			return True
+		return herd_name in self.herd_to_emails
+
+	def known_maintainer(self, nick_name):
+		return _make_email(nick_name) in self.all_emails
+
+	def maintainer_in_herd(self, nick_name, herd_name):
+		return _make_email(nick_name) in self.herd_to_emails[herd_name]
+
+class _HerdsTreeBuilder(xml.etree.ElementTree.TreeBuilder):
+	"""
+	Implements doctype() as required to avoid deprecation warnings with
+	>=python-2.7.
+	"""
+	def doctype(self, name, pubid, system):
+		pass
+
+def make_herd_base(filename):
+	herd_to_emails = dict()
+	all_emails = set()
+
+	try:
+		xml_tree = xml.etree.ElementTree.parse(filename,
+			parser=xml.etree.ElementTree.XMLParser(
+				target=_HerdsTreeBuilder()))
+	except ExpatError as e:
+		raise ParseError("metadata.xml: " + str(e))
+	except EnvironmentError as e:
+		func_call = "open('%s')" % filename
+		if e.errno == errno.EACCES:
+			raise PermissionDenied(func_call)
+		elif e.errno == errno.ENOENT:
+			raise FileNotFound(filename)
+		raise
+
+	herds = xml_tree.findall('herd')
+	for h in herds:
+		_herd_name = h.find('name')
+		if _herd_name is None:
+			continue
+		herd_name = _herd_name.text.strip()
+		del _herd_name
+
+		maintainers = h.findall('maintainer')
+		herd_emails = set()
+		for m in maintainers:
+			_m_email = m.find('email')
+			if _m_email is None:
+				continue
+			m_email = _m_email.text.strip()
+
+			herd_emails.add(m_email)
+			all_emails.add(m_email)
+
+		herd_to_emails[herd_name] = herd_emails
+
+	return HerdBase(herd_to_emails, all_emails)
+
+
+if __name__ == '__main__':
+	h = make_herd_base('/usr/portage/metadata/herds.xml')
+
+	assert(h.known_herd('sound'))
+	assert(not h.known_herd('media-sound'))
+
+	assert(h.known_maintainer('sping'))
+	assert(h.known_maintainer('sping@gentoo.org'))
+	assert(not h.known_maintainer('portage'))
+
+	assert(h.maintainer_in_herd('zmedico@gentoo.org', 'tools-portage'))
+	assert(not h.maintainer_in_herd('pva@gentoo.org', 'tools-portage'))
+
+	import pprint
+	pprint.pprint(h.herd_to_emails)

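A short usage sketch, assuming the module above is importable as repoman.herdbase; the herd name and addresses below are made up:

from repoman.herdbase import HerdBase

h = HerdBase({'tools-portage': set(['alice@gentoo.org'])},
	set(['alice@gentoo.org']))
assert h.known_herd('tools-portage')
assert h.known_herd('no-herd')            # special-cased in _SPECIAL_HERDS
assert h.known_maintainer('alice')        # nick expanded to alice@gentoo.org
assert not h.maintainer_in_herd('bob', 'tools-portage')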
diff --git a/portage_with_autodep/pym/repoman/utilities.py b/portage_with_autodep/pym/repoman/utilities.py
new file mode 100644
index 0000000..232739e
--- /dev/null
+++ b/portage_with_autodep/pym/repoman/utilities.py
@@ -0,0 +1,511 @@
+# repoman: Utilities
+# Copyright 2007-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""This module contains utility functions to help repoman find ebuilds to
+scan"""
+
+__all__ = [
+	"detect_vcs_conflicts",
+	"editor_is_executable",
+	"FindPackagesToScan",
+	"FindPortdir",
+	"FindVCS",
+	"format_qa_output",
+	"get_commit_message_with_editor",
+	"get_commit_message_with_stdin",
+	"have_profile_dir",
+	"parse_metadata_use",
+	"UnknownHerdsError",
+	"check_metadata"
+]
+
+import errno
+import io
+import logging
+import sys
+from portage import os
+from portage import subprocess_getstatusoutput
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage import output
+from portage.localization import _
+from portage.output import red, green
+from portage.process import find_binary
+from portage import exception
+from portage import util
+normalize_path = util.normalize_path
+util.initialize_logger()
+
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
+def detect_vcs_conflicts(options, vcs):
+	"""Determine if the checkout has problems like cvs conflicts.
+	
+	If you want more vcs support here just keep adding if blocks...
+	This could be better.
+	
+	TODO(antarus): Also this should probably not call sys.exit() as
+	repoman is run on >1 packages and one failure should not cause
+	subsequent packages to fail.
+	
+	Args:
+		vcs - A string identifying the version control system in use
+	Returns:
+		None (calls sys.exit on fatal problems)
+	"""
+	retval = ("","")
+	if vcs == 'cvs':
+		logging.info("Performing a " + output.green("cvs -n up") + \
+			" with a little magic grep to check for updates.")
+		retval = subprocess_getstatusoutput("cvs -n up 2>&1 | " + \
+			"egrep '^[^\?] .*' | " + \
+			"egrep -v '^. .*/digest-[^/]+|^cvs server: .* -- ignored$'")
+	if vcs == 'svn':
+		logging.info("Performing a " + output.green("svn status -u") + \
+			" with a little magic grep to check for updates.")
+		retval = subprocess_getstatusoutput("svn status -u 2>&1 | " + \
+			"egrep -v '^.  +.*/digest-[^/]+' | " + \
+			"head -n-1")
+
+	if vcs in ['cvs', 'svn']:
+		mylines = retval[1].splitlines()
+		myupdates = []
+		for line in mylines:
+			if not line:
+				continue
+			if line[0] not in " UPMARD": # unmodified (svn), Updates, Patches, Modified, Added, Removed/Replaced (svn), Deleted (svn)
+				# Stray Manifest is fine, we will readd it anyway.
+				if line[0] == '?' and line[1:].lstrip() == 'Manifest':
+					continue
+				logging.error(red("!!! Please fix the following issues reported " + \
+					"from cvs: ")+green("(U,P,M,A,R,D are ok)"))
+				logging.error(red("!!! Note: This is a pretend/no-modify pass..."))
+				logging.error(retval[1])
+				sys.exit(1)
+			elif vcs == 'cvs' and line[0] in "UP":
+				myupdates.append(line[2:])
+			elif vcs == 'svn' and line[8] == '*':
+				myupdates.append(line[9:].lstrip(" 1234567890"))
+
+		if myupdates:
+			logging.info(green("Fetching trivial updates..."))
+			if options.pretend:
+				logging.info("(" + vcs + " update " + " ".join(myupdates) + ")")
+				retval = os.EX_OK
+			else:
+				retval = os.system(vcs + " update " + " ".join(myupdates))
+			if retval != os.EX_OK:
+				logging.fatal("!!! " + vcs + " exited with an error. Terminating.")
+				sys.exit(retval)
+
+
+def have_profile_dir(path, maxdepth=3, filename="profiles.desc"):
+	""" 
+	Try to figure out if 'path' has a profiles/
+	dir in it by checking for the given filename.
+	"""
+	while path != "/" and maxdepth:
+		if os.path.exists(os.path.join(path, "profiles", filename)):
+			return normalize_path(path)
+		path = normalize_path(path + "/..")
+		maxdepth -= 1
+
+def parse_metadata_use(xml_tree):
+	"""
+	Records are wrapped in XML as per GLEP 56.
+	Returns a dict whose keys are USE flag names and whose values
+	contain their respective descriptions.
+	"""
+	uselist = {}
+
+	usetags = xml_tree.findall("use")
+	if not usetags:
+		return uselist
+
+	# It's possible to have multiple 'use' elements.
+	for usetag in usetags:
+		flags = usetag.findall("flag")
+		if not flags:
+			# DTD allows use elements containing no flag elements.
+			continue
+
+		for flag in flags:
+			pkg_flag = flag.get("name")
+			if pkg_flag is None:
+				raise exception.ParseError("missing 'name' attribute for 'flag' tag")
+			flag_restrict = flag.get("restrict")
+
+			# emulate the Element.itertext() method from python-2.7
+			inner_text = []
+			stack = []
+			stack.append(flag)
+			while stack:
+				obj = stack.pop()
+				if isinstance(obj, basestring):
+					inner_text.append(obj)
+					continue
+				if isinstance(obj.text, basestring):
+					inner_text.append(obj.text)
+				if isinstance(obj.tail, basestring):
+					stack.append(obj.tail)
+				stack.extend(reversed(obj))
+
+			if pkg_flag not in uselist:
+				uselist[pkg_flag] = {}
+
+			# (flag_restrict can be None)
+			uselist[pkg_flag][flag_restrict] = " ".join("".join(inner_text).split())
+
+	return uselist
+
+class UnknownHerdsError(ValueError):
+	def __init__(self, herd_names):
+		_plural = len(herd_names) != 1
+		super(UnknownHerdsError, self).__init__(
+			'Unknown %s %s' % (_plural and 'herds' or 'herd',
+			','.join('"%s"' % e for e in herd_names)))
+
+
+def check_metadata_herds(xml_tree, herd_base):
+	herd_nodes = xml_tree.findall('herd')
+	unknown_herds = [name for name in
+			(e.text.strip() for e in herd_nodes if e.text is not None)
+			if not herd_base.known_herd(name)]
+
+	if unknown_herds:
+		raise UnknownHerdsError(unknown_herds)
+
+def check_metadata(xml_tree, herd_base):
+	if herd_base is not None:
+		check_metadata_herds(xml_tree, herd_base)
+
+def FindPackagesToScan(settings, startdir, reposplit):
+	""" Try to find packages that need to be scanned
+	
+	Args:
+		settings - portage.config instance, preferably repoman_settings
+		startdir - directory that repoman was run in
+		reposplit - root of the repository
+	Returns:
+		A list of directories to scan
+	"""
+	
+	
+	def AddPackagesInDir(path):
+		""" Given a directory, add any packages found in it """
+		ret = []
+		pkgdirs = os.listdir(path)
+		for d in pkgdirs:
+			if d == 'CVS' or d.startswith('.'):
+				continue
+			p = os.path.join(path, d)
+
+			if os.path.isdir(p):
+				cat_pkg_dir = os.path.join(*p.split(os.path.sep)[-2:])
+				logging.debug('adding %s to scanlist' % cat_pkg_dir)
+				ret.append(cat_pkg_dir)
+		return ret
+	
+	scanlist = []
+	repolevel = len(reposplit)
+	if repolevel == 1: # root of the tree, startdir = repodir
+		for cat in settings.categories:
+			path = os.path.join(startdir, cat)
+			if not os.path.isdir(path):
+				continue
+			pkgdirs = os.listdir(path)
+			scanlist.extend(AddPackagesInDir(path))
+	elif repolevel == 2: # category level, startdir = catdir
+		# we only want 1 segment of the directory, which is why we use catdir instead of startdir
+		catdir = reposplit[-2]
+		if catdir not in settings.categories:
+			logging.warn('%s is not a valid category according to profiles/categories, ' \
+				'skipping checks in %s' % (catdir, catdir))
+		else:
+			scanlist = AddPackagesInDir(catdir)
+	elif repolevel == 3: # pkgdir level, startdir = pkgdir
+		catdir = reposplit[-2]
+		pkgdir = reposplit[-1]
+		if catdir not in settings.categories:
+			logging.warn('%s is not a valid category according to profiles/categories, ' \
+			'skipping checks in %s' % (catdir, catdir))
+		else:
+			path = os.path.join(catdir, pkgdir)
+			logging.debug('adding %s to scanlist' % path)
+			scanlist.append(path)
+	return scanlist
+
+
+def format_qa_output(formatter, stats, fails, dofull, dofail, options, qawarnings):
+	"""Helper function that formats the QA output for display
+	
+	Args:
+		formatter - a subclass of Formatter
+		stats - a dict of qa status items
+		fails - a dict of qa status failures
+		dofull - boolean to print full results or a summary
+		dofail - boolean to decide if failure was hard or soft
+		options - repoman options; only options.mode is consulted here
+		qawarnings - set of check categories treated as warnings rather than failures
+	
+	Returns:
+		None (modifies formatter)
+	"""
+	full = options.mode == 'full'
+	# we only want key-value pairs where value > 0
+	for category, number in \
+		filter(lambda myitem: myitem[1] > 0, iter(stats.items())):
+		formatter.add_literal_data(_unicode_decode("  " + category.ljust(30)))
+		if category in qawarnings:
+			formatter.push_style("WARN")
+		else:
+			formatter.push_style("BAD")
+		formatter.add_literal_data(_unicode_decode(str(number)))
+		formatter.pop_style()
+		formatter.add_line_break()
+		if not dofull:
+			if not full and dofail and category in qawarnings:
+				# warnings are considered noise when there are failures
+				continue
+			fails_list = fails[category]
+			if not full and len(fails_list) > 12:
+				fails_list = fails_list[:12]
+			for failure in fails_list:
+				formatter.add_literal_data(_unicode_decode("   " + failure))
+				formatter.add_line_break()
+
+
+def editor_is_executable(editor):
+	"""
+	Given an EDITOR string, validate that it refers to
+	an executable. The value is split with shlex_split(), and a
+	PATH lookup is done on the first component if necessary.
+
+	@param editor: An EDITOR value from the environment.
+	@type: string
+	@rtype: bool
+	@returns: True if an executable is found, False otherwise.
+	"""
+	editor_split = util.shlex_split(editor)
+	if not editor_split:
+		return False
+	filename = editor_split[0]
+	if not os.path.isabs(filename):
+		return find_binary(filename) is not None
+	return os.access(filename, os.X_OK) and os.path.isfile(filename)
+
+
+def get_commit_message_with_editor(editor, message=None):
+	"""
+	Execute editor with a temporary file as its argument
+	and return the file content afterwards.
+
+	@param editor: An EDITOR value from the environment
+	@type: string
+	@param message: An iterable of lines to show in the editor.
+	@type: iterable
+	@rtype: string or None
+	@returns: A string on success or None if an error occurs.
+	"""
+	from tempfile import mkstemp
+	fd, filename = mkstemp()
+	try:
+		os.write(fd, _unicode_encode(_(
+			"\n# Please enter the commit message " + \
+			"for your changes.\n# (Comment lines starting " + \
+			"with '#' will not be included)\n"),
+			encoding=_encodings['content'], errors='backslashreplace'))
+		if message:
+			os.write(fd, b"#\n")
+			for line in message:
+				os.write(fd, _unicode_encode("#" + line,
+					encoding=_encodings['content'], errors='backslashreplace'))
+		os.close(fd)
+		retval = os.system(editor + " '%s'" % filename)
+		if not (os.WIFEXITED(retval) and os.WEXITSTATUS(retval) == os.EX_OK):
+			return None
+		try:
+			mylines = io.open(_unicode_encode(filename,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['content'], errors='replace'
+				).readlines()
+		except OSError as e:
+			if e.errno != errno.ENOENT:
+				raise
+			del e
+			return None
+		return "".join(line for line in mylines if not line.startswith("#"))
+	finally:
+		try:
+			os.unlink(filename)
+		except OSError:
+			pass
+
+
+def get_commit_message_with_stdin():
+	"""
+	Read a commit message from the user and return it.
+
+	@rtype: string or None
+	@returns: A string on success or None if an error occurs.
+	"""
+	print("Please enter a commit message. Use Ctrl-d to finish or Ctrl-c to abort.")
+	commitmessage = []
+	while True:
+		commitmessage.append(sys.stdin.readline())
+		if not commitmessage[-1]:
+			break
+	commitmessage = "".join(commitmessage)
+	return commitmessage
+
+
+def FindPortdir(settings):
+	""" Try to figure out what repo we are in and whether we are in a regular
+	tree or an overlay.
+	
+	Basic logic is:
+	
+	1. Determine what directory we are in (supports symlinks).
+	2. Build a list of directories from / to our current location
+	3. Iterate over PORTDIR_OVERLAY, if we find a match, search for a profiles directory
+		 in the overlay.  If it has one, make it portdir, otherwise make it portdir_overlay.
+	4. If we didn't find an overlay in PORTDIR_OVERLAY, see if we are in PORTDIR; if so, set
+		 portdir_overlay to PORTDIR.  If we aren't in PORTDIR, see if PWD has a profiles dir, if
+		 so, set portdir_overlay and portdir to PWD, else make them False.
+	5. If we haven't found portdir_overlay yet, it means the user is doing something odd; report
+		 an error.
+	6. If we haven't found a portdir yet, set portdir to PORTDIR.
+	
+	Args:
+		settings - portage.config instance, preferably repoman_settings
+	Returns:
+		list(portdir, portdir_overlay, location)
+	"""
+
+	portdir = None
+	portdir_overlay = None
+	location = os.getcwd()
+	pwd = os.environ.get('PWD', '')
+	if pwd and pwd != location and os.path.realpath(pwd) == location:
+		# getcwd() returns the canonical path but that makes it hard for repoman to
+		# orient itself if the user has symlinks in their portage tree structure.
+		# We use os.environ["PWD"], if available, to get the non-canonical path of
+		# the current working directory (from the shell).
+		location = pwd
+
+	location = normalize_path(location)
+
+	path_ids = {}
+	p = location
+	s = None
+	while True:
+		s = os.stat(p)
+		path_ids[(s.st_dev, s.st_ino)] = p
+		if p == "/":
+			break
+		p = os.path.dirname(p)
+	if location[-1] != "/":
+		location += "/"
+
+	for overlay in settings["PORTDIR_OVERLAY"].split():
+		overlay = os.path.realpath(overlay)
+		try:
+			s = os.stat(overlay)
+		except OSError:
+			continue
+		overlay = path_ids.get((s.st_dev, s.st_ino))
+		if overlay is None:
+			continue
+		if overlay[-1] != "/":
+			overlay += "/"
+		portdir_overlay = overlay
+		subdir = location[len(overlay):]
+		if subdir and subdir[-1] != "/":
+			subdir += "/"
+		if have_profile_dir(location, subdir.count("/")):
+			portdir = portdir_overlay
+		break
+
+	# Couldn't match location with anything from PORTDIR_OVERLAY,
+	# so fall back to have_profile_dir() checks alone. Assume that
+	# an overlay will contain at least a "repo_name" file while a
+	# master repo (portdir) will contain at least a "profiles.desc"
+	# file.
+	if not portdir_overlay:
+		portdir_overlay = have_profile_dir(location, filename="repo_name")
+		if portdir_overlay:
+			subdir = location[len(portdir_overlay):]
+			if subdir and subdir[-1] != os.sep:
+				subdir += os.sep
+			if have_profile_dir(location, subdir.count(os.sep)):
+				portdir = portdir_overlay
+
+	if not portdir_overlay:
+		if (settings["PORTDIR"] + os.path.sep).startswith(location):
+			portdir_overlay = settings["PORTDIR"]
+		else:
+			portdir_overlay = have_profile_dir(location)
+		portdir = portdir_overlay
+	
+	if not portdir_overlay:
+		msg = 'Repoman is unable to determine PORTDIR or PORTDIR_OVERLAY' + \
+			' from the current working directory'
+		logging.critical(msg)
+		return (None, None, None)
+
+	if not portdir:
+		portdir = settings["PORTDIR"]
+
+	if not portdir_overlay.endswith('/'):
+		portdir_overlay += '/'
+	
+	if not portdir.endswith('/'):
+		portdir += '/'
+
+	return [normalize_path(x) for x in (portdir, portdir_overlay, location)]
+
+def FindVCS():
+	""" Try to figure out which VCS' working tree we are in. """
+
+	outvcs = []
+
+	def seek(depth = None):
+		""" Seek for distributed VCSes. """
+		retvcs = []
+		pathprep = ''
+
+		while depth is None or depth > 0:
+			if os.path.isdir(os.path.join(pathprep, '.git')):
+				retvcs.append('git')
+			if os.path.isdir(os.path.join(pathprep, '.bzr')):
+				retvcs.append('bzr')
+			if os.path.isdir(os.path.join(pathprep, '.hg')):
+				retvcs.append('hg')
+
+			if retvcs:
+				break
+			pathprep = os.path.join(pathprep, '..')
+			if os.path.realpath(pathprep).strip('/') == '':
+				break
+			if depth is not None:
+				depth = depth - 1
+
+		return retvcs
+
+	# Level zero VCS-es.
+	if os.path.isdir('CVS'):
+		outvcs.append('cvs')
+	if os.path.isdir('.svn'):
+		outvcs.append('svn')
+
+	# If we already found one of 'level zeros', just take a quick look
+	# at the current directory. Otherwise, seek parents till we get
+	# something or reach root.
+	if outvcs:
+		outvcs.extend(seek(1))
+	else:
+		outvcs = seek()
+
+	return outvcs

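Two of the helpers above in action, as a sketch; this assumes the module is importable as repoman.utilities (it pulls in portage at import time), and the metadata.xml snippet below is made up:

import xml.etree.ElementTree as ET
from repoman.utilities import FindVCS, parse_metadata_use

# Detect the working tree's VCS: e.g. ['git'] inside a git checkout, [] if none.
print(FindVCS())

# GLEP 56 USE-flag descriptions, keyed by flag name and then by the
# (possibly absent) 'restrict' attribute.
snippet = '<pkgmetadata><use><flag name="doc">Build docs</flag></use></pkgmetadata>'
print(parse_metadata_use(ET.fromstring(snippet)))
# -> {'doc': {None: 'Build docs'}}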



* [gentoo-commits] proj/autodep:master commit in: portage_with_autodep/pym/portage/tests/news/, ...
@ 2014-02-17 11:55 Александр Берсенев
  0 siblings, 0 replies; 2+ messages in thread
From: Александр Берсенев @ 2014-02-17 11:55 UTC (permalink / raw
  To: gentoo-commits

commit:     5a3f506c9ef1cfd78940b0509f10ef94b4434e29
Author:     Alexander Bersenev <bay <AT> hackerdom <DOT> ru>
AuthorDate: Mon Feb 17 11:55:51 2014 +0000
Commit:     Александр Берсенев <bay <AT> hackerdom <DOT> ru>
CommitDate: Mon Feb 17 11:55:51 2014 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/autodep.git;a=commit;h=5a3f506c

updated portage to 2.2.8-r1

---
 portage_with_autodep/bin/archive-conf              |    2 +-
 portage_with_autodep/bin/bashrc-functions.sh       |  140 ++
 portage_with_autodep/bin/chpathtool.py             |  182 ++
 portage_with_autodep/bin/dispatch-conf             |   94 +-
 portage_with_autodep/bin/dohtml.py                 |   13 +-
 portage_with_autodep/bin/eapi.sh                   |  145 ++
 portage_with_autodep/bin/ebuild                    |   64 +-
 .../bin/ebuild-helpers/{ => bsd}/sed               |   14 +-
 portage_with_autodep/bin/ebuild-helpers/dobin      |   11 +-
 portage_with_autodep/bin/ebuild-helpers/dodir      |    7 +-
 portage_with_autodep/bin/ebuild-helpers/dodoc      |    5 +-
 portage_with_autodep/bin/ebuild-helpers/doexe      |   11 +-
 portage_with_autodep/bin/ebuild-helpers/dohard     |    9 +-
 portage_with_autodep/bin/ebuild-helpers/doheader   |   19 +
 portage_with_autodep/bin/ebuild-helpers/doinfo     |   11 +-
 portage_with_autodep/bin/ebuild-helpers/doins      |   17 +-
 portage_with_autodep/bin/ebuild-helpers/dolib      |    7 +-
 portage_with_autodep/bin/ebuild-helpers/doman      |    9 +-
 portage_with_autodep/bin/ebuild-helpers/domo       |   12 +-
 portage_with_autodep/bin/ebuild-helpers/dosbin     |   11 +-
 portage_with_autodep/bin/ebuild-helpers/dosed      |    9 +-
 portage_with_autodep/bin/ebuild-helpers/dosym      |   18 +-
 .../bin/ebuild-helpers/ecompressdir                |   73 +-
 portage_with_autodep/bin/ebuild-helpers/fowners    |   13 +-
 portage_with_autodep/bin/ebuild-helpers/fperms     |    7 +-
 portage_with_autodep/bin/ebuild-helpers/keepdir    |   20 +
 portage_with_autodep/bin/ebuild-helpers/newbin     |    5 +-
 portage_with_autodep/bin/ebuild-helpers/newconfd   |    5 +-
 portage_with_autodep/bin/ebuild-helpers/newdoc     |    5 +-
 portage_with_autodep/bin/ebuild-helpers/newenvd    |    5 +-
 portage_with_autodep/bin/ebuild-helpers/newexe     |    5 +-
 portage_with_autodep/bin/ebuild-helpers/newheader  |    1 +
 portage_with_autodep/bin/ebuild-helpers/newinitd   |    5 +-
 portage_with_autodep/bin/ebuild-helpers/newins     |    3 +
 portage_with_autodep/bin/ebuild-helpers/newlib.a   |    5 +-
 portage_with_autodep/bin/ebuild-helpers/newlib.so  |    5 +-
 portage_with_autodep/bin/ebuild-helpers/newman     |    5 +-
 portage_with_autodep/bin/ebuild-helpers/newsbin    |    5 +-
 portage_with_autodep/bin/ebuild-helpers/prepall    |   11 +-
 .../bin/ebuild-helpers/prepalldocs                 |    8 +-
 .../bin/ebuild-helpers/prepallinfo                 |    7 +-
 portage_with_autodep/bin/ebuild-helpers/prepallman |    7 +-
 .../bin/ebuild-helpers/prepallstrip                |    7 +-
 portage_with_autodep/bin/ebuild-helpers/prepinfo   |    9 +-
 portage_with_autodep/bin/ebuild-helpers/preplib    |    9 +-
 portage_with_autodep/bin/ebuild-helpers/prepman    |   11 +-
 portage_with_autodep/bin/ebuild-helpers/prepstrip  |  290 ++-
 .../bin/ebuild-helpers/unprivileged/chgrp          |    1 +
 .../bin/ebuild-helpers/unprivileged/chown          |   41 +
 .../bin/ebuild-helpers/xattr/install               |   12 +
 portage_with_autodep/bin/ebuild-ipc.py             |   38 +-
 portage_with_autodep/bin/ebuild.sh                 | 2073 ++------------------
 portage_with_autodep/bin/egencache                 |  181 +-
 portage_with_autodep/bin/emaint                    |   22 +-
 portage_with_autodep/bin/emerge-webrsync           |   17 +-
 portage_with_autodep/bin/emirrordist               |   13 +
 portage_with_autodep/bin/etc-update                |  769 ++++----
 portage_with_autodep/bin/fixpackages               |    9 +-
 portage_with_autodep/bin/glsa-check                |    5 +-
 portage_with_autodep/bin/helper-functions.sh       |   62 +
 portage_with_autodep/bin/install.py                |  253 +++
 portage_with_autodep/bin/isolated-functions.sh     |  175 +-
 portage_with_autodep/bin/lock-helper.py            |    5 +-
 portage_with_autodep/bin/misc-functions.sh         |  503 +++--
 portage_with_autodep/bin/phase-functions.sh        | 1001 ++++++++++
 portage_with_autodep/bin/phase-helpers.sh          |  663 +++++++
 portage_with_autodep/bin/portageq                  |  257 ++-
 portage_with_autodep/bin/quickpkg                  |   73 +-
 portage_with_autodep/bin/regenworld                |    5 +-
 portage_with_autodep/bin/repoman                   |  838 ++++----
 portage_with_autodep/bin/save-ebuild-env.sh        |  100 +
 portage_with_autodep/bin/xattr-helper.py           |  190 ++
 portage_with_autodep/bin/xpak-helper.py            |    4 +-
 .../integration_with_portage.patch                 |  920 +++++++++
 .../pym/_emerge/AbstractDepPriority.py             |    4 +-
 .../pym/_emerge/AbstractDepPriority.pyo            |  Bin 0 -> 1757 bytes
 .../pym/_emerge/AbstractEbuildProcess.py           |   52 +-
 .../pym/_emerge/AbstractEbuildProcess.pyo          |  Bin 0 -> 10082 bytes
 .../pym/_emerge/AbstractPollTask.py                |  112 +-
 .../pym/_emerge/AbstractPollTask.pyo               |  Bin 0 -> 4918 bytes
 .../pym/_emerge/AsynchronousLock.py                |   90 +-
 .../pym/_emerge/AsynchronousLock.pyo               |  Bin 0 -> 10536 bytes
 .../pym/_emerge/AsynchronousTask.py                |   45 +-
 .../pym/_emerge/AsynchronousTask.pyo               |  Bin 0 -> 5610 bytes
 portage_with_autodep/pym/_emerge/AtomArg.pyo       |  Bin 0 -> 771 bytes
 portage_with_autodep/pym/_emerge/Binpkg.py         |   85 +-
 portage_with_autodep/pym/_emerge/Binpkg.pyo        |  Bin 0 -> 13229 bytes
 .../pym/_emerge/BinpkgEnvExtractor.py              |    6 +-
 .../pym/_emerge/BinpkgEnvExtractor.pyo             |  Bin 0 -> 3084 bytes
 .../pym/_emerge/BinpkgExtractorAsync.py            |    3 +-
 .../pym/_emerge/BinpkgExtractorAsync.pyo           |  Bin 0 -> 1337 bytes
 portage_with_autodep/pym/_emerge/BinpkgFetcher.py  |    3 -
 portage_with_autodep/pym/_emerge/BinpkgFetcher.pyo |  Bin 0 -> 5698 bytes
 .../pym/_emerge/BinpkgPrefetcher.pyo               |  Bin 0 -> 1932 bytes
 .../pym/_emerge/BinpkgVerifier.pyo                 |  Bin 0 -> 2515 bytes
 portage_with_autodep/pym/_emerge/Blocker.pyo       |  Bin 0 -> 853 bytes
 portage_with_autodep/pym/_emerge/BlockerCache.py   |   21 +-
 portage_with_autodep/pym/_emerge/BlockerCache.pyo  |  Bin 0 -> 6840 bytes
 portage_with_autodep/pym/_emerge/BlockerDB.py      |    5 +-
 portage_with_autodep/pym/_emerge/BlockerDB.pyo     |  Bin 0 -> 4286 bytes
 .../pym/_emerge/BlockerDepPriority.pyo             |  Bin 0 -> 797 bytes
 portage_with_autodep/pym/_emerge/CompositeTask.py  |    7 +-
 portage_with_autodep/pym/_emerge/CompositeTask.pyo |  Bin 0 -> 5111 bytes
 portage_with_autodep/pym/_emerge/DepPriority.pyo   |  Bin 0 -> 1653 bytes
 .../pym/_emerge/DepPriorityNormalRange.pyo         |  Bin 0 -> 1866 bytes
 .../pym/_emerge/DepPrioritySatisfiedRange.pyo      |  Bin 0 -> 2980 bytes
 portage_with_autodep/pym/_emerge/Dependency.py     |    5 +-
 portage_with_autodep/pym/_emerge/Dependency.pyo    |  Bin 0 -> 1092 bytes
 portage_with_autodep/pym/_emerge/DependencyArg.pyo |  Bin 0 -> 1612 bytes
 portage_with_autodep/pym/_emerge/EbuildBinpkg.py   |    6 +-
 portage_with_autodep/pym/_emerge/EbuildBinpkg.pyo  |  Bin 0 -> 2007 bytes
 portage_with_autodep/pym/_emerge/EbuildBuild.py    |   29 +-
 portage_with_autodep/pym/_emerge/EbuildBuild.pyo   |  Bin 0 -> 11947 bytes
 portage_with_autodep/pym/_emerge/EbuildBuildDir.py |    5 +-
 .../pym/_emerge/EbuildBuildDir.pyo                 |  Bin 0 -> 3933 bytes
 portage_with_autodep/pym/_emerge/EbuildExecuter.py |    4 +-
 .../pym/_emerge/EbuildExecuter.pyo                 |  Bin 0 -> 3424 bytes
 portage_with_autodep/pym/_emerge/EbuildFetcher.py  |   28 +-
 portage_with_autodep/pym/_emerge/EbuildFetcher.pyo |  Bin 0 -> 9374 bytes
 .../pym/_emerge/EbuildFetchonly.py                 |    6 +-
 .../pym/_emerge/EbuildFetchonly.pyo                |  Bin 0 -> 1552 bytes
 .../pym/_emerge/EbuildIpcDaemon.py                 |   31 +-
 .../pym/_emerge/EbuildIpcDaemon.pyo                |  Bin 0 -> 3227 bytes
 portage_with_autodep/pym/_emerge/EbuildMerge.py    |    4 +-
 portage_with_autodep/pym/_emerge/EbuildMerge.pyo   |  Bin 0 -> 2558 bytes
 .../pym/_emerge/EbuildMetadataPhase.py             |  154 +-
 .../pym/_emerge/EbuildMetadataPhase.pyo            |  Bin 0 -> 5787 bytes
 portage_with_autodep/pym/_emerge/EbuildPhase.py    |   15 +-
 .../pym/_emerge/EbuildPhase.py.rej                 |   12 +
 portage_with_autodep/pym/_emerge/EbuildPhase.pyo   |  Bin 0 -> 11191 bytes
 portage_with_autodep/pym/_emerge/EbuildProcess.pyo |  Bin 0 -> 967 bytes
 .../pym/_emerge/EbuildSpawnProcess.pyo             |  Bin 0 -> 897 bytes
 portage_with_autodep/pym/_emerge/EventsAnalyser.py |  180 +-
 portage_with_autodep/pym/_emerge/EventsLogger.py   |  102 +-
 portage_with_autodep/pym/_emerge/FakeVartree.py    |   26 +-
 portage_with_autodep/pym/_emerge/FakeVartree.pyo   |  Bin 0 -> 9274 bytes
 portage_with_autodep/pym/_emerge/FifoIpcDaemon.py  |   25 +-
 portage_with_autodep/pym/_emerge/FifoIpcDaemon.pyo |  Bin 0 -> 2902 bytes
 .../pym/_emerge/JobStatusDisplay.py                |   21 +-
 .../pym/_emerge/JobStatusDisplay.pyo               |  Bin 0 -> 9115 bytes
 portage_with_autodep/pym/_emerge/MergeListItem.py  |    2 +-
 portage_with_autodep/pym/_emerge/MergeListItem.pyo |  Bin 0 -> 3960 bytes
 portage_with_autodep/pym/_emerge/MetadataRegen.py  |   79 +-
 portage_with_autodep/pym/_emerge/MetadataRegen.pyo |  Bin 0 -> 5760 bytes
 .../pym/_emerge/MiscFunctionsProcess.py            |   10 +-
 .../pym/_emerge/MiscFunctionsProcess.pyo           |  Bin 0 -> 1701 bytes
 portage_with_autodep/pym/_emerge/Package.py        |   50 +-
 portage_with_autodep/pym/_emerge/Package.pyo       |  Bin 0 -> 21535 bytes
 portage_with_autodep/pym/_emerge/PackageArg.pyo    |  Bin 0 -> 1110 bytes
 portage_with_autodep/pym/_emerge/PackageMerge.py   |    2 +-
 portage_with_autodep/pym/_emerge/PackageMerge.pyo  |  Bin 0 -> 1509 bytes
 .../pym/_emerge/PackageUninstall.pyo               |  Bin 0 -> 4110 bytes
 .../pym/_emerge/PackageVirtualDbapi.py             |   22 +-
 .../pym/_emerge/PackageVirtualDbapi.pyo            |  Bin 0 -> 6099 bytes
 portage_with_autodep/pym/_emerge/PipeReader.py     |   60 +-
 portage_with_autodep/pym/_emerge/PipeReader.pyo    |  Bin 0 -> 3672 bytes
 portage_with_autodep/pym/_emerge/PollScheduler.py  |  346 +---
 portage_with_autodep/pym/_emerge/PollScheduler.pyo |  Bin 0 -> 8151 bytes
 .../pym/_emerge/ProgressHandler.pyo                |  Bin 0 -> 1115 bytes
 portage_with_autodep/pym/_emerge/QueueScheduler.py |   79 +-
 .../pym/_emerge/QueueScheduler.pyo                 |  Bin 0 -> 3658 bytes
 portage_with_autodep/pym/_emerge/RootConfig.py     |    2 +-
 portage_with_autodep/pym/_emerge/RootConfig.pyo    |  Bin 0 -> 1404 bytes
 portage_with_autodep/pym/_emerge/Scheduler.py      |  399 ++--
 portage_with_autodep/pym/_emerge/Scheduler.pyo     |  Bin 0 -> 55155 bytes
 .../pym/_emerge/SequentialTaskQueue.py             |   72 +-
 .../pym/_emerge/SequentialTaskQueue.pyo            |  Bin 0 -> 3343 bytes
 portage_with_autodep/pym/_emerge/SetArg.pyo        |  Bin 0 -> 719 bytes
 portage_with_autodep/pym/_emerge/SpawnProcess.py   |  107 +-
 portage_with_autodep/pym/_emerge/SpawnProcess.pyo  |  Bin 0 -> 6006 bytes
 portage_with_autodep/pym/_emerge/SubProcess.py     |   65 +-
 portage_with_autodep/pym/_emerge/SubProcess.pyo    |  Bin 0 -> 4178 bytes
 portage_with_autodep/pym/_emerge/Task.py           |    5 +-
 portage_with_autodep/pym/_emerge/Task.pyo          |  Bin 0 -> 2148 bytes
 portage_with_autodep/pym/_emerge/TaskScheduler.py  |    7 +-
 portage_with_autodep/pym/_emerge/TaskScheduler.pyo |  Bin 0 -> 1309 bytes
 portage_with_autodep/pym/_emerge/TaskSequence.pyo  |  Bin 0 -> 2147 bytes
 .../pym/_emerge/UninstallFailure.pyo               |  Bin 0 -> 785 bytes
 .../pym/_emerge/UnmergeDepPriority.pyo             |  Bin 0 -> 1345 bytes
 .../pym/_emerge/UseFlagDisplay.pyo                 |  Bin 0 -> 4148 bytes
 portage_with_autodep/pym/_emerge/__init__.pyo      |  Bin 0 -> 129 bytes
 .../pym/_emerge/_find_deep_system_runtime_deps.pyo |  Bin 0 -> 1299 bytes
 .../pym/_emerge/_flush_elog_mod_echo.py            |    2 +-
 .../pym/_emerge/_flush_elog_mod_echo.pyo           |  Bin 0 -> 606 bytes
 portage_with_autodep/pym/_emerge/actions.py        |  500 +++--
 portage_with_autodep/pym/_emerge/actions.pyo       |  Bin 0 -> 80730 bytes
 .../pym/_emerge/chk_updated_cfg_files.py           |   42 +
 portage_with_autodep/pym/_emerge/clear_caches.pyo  |  Bin 0 -> 719 bytes
 portage_with_autodep/pym/_emerge/countdown.pyo     |  Bin 0 -> 917 bytes
 .../pym/_emerge/create_depgraph_params.py          |   20 +
 .../pym/_emerge/create_depgraph_params.pyo         |  Bin 0 -> 1954 bytes
 .../pym/_emerge/create_world_atom.py               |   35 +-
 .../pym/_emerge/create_world_atom.pyo              |  Bin 0 -> 2648 bytes
 portage_with_autodep/pym/_emerge/depgraph.py       | 1140 ++++++-----
 portage_with_autodep/pym/_emerge/depgraph.pyo      |  Bin 0 -> 162245 bytes
 portage_with_autodep/pym/_emerge/emergelog.py      |    9 +-
 portage_with_autodep/pym/_emerge/emergelog.pyo     |  Bin 0 -> 1927 bytes
 portage_with_autodep/pym/_emerge/getloadavg.pyo    |  Bin 0 -> 931 bytes
 portage_with_autodep/pym/_emerge/help.py           |  798 +-------
 portage_with_autodep/pym/_emerge/help.pyo          |  Bin 0 -> 2546 bytes
 .../pym/_emerge/is_valid_package_atom.pyo          |  Bin 0 -> 910 bytes
 portage_with_autodep/pym/_emerge/main.py           |  295 ++-
 portage_with_autodep/pym/_emerge/main.pyo          |  Bin 0 -> 52644 bytes
 portage_with_autodep/pym/_emerge/post_emerge.py    |  165 ++
 .../pym/_emerge/resolver/__init__.pyo              |  Bin 0 -> 138 bytes
 .../pym/_emerge/resolver/backtracking.py           |   27 +-
 .../pym/_emerge/resolver/backtracking.pyo          |  Bin 0 -> 7838 bytes
 .../pym/_emerge/resolver/circular_dependency.py    |    5 +-
 .../pym/_emerge/resolver/circular_dependency.pyo   |  Bin 0 -> 7555 bytes
 .../pym/_emerge/resolver/output.py                 |  235 ++-
 .../pym/_emerge/resolver/output.pyo                |  Bin 0 -> 28079 bytes
 .../pym/_emerge/resolver/output_helpers.py         |  142 +-
 .../pym/_emerge/resolver/output_helpers.pyo        |  Bin 0 -> 18016 bytes
 .../pym/_emerge/resolver/slot_collision.py         |   22 +-
 .../pym/_emerge/resolver/slot_collision.pyo        |  Bin 0 -> 23644 bytes
 portage_with_autodep/pym/_emerge/search.py         |   29 +-
 portage_with_autodep/pym/_emerge/search.pyo        |  Bin 0 -> 11825 bytes
 .../pym/_emerge/show_invalid_depstring_notice.pyo  |  Bin 0 -> 2001 bytes
 .../pym/_emerge/stdout_spinner.pyo                 |  Bin 0 -> 3440 bytes
 portage_with_autodep/pym/_emerge/sync/__init__.pyo |  Bin 0 -> 134 bytes
 .../pym/_emerge/sync/getaddrinfo_validate.pyo      |  Bin 0 -> 847 bytes
 .../pym/_emerge/sync/old_tree_timestamp.pyo        |  Bin 0 -> 2746 bytes
 portage_with_autodep/pym/_emerge/unmerge.py        |   55 +-
 portage_with_autodep/pym/_emerge/unmerge.pyo       |  Bin 0 -> 14236 bytes
 portage_with_autodep/pym/_emerge/userquery.py      |    6 +-
 portage_with_autodep/pym/_emerge/userquery.pyo     |  Bin 0 -> 2254 bytes
 portage_with_autodep/pym/portage/__init__.py       |  132 +-
 portage_with_autodep/pym/portage/__init__.pyo      |  Bin 0 -> 22647 bytes
 .../pym/portage/_emirrordist/Config.py             |  132 ++
 .../pym/portage/_emirrordist/DeletionIterator.py   |   83 +
 .../pym/portage/_emirrordist/DeletionTask.py       |  129 ++
 .../pym/portage/_emirrordist/FetchIterator.py      |  147 ++
 .../pym/portage/_emirrordist/FetchTask.py          |  629 ++++++
 .../pym/portage/_emirrordist/MirrorDistTask.py     |  218 ++
 .../{tests/resolver => _emirrordist}/__init__.py   |    2 +-
 .../pym/portage/_emirrordist/main.py               |  455 +++++
 .../pym/portage/_global_updates.py                 |   13 +-
 .../pym/portage/_global_updates.pyo                |  Bin 0 -> 7542 bytes
 .../pym/portage/_legacy_globals.py                 |   13 +-
 .../pym/portage/_legacy_globals.pyo                |  Bin 0 -> 1922 bytes
 portage_with_autodep/pym/portage/_selinux.pyo      |  Bin 0 -> 4706 bytes
 portage_with_autodep/pym/portage/_sets/__init__.py |   45 +-
 .../pym/portage/_sets/__init__.pyo                 |  Bin 0 -> 9216 bytes
 portage_with_autodep/pym/portage/_sets/base.pyo    |  Bin 0 -> 10383 bytes
 portage_with_autodep/pym/portage/_sets/dbapi.py    |   14 +-
 portage_with_autodep/pym/portage/_sets/dbapi.pyo   |  Bin 0 -> 15064 bytes
 portage_with_autodep/pym/portage/_sets/files.pyo   |  Bin 0 -> 13042 bytes
 portage_with_autodep/pym/portage/_sets/libs.pyo    |  Bin 0 -> 4053 bytes
 .../pym/portage/_sets/profiles.pyo                 |  Bin 0 -> 2296 bytes
 portage_with_autodep/pym/portage/_sets/security.py |    8 +-
 .../pym/portage/_sets/security.pyo                 |  Bin 0 -> 4426 bytes
 portage_with_autodep/pym/portage/_sets/shell.pyo   |  Bin 0 -> 2072 bytes
 .../pym/portage/cache/__init__.pyo                 |  Bin 0 -> 135 bytes
 portage_with_autodep/pym/portage/cache/anydbm.pyo  |  Bin 0 -> 3755 bytes
 .../pym/portage/cache/cache_errors.pyo             |  Bin 0 -> 4629 bytes
 .../pym/portage/cache/ebuild_xattr.py              |    3 +-
 .../pym/portage/cache/ebuild_xattr.pyo             |  Bin 0 -> 6260 bytes
 .../pym/portage/cache/flat_hash.py                 |    9 +-
 .../pym/portage/cache/flat_hash.pyo                |  Bin 0 -> 5468 bytes
 .../pym/portage/cache/flat_list.pyo                |  Bin 0 -> 4939 bytes
 .../pym/portage/cache/fs_template.pyo              |  Bin 0 -> 3580 bytes
 portage_with_autodep/pym/portage/cache/mappings.py |    2 +-
 .../pym/portage/cache/mappings.pyo                 |  Bin 0 -> 18081 bytes
 portage_with_autodep/pym/portage/cache/metadata.py |    8 +-
 .../pym/portage/cache/metadata.pyo                 |  Bin 0 -> 5048 bytes
 .../pym/portage/cache/metadata_overlay.py          |  105 -
 .../pym/portage/cache/sql_template.pyo             |  Bin 0 -> 10542 bytes
 portage_with_autodep/pym/portage/cache/sqlite.pyo  |  Bin 0 -> 9109 bytes
 portage_with_autodep/pym/portage/cache/template.py |  108 +-
 .../pym/portage/cache/template.pyo                 |  Bin 0 -> 11332 bytes
 portage_with_autodep/pym/portage/cache/util.py     |  170 --
 portage_with_autodep/pym/portage/cache/volatile.py |   11 +-
 .../pym/portage/cache/volatile.pyo                 |  Bin 0 -> 1633 bytes
 portage_with_autodep/pym/portage/checksum.py       |  136 +-
 portage_with_autodep/pym/portage/checksum.pyo      |  Bin 0 -> 10716 bytes
 portage_with_autodep/pym/portage/const.py          |   59 +-
 portage_with_autodep/pym/portage/const.py.rej      |   12 +
 portage_with_autodep/pym/portage/const.pyo         |  Bin 0 -> 4887 bytes
 portage_with_autodep/pym/portage/cvstree.py        |   10 +-
 portage_with_autodep/pym/portage/cvstree.pyo       |  Bin 0 -> 9826 bytes
 portage_with_autodep/pym/portage/data.py           |  208 +-
 portage_with_autodep/pym/portage/data.pyo          |  Bin 0 -> 5965 bytes
 .../pym/portage/dbapi/_MergeProcess.py             |   91 +-
 .../pym/portage/dbapi/_MergeProcess.pyo            |  Bin 0 -> 6813 bytes
 .../pym/portage/dbapi/_SyncfsProcess.py            |   53 +
 portage_with_autodep/pym/portage/dbapi/__init__.py |  122 +-
 .../pym/portage/dbapi/__init__.pyo                 |  Bin 0 -> 11096 bytes
 .../pym/portage/dbapi/_expand_new_virt.py          |    3 +-
 .../pym/portage/dbapi/_expand_new_virt.pyo         |  Bin 0 -> 1943 bytes
 .../pym/portage/dbapi/_similar_name_search.py      |   57 +
 portage_with_autodep/pym/portage/dbapi/bintree.py  |  110 +-
 portage_with_autodep/pym/portage/dbapi/bintree.pyo |  Bin 0 -> 39953 bytes
 .../pym/portage/dbapi/cpv_expand.pyo               |  Bin 0 -> 2373 bytes
 .../pym/portage/dbapi/dep_expand.pyo               |  Bin 0 -> 1500 bytes
 portage_with_autodep/pym/portage/dbapi/porttree.py |  694 ++++---
 .../pym/portage/dbapi/porttree.pyo                 |  Bin 0 -> 33775 bytes
 portage_with_autodep/pym/portage/dbapi/vartree.py  |  834 +++++---
 portage_with_autodep/pym/portage/dbapi/vartree.pyo |  Bin 0 -> 120778 bytes
 portage_with_autodep/pym/portage/dbapi/virtual.py  |   32 +-
 portage_with_autodep/pym/portage/dbapi/virtual.pyo |  Bin 0 -> 5813 bytes
 portage_with_autodep/pym/portage/debug.py          |    4 +-
 portage_with_autodep/pym/portage/debug.pyo         |  Bin 0 -> 4434 bytes
 portage_with_autodep/pym/portage/dep/__init__.py   |  232 ++-
 portage_with_autodep/pym/portage/dep/__init__.pyo  |  Bin 0 -> 67806 bytes
 .../pym/portage/dep/_slot_operator.py              |   97 +
 portage_with_autodep/pym/portage/dep/dep_check.py  |   44 +-
 portage_with_autodep/pym/portage/dep/dep_check.pyo |  Bin 0 -> 13211 bytes
 portage_with_autodep/pym/portage/dispatch_conf.py  |   42 +-
 portage_with_autodep/pym/portage/dispatch_conf.pyo |  Bin 0 -> 7195 bytes
 portage_with_autodep/pym/portage/eapi.py           |   14 +-
 portage_with_autodep/pym/portage/eapi.pyo          |  Bin 0 -> 4007 bytes
 portage_with_autodep/pym/portage/eclass_cache.py   |   84 +-
 portage_with_autodep/pym/portage/eclass_cache.pyo  |  Bin 0 -> 5716 bytes
 portage_with_autodep/pym/portage/elog/__init__.py  |   20 +-
 portage_with_autodep/pym/portage/elog/__init__.pyo |  Bin 0 -> 5346 bytes
 .../pym/portage/elog/filtering.pyo                 |  Bin 0 -> 574 bytes
 portage_with_autodep/pym/portage/elog/messages.py  |   36 +-
 portage_with_autodep/pym/portage/elog/messages.pyo |  Bin 0 -> 4935 bytes
 .../pym/portage/elog/mod_custom.pyo                |  Bin 0 -> 988 bytes
 portage_with_autodep/pym/portage/elog/mod_echo.py  |   13 +
 portage_with_autodep/pym/portage/elog/mod_echo.pyo |  Bin 0 -> 1933 bytes
 portage_with_autodep/pym/portage/elog/mod_mail.pyo |  Bin 0 -> 1464 bytes
 .../pym/portage/elog/mod_mail_summary.pyo          |  Bin 0 -> 3107 bytes
 portage_with_autodep/pym/portage/elog/mod_save.py  |   28 +-
 portage_with_autodep/pym/portage/elog/mod_save.pyo |  Bin 0 -> 2192 bytes
 .../pym/portage/elog/mod_save_summary.py           |   21 +-
 .../pym/portage/elog/mod_save_summary.pyo          |  Bin 0 -> 2342 bytes
 .../pym/portage/elog/mod_syslog.py                 |   14 +-
 .../pym/portage/elog/mod_syslog.pyo                |  Bin 0 -> 1292 bytes
 .../pym/portage/emaint/__init__.py                 |    5 +
 .../pym/portage/emaint/defaults.py                 |   25 +
 portage_with_autodep/pym/portage/emaint/main.py    |  222 +++
 portage_with_autodep/pym/portage/emaint/module.py  |  194 ++
 .../pym/portage/emaint/modules/__init__.py         |    5 +
 .../pym/portage/emaint/modules/binhost/__init__.py |   20 +
 .../pym/portage/emaint/modules/binhost/binhost.py  |  163 ++
 .../pym/portage/emaint/modules/config/__init__.py  |   20 +
 .../pym/portage/emaint/modules/config/config.py    |   79 +
 .../pym/portage/emaint/modules/logs/__init__.py    |   45 +
 .../pym/portage/emaint/modules/logs/logs.py        |  103 +
 .../pym/portage/emaint/modules/move/__init__.py    |   30 +
 .../pym/portage/emaint/modules/move/move.py        |  180 ++
 .../pym/portage/emaint/modules/resume/__init__.py  |   20 +
 .../pym/portage/emaint/modules/resume/resume.py    |   58 +
 .../pym/portage/emaint/modules/world/__init__.py   |   20 +
 .../pym/portage/emaint/modules/world/world.py      |   89 +
 .../pym/portage/emaint/progress.py                 |   61 +
 portage_with_autodep/pym/portage/env/__init__.pyo  |  Bin 0 -> 133 bytes
 portage_with_autodep/pym/portage/env/config.pyo    |  Bin 0 -> 4454 bytes
 portage_with_autodep/pym/portage/env/loaders.py    |    4 +-
 portage_with_autodep/pym/portage/env/loaders.pyo   |  Bin 0 -> 11328 bytes
 .../pym/portage/env/validators.pyo                 |  Bin 0 -> 762 bytes
 portage_with_autodep/pym/portage/exception.py      |    4 +
 portage_with_autodep/pym/portage/exception.pyo     |  Bin 0 -> 11981 bytes
 portage_with_autodep/pym/portage/getbinpkg.py      |   36 +-
 portage_with_autodep/pym/portage/getbinpkg.pyo     |  Bin 0 -> 24428 bytes
 portage_with_autodep/pym/portage/glsa.py           |   23 +-
 portage_with_autodep/pym/portage/glsa.pyo          |  Bin 0 -> 24474 bytes
 portage_with_autodep/pym/portage/localization.pyo  |  Bin 0 -> 793 bytes
 portage_with_autodep/pym/portage/locks.py          |  246 ++-
 portage_with_autodep/pym/portage/locks.pyo         |  Bin 0 -> 12530 bytes
 portage_with_autodep/pym/portage/mail.py           |    3 +-
 portage_with_autodep/pym/portage/mail.pyo          |  Bin 0 -> 4745 bytes
 portage_with_autodep/pym/portage/manifest.py       |  239 ++-
 portage_with_autodep/pym/portage/manifest.pyo      |  Bin 0 -> 24109 bytes
 portage_with_autodep/pym/portage/news.py           |   81 +-
 portage_with_autodep/pym/portage/news.pyo          |  Bin 0 -> 15630 bytes
 portage_with_autodep/pym/portage/output.py         |   45 +-
 portage_with_autodep/pym/portage/output.pyo        |  Bin 0 -> 28175 bytes
 .../pym/portage/package/__init__.pyo               |  Bin 0 -> 137 bytes
 .../pym/portage/package/ebuild/__init__.pyo        |  Bin 0 -> 144 bytes
 .../package/ebuild/_config/KeywordsManager.py      |   43 +-
 .../package/ebuild/_config/KeywordsManager.pyo     |  Bin 0 -> 8961 bytes
 .../package/ebuild/_config/LicenseManager.py       |    9 +-
 .../package/ebuild/_config/LicenseManager.pyo      |  Bin 0 -> 8214 bytes
 .../package/ebuild/_config/LocationsManager.py     |  149 +-
 .../package/ebuild/_config/LocationsManager.pyo    |  Bin 0 -> 9666 bytes
 .../portage/package/ebuild/_config/MaskManager.py  |   82 +-
 .../portage/package/ebuild/_config/MaskManager.pyo |  Bin 0 -> 8064 bytes
 .../portage/package/ebuild/_config/UseManager.py   |   30 +-
 .../portage/package/ebuild/_config/UseManager.pyo  |  Bin 0 -> 9077 bytes
 .../package/ebuild/_config/VirtualsManager.pyo     |  Bin 0 -> 6480 bytes
 .../portage/package/ebuild/_config/__init__.pyo    |  Bin 0 -> 152 bytes
 .../package/ebuild/_config/env_var_validation.pyo  |  Bin 0 -> 1011 bytes
 .../package/ebuild/_config/features_set.pyo        |  Bin 0 -> 5840 bytes
 .../pym/portage/package/ebuild/_config/helper.py   |    4 +-
 .../pym/portage/package/ebuild/_config/helper.pyo  |  Bin 0 -> 2235 bytes
 .../package/ebuild/_config/special_env_vars.py     |   32 +-
 .../package/ebuild/_config/special_env_vars.pyo    |  Bin 0 -> 5517 bytes
 .../package/ebuild/_config/unpack_dependencies.py  |   38 +
 .../pym/portage/package/ebuild/_eapi_invalid.py    |   54 +
 .../pym/portage/package/ebuild/_eapi_invalid.pyo   |  Bin 0 -> 1848 bytes
 .../portage/package/ebuild/_ipc/ExitCommand.pyo    |  Bin 0 -> 1057 bytes
 .../pym/portage/package/ebuild/_ipc/IpcCommand.pyo |  Bin 0 -> 612 bytes
 .../portage/package/ebuild/_ipc/QueryCommand.py    |   11 +-
 .../portage/package/ebuild/_ipc/QueryCommand.pyo   |  Bin 0 -> 3629 bytes
 .../pym/portage/package/ebuild/_ipc/__init__.pyo   |  Bin 0 -> 149 bytes
 .../portage/package/ebuild/_metadata_invalid.py    |   41 +
 .../ebuild/_parallel_manifest/ManifestProcess.py   |   43 +
 .../ebuild/_parallel_manifest/ManifestScheduler.py |   93 +
 .../ebuild/_parallel_manifest/ManifestTask.py      |  186 ++
 .../ebuild/_parallel_manifest}/__init__.py         |    2 +-
 .../pym/portage/package/ebuild/_spawn_nofetch.py   |    8 +-
 .../pym/portage/package/ebuild/_spawn_nofetch.pyo  |  Bin 0 -> 3182 bytes
 .../pym/portage/package/ebuild/config.py           |  443 +++--
 .../pym/portage/package/ebuild/config.pyo          |  Bin 0 -> 65727 bytes
 .../package/ebuild/deprecated_profile_check.pyo    |  Bin 0 -> 1949 bytes
 .../pym/portage/package/ebuild/digestcheck.py      |   57 +-
 .../pym/portage/package/ebuild/digestcheck.pyo     |  Bin 0 -> 4457 bytes
 .../pym/portage/package/ebuild/digestgen.py        |   28 +-
 .../pym/portage/package/ebuild/digestgen.pyo       |  Bin 0 -> 5721 bytes
 .../pym/portage/package/ebuild/doebuild.py         |  592 ++++--
 .../pym/portage/package/ebuild/doebuild.pyo        |  Bin 0 -> 55376 bytes
 .../pym/portage/package/ebuild/fetch.py            |   88 +-
 .../pym/portage/package/ebuild/fetch.pyo           |  Bin 0 -> 24287 bytes
 .../pym/portage/package/ebuild/getmaskingreason.py |    8 +-
 .../portage/package/ebuild/getmaskingreason.pyo    |  Bin 0 -> 3654 bytes
 .../pym/portage/package/ebuild/getmaskingstatus.py |   22 +-
 .../portage/package/ebuild/getmaskingstatus.pyo    |  Bin 0 -> 5230 bytes
 .../portage/package/ebuild/prepare_build_dirs.py   |   48 +-
 .../portage/package/ebuild/prepare_build_dirs.pyo  |  Bin 0 -> 11519 bytes
 portage_with_autodep/pym/portage/process.py        |   66 +-
 portage_with_autodep/pym/portage/process.pyo       |  Bin 0 -> 12173 bytes
 .../pym/portage/proxy/__init__.pyo                 |  Bin 0 -> 135 bytes
 .../pym/portage/proxy/lazyimport.pyo               |  Bin 0 -> 6140 bytes
 .../pym/portage/proxy/objectproxy.pyo              |  Bin 0 -> 5289 bytes
 .../pym/portage/repository/__init__.pyo            |  Bin 0 -> 140 bytes
 .../pym/portage/repository/config.py               |  620 ++++--
 .../pym/portage/repository/config.pyo              |  Bin 0 -> 24828 bytes
 portage_with_autodep/pym/portage/tests/__init__.py |  150 +-
 .../pym/portage/tests/__init__.pyo                 |  Bin 0 -> 10394 bytes
 .../pym/portage/tests/bin/__init__.py              |    0
 .../pym/portage/tests/bin/__test__                 |    0
 .../pym/portage/tests/bin/setup_env.py             |   85 -
 .../pym/portage/tests/bin/test_dobin.py            |   16 -
 .../pym/portage/tests/bin/test_dodir.py            |   16 -
 .../pym/portage/tests/dbapi/__test__               |    0
 .../pym/portage/tests/dbapi/test_fakedbapi.py      |   58 -
 .../pym/portage/tests/dep/__init__.py              |    3 -
 .../pym/portage/tests/dep/__test__                 |    0
 .../pym/portage/tests/dep/testAtom.py              |  315 ---
 .../pym/portage/tests/dep/testCheckRequiredUse.py  |  219 ---
 .../pym/portage/tests/dep/testExtendedAtomDict.py  |   18 -
 .../portage/tests/dep/testExtractAffectingUSE.py   |   75 -
 .../pym/portage/tests/dep/testStandalone.py        |   36 -
 .../portage/tests/dep/test_best_match_to_list.py   |   43 -
 .../pym/portage/tests/dep/test_dep_getcpv.py       |   35 -
 .../pym/portage/tests/dep/test_dep_getrepo.py      |   29 -
 .../pym/portage/tests/dep/test_dep_getslot.py      |   28 -
 .../pym/portage/tests/dep/test_dep_getusedeps.py   |   35 -
 .../pym/portage/tests/dep/test_get_operator.py     |   33 -
 .../tests/dep/test_get_required_use_flags.py       |   42 -
 .../pym/portage/tests/dep/test_isjustname.py       |   24 -
 .../pym/portage/tests/dep/test_isvalidatom.py      |  146 --
 .../pym/portage/tests/dep/test_match_from_list.py  |  108 -
 .../pym/portage/tests/dep/test_paren_reduce.py     |   66 -
 .../pym/portage/tests/dep/test_use_reduce.py       |  627 ------
 .../pym/portage/tests/ebuild/__init__.py           |    2 -
 .../pym/portage/tests/ebuild/__test__              |    0
 .../tests/ebuild/test_array_fromfile_eof.py        |   43 -
 .../pym/portage/tests/ebuild/test_config.py        |  198 --
 .../portage/tests/ebuild/test_doebuild_spawn.py    |   82 -
 .../pym/portage/tests/ebuild/test_ipc_daemon.py    |  124 --
 .../pym/portage/tests/ebuild/test_pty_eof.py       |   32 -
 .../pym/portage/tests/ebuild/test_spawn.py         |   52 -
 .../pym/portage/tests/env/__init__.py              |    4 -
 .../pym/portage/tests/env/__test__                 |    0
 .../pym/portage/tests/env/config/__init__.py       |    4 -
 .../pym/portage/tests/env/config/__test__          |    0
 .../tests/env/config/test_PackageKeywordsFile.py   |   40 -
 .../tests/env/config/test_PackageMaskFile.py       |   29 -
 .../tests/env/config/test_PackageUseFile.py        |   37 -
 .../tests/env/config/test_PortageModulesFile.py    |   39 -
 .../pym/portage/tests/lafilefixer/__init__.py      |    0
 .../pym/portage/tests/lafilefixer/__test__         |    0
 .../portage/tests/lafilefixer/test_lafilefixer.py  |  145 --
 .../pym/portage/tests/lazyimport/__init__.py       |    0
 .../pym/portage/tests/lazyimport/__test__          |    0
 .../test_lazy_import_portage_baseline.py           |   81 -
 .../lazyimport/test_preload_portage_submodules.py  |   16 -
 .../pym/portage/tests/lint/__init__.pyo            |  Bin 0 -> 140 bytes
 .../pym/portage/tests/lint/test_bash_syntax.pyo    |  Bin 0 -> 1944 bytes
 .../portage/tests/lint/test_compile_modules.pyo    |  Bin 0 -> 1855 bytes
 .../pym/portage/tests/lint/test_import_modules.pyo |  Bin 0 -> 1725 bytes
 .../pym/portage/tests/locks/__test__               |    0
 .../portage/tests/locks/test_asynchronous_lock.py  |  124 --
 .../pym/portage/tests/locks/test_lock_nonblock.py  |   46 -
 .../pym/portage/tests/news/__init__.py             |    3 -
 .../pym/portage/tests/news/__test__                |    0
 .../pym/portage/tests/news/test_NewsItem.py        |   95 -
 .../pym/portage/tests/process/__init__.py          |    2 -
 .../pym/portage/tests/process/__test__             |    0
 .../pym/portage/tests/process/test_poll.py         |   39 -
 .../portage/tests/resolver/ResolverPlayground.py   |  690 -------
 .../pym/portage/tests/resolver/__test__            |    0
 .../pym/portage/tests/resolver/test_autounmask.py  |  326 ---
 .../portage/tests/resolver/test_backtracking.py    |  169 --
 .../tests/resolver/test_circular_dependencies.py   |   84 -
 .../pym/portage/tests/resolver/test_depclean.py    |  285 ---
 .../pym/portage/tests/resolver/test_depth.py       |  252 ---
 .../pym/portage/tests/resolver/test_eapi.py        |  115 --
 .../pym/portage/tests/resolver/test_merge_order.py |  453 -----
 .../test_missing_iuse_and_evaluated_atoms.py       |   31 -
 .../pym/portage/tests/resolver/test_multirepo.py   |  318 ---
 .../pym/portage/tests/resolver/test_multislot.py   |   40 -
 .../tests/resolver/test_old_dep_chain_display.py   |   35 -
 .../pym/portage/tests/resolver/test_output.py      |   88 -
 .../pym/portage/tests/resolver/test_rebuild.py     |  138 --
 .../portage/tests/resolver/test_required_use.py    |  114 --
 .../pym/portage/tests/resolver/test_simple.py      |   57 -
 .../portage/tests/resolver/test_slot_collisions.py |  143 --
 .../tests/resolver/test_use_dep_defaults.py        |   40 -
 portage_with_autodep/pym/portage/tests/runTests    |    6 +-
 .../pym/portage/tests/sets/__init__.py             |    0
 .../pym/portage/tests/sets/base/__init__.py        |    0
 .../pym/portage/tests/sets/base/__test__           |    0
 .../tests/sets/base/testInternalPackageSet.py      |   61 -
 .../pym/portage/tests/sets/files/__init__.py       |    0
 .../pym/portage/tests/sets/files/__test__          |    0
 .../portage/tests/sets/files/testConfigFileSet.py  |   32 -
 .../portage/tests/sets/files/testStaticFileSet.py  |   27 -
 .../pym/portage/tests/sets/shell/__init__.py       |    0
 .../pym/portage/tests/sets/shell/__test__          |    0
 .../pym/portage/tests/sets/shell/testShell.py      |   28 -
 .../pym/portage/tests/unicode/__test__             |    0
 .../portage/tests/unicode/test_string_format.py    |  108 -
 .../pym/portage/tests/util/__init__.py             |    4 -
 .../pym/portage/tests/util/__test__                |    0
 .../pym/portage/tests/util/test_digraph.py         |  201 --
 .../pym/portage/tests/util/test_getconfig.py       |   29 -
 .../pym/portage/tests/util/test_grabdict.py        |   11 -
 .../pym/portage/tests/util/test_normalizedPath.py  |   14 -
 .../pym/portage/tests/util/test_stackDictList.py   |   17 -
 .../pym/portage/tests/util/test_stackDicts.py      |   36 -
 .../pym/portage/tests/util/test_stackLists.py      |   19 -
 .../pym/portage/tests/util/test_uniqueArray.py     |   24 -
 .../pym/portage/tests/util/test_varExpand.py       |   92 -
 .../pym/portage/tests/versions/__init__.py         |    3 -
 .../pym/portage/tests/versions/__test__            |    0
 .../portage/tests/versions/test_cpv_sort_key.py    |   16 -
 .../pym/portage/tests/versions/test_vercmp.py      |   80 -
 .../pym/portage/tests/xpak/__init__.py             |    3 -
 .../pym/portage/tests/xpak/__test__                |    0
 .../pym/portage/tests/xpak/test_decodeint.py       |   16 -
 portage_with_autodep/pym/portage/update.py         |   26 +-
 portage_with_autodep/pym/portage/update.pyo        |  Bin 0 -> 10829 bytes
 .../pym/portage/util/ExtractKernelVersion.py       |    4 +-
 .../pym/portage/util/ExtractKernelVersion.pyo      |  Bin 0 -> 2297 bytes
 .../pym/{_emerge => portage/util}/SlotObject.py    |   13 +-
 .../pym/portage/util/SlotObject.pyo                |  Bin 0 -> 1719 bytes
 .../pym/portage/util/_ShelveUnicodeWrapper.py      |   45 +
 portage_with_autodep/pym/portage/util/__init__.py  |  203 +-
 portage_with_autodep/pym/portage/util/__init__.pyo |  Bin 0 -> 47487 bytes
 portage_with_autodep/pym/portage/util/_argparse.py |   42 +
 .../pym/portage/util/_async/AsyncScheduler.py      |  102 +
 .../pym/portage/util/_async/FileCopier.py          |   17 +
 .../pym/portage/util/_async/FileDigester.py        |   73 +
 .../pym/portage/util/_async/ForkProcess.py         |   65 +
 .../pym/portage/util/_async/PipeLogger.py          |  163 ++
 .../portage/util/_async/PipeReaderBlockingIO.py    |   91 +
 .../pym/portage/util/_async/PopenProcess.py        |   33 +
 .../pym/portage/util/_async/SchedulerInterface.py  |   79 +
 .../pym/portage/util/_async/TaskScheduler.py       |   20 +
 .../{tests/locks => util/_async}/__init__.py       |    2 +-
 .../pym/portage/util/_async/run_main_scheduler.py  |   41 +
 portage_with_autodep/pym/portage/util/_ctypes.py   |   47 +
 .../pym/portage/util/_desktop_entry.py             |   75 +
 .../pym/portage/util/_desktop_entry.pyo            |  Bin 0 -> 2878 bytes
 .../pym/portage/util/_dyn_libs/LinkageMapELF.py    |    8 +
 .../pym/portage/util/_dyn_libs/LinkageMapELF.pyo   |  Bin 0 -> 26399 bytes
 .../util/_dyn_libs/PreservedLibsRegistry.py        |  109 +-
 .../util/_dyn_libs/PreservedLibsRegistry.pyo       |  Bin 0 -> 8884 bytes
 .../pym/portage/util/_dyn_libs/__init__.pyo        |  Bin 0 -> 144 bytes
 .../util/_dyn_libs/display_preserved_libs.py       |   98 +
 .../pym/portage/util/_eventloop/EventLoop.py       |  490 +++++
 .../pym/portage/util/_eventloop/EventLoop.pyo      |  Bin 0 -> 13508 bytes
 .../pym/portage/util/_eventloop/GlibEventLoop.py   |   23 +
 .../pym/portage/util/_eventloop/GlibEventLoop.pyo  |  Bin 0 -> 1029 bytes
 .../util/_eventloop}/PollConstants.py              |    0
 .../pym/portage/util/_eventloop/PollConstants.pyo  |  Bin 0 -> 787 bytes
 .../util/_eventloop}/PollSelectAdapter.py          |    7 +-
 .../portage/util/_eventloop/PollSelectAdapter.pyo  |  Bin 0 -> 2220 bytes
 .../{tests/unicode => util/_eventloop}/__init__.py |    2 +-
 .../pym/portage/util/_eventloop/__init__.pyo       |  Bin 0 -> 145 bytes
 .../portage/util/_eventloop/global_event_loop.py   |   35 +
 .../portage/util/_eventloop/global_event_loop.pyo  |  Bin 0 -> 878 bytes
 .../pym/portage/util/_get_vm_info.py               |   80 +
 .../pym/portage/util/_info_files.py                |  138 ++
 portage_with_autodep/pym/portage/util/_path.py     |   27 +
 portage_with_autodep/pym/portage/util/_pty.py      |  152 +-
 portage_with_autodep/pym/portage/util/_pty.pyo     |  Bin 0 -> 1936 bytes
 portage_with_autodep/pym/portage/util/_urlopen.py  |   42 +
 portage_with_autodep/pym/portage/util/_urlopen.pyo |  Bin 0 -> 1626 bytes
 portage_with_autodep/pym/portage/util/digraph.py   |   15 +-
 portage_with_autodep/pym/portage/util/digraph.pyo  |  Bin 0 -> 10678 bytes
 .../pym/portage/util/env_update.py                 |   78 +-
 .../pym/portage/util/env_update.pyo                |  Bin 0 -> 9095 bytes
 .../pym/portage/util/lafilefixer.py                |    2 +-
 .../pym/portage/util/lafilefixer.pyo               |  Bin 0 -> 3621 bytes
 portage_with_autodep/pym/portage/util/listdir.py   |    2 +-
 portage_with_autodep/pym/portage/util/listdir.pyo  |  Bin 0 -> 4088 bytes
 portage_with_autodep/pym/portage/util/movefile.py  |  144 +-
 portage_with_autodep/pym/portage/util/movefile.pyo |  Bin 0 -> 8236 bytes
 portage_with_autodep/pym/portage/util/mtimedb.py   |   75 +-
 portage_with_autodep/pym/portage/util/mtimedb.pyo  |  Bin 0 -> 3770 bytes
 portage_with_autodep/pym/portage/util/whirlpool.py |  794 ++++++++
 .../pym/portage/util/whirlpool.pyo                 |  Bin 0 -> 38994 bytes
 portage_with_autodep/pym/portage/versions.py       |  187 +-
 portage_with_autodep/pym/portage/versions.pyo      |  Bin 0 -> 13125 bytes
 portage_with_autodep/pym/portage/xml/__init__.pyo  |  Bin 0 -> 133 bytes
 portage_with_autodep/pym/portage/xml/metadata.py   |   60 +-
 portage_with_autodep/pym/portage/xml/metadata.pyo  |  Bin 0 -> 15298 bytes
 portage_with_autodep/pym/portage/xpak.py           |  312 +--
 portage_with_autodep/pym/portage/xpak.pyo          |  Bin 0 -> 16218 bytes
 portage_with_autodep/pym/repoman/__init__.pyo      |  Bin 0 -> 129 bytes
 portage_with_autodep/pym/repoman/checks.py         |   49 +-
 portage_with_autodep/pym/repoman/checks.pyo        |  Bin 0 -> 31426 bytes
 portage_with_autodep/pym/repoman/errors.pyo        |  Bin 0 -> 1948 bytes
 portage_with_autodep/pym/repoman/herdbase.py       |    8 +-
 portage_with_autodep/pym/repoman/herdbase.pyo      |  Bin 0 -> 3401 bytes
 portage_with_autodep/pym/repoman/utilities.py      |  384 +++-
 portage_with_autodep/pym/repoman/utilities.pyo     |  Bin 0 -> 24780 bytes
 622 files changed, 21337 insertions(+), 16583 deletions(-)

diff --git a/portage_with_autodep/bin/archive-conf b/portage_with_autodep/bin/archive-conf
index 5a03b85..7978668 100755
--- a/portage_with_autodep/bin/archive-conf
+++ b/portage_with_autodep/bin/archive-conf
@@ -20,7 +20,7 @@ except ImportError:
     import portage
 
 from portage import os
-import dispatch_conf
+from portage import dispatch_conf
 
 FIND_EXTANT_CONTENTS  = "find %s -name CONTENTS"
 

diff --git a/portage_with_autodep/bin/bashrc-functions.sh b/portage_with_autodep/bin/bashrc-functions.sh
new file mode 100755
index 0000000..4da5585
--- /dev/null
+++ b/portage_with_autodep/bin/bashrc-functions.sh
@@ -0,0 +1,140 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+portageq() {
+	PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
+	"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" "$@"
+}
+
+register_die_hook() {
+	local x
+	for x in $* ; do
+		has $x $EBUILD_DEATH_HOOKS || \
+			export EBUILD_DEATH_HOOKS="$EBUILD_DEATH_HOOKS $x"
+	done
+}
+
+register_success_hook() {
+	local x
+	for x in $* ; do
+		has $x $EBUILD_SUCCESS_HOOKS || \
+			export EBUILD_SUCCESS_HOOKS="$EBUILD_SUCCESS_HOOKS $x"
+	done
+}
+
+strip_duplicate_slashes() {
+	if [[ -n $1 ]] ; then
+		local removed=$1
+		while [[ ${removed} == *//* ]] ; do
+			removed=${removed//\/\///}
+		done
+		echo ${removed}
+	fi
+}
+
+# Remove from PATH any directory that matches one of the passed-in
+# patterns.
+remove_path_entry() {
+	save_IFS
+	IFS=":"
+	stripped_path="${PATH}"
+	while [ -n "$1" ]; do
+		cur_path=""
+		for p in ${stripped_path}; do
+			if [ "${p/${1}}" == "${p}" ]; then
+				cur_path="${cur_path}:${p}"
+			fi
+		done
+		stripped_path="${cur_path#:*}"
+		shift
+	done
+	restore_IFS
+	PATH="${stripped_path}"
+}
+
+# Set the given variables unless they have already been set (e.g. during
+# emerge invocation) to values different from those in make.conf.
+set_unless_changed() {
+	if [[ $# -lt 1 ]]; then
+		die "${FUNCNAME}() requires at least 1 argument: VARIABLE=VALUE"
+	fi
+
+	local argument value variable
+	for argument in "$@"; do
+		if [[ ${argument} != *=* ]]; then
+			die "${FUNCNAME}(): Argument '${argument}' has incorrect syntax"
+		fi
+		variable="${argument%%=*}"
+		value="${argument#*=}"
+		if eval "[[ \${${variable}} == \$(env -u ${variable} portageq envvar ${variable}) ]]"; then
+			eval "${variable}=\"\${value}\""
+		fi
+	done
+}
+
+# Unset the given variables unless they have been set (e.g. during emerge
+# invocation) to values different from those in make.conf.
+unset_unless_changed() {
+	if [[ $# -lt 1 ]]; then
+		die "${FUNCNAME}() requires at least 1 argument: VARIABLE"
+	fi
+
+	local variable
+	for variable in "$@"; do
+		if eval "[[ \${${variable}} == \$(env -u ${variable} portageq envvar ${variable}) ]]"; then
+			unset ${variable}
+		fi
+	done
+}
+
+KV_major() {
+	[[ -z $1 ]] && return 1
+
+	local KV=$@
+	echo "${KV%%.*}"
+}
+
+KV_minor() {
+	[[ -z $1 ]] && return 1
+
+	local KV=$@
+	KV=${KV#*.}
+	echo "${KV%%.*}"
+}
+
+KV_micro() {
+	[[ -z $1 ]] && return 1
+
+	local KV=$@
+	KV=${KV#*.*.}
+	echo "${KV%%[^[:digit:]]*}"
+}
+
+KV_to_int() {
+	[[ -z $1 ]] && return 1
+
+	local KV_MAJOR=$(KV_major "$1")
+	local KV_MINOR=$(KV_minor "$1")
+	local KV_MICRO=$(KV_micro "$1")
+	local KV_int=$(( KV_MAJOR * 65536 + KV_MINOR * 256 + KV_MICRO ))
+
+	# We make version 2.2.0 the minimum version we will handle as
+	# a sanity check ... if it's less, we fail ...
+	if [[ ${KV_int} -ge 131584 ]] ; then
+		echo "${KV_int}"
+		return 0
+	fi
+
+	return 1
+}
+
+_RC_GET_KV_CACHE=""
+get_KV() {
+	[[ -z ${_RC_GET_KV_CACHE} ]] \
+		&& _RC_GET_KV_CACHE=$(uname -r)
+
+	echo $(KV_to_int "${_RC_GET_KV_CACHE}")
+
+	return $?
+}
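
KV_to_int() packs major.minor.micro into a single integer as
major*65536 + minor*256 + micro; the 131584 floor above is exactly
2*65536 + 2*256, i.e. version 2.2.0. A quick usage sketch, assuming the
functions above are sourced:

    KV_to_int "3.2.1"                    # prints 197121 (3*65536 + 2*256 + 1)
    KV_to_int "1.0.0" || echo "too old"  # 1.0.0 encodes to 65536 < 131584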

diff --git a/portage_with_autodep/bin/chpathtool.py b/portage_with_autodep/bin/chpathtool.py
new file mode 100755
index 0000000..d0d49cb
--- /dev/null
+++ b/portage_with_autodep/bin/chpathtool.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import optparse
+import os
+import stat
+import sys
+
+CONTENT_ENCODING = "utf_8"
+FS_ENCODING = "utf_8"
+
+try:
+	import magic
+except ImportError:
+	magic = None
+else:
+	try:
+		magic.MIME_TYPE
+	except AttributeError:
+		# magic module seems to be broken
+		magic = None
+
+class IsTextFile(object):
+
+	def __init__(self):
+		if magic is not None:
+			self._call = self._is_text_magic
+			self._m = magic.open(magic.MIME_TYPE)
+			self._m.load()
+		else:
+			self._call = self._is_text_encoding
+			self._encoding = CONTENT_ENCODING
+
+	def __call__(self, filename):
+		"""
+		Returns True if the given file is a text file, and False otherwise.
+		"""
+		return self._call(filename)
+
+	def _is_text_magic(self, filename):
+		mime_type = self._m.file(filename)
+		return mime_type.startswith("text/")
+
+	def _is_text_encoding(self, filename):
+		try:
+			for line in io.open(filename, mode='r', encoding=self._encoding):
+				pass
+		except UnicodeDecodeError:
+			return False
+		return True
+
+def chpath_inplace(filename, is_text_file, old, new):
+	"""
+	Returns True if any modifications were made, and False otherwise.
+	"""
+
+	modified = False
+	orig_stat = os.lstat(filename)
+	try:
+		f = io.open(filename, buffering=0, mode='r+b')
+	except IOError:
+		try:
+			orig_mode = stat.S_IMODE(os.lstat(filename).st_mode)
+		except OSError as e:
+			sys.stderr.write("%s: %s\n" % (e, filename))
+			return
+		temp_mode = 0o200 | orig_mode
+		os.chmod(filename, temp_mode)
+		try:
+			f = io.open(filename, buffering=0, mode='r+b')
+		finally:
+			os.chmod(filename, orig_mode)
+
+	len_old = len(old)
+	len_new = len(new)
+	matched_byte_count = 0
+	while True:
+		in_byte = f.read(1)
+
+		if not in_byte:
+			break
+
+		if in_byte == old[matched_byte_count]:
+			matched_byte_count += 1
+			if matched_byte_count == len_old:
+				modified = True
+				matched_byte_count = 0
+				end_position = f.tell()
+				start_position = end_position - len_old
+				if not is_text_file:
+					# search backwards for leading slashes written by
+					# a previous invocation of this tool
+					num_to_write = len_old
+					f.seek(start_position - 1)
+					while True:
+						if f.read(1) != b'/':
+							break
+						num_to_write += 1
+						f.seek(f.tell() - 2)
+
+					# pad with as many leading slashes as necessary
+					while num_to_write > len_new:
+						f.write(b'/')
+						num_to_write -= 1
+					f.write(new)
+				else:
+					remainder = f.read()
+					f.seek(start_position)
+					f.write(new)
+					if remainder:
+						f.write(remainder)
+						f.truncate()
+						f.seek(start_position + len_new)
+		elif matched_byte_count > 0:
+			# back up and try to start a new match after
+			# the first byte of the previous partial match
+			f.seek(f.tell() - matched_byte_count)
+			matched_byte_count = 0
+
+	f.close()
+	if modified:
+		orig_mtime = orig_stat[stat.ST_MTIME]
+		os.utime(filename, (orig_mtime, orig_mtime))
+	return modified
+
+def chpath_inplace_symlink(filename, st, old, new):
+	target = os.readlink(filename)
+	if target.startswith(old):
+		new_target = new + target[len(old):]
+		os.unlink(filename)
+		os.symlink(new_target, filename)
+		os.lchown(filename, st.st_uid, st.st_gid)
+
+def main(argv):
+
+	usage = "%s [options] <location> <old> <new>" % (os.path.basename(argv[0]),)
+	parser = optparse.OptionParser(usage=usage)
+	options, args = parser.parse_args(argv[1:])
+
+	if len(args) != 3:
+		parser.error("3 args required, got %s" % (len(args),))
+
+	location, old, new = args
+
+	is_text_file = IsTextFile()
+
+	if not isinstance(location, bytes):
+		location = location.encode(FS_ENCODING)
+	if not isinstance(old, bytes):
+		old = old.encode(FS_ENCODING)
+	if not isinstance(new, bytes):
+		new = new.encode(FS_ENCODING)
+
+	st = os.lstat(location)
+
+	if stat.S_ISDIR(st.st_mode):
+		for parent, dirs, files in os.walk(location):
+			for filename in files:
+				filename = os.path.join(parent, filename)
+				try:
+					st = os.lstat(filename)
+				except OSError:
+					pass
+				else:
+					if stat.S_ISREG(st.st_mode):
+						chpath_inplace(filename,
+							is_text_file(filename), old, new)
+					elif stat.S_ISLNK(st.st_mode):
+						chpath_inplace_symlink(filename, st, old, new)
+
+	elif stat.S_ISREG(st.st_mode):
+		chpath_inplace(location,
+			is_text_file(location), old, new)
+	elif stat.S_ISLNK(st.st_mode):
+		chpath_inplace_symlink(location, st, old, new)
+
+	return os.EX_OK
+
+if __name__ == "__main__":
+	sys.exit(main(sys.argv))
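
chpathtool.py streams through every regular file under the given
location and replaces the old path prefix with the new one; binaries
keep their original size because a shorter replacement is padded with
leading slashes, and symlink targets are rewritten separately. A
hypothetical invocation (paths are illustrative only):

    # Rewrite a temporary build prefix to its final location in the image.
    # Text files are detected via the magic module when available,
    # otherwise by attempting a UTF-8 decode.
    python chpathtool.py "${D}" /var/tmp/build-prefix /usr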

diff --git a/portage_with_autodep/bin/dispatch-conf b/portage_with_autodep/bin/dispatch-conf
index 1e21a52..139a001 100755
--- a/portage_with_autodep/bin/dispatch-conf
+++ b/portage_with_autodep/bin/dispatch-conf
@@ -1,5 +1,5 @@
 #!/usr/bin/python -O
-# Copyright 1999-2006 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 #
@@ -27,13 +27,11 @@ except ImportError:
 from portage import os
 from portage import dispatch_conf
 from portage import _unicode_decode
-from portage.dispatch_conf import diffstatusoutput_len
+from portage.dispatch_conf import diffstatusoutput
 from portage.process import find_binary
 
 FIND_EXTANT_CONFIGS  = "find '%s' %s -name '._cfg????_%s' ! -name '.*~' ! -iname '.*.bak' -print"
 DIFF_CONTENTS        = "diff -Nu '%s' '%s'"
-DIFF_CVS_INTERP      = "diff -Nu '%s' '%s' | grep '^[+-][^+-]' | grep -v '# .Header:.*'"
-DIFF_WSCOMMENTS      = "diff -Nu '%s' '%s' | grep '^[+-][^+-]' | grep -v '^[-+]#' | grep -v '^[-+][:space:]*$'"
 
 # We need a secure scratch dir and python does silly verbose errors on the use of tempnam
 oldmask = os.umask(0o077)
@@ -62,7 +60,21 @@ def cleanup(mydir=SCRATCH_DIR):
     shutil.rmtree(mydir)
 atexit.register(cleanup)
 
-MANDATORY_OPTS  = [ 'archive-dir', 'diff', 'replace-cvs', 'replace-wscomments', 'merge' ]
+MANDATORY_OPTS = [ 'archive-dir', 'diff', 'replace-cvs', 'replace-wscomments', 'merge' ]
+
+def cmd_var_is_valid(cmd):
+    """
+    Return true if the first whitespace-separated token contained
+    in cmd is an executable file, false otherwise.
+    """
+    cmd = portage.util.shlex_split(cmd)
+    if not cmd:
+        return False
+
+    if os.path.isabs(cmd[0]):
+        return os.access(cmd[0], os.X_OK)
+
+    return find_binary(cmd[0]) is not None
 
 class dispatch:
     options = {}
@@ -71,7 +83,7 @@ class dispatch:
         confs = []
         count = 0
 
-        config_root = '/'
+        config_root = portage.const.EPREFIX or os.sep
         self.options = portage.dispatch_conf.read_config(MANDATORY_OPTS)
 
         if "log-file" in self.options:
@@ -84,12 +96,30 @@ class dispatch:
         else:
             self.options["log-file"] = "/dev/null"
 
+        pager = self.options.get("pager")
+        if pager is None or not cmd_var_is_valid(pager):
+            pager = os.environ.get("PAGER")
+            if pager is None or not cmd_var_is_valid(pager):
+                pager = "cat"
+
+        pager_basename = os.path.basename(portage.util.shlex_split(pager)[0])
+        if pager_basename == "less":
+            less_opts = self.options.get("less-opts")
+            if less_opts is not None and less_opts.strip():
+                pager += " " + less_opts
+
+        if pager_basename == "cat":
+            pager = ""
+        else:
+            pager = " | " + pager
+
         #
         # Build list of extant configs
         #
 
         for path in config_paths:
-            path = portage.normalize_path(path)
+            path = portage.normalize_path(
+                 os.path.join(config_root, path.lstrip(os.sep)))
             try:
                 mymode = os.stat(path).st_mode
             except OSError:
@@ -100,7 +130,9 @@ class dispatch:
                 path, basename = os.path.split(path)
                 find_opts = "-maxdepth 1"
 
-            confs += self.massage(os.popen(FIND_EXTANT_CONFIGS % (path, find_opts, basename)).readlines())
+            with os.popen(FIND_EXTANT_CONFIGS %
+                (path, find_opts, basename)) as proc:
+                confs += self.massage(proc.readlines())
 
         if self.options['use-rcs'] == 'yes':
             for rcs_util in ("rcs", "ci", "co", "rcsmerge"):
@@ -118,6 +150,9 @@ class dispatch:
             portage.util.shlex_split(
             portage.settings.get('CONFIG_PROTECT_MASK', '')))
 
+        def diff(file1, file2):
+            return diffstatusoutput(DIFF_CONTENTS, file1, file2)
+
         #
         # Remove new configs identical to current
         #                  and
@@ -134,11 +169,11 @@ class dispatch:
             else:
                 mrgfail = portage.dispatch_conf.file_archive(archive, conf['current'], conf['new'], mrgconf)
             if os.path.exists(archive + '.dist'):
-                unmodified = diffstatusoutput_len(DIFF_CONTENTS % (conf['current'], archive + '.dist'))[1] == 0
+                unmodified = len(diff(conf['current'], archive + '.dist')[1]) == 0
             else:
                 unmodified = 0
             if os.path.exists(mrgconf):
-                if mrgfail or diffstatusoutput_len(DIFF_CONTENTS % (conf['new'], mrgconf))[1] == 0:
+                if mrgfail or len(diff(conf['new'], mrgconf)[1]) == 0:
                     os.unlink(mrgconf)
                     newconf = conf['new']
                 else:
@@ -149,24 +184,34 @@ class dispatch:
             if newconf == mrgconf and \
                 self.options.get('ignore-previously-merged') != 'yes' and \
                 os.path.exists(archive+'.dist') and \
-                diffstatusoutput_len(DIFF_CONTENTS % (archive+'.dist', conf['new']))[1] == 0:
+                len(diff(archive+'.dist', conf['new'])[1]) == 0:
                 # The current update is identical to the archived .dist
                 # version that has previously been merged.
                 os.unlink(mrgconf)
                 newconf = conf['new']
 
-            mystatus, myoutput_len = diffstatusoutput_len(
-                DIFF_CONTENTS  % (conf ['current'], newconf))
+            mystatus, myoutput = diff(conf['current'], newconf)
+            myoutput_len = len(myoutput)
             same_file = 0 == myoutput_len
             if mystatus >> 8 == 2:
                 # Binary files differ
                 same_cvs = False
                 same_wsc = False
             else:
-                same_cvs = 0 == diffstatusoutput_len(
-                    DIFF_CVS_INTERP % (conf ['current'], newconf))[1]
-                same_wsc = 0 == diffstatusoutput_len(
-                    DIFF_WSCOMMENTS % (conf ['current'], newconf))[1]
+                # Extract all the normal diff lines (ignore the headers).
+                mylines = re.findall('^[+-][^\n+-].*$', myoutput, re.MULTILINE)
+
+                # Filter out all the CVS headers.
+                cvs_header = re.compile('# [$]Header:')
+                cvs_lines = list(filter(cvs_header.search, mylines))
+                same_cvs = len(mylines) == len(cvs_lines)
+
+                # Filter out comments and whitespace-only changes.
+                # Note: it would be nice to also ignore lines that only differ in whitespace...
+                wsc_lines = []
+                for x in ['^[-+]\s*#', '^[-+]\s*$']:
+                    wsc_lines += list(filter(re.compile(x).match, mylines))
+                same_wsc = len(mylines) == len(wsc_lines)
 
             # Do options permit?
             same_cvs = same_cvs and self.options['replace-cvs'] == 'yes'
@@ -224,10 +269,12 @@ class dispatch:
                 clear_screen()
                 if show_new_diff:
                     cmd = self.options['diff'] % (conf['new'], mrgconf)
+                    cmd += pager
                     spawn_shell(cmd)
                     show_new_diff = 0
                 else:
                     cmd = self.options['diff'] % (conf['current'], newconf)
+                    cmd += pager
                     spawn_shell(cmd)
 
                 print()
@@ -423,6 +470,19 @@ def spawn_shell(cmd):
     else:
         os.system(cmd)
 
+def usage(argv):
+    print('dispatch-conf: sane configuration file update\n')
+    print('Usage: dispatch-conf [config dirs]\n')
+    print('See the dispatch-conf(1) man page for more details')
+    sys.exit(os.EX_OK)
+
+for x in sys.argv:
+    if x in ('-h', '--help'):
+        usage(sys.argv)
+    elif x in ('--version',):
+        print("Portage", portage.VERSION)
+        sys.exit(os.EX_OK)
+
 # run
 d = dispatch ()
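
With diffstatusoutput() the script now runs diff once per config file
and classifies its output in Python, rather than spawning the extra
DIFF_CVS_INTERP and DIFF_WSCOMMENTS pipelines removed above. A shell
sketch of the same classification (file names hypothetical):

    # Changed lines only, diff headers stripped (the new mylines regex):
    diff -Nu /etc/foo.conf /etc/._cfg0000_foo.conf | grep '^[+-][^+-]' > changed
    # same_cvs: every changed line is CVS $Header$ noise
    if ! grep -v '# [$]Header:' changed | grep -q . ; then echo same_cvs; fi
    # same_wsc: every changed line is a comment or whitespace-only
    if ! grep -Ev '^[-+][[:space:]]*(#|$)' changed | grep -q . ; then echo same_wsc; fi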
 

diff --git a/portage_with_autodep/bin/dohtml.py b/portage_with_autodep/bin/dohtml.py
index 00258ec..f0a7f2c 100755
--- a/portage_with_autodep/bin/dohtml.py
+++ b/portage_with_autodep/bin/dohtml.py
@@ -56,9 +56,9 @@ def install(basename, dirname, options, prefix=""):
 		fullpath = dirname + "/" + fullpath
 
 	if options.DOCDESTTREE:
-		destdir = options.D + "usr/share/doc/" + options.PF + "/" + options.DOCDESTTREE + "/" + options.doc_prefix + "/" + prefix
+		destdir = options.ED + "usr/share/doc/" + options.PF + "/" + options.DOCDESTTREE + "/" + options.doc_prefix + "/" + prefix
 	else:
-		destdir = options.D + "usr/share/doc/" + options.PF + "/html/" + options.doc_prefix + "/" + prefix
+		destdir = options.ED + "usr/share/doc/" + options.PF + "/html/" + options.doc_prefix + "/" + prefix
 
 	if not os.path.exists(fullpath):
 		sys.stderr.write("!!! dohtml: %s does not exist\n" % fullpath)
@@ -86,13 +86,16 @@ def install(basename, dirname, options, prefix=""):
 class OptionsClass:
 	def __init__(self):
 		self.PF = ""
-		self.D = ""
+		self.ED = ""
 		self.DOCDESTTREE = ""
 		
 		if "PF" in os.environ:
 			self.PF = os.environ["PF"]
-		if "D" in os.environ:
-			self.D = os.environ["D"]
+		if "force-prefix" not in os.environ.get("FEATURES", "").split() and \
+			os.environ.get("EAPI", "0") in ("0", "1", "2"):
+			self.ED = os.environ.get("D", "")
+		else:
+			self.ED = os.environ.get("ED", "")
 		if "_E_DOCDESTTREE_" in os.environ:
 			self.DOCDESTTREE = os.environ["_E_DOCDESTTREE_"]
 		

diff --git a/portage_with_autodep/bin/eapi.sh b/portage_with_autodep/bin/eapi.sh
new file mode 100755
index 0000000..623b89f
--- /dev/null
+++ b/portage_with_autodep/bin/eapi.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# PHASES
+
+___eapi_has_pkg_pretend() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_src_prepare() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1)$ ]]
+}
+
+___eapi_has_src_configure() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1)$ ]]
+}
+
+___eapi_default_src_test_disables_parallel_jobs() {
+	[[ ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_has_S_WORKDIR_fallback() {
+	[[ ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+# VARIABLES
+
+___eapi_has_prefix_variables() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2)$ || " ${FEATURES} " == *" force-prefix "* ]]
+}
+
+___eapi_has_HDEPEND() {
+	[[ ${1-${EAPI}} =~ ^(5-hdepend)$ ]]
+}
+
+___eapi_has_RDEPEND_DEPEND_fallback() {
+	[[ ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+# HELPERS PRESENCE
+
+___eapi_has_dohard() {
+	[[ ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_dosed() {
+	[[ ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_docompress() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_nonfatal() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_has_doheader() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_has_usex() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_has_master_repositories() {
+	[[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_repository_path() {
+	[[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_available_eclasses() {
+	[[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_eclass_path() {
+	[[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_license_path() {
+	[[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_package_manager_build_user() {
+	[[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+___eapi_has_package_manager_build_group() {
+	[[ ${1-${EAPI}} =~ ^(5-progress)$ ]]
+}
+
+# HELPERS BEHAVIOR
+
+___eapi_best_version_and_has_version_support_--host-root() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_unpack_supports_xz() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2)$ ]]
+}
+
+___eapi_econf_passes_--disable-dependency-tracking() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_econf_passes_--disable-silent-rules() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_use_enable_and_use_with_support_empty_third_argument() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_dodoc_supports_-r() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_doins_and_newins_preserve_symlinks() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_newins_supports_reading_from_standard_input() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi)$ ]]
+}
+
+___eapi_helpers_can_die() {
+	[[ ! ${1-${EAPI}} =~ ^(0|1|2|3)$ ]]
+}
+
+___eapi_disallows_helpers_in_global_scope() {
+	[[ ${1-${EAPI}} =~ ^(4-python|5-progress)$ ]]
+}
+
+___eapi_unpack_is_case_sensitive() {
+	[[ ${1-${EAPI}} =~ ^(0|1|2|3|4|4-python|4-slot-abi|5|5-hdepend)$ ]]
+}
+
+# OTHERS
+
+___eapi_enables_globstar() {
+	[[ ${1-${EAPI}} =~ ^(4-python|5-progress)$ ]]
+}
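
Each predicate takes an optional EAPI argument that defaults to
${EAPI}, so helpers can guard themselves with a single call instead of
matching EAPI strings inline; the doheader helper below uses exactly
this pattern. Usage sketch:

    # Gate a helper on the EAPI of the current ebuild:
    if ! ___eapi_has_doheader; then
        die "${0##*/} is not supported in EAPI ${EAPI}"
    fi

    # Or query an explicit EAPI instead of the ambient one:
    ___eapi_has_pkg_pretend 3 || echo "EAPI 3 has no pkg_pretend"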

diff --git a/portage_with_autodep/bin/ebuild b/portage_with_autodep/bin/ebuild
index f8b6d79..35cdc14 100755
--- a/portage_with_autodep/bin/ebuild
+++ b/portage_with_autodep/bin/ebuild
@@ -1,5 +1,5 @@
 #!/usr/bin/python -O
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
@@ -29,6 +29,7 @@ def debug_signal(signum, frame):
 signal.signal(signal.SIGUSR1, debug_signal)
 
 import imp
+import io
 import optparse
 import os
 
@@ -46,6 +47,8 @@ parser.add_option("--color", help="enable or disable color output",
 	type="choice", choices=("y", "n"))
 parser.add_option("--debug", help="show debug output",
 	action="store_true", dest="debug")
+parser.add_option("--version", help="show version and exit",
+	action="store_true", dest="version")
 parser.add_option("--ignore-default-opts",
 	action="store_true",
 	help="do not use the EBUILD_DEFAULT_OPTS environment variable")
@@ -54,14 +57,6 @@ parser.add_option("--skip-manifest", help="skip all manifest checks",
 
 opts, pargs = parser.parse_args(args=sys.argv[1:])
 
-if len(pargs) < 2:
-	parser.error("missing required args")
-
-if "merge" in pargs:
-	print("Disabling noauto in features... merge disables it. (qmerge doesn't)")
-	os.environ["FEATURES"] = os.environ.get("FEATURES", "") + " -noauto"
-
-os.environ["PORTAGE_CALLER"]="ebuild"
 try:
 	import portage
 except ImportError:
@@ -79,6 +74,13 @@ from portage.const import VDB_PATH
 from _emerge.Package import Package
 from _emerge.RootConfig import RootConfig
 
+if opts.version:
+	print("Portage", portage.VERSION)
+	sys.exit(os.EX_OK)
+
+if len(pargs) < 2:
+	parser.error("missing required args")
+
 if not opts.ignore_default_opts:
 	default_opts = portage.settings.get("EBUILD_DEFAULT_OPTS", "").split()
 	opts, pargs = parser.parse_args(default_opts + sys.argv[1:])
@@ -138,27 +140,24 @@ vdb_path = os.path.realpath(os.path.join(portage.settings['EROOT'], VDB_PATH))
 # Make sure that portdb.findname() returns the correct ebuild.
 if ebuild_portdir != vdb_path and \
 	ebuild_portdir not in portage.portdb.porttrees:
+	portdir_overlay = portage.settings.get("PORTDIR_OVERLAY", "")
 	if sys.hexversion >= 0x3000000:
 		os.environ["PORTDIR_OVERLAY"] = \
-			os.environ.get("PORTDIR_OVERLAY","") + \
+			portdir_overlay + \
 			" " + _shell_quote(ebuild_portdir)
 	else:
 		os.environ["PORTDIR_OVERLAY"] = \
-			os.environ.get("PORTDIR_OVERLAY","") + \
+			_unicode_encode(portdir_overlay,
+			encoding=_encodings['content'], errors='strict') + \
 			" " + _unicode_encode(_shell_quote(ebuild_portdir),
 			encoding=_encodings['content'], errors='strict')
 
 	print("Appending %s to PORTDIR_OVERLAY..." % ebuild_portdir)
 	imp.reload(portage)
 
-# Constrain eclass resolution to the master(s)
-# that are specified in layout.conf (using an
-# approach similar to repoman's).
 myrepo = None
 if ebuild_portdir != vdb_path:
 	myrepo = portage.portdb.getRepositoryName(ebuild_portdir)
-	repo_info = portage.portdb._repo_info[ebuild_portdir]
-	portage.portdb.porttrees = list(repo_info.eclass_db.porttrees)
 
 if not os.path.exists(ebuild):
 	print("'%s' does not exist." % ebuild)
@@ -167,7 +166,12 @@ if not os.path.exists(ebuild):
 ebuild_split = ebuild.split("/")
 cpv = "%s/%s" % (ebuild_split[-3], pf)
 
-if not portage.catpkgsplit(cpv):
+with io.open(_unicode_encode(ebuild, encoding=_encodings['fs'], errors='strict'),
+	mode='r', encoding=_encodings['repo.content'], errors='replace') as f:
+	eapi = portage._parse_eapi_ebuild_head(f)[0]
+if eapi is None:
+	eapi = "0"
+if not portage.catpkgsplit(cpv, eapi=eapi):
 	print("!!! %s does not follow correct package syntax." % (cpv))
 	sys.exit(1)
 
@@ -204,9 +208,10 @@ def discard_digests(myebuild, mysettings, mydbapi):
 		portage._doebuild_manifest_exempt_depend += 1
 		pkgdir = os.path.dirname(myebuild)
 		fetchlist_dict = portage.FetchlistDict(pkgdir, mysettings, mydbapi)
-		from portage.manifest import Manifest
-		mf = Manifest(pkgdir, mysettings["DISTDIR"],
-			fetchlist_dict=fetchlist_dict, manifest1_compat=False)
+		mf = mysettings.repositories.get_repo_for_location(
+			os.path.dirname(os.path.dirname(pkgdir)))
+		mf = mf.load_manifest(pkgdir, mysettings["DISTDIR"],
+			fetchlist_dict=fetchlist_dict)
 		mf.create(requiredDistfiles=None,
 			assumeDistHashesSometimes=True, assumeDistHashesAlways=True)
 		distfiles = fetchlist_dict[cpv]
@@ -228,10 +233,8 @@ build_dir_phases = set(["setup", "unpack", "prepare", "configure", "compile",
 # sourced again even if $T/environment already exists.
 ebuild_changed = False
 if mytree == "porttree" and build_dir_phases.intersection(pargs):
-	metadata, st, emtime = \
-		portage.portdb._pull_valid_cache(cpv, ebuild, ebuild_portdir)
-	if metadata is None:
-		ebuild_changed = True
+	ebuild_changed = \
+		portage.portdb._pull_valid_cache(cpv, ebuild, ebuild_portdir)[0] is None
 
 tmpsettings = portage.config(clone=portage.settings)
 tmpsettings["PORTAGE_VERBOSE"] = "1"
@@ -257,16 +260,20 @@ if "test" in pargs:
 
 tmpsettings.features.discard("fail-clean")
 
+if "merge" in pargs and "noauto" in tmpsettings.features:
+	print("Disabling noauto in features... merge disables it. (qmerge doesn't)")
+	tmpsettings.features.discard("noauto")
+
 try:
 	metadata = dict(zip(Package.metadata_keys,
-		portage.db[portage.settings["ROOT"]][mytree].dbapi.aux_get(
+		portage.db[portage.settings['EROOT']][mytree].dbapi.aux_get(
 		cpv, Package.metadata_keys, myrepo=myrepo)))
 except KeyError:
 	# aux_get failure, message should have been shown on stderr.
 	sys.exit(1)
 
 root_config = RootConfig(portage.settings,
-	portage.db[portage.settings["ROOT"]], None)
+	portage.db[portage.settings['EROOT']], None)
 
 pkg = Package(built=(pkg_type != "ebuild"), cpv=cpv,
 	installed=(pkg_type=="installed"),
@@ -275,7 +282,10 @@ pkg = Package(built=(pkg_type != "ebuild"), cpv=cpv,
 
 # Apply package.env and repo-level settings. This allows per-package
 # FEATURES and other variables (possibly PORTAGE_TMPDIR) to be
-# available as soon as possible.
+# available as soon as possible. Also, note that the only way to ensure
+# that setcpv gets metadata from the correct repository is to pass in
+# a Package instance, as we do here (previously we had to modify
+# portdb.porttrees in order to accomplish this).
 tmpsettings.setcpv(pkg)
 
 def stale_env_warning():
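
The reordering above matters: --version is recognized only after
portage becomes importable, but before the two-argument requirement is
enforced, so a bare version query no longer trips the usage error. In
practice (ebuild path hypothetical):

    ebuild --version                                # prints: Portage <version>
    ebuild /path/to/foo-1.0.ebuild clean compile    # phases still need two args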

diff --git a/portage_with_autodep/bin/ebuild-helpers/sed b/portage_with_autodep/bin/ebuild-helpers/bsd/sed
similarity index 66%
rename from portage_with_autodep/bin/ebuild-helpers/sed
rename to portage_with_autodep/bin/ebuild-helpers/bsd/sed
index b21e856..01b8847 100755
--- a/portage_with_autodep/bin/ebuild-helpers/sed
+++ b/portage_with_autodep/bin/ebuild-helpers/bsd/sed
@@ -1,27 +1,27 @@
 #!/bin/bash
-# Copyright 2007 Gentoo Foundation
+# Copyright 2007-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 scriptpath=${BASH_SOURCE[0]}
 scriptname=${scriptpath##*/}
 
-if [[ sed == ${scriptname} ]] && [[ -n ${ESED} ]]; then
+if [[ sed == ${scriptname} && -n ${ESED} ]]; then
 	exec ${ESED} "$@"
 elif type -P g${scriptname} > /dev/null ; then
 	exec g${scriptname} "$@"
 else
 	old_IFS="${IFS}"
 	IFS=":"
- 
+
 	for path in $PATH; do
-		[[ ${path}/${scriptname} == ${scriptpath} ]] && continue
 		if [[ -x ${path}/${scriptname} ]]; then
-			exec ${path}/${scriptname} "$@"
+			[[ ${path}/${scriptname} -ef ${scriptpath} ]] && continue
+			exec "${path}/${scriptname}" "$@"
 			exit 0
 		fi
 	done
-	
+
 	IFS="${old_IFS}"
 fi
- 
+
 exit 1
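
The rename aside, the notable fix is the switch from a string
comparison to -ef, which compares device and inode, so the wrapper now
skips itself even when it is reached through a symlink or a
differently spelled path. For example (link path hypothetical):

    ln -s /usr/lib/portage/bin/ebuild-helpers/bsd/sed /tmp/sed
    [[ /tmp/sed -ef /usr/lib/portage/bin/ebuild-helpers/bsd/sed ]] \
        && echo "same file, skip it"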

diff --git a/portage_with_autodep/bin/ebuild-helpers/dobin b/portage_with_autodep/bin/ebuild-helpers/dobin
index e385455..f90d893 100755
--- a/portage_with_autodep/bin/ebuild-helpers/dobin
+++ b/portage_with_autodep/bin/ebuild-helpers/dobin
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -9,15 +9,18 @@ if [[ $# -lt 1 ]] ; then
 	exit 1
 fi
 
-if [[ ! -d ${D}${DESTTREE}/bin ]] ; then
-	install -d "${D}${DESTTREE}/bin" || { helpers_die "${0##*/}: failed to install ${D}${DESTTREE}/bin"; exit 2; }
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
+if [[ ! -d ${ED}${DESTTREE}/bin ]] ; then
+	install -d "${ED}${DESTTREE}/bin" || { helpers_die "${0##*/}: failed to install ${ED}${DESTTREE}/bin"; exit 2; }
 fi
 
 ret=0
 
 for x in "$@" ; do
 	if [[ -e ${x} ]] ; then
-		install -m0755 -o ${PORTAGE_INST_UID:-0} -g ${PORTAGE_INST_GID:-0} "${x}" "${D}${DESTTREE}/bin"
+		install -m0755 -o ${PORTAGE_INST_UID:-0} -g ${PORTAGE_INST_GID:-0} "${x}" "${ED}${DESTTREE}/bin"
 	else
 		echo "!!! ${0##*/}: $x does not exist" 1>&2
 		false
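
The three-line ED prologue recurs in each helper below: under EAPIs
0-2 without FEATURES=force-prefix there is no offset prefix, so ED
falls back to D and installed paths are unchanged for old ebuilds. A
minimal sketch of the pattern:

    # Prefix-aware EAPIs export ED (the image dir plus EPREFIX);
    # for the old EAPIs the helpers simply alias it to D:
    [[ " ${FEATURES} " == *" force-prefix "* ]] || \
        case "$EAPI" in 0|1|2) ED=${D} ;; esac
    install -d "${ED}${DESTTREE}/bin"   # dobin's destination either way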

diff --git a/portage_with_autodep/bin/ebuild-helpers/dodir b/portage_with_autodep/bin/ebuild-helpers/dodir
index f40bee7..90a3efe 100755
--- a/portage_with_autodep/bin/ebuild-helpers/dodir
+++ b/portage_with_autodep/bin/ebuild-helpers/dodir
@@ -1,10 +1,13 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
 
-install -d ${DIROPTIONS} "${@/#/${D}/}"
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
+install -d ${DIROPTIONS} "${@/#/${ED}/}"
 ret=$?
 [[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
 exit $ret

diff --git a/portage_with_autodep/bin/ebuild-helpers/dodoc b/portage_with_autodep/bin/ebuild-helpers/dodoc
index 65713db..1f333a6 100755
--- a/portage_with_autodep/bin/ebuild-helpers/dodoc
+++ b/portage_with_autodep/bin/ebuild-helpers/dodoc
@@ -9,7 +9,10 @@ if [ $# -lt 1 ] ; then
 	exit 1 	
 fi
 
-dir="${D}usr/share/doc/${PF}/${_E_DOCDESTTREE_}"
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
+dir="${ED}usr/share/doc/${PF}/${_E_DOCDESTTREE_}"
 if [ ! -d "${dir}" ] ; then
 	install -d "${dir}"
 fi

diff --git a/portage_with_autodep/bin/ebuild-helpers/doexe b/portage_with_autodep/bin/ebuild-helpers/doexe
index 360800e..fb228f9 100755
--- a/portage_with_autodep/bin/ebuild-helpers/doexe
+++ b/portage_with_autodep/bin/ebuild-helpers/doexe
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -9,8 +9,11 @@ if [[ $# -lt 1 ]] ; then
 	exit 1
 fi
 
-if [[ ! -d ${D}${_E_EXEDESTTREE_} ]] ; then
-	install -d "${D}${_E_EXEDESTTREE_}"
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
+if [[ ! -d ${ED}${_E_EXEDESTTREE_} ]] ; then
+	install -d "${ED}${_E_EXEDESTTREE_}"
 fi
 
 TMP=$T/.doexe_tmp
@@ -29,7 +32,7 @@ for x in "$@" ; do
 		mysrc="${x}"
 	fi
 	if [ -e "$mysrc" ] ; then
-		install $EXEOPTIONS "$mysrc" "$D$_E_EXEDESTTREE_"
+		install $EXEOPTIONS "$mysrc" "$ED$_E_EXEDESTTREE_"
 	else
 		echo "!!! ${0##*/}: $mysrc does not exist" 1>&2
 		false

diff --git a/portage_with_autodep/bin/ebuild-helpers/dohard b/portage_with_autodep/bin/ebuild-helpers/dohard
index 2270487..b52fd7c 100755
--- a/portage_with_autodep/bin/ebuild-helpers/dohard
+++ b/portage_with_autodep/bin/ebuild-helpers/dohard
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2007 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 if [[ $# -ne 2 ]] ; then
@@ -7,7 +7,10 @@ if [[ $# -ne 2 ]] ; then
 	exit 1
 fi
 
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
 destdir=${2%/*}
-[[ ! -d ${D}${destdir} ]] && dodir "${destdir}"
+[[ ! -d ${ED}${destdir} ]] && dodir "${destdir}"
 
-exec ln -f "${D}$1" "${D}$2"
+exec ln -f "${ED}$1" "${ED}$2"

diff --git a/portage_with_autodep/bin/ebuild-helpers/doheader b/portage_with_autodep/bin/ebuild-helpers/doheader
new file mode 100755
index 0000000..3795365
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/doheader
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_doheader; then
+	die "${0##*/} is not supported in EAPI ${EAPI}"
+fi
+
+if [[ $# -lt 1 ]] || [[ $1 == -r && $# -lt 2 ]] ; then
+	__helpers_die "${0##*/}: at least one argument needed"
+	exit 1
+fi
+
+exec \
+env \
+INSDESTTREE="/usr/include/" \
+doins "$@"

diff --git a/portage_with_autodep/bin/ebuild-helpers/doinfo b/portage_with_autodep/bin/ebuild-helpers/doinfo
index 54fb8da..8fd7d45 100755
--- a/portage_with_autodep/bin/ebuild-helpers/doinfo
+++ b/portage_with_autodep/bin/ebuild-helpers/doinfo
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -9,11 +9,14 @@ if [[ -z $1 ]] ; then
 	exit 1 	
 fi
 
-if [[ ! -d ${D}usr/share/info ]] ; then
-	install -d "${D}usr/share/info" || { helpers_die "${0##*/}: failed to install ${D}usr/share/info"; exit 1; }
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
+if [[ ! -d ${ED}usr/share/info ]] ; then
+	install -d "${ED}usr/share/info" || { helpers_die "${0##*/}: failed to install ${ED}usr/share/info"; exit 1; }
 fi
 
-install -m0644 "$@" "${D}usr/share/info"
+install -m0644 "$@" "${ED}usr/share/info"
 rval=$?
 if [ $rval -ne 0 ] ; then
 	for x in "$@" ; do

diff --git a/portage_with_autodep/bin/ebuild-helpers/doins b/portage_with_autodep/bin/ebuild-helpers/doins
index 7dec146..443bfdb 100755
--- a/portage_with_autodep/bin/ebuild-helpers/doins
+++ b/portage_with_autodep/bin/ebuild-helpers/doins
@@ -27,12 +27,15 @@ else
 	DOINSRECUR=n
 fi
 
-if [[ ${INSDESTTREE#${D}} != "${INSDESTTREE}" ]]; then
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) export ED="${D}" ;; esac
+
+if [[ ${INSDESTTREE#${ED}} != "${INSDESTTREE}" ]]; then
 	vecho "-------------------------------------------------------" 1>&2
-	vecho "You should not use \${D} with helpers." 1>&2
+	vecho "You should not use \${D} or \${ED} with helpers." 1>&2
 	vecho "  --> ${INSDESTTREE}" 1>&2
 	vecho "-------------------------------------------------------" 1>&2
-	helpers_die "${0##*/} used with \${D}"
+	helpers_die "${0##*/} used with \${D} or \${ED}"
 	exit 1
 fi
 
@@ -49,7 +52,7 @@ export TMP=$T/.doins_tmp
 # Use separate directories to avoid potential name collisions.
 mkdir -p "$TMP"/{1,2}
 
-[[ ! -d ${D}${INSDESTTREE} ]] && dodir "${INSDESTTREE}"
+[[ ! -d ${ED}${INSDESTTREE} ]] && dodir "${INSDESTTREE}"
 
 _doins() {
 	local mysrc="$1" mydir="$2" cleanup="" rval
@@ -63,8 +66,8 @@ _doins() {
 		# $PORTAGE_ACTUAL_DISTDIR/.
 		if [ $PRESERVE_SYMLINKS = y ] && \
 			! [[ $(readlink "$mysrc") == "$PORTAGE_ACTUAL_DISTDIR"/* ]] ; then
-			rm -rf "$D$INSDESTTREE/$mydir/${mysrc##*/}" || return $?
-			cp -P "$mysrc" "$D$INSDESTTREE/$mydir/${mysrc##*/}"
+			rm -rf "${ED}$INSDESTTREE/$mydir/${mysrc##*/}" || return $?
+			cp -P "$mysrc" "${ED}$INSDESTTREE/$mydir/${mysrc##*/}"
 			return $?
 		else
 			cp "$mysrc" "$TMP/2/${mysrc##*/}" || return $?
@@ -73,7 +76,7 @@ _doins() {
 		fi
 	fi
 
-	install ${INSOPTIONS} "${mysrc}" "${D}${INSDESTTREE}/${mydir}"
+	install ${INSOPTIONS} "${mysrc}" "${ED}${INSDESTTREE}/${mydir}"
 	rval=$?
 	[[ -n ${cleanup} ]] && rm -f "${cleanup}"
 	[ $rval -ne 0 ] && echo "!!! ${0##*/}: $mysrc does not exist" 1>&2

diff --git a/portage_with_autodep/bin/ebuild-helpers/dolib b/portage_with_autodep/bin/ebuild-helpers/dolib
index 87ade42..9af5418 100755
--- a/portage_with_autodep/bin/ebuild-helpers/dolib
+++ b/portage_with_autodep/bin/ebuild-helpers/dolib
@@ -1,9 +1,12 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
 
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
 # Setup ABI cruft
 LIBDIR_VAR="LIBDIR_${ABI}"
 if [[ -n ${ABI} && -n ${!LIBDIR_VAR} ]] ; then
@@ -12,7 +15,7 @@ fi
 unset LIBDIR_VAR
 # we need this to default to lib so that things don't break
 CONF_LIBDIR=${CONF_LIBDIR:-lib}
-libdir="${D}${DESTTREE}/${CONF_LIBDIR}"
+libdir="${ED}${DESTTREE}/${CONF_LIBDIR}"
 
 
 if [[ $# -lt 1 ]] ; then

diff --git a/portage_with_autodep/bin/ebuild-helpers/doman b/portage_with_autodep/bin/ebuild-helpers/doman
index 4561bef..b4047ce 100755
--- a/portage_with_autodep/bin/ebuild-helpers/doman
+++ b/portage_with_autodep/bin/ebuild-helpers/doman
@@ -9,6 +9,9 @@ if [[ $# -lt 1 ]] ; then
 	exit 1
 fi
 
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
 i18n=""
 
 ret=0
@@ -44,11 +47,11 @@ for x in "$@" ; do
 
 	if [[ ${mandir} == *man[0-9n] ]] ; then
 		if [[ -s ${x} ]] ; then
-			if [[ ! -d ${D}/usr/share/man/${mandir} ]] ; then
-				install -d "${D}/usr/share/man/${mandir}"
+			if [[ ! -d ${ED}/usr/share/man/${mandir} ]] ; then
+				install -d "${ED}/usr/share/man/${mandir}"
 			fi
 
-			install -m0644 "${x}" "${D}/usr/share/man/${mandir}/${name}"
+			install -m0644 "${x}" "${ED}/usr/share/man/${mandir}/${name}"
 			((ret|=$?))
 		elif [[ ! -e ${x} ]] ; then
 			echo "!!! ${0##*/}: $x does not exist" 1>&2

diff --git a/portage_with_autodep/bin/ebuild-helpers/domo b/portage_with_autodep/bin/ebuild-helpers/domo
index 4737f44..d994343 100755
--- a/portage_with_autodep/bin/ebuild-helpers/domo
+++ b/portage_with_autodep/bin/ebuild-helpers/domo
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -9,8 +9,12 @@ if [ ${mynum} -lt 1 ] ; then
 	helpers_die "${0}: at least one argument needed"
 	exit 1
 fi
-if [ ! -d "${D}${DESTTREE}/share/locale" ] ; then
-	install -d "${D}${DESTTREE}/share/locale/"
+
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
+if [ ! -d "${ED}${DESTTREE}/share/locale" ] ; then
+	install -d "${ED}${DESTTREE}/share/locale/"
 fi
 
 ret=0
@@ -18,7 +22,7 @@ ret=0
 for x in "$@" ; do
 	if [ -e "${x}" ] ; then
 		mytiny="${x##*/}"
-		mydir="${D}${DESTTREE}/share/locale/${mytiny%.*}/LC_MESSAGES"
+		mydir="${ED}${DESTTREE}/share/locale/${mytiny%.*}/LC_MESSAGES"
 		if [ ! -d "${mydir}" ] ; then
 			install -d "${mydir}"
 		fi

diff --git a/portage_with_autodep/bin/ebuild-helpers/dosbin b/portage_with_autodep/bin/ebuild-helpers/dosbin
index 87a3091..d101c8a 100755
--- a/portage_with_autodep/bin/ebuild-helpers/dosbin
+++ b/portage_with_autodep/bin/ebuild-helpers/dosbin
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -9,15 +9,18 @@ if [[ $# -lt 1 ]] ; then
 	exit 1
 fi
 
-if [[ ! -d ${D}${DESTTREE}/sbin ]] ; then
-	install -d "${D}${DESTTREE}/sbin" || { helpers_die "${0##*/}: failed to install ${D}${DESTTREE}/sbin"; exit 2; }
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
+if [[ ! -d ${ED}${DESTTREE}/sbin ]] ; then
+	install -d "${ED}${DESTTREE}/sbin" || { helpers_die "${0##*/}: failed to install ${ED}${DESTTREE}/sbin"; exit 2; }
 fi
 
 ret=0
 
 for x in "$@" ; do
 	if [[ -e ${x} ]] ; then
-		install -m0755 -o ${PORTAGE_INST_UID:-0} -g ${PORTAGE_INST_GID:-0} "${x}" "${D}${DESTTREE}/sbin"
+		install -m0755 -o ${PORTAGE_INST_UID:-0} -g ${PORTAGE_INST_GID:-0} "${x}" "${ED}${DESTTREE}/sbin"
 	else
 		echo "!!! ${0##*/}: ${x} does not exist" 1>&2
 		false

diff --git a/portage_with_autodep/bin/ebuild-helpers/dosed b/portage_with_autodep/bin/ebuild-helpers/dosed
index afc949b..f202df7 100755
--- a/portage_with_autodep/bin/ebuild-helpers/dosed
+++ b/portage_with_autodep/bin/ebuild-helpers/dosed
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2006 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 if [[ $# -lt 1 ]] ; then
@@ -7,12 +7,15 @@ if [[ $# -lt 1 ]] ; then
 	exit 1
 fi
 
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
 ret=0
 file_found=0
-mysed="s:${D}::g"
+mysed="s:${ED}::g"
 
 for x in "$@" ; do
-	y=$D${x#/}
+	y=$ED${x#/}
 	if [ -e "${y}" ] ; then
 		if [ -f "${y}" ] ; then
 			file_found=1

diff --git a/portage_with_autodep/bin/ebuild-helpers/dosym b/portage_with_autodep/bin/ebuild-helpers/dosym
index 500dad0..2489e22 100755
--- a/portage_with_autodep/bin/ebuild-helpers/dosym
+++ b/portage_with_autodep/bin/ebuild-helpers/dosym
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -9,10 +9,22 @@ if [[ $# -ne 2 ]] ; then
 	exit 1
 fi
 
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
+if [[ ${2} == */ ]] || \
+	[[ -d ${ED}${2} && ! -L ${ED}${2} ]] ; then
+	# implicit basename not allowed by PMS (bug #379899)
+	eqawarn "QA Notice: dosym target omits basename: '${2}'"
+fi
+
 destdir=${2%/*}
-[[ ! -d ${D}${destdir} ]] && dodir "${destdir}"
+[[ ! -d ${ED}${destdir} ]] && dodir "${destdir}"
+# when absolute, prefix with offset for Gentoo Prefix
+target="${1}"
+[[ ${target:0:1} == "/" ]] && target="${EPREFIX}${target}"
+ln -snf "${target}" "${ED}${2}"
 
-ln -snf "$1" "${D}$2"
 ret=$?
 [[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
 exit $ret
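
dosym now QA-warns when the link name has an implicit basename (a trailing slash, or an existing directory) and, for Gentoo Prefix, prepends ${EPREFIX} to absolute link targets while leaving relative targets alone. A sketch of just the target rewrite, assuming a hypothetical prefix:

    #!/bin/bash
    # Hypothetical offset; in Prefix installs Portage exports EPREFIX.
    EPREFIX=/home/user/gentoo

    rewrite_target() {
        local target=$1
        # Only absolute targets gain the offset; relative ones already
        # resolve inside the image/prefix.
        [[ ${target:0:1} == "/" ]] && target="${EPREFIX}${target}"
        echo "${target}"
    }

    rewrite_target /usr/bin/python   # -> /home/user/gentoo/usr/bin/python
    rewrite_target ../bin/python     # -> ../bin/python (unchanged)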

diff --git a/portage_with_autodep/bin/ebuild-helpers/ecompressdir b/portage_with_autodep/bin/ebuild-helpers/ecompressdir
index 7a95120..a2c9e52 100755
--- a/portage_with_autodep/bin/ebuild-helpers/ecompressdir
+++ b/portage_with_autodep/bin/ebuild-helpers/ecompressdir
@@ -1,27 +1,30 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/helper-functions.sh
 
 if [[ -z $1 ]] ; then
 	helpers_die "${0##*/}: at least one argument needed"
 	exit 1
 fi
 
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} EPREFIX= ;; esac
+
 case $1 in
 	--ignore)
 		shift
 		for skip in "$@" ; do
-			[[ -d ${D}${skip} || -f ${D}${skip} ]] \
-				&& >> "${D}${skip}.ecompress.skip"
+			[[ -d ${ED}${skip} || -f ${ED}${skip} ]] \
+				&& >> "${ED}${skip}.ecompress.skip"
 		done
 		exit 0
 		;;
 	--queue)
 		shift
 		set -- "${@/%/.ecompress.dir}"
-		set -- "${@/#/${D}}"
+		set -- "${@/#/${ED}}"
 		ret=0
 		for x in "$@" ; do
 			>> "$x"
@@ -32,10 +35,10 @@ case $1 in
 		;;
 	--dequeue)
 		[[ -n $2 ]] && vecho "${0##*/}: --dequeue takes no additional arguments" 1>&2
-		find "${D}" -name '*.ecompress.dir' -print0 \
-			| sed -e 's:\.ecompress\.dir::g' -e "s:${D}:/:g" \
+		find "${ED}" -name '*.ecompress.dir' -print0 \
+			| sed -e 's:\.ecompress\.dir::g' -e "s:${ED}:/:g" \
 			| ${XARGS} -0 ecompressdir
-		find "${D}" -name '*.ecompress.skip' -print0 | ${XARGS} -0 rm -f
+		find "${ED}" -name '*.ecompress.skip' -print0 | ${XARGS} -0 rm -f
 		exit 0
 		;;
 	--*)
@@ -66,6 +69,17 @@ funk_up_dir() {
 	while read -r -d $'\0' brokenlink ; do
 		[[ -e ${brokenlink} ]] && continue
 		olddest=$(readlink "${brokenlink}")
+		# Ignore temporarily broken symlinks due to
+		# _relocate_skip_dirs (bug #399595), and handle
+		# absolute symlinks to files that aren't merged
+		# yet (bug #405327).
+		if [[ ${olddest} == /* ]] ; then
+			[ -e "${D}${olddest}" ] && continue
+			skip_dir_dest=${T}/ecompress-skip/${olddest#${EPREFIX}}
+		else
+			skip_dir_dest=${T}/ecompress-skip/${actual_dir#${ED}}/${brokenlink%/*}/${olddest}
+		fi
+		[[ -e ${skip_dir_dest} ]] && continue
 		[[ ${act} == "compress" ]] \
 			&& newdest="${olddest}${suffix}" \
 			|| newdest="${olddest%${suffix}}"
@@ -95,18 +109,28 @@ _relocate_skip_dirs() {
 		mv "${src}.ecompress.skip" "${dst}.ecompress.skip"
 	done
 }
-hide_skip_dirs()    { _relocate_skip_dirs "${D}" "${T}"/ecompress-skip/ ; }
-restore_skip_dirs() { _relocate_skip_dirs "${T}"/ecompress-skip/ "${D}" ; }
+hide_skip_dirs()    { _relocate_skip_dirs "${ED}" "${T}"/ecompress-skip/ ; }
+restore_skip_dirs() { _relocate_skip_dirs "${T}"/ecompress-skip/ "${ED}" ; }
 
 ret=0
 
 rm -rf "${T}"/ecompress-skip
 
+decompressors=(
+	".Z"    "gunzip -f"
+	".gz"   "gunzip -f"
+	".bz2"  "bunzip2 -f"
+	".xz"   "unxz -f"
+	".lzma" "unxz -f"
+)
+
+multijob_init
+
 for dir in "$@" ; do
 	dir=${dir#/}
-	dir="${D}${dir}"
+	dir="${ED}${dir}"
 	if [[ ! -d ${dir} ]] ; then
-		vecho "${0##*/}: /${dir#${D}} does not exist!"
+		vecho "${0##*/}: /${dir#${ED}} does not exist!"
 		continue
 	fi
 	cd "${dir}"
@@ -122,18 +146,31 @@ for dir in "$@" ; do
 	find "${dir}" -type f -name '*.ecompress.file' -print0 | ${XARGS} -0 rm -f
 
 	# not uncommon for packages to compress doc files themselves
-	funk_up_dir "decompress" ".Z" "gunzip -f"
-	funk_up_dir "decompress" ".gz" "gunzip -f"
-	funk_up_dir "decompress" ".bz2" "bunzip2 -f"
+	for (( i = 0; i < ${#decompressors[@]}; i += 2 )) ; do
+		# It's faster to parallelize at this stage than to try to
+		# parallelize the compressors.  This is because the find|xargs
+		# ends up launching fewer compressors overall, so the overhead
+		# of forking children ends up dominating.
+		(
+		multijob_child_init
+		funk_up_dir "decompress" "${decompressors[i]}" "${decompressors[i+1]}"
+		) &
+		multijob_post_fork
+		: $(( ret |= $? ))
+	done
 
 	# forcibly break all hard links as some compressors whine about it
 	find "${dir}" -type f -links +1 -exec env file="{}" sh -c \
 		'cp -p "${file}" "${file}.ecompress.break" ; mv -f "${file}.ecompress.break" "${file}"' \;
 
+	multijob_finish
+	: $(( ret |= $? ))
+
 	# now lets do our work
-	[[ -z ${suffix} ]] && continue
-	vecho "${0##*/}: $(ecompress --bin) /${actual_dir#${D}}"
-	funk_up_dir "compress" "${suffix}" "ecompress"
+	if [[ -n ${suffix} ]] ; then
+		vecho "${0##*/}: $(ecompress --bin) /${actual_dir#${ED}}"
+		funk_up_dir "compress" "${suffix}" "ecompress"
+	fi
 
 	# finally, restore the skipped stuff
 	restore_skip_dirs
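
The decompressor table introduced above is a flat bash array of suffix/command pairs walked two elements at a time, each pass forked into the multijob pool from helper-functions.sh. A minimal sketch of the flat-pair iteration, leaving out the multijob machinery:

    #!/bin/bash
    # A flat (suffix, command) table, iterated in steps of two.
    decompressors=(
        ".Z"    "gunzip -f"
        ".gz"   "gunzip -f"
        ".bz2"  "bunzip2 -f"
    )
    for (( i = 0; i < ${#decompressors[@]}; i += 2 )) ; do
        echo "suffix=${decompressors[i]} command=${decompressors[i+1]}"
    done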

diff --git a/portage_with_autodep/bin/ebuild-helpers/fowners b/portage_with_autodep/bin/ebuild-helpers/fowners
index 4cc6bfa..a213c9e 100755
--- a/portage_with_autodep/bin/ebuild-helpers/fowners
+++ b/portage_with_autodep/bin/ebuild-helpers/fowners
@@ -1,13 +1,22 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
 
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) EPREFIX= ED=${D} ;; esac
+
 # we can't prefix all arguments because
 # chown takes random options
 slash="/"
-chown "${@/#${slash}/${D}${slash}}"
+chown "${@/#${slash}/${ED}${slash}}"
 ret=$?
+
+if [[ ${ret} != 0 && -n ${EPREFIX} && ${EUID} != 0 ]] ; then
+	ewarn "fowners failure ignored in Prefix with non-privileged user"
+	exit 0
+fi
+
 [[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
 exit $ret
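
Because chown takes arbitrary options, fowners cannot blindly prefix every argument; the "${@/#${slash}/${ED}${slash}}" expansion rewrites only arguments that begin with a slash, so options and owner specs pass through untouched. A sketch with a hypothetical image path:

    #!/bin/bash
    ED=/var/tmp/portage/image   # hypothetical image directory
    slash="/"
    set -- -R root:root /usr/bin/foo
    # Only the argument starting with "/" is rewritten:
    printf '%s\n' "${@/#${slash}/${ED}${slash}}"
    # -> -R
    # -> root:root
    # -> /var/tmp/portage/image/usr/bin/foo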

diff --git a/portage_with_autodep/bin/ebuild-helpers/fperms b/portage_with_autodep/bin/ebuild-helpers/fperms
index 0260bdc..a2f77ea 100755
--- a/portage_with_autodep/bin/ebuild-helpers/fperms
+++ b/portage_with_autodep/bin/ebuild-helpers/fperms
@@ -1,13 +1,16 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
 
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
 # we can't prefix all arguments because
 # chmod takes random options
 slash="/"
-chmod "${@/#${slash}/${D}${slash}}"
+chmod "${@/#${slash}/${ED}${slash}}"
 ret=$?
 [[ $ret -ne 0 ]] && helpers_die "${0##*/} failed"
 exit $ret

diff --git a/portage_with_autodep/bin/ebuild-helpers/keepdir b/portage_with_autodep/bin/ebuild-helpers/keepdir
new file mode 100755
index 0000000..bec2feb
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/keepdir
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+if ! ___eapi_has_prefix_variables; then
+	ED=${D}
+fi
+
+dodir "$@"
+ret=$?
+
+for x in "$@"; do
+	>> "${ED}${x}/.keep_${CATEGORY}_${PN}-${SLOT%/*}" || \
+		{ echo "!!! ${0##*/}: cannot write .keep in ${ED}${x}" 1>&2; ret=1; }
+done
+
+[[ ${ret} -ne 0 ]] && __helpers_die "${0##*/} failed"
+exit ${ret}
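
keepdir relies on dodir plus an empty marker file so that otherwise-empty directories survive merging and unmerging; ${SLOT%/*} strips the sub-slot used by newer EAPIs. A standalone sketch with hypothetical package metadata:

    #!/bin/bash
    # Hypothetical values; Portage exports these during an ebuild phase.
    ED=/var/tmp/portage/image CATEGORY=app-misc PN=foo SLOT=0/1

    x=/var/lib/foo
    mkdir -p "${ED}${x}"
    # ${SLOT%/*} drops the sub-slot ("0/1" -> "0"):
    >> "${ED}${x}/.keep_${CATEGORY}_${PN}-${SLOT%/*}"
    ls -A "${ED}${x}"   # -> .keep_app-misc_foo-0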

diff --git a/portage_with_autodep/bin/ebuild-helpers/newbin b/portage_with_autodep/bin/ebuild-helpers/newbin
index 30f19b0..bf98744 100755
--- a/portage_with_autodep/bin/ebuild-helpers/newbin
+++ b/portage_with_autodep/bin/ebuild-helpers/newbin
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -14,6 +14,9 @@ if [ ! -e "$1" ] ; then
 	exit 1
 fi
 
+(($#>2)) && \
+	eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
+
 rm -rf "${T}/${2}" && \
 cp -f "${1}" "${T}/${2}" && \
 exec dobin "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newconfd b/portage_with_autodep/bin/ebuild-helpers/newconfd
index 5752cfa..fa3710d 100755
--- a/portage_with_autodep/bin/ebuild-helpers/newconfd
+++ b/portage_with_autodep/bin/ebuild-helpers/newconfd
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -14,6 +14,9 @@ if [ ! -e "$1" ] ; then
 	exit 1
 fi
 
+(($#>2)) && \
+	eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
+
 rm -rf "${T}/${2}" && \
 cp -f "${1}" "${T}/${2}" && \
 exec doconfd "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newdoc b/portage_with_autodep/bin/ebuild-helpers/newdoc
index f97ce0d..df6fb1d 100755
--- a/portage_with_autodep/bin/ebuild-helpers/newdoc
+++ b/portage_with_autodep/bin/ebuild-helpers/newdoc
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -14,6 +14,9 @@ if [ ! -e "$1" ] ; then
 	exit 1
 fi
 
+(($#>2)) && \
+	eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
+
 rm -rf "${T}/${2}" && \
 cp -f "${1}" "${T}/${2}" && \
 exec dodoc "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newenvd b/portage_with_autodep/bin/ebuild-helpers/newenvd
index 83c556e..c54af05 100755
--- a/portage_with_autodep/bin/ebuild-helpers/newenvd
+++ b/portage_with_autodep/bin/ebuild-helpers/newenvd
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -14,6 +14,9 @@ if [ ! -e "$1" ] ; then
 	exit 1
 fi
 
+(($#>2)) && \
+	eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
+
 rm -rf "${T}/${2}" && \
 cp -f "${1}" "${T}/${2}" && \
 exec doenvd "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newexe b/portage_with_autodep/bin/ebuild-helpers/newexe
index 92dbe9f..9bcf64b 100755
--- a/portage_with_autodep/bin/ebuild-helpers/newexe
+++ b/portage_with_autodep/bin/ebuild-helpers/newexe
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -14,6 +14,9 @@ if [ ! -e "$1" ] ; then
 	exit 1
 fi
 
+(($#>2)) && \
+	eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
+
 rm -rf "${T}/${2}" && \
 cp -f "${1}" "${T}/${2}" && \
 exec doexe "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newheader b/portage_with_autodep/bin/ebuild-helpers/newheader
new file mode 120000
index 0000000..59a0db2
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/newheader
@@ -0,0 +1 @@
+newins
\ No newline at end of file

diff --git a/portage_with_autodep/bin/ebuild-helpers/newinitd b/portage_with_autodep/bin/ebuild-helpers/newinitd
index fc6003a..03bbe68 100755
--- a/portage_with_autodep/bin/ebuild-helpers/newinitd
+++ b/portage_with_autodep/bin/ebuild-helpers/newinitd
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -14,6 +14,9 @@ if [ ! -e "$1" ] ; then
 	exit 1
 fi
 
+(($#>2)) && \
+	eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
+
 rm -rf "${T}/${2}" && \
 cp -f "${1}" "${T}/${2}" && \
 exec doinitd "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newins b/portage_with_autodep/bin/ebuild-helpers/newins
index 065477f..adf2d80 100755
--- a/portage_with_autodep/bin/ebuild-helpers/newins
+++ b/portage_with_autodep/bin/ebuild-helpers/newins
@@ -14,6 +14,9 @@ if [ ! -e "$1" ] ; then
 	exit 1
 fi
 
+(($#>2)) && \
+	eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
+
 rm -rf "${T}/${2}" || exit $?
 case "$EAPI" in
 	0|1|2|3|3_pre2)
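
Every new* helper gains the same guard seen above: (($#>2)) tests the argument count arithmetically and ${@:3} expands to everything from the third argument onward, so stray arguments are reported instead of silently dropped. A sketch of the check in isolation:

    #!/bin/bash
    # eqawarn normally comes from isolated-functions.sh; echo stands in here.
    check_args() {
        (( $# > 2 )) && \
            echo "QA Notice: called with more than 2 arguments: ${@:3}"
    }

    check_args file newname               # silent
    check_args file newname stray extra   # warns about "stray extra"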

diff --git a/portage_with_autodep/bin/ebuild-helpers/newlib.a b/portage_with_autodep/bin/ebuild-helpers/newlib.a
index eef4104..7ff8195 100755
--- a/portage_with_autodep/bin/ebuild-helpers/newlib.a
+++ b/portage_with_autodep/bin/ebuild-helpers/newlib.a
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -14,6 +14,9 @@ if [ ! -e "$1" ] ; then
 	exit 1
 fi
 
+(($#>2)) && \
+	eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
+
 rm -rf "${T}/${2}" && \
 cp -f "${1}" "${T}/${2}" && \
 exec dolib.a "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newlib.so b/portage_with_autodep/bin/ebuild-helpers/newlib.so
index c8696f3..fd4c097 100755
--- a/portage_with_autodep/bin/ebuild-helpers/newlib.so
+++ b/portage_with_autodep/bin/ebuild-helpers/newlib.so
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -14,6 +14,9 @@ if [ ! -e "$1" ] ; then
 	exit 1
 fi
 
+(($#>2)) && \
+	eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
+
 rm -rf "${T}/${2}" && \
 cp -f "${1}" "${T}/${2}" && \
 exec dolib.so "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newman b/portage_with_autodep/bin/ebuild-helpers/newman
index ffb8a2d..889e0f9 100755
--- a/portage_with_autodep/bin/ebuild-helpers/newman
+++ b/portage_with_autodep/bin/ebuild-helpers/newman
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -14,6 +14,9 @@ if [ ! -e "$1" ] ; then
 	exit 1
 fi
 
+(($#>2)) && \
+	eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
+
 rm -rf "${T}/${2}" && \
 cp -f "${1}" "${T}/${2}" && \
 exec doman "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/newsbin b/portage_with_autodep/bin/ebuild-helpers/newsbin
index 82242aa..9df0af2 100755
--- a/portage_with_autodep/bin/ebuild-helpers/newsbin
+++ b/portage_with_autodep/bin/ebuild-helpers/newsbin
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -14,6 +14,9 @@ if [ ! -e "$1" ] ; then
 	exit 1
 fi
 
+(($#>2)) && \
+	eqawarn "QA Notice: ${0##*/} called with more than 2 arguments: ${@:3}"
+
 rm -rf "${T}/${2}" && \
 cp -f "${1}" "${T}/${2}" && \
 exec dosbin "${T}/${2}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepall b/portage_with_autodep/bin/ebuild-helpers/prepall
index 701ecba..49e646c 100755
--- a/portage_with_autodep/bin/ebuild-helpers/prepall
+++ b/portage_with_autodep/bin/ebuild-helpers/prepall
@@ -4,12 +4,15 @@
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
 
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
 if has chflags $FEATURES ; then
 	# Save all the file flags for restoration at the end of prepall.
-	mtree -c -p "${D}" -k flags > "${T}/bsdflags.mtree"
+	mtree -c -p "${ED}" -k flags > "${T}/bsdflags.mtree"
 	# Remove all the file flags so that prepall can do anything necessary.
-	chflags -R noschg,nouchg,nosappnd,nouappnd "${D}"
-	chflags -R nosunlnk,nouunlnk "${D}" 2>/dev/null
+	chflags -R noschg,nouchg,nosappnd,nouappnd "${ED}"
+	chflags -R nosunlnk,nouunlnk "${ED}" 2>/dev/null
 fi
 
 prepallman
@@ -19,5 +22,5 @@ prepallstrip
 
 if has chflags $FEATURES ; then
 	# Restore all the file flags that were saved at the beginning of prepall.
-	mtree -U -e -p "${D}" -k flags < "${T}/bsdflags.mtree" &> /dev/null
+	mtree -U -e -p "${ED}" -k flags < "${T}/bsdflags.mtree" &> /dev/null
 fi

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepalldocs b/portage_with_autodep/bin/ebuild-helpers/prepalldocs
index fdc735d..560a02b 100755
--- a/portage_with_autodep/bin/ebuild-helpers/prepalldocs
+++ b/portage_with_autodep/bin/ebuild-helpers/prepalldocs
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
@@ -8,8 +8,10 @@ if [[ -n $1 ]] ; then
 	vecho "${0##*/}: invalid usage; takes no arguments" 1>&2
 fi
 
-cd "${D}"
-[[ -d usr/share/doc ]] || exit 0
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
+[[ -d ${ED}usr/share/doc ]] || exit 0
 
 ecompressdir --ignore /usr/share/doc/${PF}/html
 ecompressdir --queue /usr/share/doc

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepallinfo b/portage_with_autodep/bin/ebuild-helpers/prepallinfo
index 0d97803..db9bbfa 100755
--- a/portage_with_autodep/bin/ebuild-helpers/prepallinfo
+++ b/portage_with_autodep/bin/ebuild-helpers/prepallinfo
@@ -1,9 +1,12 @@
 #!/bin/bash
-# Copyright 1999-2006 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
 
-[[ ! -d ${D}usr/share/info ]] && exit 0
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
+[[ -d ${ED}usr/share/info ]] || exit 0
 
 exec prepinfo

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepallman b/portage_with_autodep/bin/ebuild-helpers/prepallman
index e50de6d..dee1c72 100755
--- a/portage_with_autodep/bin/ebuild-helpers/prepallman
+++ b/portage_with_autodep/bin/ebuild-helpers/prepallman
@@ -7,11 +7,14 @@ source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
 # replaced by controllable compression in EAPI 4
 has "${EAPI}" 0 1 2 3 || exit 0
 
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
 ret=0
 
-find "${D}" -type d -name man > "${T}"/prepallman.filelist
+find "${ED}" -type d -name man > "${T}"/prepallman.filelist
 while read -r mandir ; do
-	mandir=${mandir#${D}}
+	mandir=${mandir#${ED}}
 	prepman "${mandir%/man}"
 	((ret|=$?))
 done < "${T}"/prepallman.filelist

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepallstrip b/portage_with_autodep/bin/ebuild-helpers/prepallstrip
index ec12ce6..28320d9 100755
--- a/portage_with_autodep/bin/ebuild-helpers/prepallstrip
+++ b/portage_with_autodep/bin/ebuild-helpers/prepallstrip
@@ -1,5 +1,8 @@
 #!/bin/bash
-# Copyright 1999-2006 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-exec prepstrip "${D}"
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
+exec prepstrip "${ED}"

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepinfo b/portage_with_autodep/bin/ebuild-helpers/prepinfo
index 691fd13..ffe2ece 100755
--- a/portage_with_autodep/bin/ebuild-helpers/prepinfo
+++ b/portage_with_autodep/bin/ebuild-helpers/prepinfo
@@ -4,17 +4,20 @@
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
 
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
 if [[ -z $1 ]] ; then
 	infodir="/usr/share/info"
 else
-	if [[ -d ${D}$1/share/info ]] ; then
+	if [[ -d ${ED}$1/share/info ]] ; then
 		infodir="$1/share/info"
 	else
 		infodir="$1/info"
 	fi
 fi
 
-if [[ ! -d ${D}${infodir} ]] ; then
+if [[ ! -d ${ED}${infodir} ]] ; then
 	if [[ -n $1 ]] ; then
 		vecho "${0##*/}: '${infodir}' does not exist!"
 		exit 1
@@ -23,7 +26,7 @@ if [[ ! -d ${D}${infodir} ]] ; then
 	fi
 fi
 
-find "${D}${infodir}" -type d -print0 | while read -r -d $'\0' x ; do
+find "${ED}${infodir}" -type d -print0 | while read -r -d $'\0' x ; do
 	for f in "${x}"/.keepinfodir*; do
 		[[ -e ${f} ]] && continue 2
 	done

diff --git a/portage_with_autodep/bin/ebuild-helpers/preplib b/portage_with_autodep/bin/ebuild-helpers/preplib
index 76aabe6..6e91cf3 100755
--- a/portage_with_autodep/bin/ebuild-helpers/preplib
+++ b/portage_with_autodep/bin/ebuild-helpers/preplib
@@ -1,11 +1,14 @@
 #!/bin/bash
-# Copyright 1999-2006 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
 
 eqawarn "QA Notice: Deprecated call to 'preplib'"
 
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
 LIBDIR_VAR="LIBDIR_${ABI}"
 if [ -n "${ABI}" -a -n "${!LIBDIR_VAR}" ]; then
 	CONF_LIBDIR="${!LIBDIR_VAR}"
@@ -18,9 +21,9 @@ if [ -z "${CONF_LIBDIR}" ]; then
 fi
 
 if [ -z "$1" ] ; then
-	z="${D}usr/${CONF_LIBDIR}"
+	z="${ED}usr/${CONF_LIBDIR}"
 else
-	z="${D}$1/${CONF_LIBDIR}"
+	z="${ED}$1/${CONF_LIBDIR}"
 fi
 
 if [ -d "${z}" ] ; then

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepman b/portage_with_autodep/bin/ebuild-helpers/prepman
index c9add8a..f96b641 100755
--- a/portage_with_autodep/bin/ebuild-helpers/prepman
+++ b/portage_with_autodep/bin/ebuild-helpers/prepman
@@ -4,14 +4,17 @@
 
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
 
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "$EAPI" in 0|1|2) ED=${D} ;; esac
+
 if [[ -z $1 ]] ; then 
-	mandir="${D}usr/share/man"
+	mandir="${ED}usr/share/man"
 else
-	mandir="${D}$1/man"
+	mandir="${ED}$1/man"
 fi
 
 if [[ ! -d ${mandir} ]] ; then
-	eqawarn "QA Notice: prepman called with non-existent dir '${mandir#${D}}'"
+	eqawarn "QA Notice: prepman called with non-existent dir '${mandir#${ED}}'"
 	exit 0
 fi
 
@@ -27,6 +30,6 @@ for subdir in "${mandir}"/man* "${mandir}"/*/man* ; do
 	[[ -d ${subdir} ]] && really_is_mandir=1 && break
 done
 
-[[ ${really_is_mandir} == 1 ]] && exec ecompressdir --queue "${mandir#${D}}"
+[[ ${really_is_mandir} == 1 ]] && exec ecompressdir --queue "${mandir#${ED}}"
 
 exit 0

diff --git a/portage_with_autodep/bin/ebuild-helpers/prepstrip b/portage_with_autodep/bin/ebuild-helpers/prepstrip
index d25259d..85d5d6a 100755
--- a/portage_with_autodep/bin/ebuild-helpers/prepstrip
+++ b/portage_with_autodep/bin/ebuild-helpers/prepstrip
@@ -1,85 +1,153 @@
 #!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/helper-functions.sh
+
+# avoid multiple calls to `has`.  this creates things like:
+#   FEATURES_foo=false
+# if "foo" is not in $FEATURES
+tf() { "$@" && echo true || echo false ; }
+exp_tf() {
+	local flag var=$1
+	shift
+	for flag in "$@" ; do
+		eval ${var}_${flag}=$(tf has ${flag} ${!var})
+	done
+}
+exp_tf FEATURES compressdebug installsources nostrip splitdebug
+exp_tf RESTRICT binchecks installsources strip
+
+[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+	case "${EAPI}" in 0|1|2) EPREFIX= ED=${D} ;; esac
 
 banner=false
 SKIP_STRIP=false
-if has nostrip ${FEATURES} || \
-   has strip ${RESTRICT}
-then
+if ${RESTRICT_strip} || ${FEATURES_nostrip} ; then
 	SKIP_STRIP=true
 	banner=true
-	has installsources ${FEATURES} || exit 0
+	${FEATURES_installsources} || exit 0
 fi
 
-STRIP=${STRIP:-${CHOST}-strip}
-type -P -- ${STRIP} > /dev/null || STRIP=strip
-OBJCOPY=${OBJCOPY:-${CHOST}-objcopy}
-type -P -- ${OBJCOPY} > /dev/null || OBJCOPY=objcopy
+# look up the tools we might be using
+for t in STRIP:strip OBJCOPY:objcopy READELF:readelf ; do
+	v=${t%:*} # STRIP
+	t=${t#*:} # strip
+	eval ${v}=\"${!v:-${CHOST}-${t}}\"
+	type -P -- ${!v} >/dev/null || eval ${v}=${t}
+done
 
-# We'll leave out -R .note for now until we can check out the relevance
-# of the section when it has the ALLOC flag set on it ...
-export SAFE_STRIP_FLAGS="--strip-unneeded"
-export PORTAGE_STRIP_FLAGS=${PORTAGE_STRIP_FLAGS-${SAFE_STRIP_FLAGS} -R .comment}
-prepstrip_sources_dir=/usr/src/debug/${CATEGORY}/${PF}
+# Figure out what tool set we're using to strip stuff
+unset SAFE_STRIP_FLAGS DEF_STRIP_FLAGS SPLIT_STRIP_FLAGS
+case $(${STRIP} --version 2>/dev/null) in
+*elfutils*) # dev-libs/elfutils
+	# elfutils default behavior is always safe, so don't need to specify
+	# any flags at all
+	SAFE_STRIP_FLAGS=""
+	DEF_STRIP_FLAGS="--remove-comment"
+	SPLIT_STRIP_FLAGS="-f"
+	;;
+*GNU*) # sys-devel/binutils
+	# We'll leave out -R .note for now until we can check out the relevance
+	# of the section when it has the ALLOC flag set on it ...
+	SAFE_STRIP_FLAGS="--strip-unneeded"
+	DEF_STRIP_FLAGS="-R .comment -R .GCC.command.line"
+	SPLIT_STRIP_FLAGS=
+	;;
+esac
+: ${PORTAGE_STRIP_FLAGS=${SAFE_STRIP_FLAGS} ${DEF_STRIP_FLAGS}}
 
-if has installsources ${FEATURES} && ! type -P debugedit >/dev/null ; then
-	ewarn "FEATURES=installsources is enabled but the debugedit binary could not"
-	ewarn "be found. This feature will not work unless debugedit is installed!"
-fi
+prepstrip_sources_dir=${EPREFIX}/usr/src/debug/${CATEGORY}/${PF}
+
+type -P debugedit >/dev/null && debugedit_found=true || debugedit_found=false
+debugedit_warned=false
 
-unset ${!INODE_*}
+multijob_init
 
-inode_var_name() {
-	if  [[ $USERLAND = BSD ]] ; then
-		stat -f 'INODE_%d_%i' "$1"
+# Set up the $T filesystem layout that we care about.
+tmpdir="${T}/prepstrip"
+rm -rf "${tmpdir}"
+mkdir -p "${tmpdir}"/{inodes,splitdebug,sources}
+
+# Usage: inode_file_link <file>
+inode_file_link() {
+	echo -n "${tmpdir}/inodes/"
+	if  [[ ${USERLAND} == "BSD" ]] ; then
+		stat -f '%i' "$1"
 	else
-		stat -c 'INODE_%d_%i' "$1"
+		stat -c '%i' "$1"
 	fi
 }
 
+# Usage: save_elf_sources <elf>
 save_elf_sources() {
-	has installsources ${FEATURES} || return 0
-	has installsources ${RESTRICT} && return 0
-	type -P debugedit >/dev/null || return 0
+	${FEATURES_installsources} || return 0
+	${RESTRICT_installsources} && return 0
+	if ! ${debugedit_found} ; then
+		if ! ${debugedit_warned} ; then
+			debugedit_warned=true
+			ewarn "FEATURES=installsources is enabled but the debugedit binary could not"
+			ewarn "be found. This feature will not work unless debugedit is installed!"
+		fi
+		return 0
+	fi
 
 	local x=$1
-	local inode=$(inode_var_name "$x")
-	[[ -n ${!inode} ]] && return 0
-	debugedit -b "${WORKDIR}" -d "${prepstrip_sources_dir}" \
-		-l "${T}"/debug.sources "${x}"
+	[[ -f $(inode_file_link "${x}") ]] && return 0
+
+	# since we're editing the ELF here, we should recompute the build-id
+	# (the -i flag below).  save that output so we don't need to recompute
+	# it later on in the save_elf_debug step.
+	buildid=$(debugedit -i \
+		-b "${WORKDIR}" \
+		-d "${prepstrip_sources_dir}" \
+		-l "${tmpdir}/sources/${x##*/}.${BASHPID}" \
+		"${x}")
 }
 
+# Usage: save_elf_debug <elf> [splitdebug file]
 save_elf_debug() {
-	has splitdebug ${FEATURES} || return 0
+	${FEATURES_splitdebug} || return 0
 
+	# NOTE: Debug files must be installed in
+	# ${EPREFIX}/usr/lib/debug/${EPREFIX} (note that ${EPREFIX} occurs
+	# twice in this path) in order for gdb's debug-file-directory
+	# lookup to work correctly.
 	local x=$1
-	local y="${D}usr/lib/debug/${x:${#D}}.debug"
+	local splitdebug=$2
+	local y=${ED}usr/lib/debug/${x:${#D}}.debug
 
 	# don't save debug info twice
 	[[ ${x} == *".debug" ]] && return 0
 
-	# this will recompute the build-id, but for now that's ok
-	local buildid="$( type -P debugedit >/dev/null && debugedit -i "${x}" )"
-
-	mkdir -p $(dirname "${y}")
+	mkdir -p "${y%/*}"
 
-	local inode=$(inode_var_name "$x")
-	if [[ -n ${!inode} ]] ; then
-		ln "${D}usr/lib/debug/${!inode:${#D}}.debug" "$y"
+	local inode=$(inode_file_link "${x}")
+	if [[ -f ${inode} ]] ; then
+		ln "${inode}" "${y}"
 	else
-		eval $inode=\$x
-		${OBJCOPY} --only-keep-debug "${x}" "${y}"
-		${OBJCOPY} --add-gnu-debuglink="${y}" "${x}"
-		[[ -g ${x} ]] && chmod go-r "${y}"
-		[[ -u ${x} ]] && chmod go-r "${y}"
-		chmod a-x,o-w "${y}"
+		if [[ -n ${splitdebug} ]] ; then
+			mv "${splitdebug}" "${y}"
+		else
+			local objcopy_flags="--only-keep-debug"
+			${FEATURES_compressdebug} && objcopy_flags+=" --compress-debug-sections"
+			${OBJCOPY} ${objcopy_flags} "${x}" "${y}"
+			${OBJCOPY} --add-gnu-debuglink="${y}" "${x}"
+		fi
+		local args="a-x,o-w"
+		[[ -g ${x} || -u ${x} ]] && args+=",go-r"
+		chmod ${args} "${y}"
+		ln "${y}" "${inode}"
 	fi
 
+	# if we don't already have build-id from debugedit, look it up
+	if [[ -z ${buildid} ]] ; then
+		# convert the readelf output to something useful
+		buildid=$(${READELF} -x .note.gnu.build-id "${x}" 2>/dev/null \
+			| awk '$NF ~ /GNU/ { getline; printf $2$3$4$5; getline; print $2 }')
+	fi
 	if [[ -n ${buildid} ]] ; then
-		local buildid_dir="${D}usr/lib/debug/.build-id/${buildid:0:2}"
+		local buildid_dir="${ED}usr/lib/debug/.build-id/${buildid:0:2}"
 		local buildid_file="${buildid_dir}/${buildid:2}"
 		mkdir -p "${buildid_dir}"
 		ln -s "../../${x:${#D}}.debug" "${buildid_file}.debug"
@@ -87,33 +155,78 @@ save_elf_debug() {
 	fi
 }
 
+# Usage: process_elf <elf>
+process_elf() {
+	local x=$1 strip_flags=${*:2}
+
+	vecho "   ${x:${#ED}}"
+	save_elf_sources "${x}"
+
+	if ${strip_this} ; then
+
+		# If two processes try to strip the same hardlink at the same
+		# time, it will cause one of them to lose the splitdebug info.
+		# So, use a lockfile to prevent interference (easily observed
+		# with dev-vcs/git which creates ~109 hardlinks to one file in
+		# /usr/libexec/git-core).
+		local lockfile=$(inode_file_link "${x}")_lockfile
+		if ! ln "${x}" "${lockfile}" ; then
+			while [[ -f ${lockfile} ]] ; do
+				sleep 1
+			done
+			unset lockfile
+		fi
+
+		# see if we can split & strip at the same time
+		if [[ -n ${SPLIT_STRIP_FLAGS} ]] ; then
+			local shortname="${x##*/}.debug"
+			local splitdebug="${tmpdir}/splitdebug/${shortname}.${BASHPID}"
+			${STRIP} ${strip_flags} \
+				-f "${splitdebug}" \
+				-F "${shortname}" \
+				"${x}"
+			save_elf_debug "${x}" "${splitdebug}"
+		else
+			save_elf_debug "${x}"
+			${STRIP} ${strip_flags} "${x}"
+		fi
+		[[ -n ${lockfile} ]] && rm -f "${lockfile}"
+	fi
+}
+
 # The absence of the .symtab section tells us that a binary has already been stripped.
 # We want to log already stripped binaries, as this may be a QA violation.
 # They prevent us from getting the splitdebug data.
-if ! has binchecks ${RESTRICT} && \
-	! has strip ${RESTRICT} ; then
-	log=$T/scanelf-already-stripped.log
+if ! ${RESTRICT_binchecks} && ! ${RESTRICT_strip} ; then
+	# We need to do the non-stripped scan serially first before we turn around
+	# and start stripping the files ourselves.  The log parsing can be done in
+	# parallel though.
+	log=${tmpdir}/scanelf-already-stripped.log
+	scanelf -yqRBF '#k%F' -k '!.symtab' "$@" | sed -e "s#^${ED}##" > "${log}"
+	(
+	multijob_child_init
 	qa_var="QA_PRESTRIPPED_${ARCH/-/_}"
 	[[ -n ${!qa_var} ]] && QA_PRESTRIPPED="${!qa_var}"
-	scanelf -yqRBF '#k%F' -k '!.symtab' "$@" | sed -e "s#^$D##" > "$log"
-	if [[ -n $QA_PRESTRIPPED && -s $log && \
+	if [[ -n ${QA_PRESTRIPPED} && -s ${log} && \
 		${QA_STRICT_PRESTRIPPED-unset} = unset ]] ; then
 		shopts=$-
 		set -o noglob
-		for x in $QA_PRESTRIPPED ; do
-			sed -e "s#^${x#/}\$##" -i "$log"
+		for x in ${QA_PRESTRIPPED} ; do
+			sed -e "s#^${x#/}\$##" -i "${log}"
 		done
 		set +o noglob
-		set -$shopts
+		set -${shopts}
 	fi
-	sed -e "/^\$/d" -e "s#^#/#" -i "$log"
-	if [[ -s $log ]] ; then
+	sed -e "/^\$/d" -e "s#^#/#" -i "${log}"
+	if [[ -s ${log} ]] ; then
 		vecho -e "\n"
 		eqawarn "QA Notice: Pre-stripped files found:"
-		eqawarn "$(<"$log")"
+		eqawarn "$(<"${log}")"
 	else
-		rm -f "$log"
+		rm -f "${log}"
 	fi
+	) &
+	multijob_post_fork
 fi
 
 # Now we look for unstripped binaries.
@@ -126,8 +239,10 @@ do
 		banner=true
 	fi
 
-	f=$(file "${x}") || continue
-	[[ -z ${f} ]] && continue
+	(
+	multijob_child_init
+	f=$(file "${x}") || exit 0
+	[[ -z ${f} ]] && exit 0
 
 	if ! ${SKIP_STRIP} ; then
 		# The noglob funk is to support STRIP_MASK="/*/booga" and to keep
@@ -136,50 +251,61 @@ do
 		set -o noglob
 		strip_this=true
 		for m in $(eval echo ${STRIP_MASK}) ; do
-			[[ /${x#${D}} == ${m} ]] && strip_this=false && break
+			[[ /${x#${ED}} == ${m} ]] && strip_this=false && break
 		done
 		set +o noglob
 	else
 		strip_this=false
 	fi
 
+	# In Prefix we are usually an unprivileged user, so we can't strip
+	# unwritable objects.  Make them temporarily writable for the
+	# stripping.
+	was_not_writable=false
+	if [[ ! -w ${x} ]] ; then
+		was_not_writable=true
+		chmod u+w "${x}"
+	fi
+
 	# only split debug info for final linked objects
 	# or kernel modules as debuginfo for intermediary
 	# files (think crt*.o from gcc/glibc) is useless and
 	# actually causes problems.  install sources for all
 	# elf types though, because that data is still useful.
 
+	buildid=
 	if [[ ${f} == *"current ar archive"* ]] ; then
-		vecho "   ${x:${#D}}"
+		vecho "   ${x:${#ED}}"
 		if ${strip_this} ; then
 			# hmm, can we split debug/sources for .a ?
 			${STRIP} -g "${x}"
 		fi
 	elif [[ ${f} == *"SB executable"* || ${f} == *"SB shared object"* ]] ; then
-		vecho "   ${x:${#D}}"
-		save_elf_sources "${x}"
-		if ${strip_this} ; then
-			save_elf_debug "${x}"
-			${STRIP} ${PORTAGE_STRIP_FLAGS} "${x}"
-		fi
+		process_elf "${x}" ${PORTAGE_STRIP_FLAGS}
 	elif [[ ${f} == *"SB relocatable"* ]] ; then
-		vecho "   ${x:${#D}}"
-		save_elf_sources "${x}"
-		if ${strip_this} ; then
-			[[ ${x} == *.ko ]] && save_elf_debug "${x}"
-			${STRIP} ${SAFE_STRIP_FLAGS} "${x}"
-		fi
+		process_elf "${x}" ${SAFE_STRIP_FLAGS}
 	fi
+
+	if ${was_not_writable} ; then
+		chmod u-w "${x}"
+	fi
+	) &
+	multijob_post_fork
 done
 
-if [[ -s ${T}/debug.sources ]] && \
-        has installsources ${FEATURES} && \
-        ! has installsources ${RESTRICT} && \
-        type -P debugedit >/dev/null
+# With a bit more work, we could run the rsync processes below in
+# parallel, but not sure that'd be an overall improvement.
+multijob_finish
+
+cd "${tmpdir}"/sources/ && cat * > "${tmpdir}/debug.sources" 2>/dev/null
+if [[ -s ${tmpdir}/debug.sources ]] && \
+   ${FEATURES_installsources} && \
+   ! ${RESTRICT_installsources} && \
+   ${debugedit_found}
 then
 	vecho "installsources: rsyncing source files"
 	[[ -d ${D}${prepstrip_sources_dir} ]] || mkdir -p "${D}${prepstrip_sources_dir}"
-	grep -zv '/<[^/>]*>$' "${T}"/debug.sources | \
+	grep -zv '/<[^/>]*>$' "${tmpdir}"/debug.sources | \
 		(cd "${WORKDIR}"; LANG=C sort -z -u | \
 		rsync -tL0 --files-from=- "${WORKDIR}/" "${D}${prepstrip_sources_dir}/" )
 
@@ -188,6 +314,8 @@ then
 	# https://bugzilla.redhat.com/show_bug.cgi?id=444310
 	while read -r -d $'\0' emptydir
 	do
-		>> "$emptydir"/.keepdir
+		>> "${emptydir}"/.keepdir
 	done < <(find "${D}${prepstrip_sources_dir}/" -type d -empty -print0)
 fi
+
+rm -rf "${tmpdir}"
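
The prepstrip rewrite front-loads all FEATURES/RESTRICT lookups into boolean-valued variables so the per-file loop can test ${FEATURES_splitdebug} directly instead of calling has repeatedly. A standalone sketch of the tf/exp_tf idiom (has is normally provided by isolated-functions.sh; a simplified stand-in is used here):

    #!/bin/bash
    # Simplified stand-in for Portage's has():
    has() { local x=$1; shift; [[ " $* " == *" ${x} "* ]]; }

    tf() { "$@" && echo true || echo false ; }
    exp_tf() {
        local flag var=$1
        shift
        for flag in "$@" ; do
            eval ${var}_${flag}=$(tf has ${flag} ${!var})
        done
    }

    FEATURES="splitdebug compressdebug"
    exp_tf FEATURES splitdebug nostrip
    echo "${FEATURES_splitdebug}"   # -> true
    echo "${FEATURES_nostrip}"      # -> false
    ${FEATURES_splitdebug} && echo "will split debug info"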

diff --git a/portage_with_autodep/bin/ebuild-helpers/unprivileged/chgrp b/portage_with_autodep/bin/ebuild-helpers/unprivileged/chgrp
new file mode 120000
index 0000000..6fb0fcd
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/unprivileged/chgrp
@@ -0,0 +1 @@
+chown
\ No newline at end of file

diff --git a/portage_with_autodep/bin/ebuild-helpers/unprivileged/chown b/portage_with_autodep/bin/ebuild-helpers/unprivileged/chown
new file mode 100755
index 0000000..08fa650
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/unprivileged/chown
@@ -0,0 +1,41 @@
+#!/bin/bash
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+scriptpath=${BASH_SOURCE[0]}
+scriptname=${scriptpath##*/}
+
+IFS=':'
+
+for path in ${PATH}; do
+	[[ -x ${path}/${scriptname} ]] || continue
+	[[ ${path}/${scriptname} -ef ${scriptpath} ]] && continue
+	IFS=$' \t\n'
+	output=$("${path}/${scriptname}" "$@" 2>&1)
+	if [[ $? -ne 0 ]] ; then
+
+		# Avoid an extreme performance problem when the
+		# output is very long (bug #470992).
+		if [[ $(wc -l <<< "${output}") -gt 100 ]]; then
+			output=$(head -n100 <<< "${output}")
+			output="${output}\n ... (further messages truncated)"
+		fi
+
+		source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+		if ! ___eapi_has_prefix_variables; then
+			EPREFIX=
+		fi
+		msg="${scriptname} failure ignored with unprivileged user:\n    ${scriptname} $*\n    ${output}"
+		# Reverse expansion of ${D} and ${EPREFIX}, for readability.
+		msg=${msg//${D}/'${D}'}
+		if [[ -n ${EPREFIX} ]] ; then
+			msg=${msg//${EPREFIX}/'${EPREFIX}'}
+			msg=${msg//${EPREFIX#/}/'${EPREFIX}'}
+		fi
+		ewarn "${msg}"
+	fi
+	exit 0
+done
+
+exit 1
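
The wrapper above locates the real chown/chgrp by walking ${PATH} for an executable of the same name, skipping itself with the -ef (same device/inode) test, and downgrades failures to a warning so unprivileged Prefix installs can proceed. The PATH walk in isolation:

    #!/bin/bash
    # Find the next real implementation of this script's name in PATH,
    # skipping the wrapper itself (-ef compares device/inode).
    scriptpath=${BASH_SOURCE[0]}
    scriptname=${scriptpath##*/}

    IFS=':'
    for path in ${PATH}; do
        [[ -x ${path}/${scriptname} ]] || continue
        [[ ${path}/${scriptname} -ef ${scriptpath} ]] && continue
        IFS=$' \t\n'
        echo "would run: ${path}/${scriptname}"
        break
    done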

diff --git a/portage_with_autodep/bin/ebuild-helpers/xattr/install b/portage_with_autodep/bin/ebuild-helpers/xattr/install
new file mode 100755
index 0000000..f51f621
--- /dev/null
+++ b/portage_with_autodep/bin/ebuild-helpers/xattr/install
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+PORTAGE_BIN_PATH=${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}
+PORTAGE_PYM_PATH=${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}
+# Use safe cwd, avoiding unsafe import for bug #469338.
+export __PORTAGE_HELPER_CWD=${PWD}
+cd "${PORTAGE_PYM_PATH}"
+export __PORTAGE_HELPER_PATH=${BASH_SOURCE[0]}
+PYTHONPATH=${PORTAGE_PYTHONPATH:-${PORTAGE_PYM_PATH}} \
+	exec "${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/install.py" "$@"

diff --git a/portage_with_autodep/bin/ebuild-ipc.py b/portage_with_autodep/bin/ebuild-ipc.py
index 68ad985..29d4c23 100755
--- a/portage_with_autodep/bin/ebuild-ipc.py
+++ b/portage_with_autodep/bin/ebuild-ipc.py
@@ -13,6 +13,7 @@ import select
 import signal
 import sys
 import time
+import traceback
 
 def debug_signal(signum, frame):
 	import pdb
@@ -228,14 +229,22 @@ class EbuildIpc(object):
 		pid = os.fork()
 
 		if pid == 0:
-			os.close(pr)
-
-			# File streams are in unbuffered mode since we do atomic
-			# read and write of whole pickles.
-			output_file = open(self.ipc_in_fifo, 'wb', 0)
-			output_file.write(pickle.dumps(args))
-			output_file.close()
-			os._exit(os.EX_OK)
+			retval = 2
+			try:
+				os.close(pr)
+
+				# File streams are in unbuffered mode since we do atomic
+				# read and write of whole pickles.
+				output_file = open(self.ipc_in_fifo, 'wb', 0)
+				output_file.write(pickle.dumps(args))
+				output_file.close()
+				retval = os.EX_OK
+			except SystemExit:
+				raise
+			except:
+				traceback.print_exc()
+			finally:
+				os._exit(retval)
 
 		os.close(pw)
 
@@ -258,9 +267,16 @@ class EbuildIpc(object):
 		pid = os.fork()
 
 		if pid == 0:
-			os.close(pr)
-			retval = self._receive_reply(input_fd)
-			os._exit(retval)
+			retval = 2
+			try:
+				os.close(pr)
+				retval = self._receive_reply(input_fd)
+			except SystemExit:
+				raise
+			except:
+				traceback.print_exc()
+			finally:
+				os._exit(retval)
 
 		os.close(pw)
 		retval = self._wait(pid, pr, portage.localization._('during read'))

diff --git a/portage_with_autodep/bin/ebuild.sh b/portage_with_autodep/bin/ebuild.sh
index d68e54b..2589113 100755
--- a/portage_with_autodep/bin/ebuild.sh
+++ b/portage_with_autodep/bin/ebuild.sh
@@ -1,17 +1,53 @@
 #!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 PORTAGE_BIN_PATH="${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"
 PORTAGE_PYM_PATH="${PORTAGE_PYM_PATH:-/usr/lib/portage/pym}"
 
-if [[ $PORTAGE_SANDBOX_COMPAT_LEVEL -lt 22 ]] ; then
-	# Ensure that /dev/std* streams have appropriate sandbox permission for
-	# bug #288863. This can be removed after sandbox is fixed and portage
-	# depends on the fixed version (sandbox-2.2 has the fix but it is
-	# currently unstable).
-	export SANDBOX_WRITE="${SANDBOX_WRITE:+${SANDBOX_WRITE}:}/dev/stdout:/dev/stderr"
-	export SANDBOX_READ="${SANDBOX_READ:+${SANDBOX_READ}:}/dev/stdin"
+# Prevent aliases from causing portage to act inappropriately.
+# Make sure it's before everything so we don't mess up aliases that follow.
+unalias -a
+
+source "${PORTAGE_BIN_PATH}/isolated-functions.sh" || exit 1
+
+if [[ $EBUILD_PHASE != depend ]] ; then
+	source "${PORTAGE_BIN_PATH}/phase-functions.sh" || die
+	source "${PORTAGE_BIN_PATH}/save-ebuild-env.sh" || die
+	source "${PORTAGE_BIN_PATH}/phase-helpers.sh" || die
+	source "${PORTAGE_BIN_PATH}/bashrc-functions.sh" || die
+else
+	# These dummy functions are for things that are likely to be called
+	# in global scope, even though they are completely useless during
+	# the "depend" phase.
+	for x in diropts docompress exeopts get_KV insopts \
+		keepdir KV_major KV_micro KV_minor KV_to_int \
+		libopts register_die_hook register_success_hook \
+		remove_path_entry set_unless_changed strip_duplicate_slashes \
+		unset_unless_changed use_with use_enable ; do
+		eval "${x}() {
+			if has \"\${EAPI:-0}\" 4-python; then
+				die \"\${FUNCNAME}() calls are not allowed in global scope\"
+			fi
+		}"
+	done
+	# These dummy functions return false in older EAPIs, in order to ensure that
+	# `use multislot` is false for the "depend" phase.
+	for x in use useq usev ; do
+		eval "${x}() {
+			if has \"\${EAPI:-0}\" 4-python; then
+				die \"\${FUNCNAME}() calls are not allowed in global scope\"
+			else
+				return 1
+			fi
+		}"
+	done
+	# These functions die because calls to them during the "depend" phase
+	# are considered to be severe QA violations.
+	for x in best_version has_version portageq ; do
+		eval "${x}() { die \"\${FUNCNAME}() calls are not allowed in global scope\"; }"
+	done
+	unset x
 fi
 
 # Don't use sandbox's BASH_ENV for new shells because it does
@@ -19,13 +55,6 @@ fi
 # environment by modifying our PATH.
 unset BASH_ENV
 
-ROOTPATH=${ROOTPATH##:}
-ROOTPATH=${ROOTPATH%%:}
-PREROOTPATH=${PREROOTPATH##:}
-PREROOTPATH=${PREROOTPATH%%:}
-PATH=$PORTAGE_BIN_PATH/ebuild-helpers:$PREROOTPATH${PREROOTPATH:+:}/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin${ROOTPATH:+:}$ROOTPATH
-export PATH
-
 # This is just a temporary workaround for portage-9999 users since
 # earlier portage versions do not detect a version change in this case
 # (9999 to 9999) and therefore they try to execute an incompatible version of
@@ -67,1227 +96,63 @@ EBUILD_SH_ARGS="$*"
 
 shift $#
 
-# Prevent aliases from causing portage to act inappropriately.
-# Make sure it's before everything so we don't mess aliases that follow.
-unalias -a
-
 # Unset some variables that break things.
 unset GZIP BZIP BZIP2 CDPATH GREP_OPTIONS GREP_COLOR GLOBIGNORE
 
-source "${PORTAGE_BIN_PATH}/isolated-functions.sh"  &>/dev/null
-
 [[ $PORTAGE_QUIET != "" ]] && export PORTAGE_QUIET
 
 # sandbox support functions; defined prior to profile.bashrc sourcing, since the profile might need to add a default exception (/usr/lib64/conftest, for example)
 _sb_append_var() {
 	local _v=$1 ; shift
-	local var="SANDBOX_${_v}"
-	[[ -z $1 || -n $2 ]] && die "Usage: add$(echo ${_v} | \
-		LC_ALL=C tr [:upper:] [:lower:]) <colon-delimited list of paths>"
-	export ${var}="${!var:+${!var}:}$1"
-}
-# bash-4 version:
-# local var="SANDBOX_${1^^}"
-# addread() { _sb_append_var ${0#add} "$@" ; }
-addread()    { _sb_append_var READ    "$@" ; }
-addwrite()   { _sb_append_var WRITE   "$@" ; }
-adddeny()    { _sb_append_var DENY    "$@" ; }
-addpredict() { _sb_append_var PREDICT "$@" ; }
-
-addwrite "${PORTAGE_TMPDIR}"
-addread "/:${PORTAGE_TMPDIR}"
-[[ -n ${PORTAGE_GPG_DIR} ]] && addpredict "${PORTAGE_GPG_DIR}"
-
-# Avoid sandbox violations in temporary directories.
-if [[ -w $T ]] ; then
-	export TEMP=$T
-	export TMP=$T
-	export TMPDIR=$T
-elif [[ $SANDBOX_ON = 1 ]] ; then
-	for x in TEMP TMP TMPDIR ; do
-		[[ -n ${!x} ]] && addwrite "${!x}"
-	done
-	unset x
-fi
-
-# the sandbox is disabled by default except when overridden in the relevant stages
-export SANDBOX_ON=0
-
-lchown() {
-	chown -h "$@"
-}
-
-lchgrp() {
-	chgrp -h "$@"
-}
-
-esyslog() {
-	# Custom version of esyslog() to take care of the "Red Star" bug.
-	# MUST follow functions.sh to override the "" parameter problem.
-	return 0
-}
-
-useq() {
-	has $EBUILD_PHASE prerm postrm || eqawarn \
-		"QA Notice: The 'useq' function is deprecated (replaced by 'use')"
-	use ${1}
-}
-
-usev() {
-	if use ${1}; then
-		echo "${1#!}"
-		return 0
-	fi
-	return 1
-}
-
-use() {
-	local u=$1
-	local found=0
-
-	# if we got something like '!flag', then invert the return value
-	if [[ ${u:0:1} == "!" ]] ; then
-		u=${u:1}
-		found=1
-	fi
-
-	if [[ $EBUILD_PHASE = depend ]] ; then
-		# TODO: Add a registration interface for eclasses to register
-		# any number of phase hooks, so that global scope eclass
-		# initialization can by migrated to phase hooks in new EAPIs.
-		# Example: add_phase_hook before pkg_setup $ECLASS_pre_pkg_setup
-		#if [[ -n $EAPI ]] && ! has "$EAPI" 0 1 2 3 ; then
-		#	die "use() called during invalid phase: $EBUILD_PHASE"
-		#fi
-		true
-
-	# Make sure we have this USE flag in IUSE
-	elif [[ -n $PORTAGE_IUSE && -n $EBUILD_PHASE ]] ; then
-		[[ $u =~ $PORTAGE_IUSE ]] || \
-			eqawarn "QA Notice: USE Flag '${u}' not" \
-				"in IUSE for ${CATEGORY}/${PF}"
-	fi
-
-	if has ${u} ${USE} ; then
-		return ${found}
-	else
-		return $((!found))
-	fi
-}
-
-# Return true if given package is installed. Otherwise return false.
-# Takes single depend-type atoms.
-has_version() {
-	if [ "${EBUILD_PHASE}" == "depend" ]; then
-		die "portageq calls (has_version calls portageq) are not allowed in the global scope"
-	fi
-
-	if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
-		"$PORTAGE_BIN_PATH"/ebuild-ipc has_version "$ROOT" "$1"
-	else
-		PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
-		"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" has_version "${ROOT}" "$1"
-	fi
-	local retval=$?
-	case "${retval}" in
-		0|1)
-			return ${retval}
-			;;
-		*)
-			die "unexpected portageq exit code: ${retval}"
-			;;
-	esac
-}
-
-portageq() {
-	if [ "${EBUILD_PHASE}" == "depend" ]; then
-		die "portageq calls are not allowed in the global scope"
-	fi
-
-	PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
-	"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" "$@"
-}
-
-
-# ----------------------------------------------------------------------------
-# ----------------------------------------------------------------------------
-# ----------------------------------------------------------------------------
-
-
-# Returns the best/most-current match.
-# Takes single depend-type atoms.
-best_version() {
-	if [ "${EBUILD_PHASE}" == "depend" ]; then
-		die "portageq calls (best_version calls portageq) are not allowed in the global scope"
-	fi
-
-	if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
-		"$PORTAGE_BIN_PATH"/ebuild-ipc best_version "$ROOT" "$1"
-	else
-		PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
-		"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" 'best_version' "${ROOT}" "$1"
-	fi
-	local retval=$?
-	case "${retval}" in
-		0|1)
-			return ${retval}
-			;;
-		*)
-			die "unexpected portageq exit code: ${retval}"
-			;;
-	esac
-}
-
-use_with() {
-	if [ -z "$1" ]; then
-		echo "!!! use_with() called without a parameter." >&2
-		echo "!!! use_with <USEFLAG> [<flagname> [value]]" >&2
-		return 1
-	fi
-
-	if ! has "${EAPI:-0}" 0 1 2 3 ; then
-		local UW_SUFFIX=${3+=$3}
-	else
-		local UW_SUFFIX=${3:+=$3}
-	fi
-	local UWORD=${2:-$1}
-
-	if use $1; then
-		echo "--with-${UWORD}${UW_SUFFIX}"
-	else
-		echo "--without-${UWORD}"
-	fi
-	return 0
-}
-
-use_enable() {
-	if [ -z "$1" ]; then
-		echo "!!! use_enable() called without a parameter." >&2
-		echo "!!! use_enable <USEFLAG> [<flagname> [value]]" >&2
-		return 1
-	fi
-
-	if ! has "${EAPI:-0}" 0 1 2 3 ; then
-		local UE_SUFFIX=${3+=$3}
-	else
-		local UE_SUFFIX=${3:+=$3}
-	fi
-	local UWORD=${2:-$1}
-
-	if use $1; then
-		echo "--enable-${UWORD}${UE_SUFFIX}"
-	else
-		echo "--disable-${UWORD}"
-	fi
-	return 0
-}
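
# Illustrative examples (flag and option names hypothetical), with "ssl" in USE:
#
#   use_with ssl openssl    # prints --with-openssl
#   use_enable debug        # prints --disable-debug when "debug" is not in USE
#
# The ${3+=$3} vs. ${3:+=$3} distinction above means that in EAPI 4 and later
# an explicitly empty third argument still emits a bare '=' suffix
# (use_with ssl openssl '' gives --with-openssl=), whereas in EAPI 0-3 an
# empty value is dropped entirely.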
-
-register_die_hook() {
-	local x
-	for x in $* ; do
-		has $x $EBUILD_DEATH_HOOKS || \
-			export EBUILD_DEATH_HOOKS="$EBUILD_DEATH_HOOKS $x"
-	done
-}
-
-register_success_hook() {
-	local x
-	for x in $* ; do
-		has $x $EBUILD_SUCCESS_HOOKS || \
-			export EBUILD_SUCCESS_HOOKS="$EBUILD_SUCCESS_HOOKS $x"
-	done
-}
-
-# Ensure that $PWD is sane whenever possible, to protect against
-# exploitation of insecure search path for python -c in ebuilds.
-# See bug #239560.
-if ! has "$EBUILD_PHASE" clean cleanrm depend help ; then
-	cd "$PORTAGE_BUILDDIR" || \
-		die "PORTAGE_BUILDDIR does not exist: '$PORTAGE_BUILDDIR'"
-fi
-
-#if no perms are specified, dirs/files will have decent defaults
-#(not secretive, but not stupid)
-umask 022
-export DESTTREE=/usr
-export INSDESTTREE=""
-export _E_EXEDESTTREE_=""
-export _E_DOCDESTTREE_=""
-export INSOPTIONS="-m0644"
-export EXEOPTIONS="-m0755"
-export LIBOPTIONS="-m0644"
-export DIROPTIONS="-m0755"
-export MOPREFIX=${PN}
-declare -a PORTAGE_DOCOMPRESS=( /usr/share/{doc,info,man} )
-declare -a PORTAGE_DOCOMPRESS_SKIP=( /usr/share/doc/${PF}/html )
-
-# adds ".keep" files so that dirs aren't auto-cleaned
-keepdir() {
-	dodir "$@"
-	local x
-	if [ "$1" == "-R" ] || [ "$1" == "-r" ]; then
-		shift
-		find "$@" -type d -printf "${D}%p/.keep_${CATEGORY}_${PN}-${SLOT}\n" \
-			| tr "\n" "\0" | \
-			while read -r -d $'\0' ; do
-				>> "$REPLY" || \
-					die "Failed to recursively create .keep files"
-			done
-	else
-		for x in "$@"; do
-			>> "${D}${x}/.keep_${CATEGORY}_${PN}-${SLOT}" || \
-				die "Failed to create .keep in ${D}${x}"
-		done
-	fi
-}
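
# Usage sketch (the path is hypothetical): keepdir creates the directory under
# ${D} and drops a .keep_${CATEGORY}_${PN}-${SLOT} file inside it so that
# otherwise-empty directories survive merging; -R/-r does the same for every
# directory found below the given ones:
#
#   keepdir /var/lib/myservice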
-
-unpack() {
-	local srcdir
-	local x
-	local y
-	local myfail
-	local eapi=${EAPI:-0}
-	[ -z "$*" ] && die "Nothing passed to the 'unpack' command"
-
-	for x in "$@"; do
-		vecho ">>> Unpacking ${x} to ${PWD}"
-		y=${x%.*}
-		y=${y##*.}
-
-		if [[ ${x} == "./"* ]] ; then
-			srcdir=""
-		elif [[ ${x} == ${DISTDIR%/}/* ]] ; then
-			die "Arguments to unpack() cannot begin with \${DISTDIR}."
-		elif [[ ${x} == "/"* ]] ; then
-			die "Arguments to unpack() cannot be absolute"
-		else
-			srcdir="${DISTDIR}/"
-		fi
-		[[ ! -s ${srcdir}${x} ]] && die "${x} does not exist"
-
-		_unpack_tar() {
-			if [ "${y}" == "tar" ]; then
-				$1 -c -- "$srcdir$x" | tar xof -
-				assert_sigpipe_ok "$myfail"
-			else
-				local cwd_dest=${x##*/}
-				cwd_dest=${cwd_dest%.*}
-				$1 -c -- "${srcdir}${x}" > "${cwd_dest}" || die "$myfail"
-			fi
-		}
-
-		myfail="failure unpacking ${x}"
-		case "${x##*.}" in
-			tar)
-				tar xof "$srcdir$x" || die "$myfail"
-				;;
-			tgz)
-				tar xozf "$srcdir$x" || die "$myfail"
-				;;
-			tbz|tbz2)
-				${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -c -- "$srcdir$x" | tar xof -
-				assert_sigpipe_ok "$myfail"
-				;;
-			ZIP|zip|jar)
-				# unzip will interactively prompt under some error conditions,
-				# as reported in bug #336285
-				( while true ; do echo n || break ; done ) | \
-				unzip -qo "${srcdir}${x}" || die "$myfail"
-				;;
-			gz|Z|z)
-				_unpack_tar "gzip -d"
-				;;
-			bz2|bz)
-				_unpack_tar "${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d}"
-				;;
-			7Z|7z)
-				local my_output
-				my_output="$(7z x -y "${srcdir}${x}")"
-				if [ $? -ne 0 ]; then
-					echo "${my_output}" >&2
-					die "$myfail"
-				fi
-				;;
-			RAR|rar)
-				unrar x -idq -o+ "${srcdir}${x}" || die "$myfail"
-				;;
-			LHa|LHA|lha|lzh)
-				lha xfq "${srcdir}${x}" || die "$myfail"
-				;;
-			a)
-				ar x "${srcdir}${x}" || die "$myfail"
-				;;
-			deb)
-				# Unpacking .deb archives cannot always be done with
-				# `ar`; on AIX, for instance, it fails.  If `deb2targz`
-				# is installed, prefer it over `ar` for that reason.
-				# We just make sure that on AIX `deb2targz` is
-				# installed.
-				if type -P deb2targz > /dev/null; then
-					y=${x##*/}
-					local created_symlink=0
-					if [ ! "$srcdir$x" -ef "$y" ] ; then
-						# deb2targz always extracts into the same directory as
-						# the source file, so create a symlink in the current
-						# working directory if necessary.
-						ln -sf "$srcdir$x" "$y" || die "$myfail"
-						created_symlink=1
-					fi
-					deb2targz "$y" || die "$myfail"
-					if [ $created_symlink = 1 ] ; then
-						# Clean up the symlink so the ebuild
-						# doesn't inadvertently install it.
-						rm -f "$y"
-					fi
-					mv -f "${y%.deb}".tar.gz data.tar.gz || die "$myfail"
-				else
-					ar x "$srcdir$x" || die "$myfail"
-				fi
-				;;
-			lzma)
-				_unpack_tar "lzma -d"
-				;;
-			xz)
-				if has $eapi 0 1 2 ; then
-					vecho "unpack ${x}: file format not recognized. Ignoring."
-				else
-					_unpack_tar "xz -d"
-				fi
-				;;
-			*)
-				vecho "unpack ${x}: file format not recognized. Ignoring."
-				;;
-		esac
-	done
-	# Do not chmod '.' since it's probably ${WORKDIR} and PORTAGE_WORKDIR_MODE
-	# should be preserved.
-	find . -mindepth 1 -maxdepth 1 ! -type l -print0 | \
-		${XARGS} -0 chmod -fR a+rX,u+w,g-w,o-w
-}
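
# Usage sketch (archive names hypothetical): unpack dispatches on the file
# suffix and reads from ${DISTDIR} unless the argument starts with "./":
#
#   unpack "${P}.tar.bz2"          # unpacks ${DISTDIR}/${P}.tar.bz2 into ${PWD}
#   unpack "./prebuilt-blob.zip"   # relative to the current directory instead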
-
-strip_duplicate_slashes() {
-	if [[ -n $1 ]] ; then
-		local removed=$1
-		while [[ ${removed} == *//* ]] ; do
-			removed=${removed//\/\///}
-		done
-		echo ${removed}
-	fi
-}
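
# Example (illustrative path): any run of consecutive slashes collapses to one.
#
#   strip_duplicate_slashes "/usr//lib///pkgconfig"   # prints /usr/lib/pkgconfig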
-
-hasg() {
-    local x s=$1
-    shift
-    for x ; do [[ ${x} == ${s} ]] && echo "${x}" && return 0 ; done
-    return 1
-}
-hasgq() { hasg "$@" >/dev/null ; }
-econf() {
-	local x
-
-	local phase_func=$(_ebuild_arg_to_phase "$EAPI" "$EBUILD_PHASE")
-	if [[ -n $phase_func ]] ; then
-		if has "$EAPI" 0 1 ; then
-			[[ $phase_func != src_compile ]] && \
-				eqawarn "QA Notice: econf called in" \
-					"$phase_func instead of src_compile"
-		else
-			[[ $phase_func != src_configure ]] && \
-				eqawarn "QA Notice: econf called in" \
-					"$phase_func instead of src_configure"
-		fi
-	fi
-
-	: ${ECONF_SOURCE:=.}
-	if [ -x "${ECONF_SOURCE}/configure" ]; then
-		if [[ -n $CONFIG_SHELL && \
-			"$(head -n1 "$ECONF_SOURCE/configure")" =~ ^'#!'[[:space:]]*/bin/sh([[:space:]]|$) ]] ; then
-			sed -e "1s:^#![[:space:]]*/bin/sh:#!$CONFIG_SHELL:" -i "$ECONF_SOURCE/configure" || \
-				die "Substitution of shebang in '$ECONF_SOURCE/configure' failed"
-		fi
-		if [ -e /usr/share/gnuconfig/ ]; then
-			find "${WORKDIR}" -type f '(' \
-			-name config.guess -o -name config.sub ')' -print0 | \
-			while read -r -d $'\0' x ; do
-				vecho " * econf: updating ${x/${WORKDIR}\/} with /usr/share/gnuconfig/${x##*/}"
-				cp -f /usr/share/gnuconfig/"${x##*/}" "${x}"
-			done
-		fi
-
-		# EAPI=4 adds --disable-dependency-tracking to econf
-		if ! has "$EAPI" 0 1 2 3 3_pre2 && \
-			"${ECONF_SOURCE}/configure" --help 2>/dev/null | \
-			grep -q disable-dependency-tracking ; then
-			set -- --disable-dependency-tracking "$@"
-		fi
-
-		# if the profile defines a location to install libs to aside from default, pass it on.
-		# if the ebuild passes in --libdir, they're responsible for the conf_libdir fun.
-		local CONF_LIBDIR LIBDIR_VAR="LIBDIR_${ABI}"
-		if [[ -n ${ABI} && -n ${!LIBDIR_VAR} ]] ; then
-			CONF_LIBDIR=${!LIBDIR_VAR}
-		fi
-		if [[ -n ${CONF_LIBDIR} ]] && ! hasgq --libdir=\* "$@" ; then
-			export CONF_PREFIX=$(hasg --exec-prefix=\* "$@")
-			[[ -z ${CONF_PREFIX} ]] && CONF_PREFIX=$(hasg --prefix=\* "$@")
-			: ${CONF_PREFIX:=/usr}
-			CONF_PREFIX=${CONF_PREFIX#*=}
-			[[ ${CONF_PREFIX} != /* ]] && CONF_PREFIX="/${CONF_PREFIX}"
-			[[ ${CONF_LIBDIR} != /* ]] && CONF_LIBDIR="/${CONF_LIBDIR}"
-			set -- --libdir="$(strip_duplicate_slashes ${CONF_PREFIX}${CONF_LIBDIR})" "$@"
-		fi
-
-		set -- \
-			--prefix=/usr \
-			${CBUILD:+--build=${CBUILD}} \
-			--host=${CHOST} \
-			${CTARGET:+--target=${CTARGET}} \
-			--mandir=/usr/share/man \
-			--infodir=/usr/share/info \
-			--datadir=/usr/share \
-			--sysconfdir=/etc \
-			--localstatedir=/var/lib \
-			"$@" \
-			${EXTRA_ECONF}
-		vecho "${ECONF_SOURCE}/configure" "$@"
-
-		if ! "${ECONF_SOURCE}/configure" "$@" ; then
-
-			if [ -s config.log ]; then
-				echo
-				echo "!!! Please attach the following file when seeking support:"
-				echo "!!! ${PWD}/config.log"
-			fi
-			die "econf failed"
-		fi
-	elif [ -f "${ECONF_SOURCE}/configure" ]; then
-		die "configure is not executable"
-	else
-		die "no configure script found"
-	fi
-}
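
# Sketch of the libdir handling above (values hypothetical): with ABI=amd64
# and LIBDIR_amd64=lib64 coming from the profile, and no --libdir passed by
# the ebuild, econf derives the prefix (default /usr, or an explicit
# --prefix/--exec-prefix argument) and prepends the combined path:
#
#   ABI=amd64 LIBDIR_amd64=lib64
#   econf --prefix=/opt    # configure receives --libdir=/opt/lib64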
-
-einstall() {
-	# CONF_PREFIX is only set if they didn't pass in libdir above.
-	local LOCAL_EXTRA_EINSTALL="${EXTRA_EINSTALL}"
-	LIBDIR_VAR="LIBDIR_${ABI}"
-	if [ -n "${ABI}" -a -n "${!LIBDIR_VAR}" ]; then
-		CONF_LIBDIR="${!LIBDIR_VAR}"
-	fi
-	unset LIBDIR_VAR
-	if [ -n "${CONF_LIBDIR}" ] && [ "${CONF_PREFIX:+set}" = set ]; then
-		EI_DESTLIBDIR="${D}/${CONF_PREFIX}/${CONF_LIBDIR}"
-		EI_DESTLIBDIR="$(strip_duplicate_slashes ${EI_DESTLIBDIR})"
-		LOCAL_EXTRA_EINSTALL="libdir=${EI_DESTLIBDIR} ${LOCAL_EXTRA_EINSTALL}"
-		unset EI_DESTLIBDIR
-	fi
-
-	if [ -f ./[mM]akefile -o -f ./GNUmakefile ] ; then
-		if [ "${PORTAGE_DEBUG}" == "1" ]; then
-			${MAKE:-make} -n prefix="${D}usr" \
-				datadir="${D}usr/share" \
-				infodir="${D}usr/share/info" \
-				localstatedir="${D}var/lib" \
-				mandir="${D}usr/share/man" \
-				sysconfdir="${D}etc" \
-				${LOCAL_EXTRA_EINSTALL} \
-				${MAKEOPTS} ${EXTRA_EMAKE} -j1 \
-				"$@" install
-		fi
-		${MAKE:-make} prefix="${D}usr" \
-			datadir="${D}usr/share" \
-			infodir="${D}usr/share/info" \
-			localstatedir="${D}var/lib" \
-			mandir="${D}usr/share/man" \
-			sysconfdir="${D}etc" \
-			${LOCAL_EXTRA_EINSTALL} \
-			${MAKEOPTS} ${EXTRA_EMAKE} -j1 \
-			"$@" install || die "einstall failed"
-	else
-		die "no Makefile found"
-	fi
-}
-
-_eapi0_pkg_nofetch() {
-	[ -z "${SRC_URI}" ] && return
-
-	elog "The following are listed in SRC_URI for ${PN}:"
-	local x
-	for x in $(echo ${SRC_URI}); do
-		elog "   ${x}"
-	done
-}
-
-_eapi0_src_unpack() {
-	[[ -n ${A} ]] && unpack ${A}
-}
-
-_eapi0_src_compile() {
-	if [ -x ./configure ] ; then
-		econf
-	fi
-	_eapi2_src_compile
-}
-
-_eapi0_src_test() {
-	# Since we don't want emake's automatic die
-	# support (EAPI 4 and later), and we also don't
-	# want the warning messages that it produces if
-	# we call it in 'nonfatal' mode, we use emake_cmd
-	# to emulate the desired parts of emake behavior.
-	local emake_cmd="${MAKE:-make} ${MAKEOPTS} ${EXTRA_EMAKE}"
-	if $emake_cmd -j1 check -n &> /dev/null; then
-		vecho ">>> Test phase [check]: ${CATEGORY}/${PF}"
-		if ! $emake_cmd -j1 check; then
-			has test $FEATURES && die "Make check failed. See above for details."
-			has test $FEATURES || eerror "Make check failed. See above for details."
-		fi
-	elif $emake_cmd -j1 test -n &> /dev/null; then
-		vecho ">>> Test phase [test]: ${CATEGORY}/${PF}"
-		if ! $emake_cmd -j1 test; then
-			has test $FEATURES && die "Make test failed. See above for details."
-			has test $FEATURES || eerror "Make test failed. See above for details."
-		fi
-	else
-		vecho ">>> Test phase [none]: ${CATEGORY}/${PF}"
-	fi
-}
-
-_eapi1_src_compile() {
-	_eapi2_src_configure
-	_eapi2_src_compile
-}
-
-_eapi2_src_configure() {
-	if [[ -x ${ECONF_SOURCE:-.}/configure ]] ; then
-		econf
-	fi
-}
-
-_eapi2_src_compile() {
-	if [ -f Makefile ] || [ -f GNUmakefile ] || [ -f makefile ]; then
-		emake || die "emake failed"
-	fi
-}
-
-_eapi4_src_install() {
-	if [[ -f Makefile || -f GNUmakefile || -f makefile ]] ; then
-		emake DESTDIR="${D}" install
-	fi
-
-	if ! declare -p DOCS &>/dev/null ; then
-		local d
-		for d in README* ChangeLog AUTHORS NEWS TODO CHANGES \
-				THANKS BUGS FAQ CREDITS CHANGELOG ; do
-			[[ -s "${d}" ]] && dodoc "${d}"
-		done
-	elif [[ $(declare -p DOCS) == "declare -a "* ]] ; then
-		dodoc "${DOCS[@]}"
-	else
-		dodoc ${DOCS}
-	fi
-}
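
# Illustrative DOCS handling for the EAPI 4 default src_install above
# (file names hypothetical):
#
#   DOCS="README.md docs/USAGE"       # scalar: installed via dodoc ${DOCS}
#   DOCS=( AUTHORS "release notes" )  # array: dodoc "${DOCS[@]}", spaces safe
#   # DOCS unset: the README*/ChangeLog/AUTHORS/... candidates are scanned.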
-
-ebuild_phase() {
-	declare -F "$1" >/dev/null && qa_call $1
-}
-
-ebuild_phase_with_hooks() {
-	local x phase_name=${1}
-	for x in {pre_,,post_}${phase_name} ; do
-		ebuild_phase ${x}
-	done
-}
-
-dyn_pretend() {
-	if [[ -e $PORTAGE_BUILDDIR/.pretended ]] ; then
-		vecho ">>> It appears that '$PF' is already pretended; skipping."
-		vecho ">>> Remove '$PORTAGE_BUILDDIR/.pretended' to force pretend."
-		return 0
-	fi
-	ebuild_phase pre_pkg_pretend
-	ebuild_phase pkg_pretend
-	>> "$PORTAGE_BUILDDIR/.pretended" || \
-		die "Failed to create $PORTAGE_BUILDDIR/.pretended"
-	ebuild_phase post_pkg_pretend
-}
-
-dyn_setup() {
-	if [[ -e $PORTAGE_BUILDDIR/.setuped ]] ; then
-		vecho ">>> It appears that '$PF' is already setup; skipping."
-		vecho ">>> Remove '$PORTAGE_BUILDDIR/.setuped' to force setup."
-		return 0
-	fi
-	ebuild_phase pre_pkg_setup
-	ebuild_phase pkg_setup
-	>> "$PORTAGE_BUILDDIR/.setuped" || \
-		die "Failed to create $PORTAGE_BUILDDIR/.setuped"
-	ebuild_phase post_pkg_setup
-}
-
-dyn_unpack() {
-	local newstuff="no"
-	if [ -e "${WORKDIR}" ]; then
-		local x
-		local checkme
-		for x in $A ; do
-			vecho ">>> Checking ${x}'s mtime..."
-			if [ "${PORTAGE_ACTUAL_DISTDIR:-${DISTDIR}}/${x}" -nt "${WORKDIR}" ]; then
-				vecho ">>> ${x} has been updated; recreating WORKDIR..."
-				newstuff="yes"
-				break
-			fi
-		done
-		if [ ! -f "${PORTAGE_BUILDDIR}/.unpacked" ] ; then
-			vecho ">>> Not marked as unpacked; recreating WORKDIR..."
-			newstuff="yes"
-		fi
-	fi
-	if [ "${newstuff}" == "yes" ]; then
-		# We don't necessarily have privileges to do a full dyn_clean here.
-		rm -rf "${PORTAGE_BUILDDIR}"/{.setuped,.unpacked,.prepared,.configured,.compiled,.tested,.installed,.packaged,build-info}
-		if ! has keepwork $FEATURES ; then
-			rm -rf "${WORKDIR}"
-		fi
-		if [ -d "${T}" ] && \
-			! has keeptemp $FEATURES ; then
-			rm -rf "${T}" && mkdir "${T}"
-		fi
-	fi
-	if [ -e "${WORKDIR}" ]; then
-		if [ "$newstuff" == "no" ]; then
-			vecho ">>> WORKDIR is up-to-date, keeping..."
-			return 0
-		fi
-	fi
-
-	if [ ! -d "${WORKDIR}" ]; then
-		install -m${PORTAGE_WORKDIR_MODE:-0700} -d "${WORKDIR}" || die "Failed to create dir '${WORKDIR}'"
-	fi
-	cd "${WORKDIR}" || die "Directory change failed: \`cd '${WORKDIR}'\`"
-	ebuild_phase pre_src_unpack
-	vecho ">>> Unpacking source..."
-	ebuild_phase src_unpack
-	>> "$PORTAGE_BUILDDIR/.unpacked" || \
-		die "Failed to create $PORTAGE_BUILDDIR/.unpacked"
-	vecho ">>> Source unpacked in ${WORKDIR}"
-	ebuild_phase post_src_unpack
-}
-
-dyn_clean() {
-	if [ -z "${PORTAGE_BUILDDIR}" ]; then
-		echo "Aborting clean phase because PORTAGE_BUILDDIR is unset!"
-		return 1
-	elif [ ! -d "${PORTAGE_BUILDDIR}" ] ; then
-		return 0
-	fi
-	if has chflags $FEATURES ; then
-		chflags -R noschg,nouchg,nosappnd,nouappnd "${PORTAGE_BUILDDIR}"
-		chflags -R nosunlnk,nouunlnk "${PORTAGE_BUILDDIR}" 2>/dev/null
-	fi
-
-	rm -rf "${PORTAGE_BUILDDIR}/image" "${PORTAGE_BUILDDIR}/homedir"
-	rm -f "${PORTAGE_BUILDDIR}/.installed"
-
-	if [[ $EMERGE_FROM = binary ]] || \
-		! has keeptemp $FEATURES && ! has keepwork $FEATURES ; then
-		rm -rf "${T}"
-	fi
-
-	if [[ $EMERGE_FROM = binary ]] || ! has keepwork $FEATURES; then
-		rm -f "$PORTAGE_BUILDDIR"/.{ebuild_changed,logid,pretended,setuped,unpacked,prepared} \
-			"$PORTAGE_BUILDDIR"/.{configured,compiled,tested,packaged} \
-			"$PORTAGE_BUILDDIR"/.die_hooks \
-			"$PORTAGE_BUILDDIR"/.ipc_{in,out,lock} \
-			"$PORTAGE_BUILDDIR"/.exit_status
-
-		rm -rf "${PORTAGE_BUILDDIR}/build-info"
-		rm -rf "${WORKDIR}"
-	fi
-
-	if [ -f "${PORTAGE_BUILDDIR}/.unpacked" ]; then
-		find "${PORTAGE_BUILDDIR}" -type d ! -regex "^${WORKDIR}" | sort -r | tr "\n" "\0" | $XARGS -0 rmdir &>/dev/null
-	fi
-
-	# do not bind this to the doebuild-defined DISTDIR; don't trust doebuild,
-	# since mistakes there would result in wiping the user's distfiles
-	# directory (bad).
-	rm -rf "${PORTAGE_BUILDDIR}/distdir"
-
-	# Some kernels, such as Solaris, return EINVAL when an attempt
-	# is made to remove the current working directory.
-	cd "$PORTAGE_BUILDDIR"/../..
-	rmdir "$PORTAGE_BUILDDIR" 2>/dev/null
-
-	true
-}
-
-into() {
-	if [ "$1" == "/" ]; then
-		export DESTTREE=""
-	else
-		export DESTTREE=$1
-		if [ ! -d "${D}${DESTTREE}" ]; then
-			install -d "${D}${DESTTREE}"
-			local ret=$?
-			if [[ $ret -ne 0 ]] ; then
-				helpers_die "${FUNCNAME[0]} failed"
-				return $ret
-			fi
-		fi
-	fi
-}
-
-insinto() {
-	if [ "$1" == "/" ]; then
-		export INSDESTTREE=""
-	else
-		export INSDESTTREE=$1
-		if [ ! -d "${D}${INSDESTTREE}" ]; then
-			install -d "${D}${INSDESTTREE}"
-			local ret=$?
-			if [[ $ret -ne 0 ]] ; then
-				helpers_die "${FUNCNAME[0]} failed"
-				return $ret
-			fi
-		fi
-	fi
-}
-
-exeinto() {
-	if [ "$1" == "/" ]; then
-		export _E_EXEDESTTREE_=""
-	else
-		export _E_EXEDESTTREE_="$1"
-		if [ ! -d "${D}${_E_EXEDESTTREE_}" ]; then
-			install -d "${D}${_E_EXEDESTTREE_}"
-			local ret=$?
-			if [[ $ret -ne 0 ]] ; then
-				helpers_die "${FUNCNAME[0]} failed"
-				return $ret
-			fi
-		fi
-	fi
-}
-
-docinto() {
-	if [ "$1" == "/" ]; then
-		export _E_DOCDESTTREE_=""
-	else
-		export _E_DOCDESTTREE_="$1"
-		if [ ! -d "${D}usr/share/doc/${PF}/${_E_DOCDESTTREE_}" ]; then
-			install -d "${D}usr/share/doc/${PF}/${_E_DOCDESTTREE_}"
-			local ret=$?
-			if [[ $ret -ne 0 ]] ; then
-				helpers_die "${FUNCNAME[0]} failed"
-				return $ret
-			fi
-		fi
-	fi
-}
-
-insopts() {
-	export INSOPTIONS="$@"
-
-	# `install` should never be called with '-s' ...
-	has -s ${INSOPTIONS} && die "Never call insopts() with -s"
-}
-
-diropts() {
-	export DIROPTIONS="$@"
-}
-
-exeopts() {
-	export EXEOPTIONS="$@"
-
-	# `install` should never be called with '-s' ...
-	has -s ${EXEOPTIONS} && die "Never call exeopts() with -s"
-}
-
-libopts() {
-	export LIBOPTIONS="$@"
-
-	# `install` should never be called with '-s' ...
-	has -s ${LIBOPTIONS} && die "Never call libopts() with -s"
-}
-
-docompress() {
-	has "${EAPI}" 0 1 2 3 && die "'docompress' not supported in this EAPI"
-
-	local f g
-	if [[ $1 = "-x" ]]; then
-		shift
-		for f; do
-			f=$(strip_duplicate_slashes "${f}"); f=${f%/}
-			[[ ${f:0:1} = / ]] || f="/${f}"
-			for g in "${PORTAGE_DOCOMPRESS_SKIP[@]}"; do
-				[[ ${f} = "${g}" ]] && continue 2
-			done
-			PORTAGE_DOCOMPRESS_SKIP[${#PORTAGE_DOCOMPRESS_SKIP[@]}]=${f}
-		done
-	else
-		for f; do
-			f=$(strip_duplicate_slashes "${f}"); f=${f%/}
-			[[ ${f:0:1} = / ]] || f="/${f}"
-			for g in "${PORTAGE_DOCOMPRESS[@]}"; do
-				[[ ${f} = "${g}" ]] && continue 2
-			done
-			PORTAGE_DOCOMPRESS[${#PORTAGE_DOCOMPRESS[@]}]=${f}
-		done
-	fi
-}
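
# Usage sketch (paths hypothetical): docompress maintains the include and
# exclude lists that the compression helpers honor later; -x marks paths
# that must never be compressed:
#
#   docompress /usr/share/myapp/txt          # appended to PORTAGE_DOCOMPRESS
#   docompress -x /usr/share/doc/${PF}/api   # appended to PORTAGE_DOCOMPRESS_SKIP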
-
-abort_handler() {
-	local msg
-	if [ "$2" != "fail" ]; then
-		msg="${EBUILD}: ${1} aborted; exiting."
-	else
-		msg="${EBUILD}: ${1} failed; exiting."
-	fi
-	echo
-	echo "$msg"
-	echo
-	eval ${3}
-	#unset signal handler
-	trap - SIGINT SIGQUIT
-}
-
-abort_prepare() {
-	abort_handler src_prepare $1
-	rm -f "$PORTAGE_BUILDDIR/.prepared"
-	exit 1
-}
-
-abort_configure() {
-	abort_handler src_configure $1
-	rm -f "$PORTAGE_BUILDDIR/.configured"
-	exit 1
-}
-
-abort_compile() {
-	abort_handler "src_compile" $1
-	rm -f "${PORTAGE_BUILDDIR}/.compiled"
-	exit 1
-}
-
-abort_test() {
-	abort_handler "dyn_test" $1
-	rm -f "${PORTAGE_BUILDDIR}/.tested"
-	exit 1
-}
-
-abort_install() {
-	abort_handler "src_install" $1
-	rm -rf "${PORTAGE_BUILDDIR}/image"
-	exit 1
-}
-
-has_phase_defined_up_to() {
-	local phase
-	for phase in unpack prepare configure compile install; do
-		has ${phase} ${DEFINED_PHASES} && return 0
-		[[ ${phase} == $1 ]] && return 1
-	done
-	# We shouldn't actually get here
-	return 1
-}
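
# Example (illustrative): with DEFINED_PHASES="compile install",
# has_phase_defined_up_to configure returns 1 (no phase at or before
# configure is defined), while has_phase_defined_up_to install returns 0
# because compile is encountered first.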
-
-dyn_prepare() {
-
-	if [[ -e $PORTAGE_BUILDDIR/.prepared ]] ; then
-		vecho ">>> It appears that '$PF' is already prepared; skipping."
-		vecho ">>> Remove '$PORTAGE_BUILDDIR/.prepared' to force prepare."
-		return 0
-	fi
-
-	if [[ -d $S ]] ; then
-		cd "${S}"
-	elif has $EAPI 0 1 2 3 3_pre2 ; then
-		cd "${WORKDIR}"
-	elif [[ -z ${A} ]] && ! has_phase_defined_up_to prepare; then
-		cd "${WORKDIR}"
-	else
-		die "The source directory '${S}' doesn't exist"
-	fi
-
-	trap abort_prepare SIGINT SIGQUIT
-
-	ebuild_phase pre_src_prepare
-	vecho ">>> Preparing source in $PWD ..."
-	ebuild_phase src_prepare
-	>> "$PORTAGE_BUILDDIR/.prepared" || \
-		die "Failed to create $PORTAGE_BUILDDIR/.prepared"
-	vecho ">>> Source prepared."
-	ebuild_phase post_src_prepare
-
-	trap - SIGINT SIGQUIT
-}
-
-dyn_configure() {
-
-	if [[ -e $PORTAGE_BUILDDIR/.configured ]] ; then
-		vecho ">>> It appears that '$PF' is already configured; skipping."
-		vecho ">>> Remove '$PORTAGE_BUILDDIR/.configured' to force configuration."
-		return 0
-	fi
-
-	if [[ -d $S ]] ; then
-		cd "${S}"
-	elif has $EAPI 0 1 2 3 3_pre2 ; then
-		cd "${WORKDIR}"
-	elif [[ -z ${A} ]] && ! has_phase_defined_up_to configure; then
-		cd "${WORKDIR}"
-	else
-		die "The source directory '${S}' doesn't exist"
-	fi
-
-	trap abort_configure SIGINT SIGQUIT
-
-	ebuild_phase pre_src_configure
-
-	vecho ">>> Configuring source in $PWD ..."
-	ebuild_phase src_configure
-	>> "$PORTAGE_BUILDDIR/.configured" || \
-		die "Failed to create $PORTAGE_BUILDDIR/.configured"
-	vecho ">>> Source configured."
-
-	ebuild_phase post_src_configure
-
-	trap - SIGINT SIGQUIT
-}
-
-dyn_compile() {
-
-	if [[ -e $PORTAGE_BUILDDIR/.compiled ]] ; then
-		vecho ">>> It appears that '${PF}' is already compiled; skipping."
-		vecho ">>> Remove '$PORTAGE_BUILDDIR/.compiled' to force compilation."
-		return 0
-	fi
-
-	if [[ -d $S ]] ; then
-		cd "${S}"
-	elif has $EAPI 0 1 2 3 3_pre2 ; then
-		cd "${WORKDIR}"
-	elif [[ -z ${A} ]] && ! has_phase_defined_up_to compile; then
-		cd "${WORKDIR}"
-	else
-		die "The source directory '${S}' doesn't exist"
-	fi
-
-	trap abort_compile SIGINT SIGQUIT
-
-	if has distcc $FEATURES && has distcc-pump $FEATURES ; then
-		if [[ -z $INCLUDE_SERVER_PORT ]] || [[ ! -w $INCLUDE_SERVER_PORT ]] ; then
-			eval $(pump --startup)
-			trap "pump --shutdown" EXIT
-		fi
-	fi
-
-	ebuild_phase pre_src_compile
-
-	vecho ">>> Compiling source in $PWD ..."
-	ebuild_phase src_compile
-	>> "$PORTAGE_BUILDDIR/.compiled" || \
-		die "Failed to create $PORTAGE_BUILDDIR/.compiled"
-	vecho ">>> Source compiled."
-
-	ebuild_phase post_src_compile
-
-	trap - SIGINT SIGQUIT
-}
-
-dyn_test() {
-
-	if [[ -e $PORTAGE_BUILDDIR/.tested ]] ; then
-		vecho ">>> It appears that ${PN} has already been tested; skipping."
-		vecho ">>> Remove '${PORTAGE_BUILDDIR}/.tested' to force test."
-		return
-	fi
-
-	if [ "${EBUILD_FORCE_TEST}" == "1" ] ; then
-		# If USE came from ${T}/environment then it might not have USE=test
-		# like it's supposed to here.
-		! has test ${USE} && export USE="${USE} test"
-	fi
-
-	trap "abort_test" SIGINT SIGQUIT
-	if [ -d "${S}" ]; then
-		cd "${S}"
-	else
-		cd "${WORKDIR}"
-	fi
-
-	if ! has test $FEATURES && [ "${EBUILD_FORCE_TEST}" != "1" ]; then
-		vecho ">>> Test phase [not enabled]: ${CATEGORY}/${PF}"
-	elif has test $RESTRICT; then
-		einfo "Skipping make test/check due to ebuild restriction."
-		vecho ">>> Test phase [explicitly disabled]: ${CATEGORY}/${PF}"
-	else
-		local save_sp=${SANDBOX_PREDICT}
-		addpredict /
-		ebuild_phase pre_src_test
-		ebuild_phase src_test
-		>> "$PORTAGE_BUILDDIR/.tested" || \
-			die "Failed to create $PORTAGE_BUILDDIR/.tested"
-		ebuild_phase post_src_test
-		SANDBOX_PREDICT=${save_sp}
-	fi
-
-	trap - SIGINT SIGQUIT
+	local var="SANDBOX_${_v}"
+	[[ -z $1 || -n $2 ]] && die "Usage: add$(echo ${_v} | \
+		LC_ALL=C tr '[:upper:]' '[:lower:]') <colon-delimited list of paths>"
+	export ${var}="${!var:+${!var}:}$1"
 }
+# bash-4 version:
+# local var="SANDBOX_${1^^}"
+# addread() { _sb_append_var ${0#add} "$@" ; }
+addread()    { _sb_append_var READ    "$@" ; }
+addwrite()   { _sb_append_var WRITE   "$@" ; }
+adddeny()    { _sb_append_var DENY    "$@" ; }
+addpredict() { _sb_append_var PREDICT "$@" ; }
 
-dyn_install() {
-	[ -z "$PORTAGE_BUILDDIR" ] && die "${FUNCNAME}: PORTAGE_BUILDDIR is unset"
-	if has noauto $FEATURES ; then
-		rm -f "${PORTAGE_BUILDDIR}/.installed"
-	elif [[ -e $PORTAGE_BUILDDIR/.installed ]] ; then
-		vecho ">>> It appears that '${PF}' is already installed; skipping."
-		vecho ">>> Remove '${PORTAGE_BUILDDIR}/.installed' to force install."
-		return 0
-	fi
-	trap "abort_install" SIGINT SIGQUIT
-	ebuild_phase pre_src_install
-	rm -rf "${PORTAGE_BUILDDIR}/image"
-	mkdir "${PORTAGE_BUILDDIR}/image"
-	if [[ -d $S ]] ; then
-		cd "${S}"
-	elif has $EAPI 0 1 2 3 3_pre2 ; then
-		cd "${WORKDIR}"
-	elif [[ -z ${A} ]] && ! has_phase_defined_up_to install; then
-		cd "${WORKDIR}"
-	else
-		die "The source directory '${S}' doesn't exist"
-	fi
+addwrite "${PORTAGE_TMPDIR}"
+addread "/:${PORTAGE_TMPDIR}"
+[[ -n ${PORTAGE_GPG_DIR} ]] && addpredict "${PORTAGE_GPG_DIR}"
 
-	vecho
-	vecho ">>> Install ${PF} into ${D} category ${CATEGORY}"
-	#our custom version of libtool uses $S and $D to fix
-	#invalid paths in .la files
-	export S D
-
-	# Reset exeinto(), docinto(), insinto(), and into() state variables
-	# in case the user is running the install phase multiple times
-	# consecutively via the ebuild command.
-	export DESTTREE=/usr
-	export INSDESTTREE=""
-	export _E_EXEDESTTREE_=""
-	export _E_DOCDESTTREE_=""
-
-	ebuild_phase src_install
-	>> "$PORTAGE_BUILDDIR/.installed" || \
-		die "Failed to create $PORTAGE_BUILDDIR/.installed"
-	vecho ">>> Completed installing ${PF} into ${D}"
-	vecho
-	ebuild_phase post_src_install
-
-	cd "${PORTAGE_BUILDDIR}"/build-info
-	set -f
-	local f x
-	IFS=$' \t\n\r'
-	for f in CATEGORY DEFINED_PHASES FEATURES INHERITED IUSE REQUIRED_USE \
-		PF PKGUSE SLOT KEYWORDS HOMEPAGE DESCRIPTION ; do
-		x=$(echo -n ${!f})
-		[[ -n $x ]] && echo "$x" > $f
+# Avoid sandbox violations in temporary directories.
+if [[ -w $T ]] ; then
+	export TEMP=$T
+	export TMP=$T
+	export TMPDIR=$T
+elif [[ $SANDBOX_ON = 1 ]] ; then
+	for x in TEMP TMP TMPDIR ; do
+		[[ -n ${!x} ]] && addwrite "${!x}"
 	done
-	if [[ $CATEGORY != virtual ]] ; then
-		for f in ASFLAGS CBUILD CC CFLAGS CHOST CTARGET CXX \
-			CXXFLAGS EXTRA_ECONF EXTRA_EINSTALL EXTRA_MAKE \
-			LDFLAGS LIBCFLAGS LIBCXXFLAGS ; do
-			x=$(echo -n ${!f})
-			[[ -n $x ]] && echo "$x" > $f
-		done
-	fi
-	echo "${USE}"       > USE
-	echo "${EAPI:-0}"   > EAPI
-	set +f
-
-	# local variables can leak into the saved environment.
-	unset f
-
-	save_ebuild_env --exclude-init-phases | filter_readonly_variables \
-		--filter-path --filter-sandbox --allow-extra-vars > environment
-	assert "save_ebuild_env failed"
+	unset x
+fi
 
-	${PORTAGE_BZIP2_COMMAND} -f9 environment
+# the sandbox is disabled by default except when overridden in the relevant stages
+export SANDBOX_ON=0
 
-	cp "${EBUILD}" "${PF}.ebuild"
-	[ -n "${PORTAGE_REPO_NAME}" ]  && echo "${PORTAGE_REPO_NAME}" > repository
-	if has nostrip ${FEATURES} ${RESTRICT} || has strip ${RESTRICT}
-	then
-		>> DEBUGBUILD
-	fi
-	trap - SIGINT SIGQUIT
+esyslog() {
+	# Custom version of esyslog() to take care of the "Red Star" bug.
+	# MUST follow functions.sh to override the "" parameter problem.
+	return 0
 }
 
-dyn_preinst() {
-	if [ -z "${D}" ]; then
-		eerror "${FUNCNAME}: D is unset"
-		return 1
-	fi
-	ebuild_phase_with_hooks pkg_preinst
-}
+# Ensure that $PWD is sane whenever possible, to protect against
+# exploitation of insecure search path for python -c in ebuilds.
+# See bug #239560.
+if ! has "$EBUILD_PHASE" clean cleanrm depend help ; then
+	cd "$PORTAGE_BUILDDIR" || \
+		die "PORTAGE_BUILDDIR does not exist: '$PORTAGE_BUILDDIR'"
+fi
 
-dyn_help() {
-	echo
-	echo "Portage"
-	echo "Copyright 1999-2010 Gentoo Foundation"
-	echo
-	echo "How to use the ebuild command:"
-	echo
-	echo "The first argument to ebuild should be an existing .ebuild file."
-	echo
-	echo "One or more of the following options can then be specified.  If more"
-	echo "than one option is specified, each will be executed in order."
-	echo
-	echo "  help        : show this help screen"
-	echo "  pretend     : execute package specific pretend actions"
-	echo "  setup       : execute package specific setup actions"
-	echo "  fetch       : download source archive(s) and patches"
-	echo "  digest      : create a manifest file for the package"
-	echo "  manifest    : create a manifest file for the package"
-	echo "  unpack      : unpack sources (auto-dependencies if needed)"
-	echo "  prepare     : prepare sources (auto-dependencies if needed)"
-	echo "  configure   : configure sources (auto-fetch/unpack if needed)"
-	echo "  compile     : compile sources (auto-fetch/unpack/configure if needed)"
-	echo "  test        : test package (auto-fetch/unpack/configure/compile if needed)"
-	echo "  preinst     : execute pre-install instructions"
-	echo "  postinst    : execute post-install instructions"
-	echo "  install     : install the package to the temporary install directory"
-	echo "  qmerge      : merge image into live filesystem, recording files in db"
-	echo "  merge       : do fetch, unpack, compile, install and qmerge"
-	echo "  prerm       : execute pre-removal instructions"
-	echo "  postrm      : execute post-removal instructions"
-	echo "  unmerge     : remove package from live filesystem"
-	echo "  config      : execute package specific configuration actions"
-	echo "  package     : create a tarball package in ${PKGDIR}/All"
-	echo "  rpm         : build a RedHat RPM package"
-	echo "  clean       : clean up all source and temporary files"
-	echo
-	echo "The following settings will be used for the ebuild process:"
-	echo
-	echo "  package     : ${PF}"
-	echo "  slot        : ${SLOT}"
-	echo "  category    : ${CATEGORY}"
-	echo "  description : ${DESCRIPTION}"
-	echo "  system      : ${CHOST}"
-	echo "  c flags     : ${CFLAGS}"
-	echo "  c++ flags   : ${CXXFLAGS}"
-	echo "  make flags  : ${MAKEOPTS}"
-	echo -n "  build mode  : "
-	if has nostrip ${FEATURES} ${RESTRICT} || has strip ${RESTRICT} ;
-	then
-		echo "debug (large)"
-	else
-		echo "production (stripped)"
-	fi
-	echo "  merge to    : ${ROOT}"
-	echo
-	if [ -n "$USE" ]; then
-		echo "Additionally, support for the following optional features will be enabled:"
-		echo
-		echo "  ${USE}"
-	fi
-	echo
-}
+#if no perms are specified, dirs/files will have decent defaults
+#(not secretive, but not stupid)
+umask 022
 
 # debug-print() gets called from many places with verbose status information useful
 # for tracking down problems. The output is in $T/eclass-debug.log.
@@ -1365,11 +230,15 @@ inherit() {
 		unset $__export_funcs_var
 
 		if [ "${EBUILD_PHASE}" != "depend" ] && \
+			[ "${EBUILD_PHASE}" != "nofetch" ] && \
 			[[ ${EBUILD_PHASE} != *rm ]] && \
 			[[ ${EMERGE_FROM} != "binary" ]] ; then
 			# This is disabled in the *rm phases because they frequently give
 			# false alarms due to INHERITED in /var/db/pkg being outdated
-			# in comparison to the eclasses from the portage tree.
+			# in comparison to the eclasses from the portage tree. It's
+			# disabled for nofetch, since that can be called by repoman and
+			# that triggers bug #407449 due to repoman not exporting
+			# non-essential variables such as INHERITED.
 			if ! has $ECLASS $INHERITED $__INHERITED_QA_CACHE ; then
 				eqawarn "QA Notice: ECLASS '$ECLASS' inherited illegally in $CATEGORY/$PF $EBUILD_PHASE"
 			fi
@@ -1418,11 +287,11 @@ inherit() {
 
 		# If each var has a value, append it to the global variable E_* to
 		# be applied after everything is finished. New incremental behavior.
-		[ "${IUSE+set}"       = set ] && export E_IUSE="${E_IUSE} ${IUSE}"
-		[ "${REQUIRED_USE+set}"       = set ] && export E_REQUIRED_USE="${E_REQUIRED_USE} ${REQUIRED_USE}"
-		[ "${DEPEND+set}"     = set ] && export E_DEPEND="${E_DEPEND} ${DEPEND}"
-		[ "${RDEPEND+set}"    = set ] && export E_RDEPEND="${E_RDEPEND} ${RDEPEND}"
-		[ "${PDEPEND+set}"    = set ] && export E_PDEPEND="${E_PDEPEND} ${PDEPEND}"
+		[ "${IUSE+set}"         = set ] && E_IUSE+="${E_IUSE:+ }${IUSE}"
+		[ "${REQUIRED_USE+set}" = set ] && E_REQUIRED_USE+="${E_REQUIRED_USE:+ }${REQUIRED_USE}"
+		[ "${DEPEND+set}"       = set ] && E_DEPEND+="${E_DEPEND:+ }${DEPEND}"
+		[ "${RDEPEND+set}"      = set ] && E_RDEPEND+="${E_RDEPEND:+ }${RDEPEND}"
+		[ "${PDEPEND+set}"      = set ] && E_PDEPEND+="${E_PDEPEND:+ }${PDEPEND}"
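
# Sketch of the incremental behavior above (values hypothetical): ${VAR:+ }
# expands to a separating space only when VAR is already non-empty, so
# repeated inherits never accumulate leading blanks:
#
#   E_IUSE=""; IUSE="ssl"; E_IUSE+="${E_IUSE:+ }${IUSE}"   # E_IUSE="ssl"
#   IUSE="doc";            E_IUSE+="${E_IUSE:+ }${IUSE}"   # E_IUSE="ssl doc"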
 
 		[ "${B_IUSE+set}"     = set ] && IUSE="${B_IUSE}"
 		[ "${B_IUSE+set}"     = set ] || unset IUSE
@@ -1477,214 +346,6 @@ EXPORT_FUNCTIONS() {
 	eval $__export_funcs_var+=\" $*\"
 }
 
-# this is a function for removing any directory matching a passed-in pattern from
-# PATH
-remove_path_entry() {
-	save_IFS
-	IFS=":"
-	stripped_path="${PATH}"
-	while [ -n "$1" ]; do
-		cur_path=""
-		for p in ${stripped_path}; do
-			if [ "${p/${1}}" == "${p}" ]; then
-				cur_path="${cur_path}:${p}"
-			fi
-		done
-		stripped_path="${cur_path#:*}"
-		shift
-	done
-	restore_IFS
-	PATH="${stripped_path}"
-}
-
-# @FUNCTION: _ebuild_arg_to_phase
-# @DESCRIPTION:
-# Translate a known ebuild(1) argument into the precise
-# name of its corresponding ebuild phase.
-_ebuild_arg_to_phase() {
-	[ $# -ne 2 ] && die "expected exactly 2 args, got $#: $*"
-	local eapi=$1
-	local arg=$2
-	local phase_func=""
-
-	case "$arg" in
-		pretend)
-			! has $eapi 0 1 2 3 3_pre2 && \
-				phase_func=pkg_pretend
-			;;
-		setup)
-			phase_func=pkg_setup
-			;;
-		nofetch)
-			phase_func=pkg_nofetch
-			;;
-		unpack)
-			phase_func=src_unpack
-			;;
-		prepare)
-			! has $eapi 0 1 && \
-				phase_func=src_prepare
-			;;
-		configure)
-			! has $eapi 0 1 && \
-				phase_func=src_configure
-			;;
-		compile)
-			phase_func=src_compile
-			;;
-		test)
-			phase_func=src_test
-			;;
-		install)
-			phase_func=src_install
-			;;
-		preinst)
-			phase_func=pkg_preinst
-			;;
-		postinst)
-			phase_func=pkg_postinst
-			;;
-		prerm)
-			phase_func=pkg_prerm
-			;;
-		postrm)
-			phase_func=pkg_postrm
-			;;
-	esac
-
-	[[ -z $phase_func ]] && return 1
-	echo "$phase_func"
-	return 0
-}
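
# Example (illustrative): "_ebuild_arg_to_phase 4 pretend" prints pkg_pretend,
# while "_ebuild_arg_to_phase 0 pretend" prints nothing and returns 1, since
# pkg_pretend only exists from EAPI 4 onwards.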
-
-_ebuild_phase_funcs() {
-	[ $# -ne 2 ] && die "expected exactly 2 args, got $#: $*"
-	local eapi=$1
-	local phase_func=$2
-	local default_phases="pkg_nofetch src_unpack src_prepare src_configure
-		src_compile src_install src_test"
-	local x y default_func=""
-
-	for x in pkg_nofetch src_unpack src_test ; do
-		declare -F $x >/dev/null || \
-			eval "$x() { _eapi0_$x \"\$@\" ; }"
-	done
-
-	case $eapi in
-
-		0|1)
-
-			if ! declare -F src_compile >/dev/null ; then
-				case $eapi in
-					0)
-						src_compile() { _eapi0_src_compile "$@" ; }
-						;;
-					*)
-						src_compile() { _eapi1_src_compile "$@" ; }
-						;;
-				esac
-			fi
-
-			for x in $default_phases ; do
-				eval "default_$x() {
-					die \"default_$x() is not supported with EAPI='$eapi' during phase $phase_func\"
-				}"
-			done
-
-			eval "default() {
-				die \"default() is not supported with EAPI='$eapi' during phase $phase_func\"
-			}"
-
-			;;
-
-		*)
-
-			declare -F src_configure >/dev/null || \
-				src_configure() { _eapi2_src_configure "$@" ; }
-
-			declare -F src_compile >/dev/null || \
-				src_compile() { _eapi2_src_compile "$@" ; }
-
-			has $eapi 2 3 3_pre2 || declare -F src_install >/dev/null || \
-				src_install() { _eapi4_src_install "$@" ; }
-
-			if has $phase_func $default_phases ; then
-
-				_eapi2_pkg_nofetch   () { _eapi0_pkg_nofetch          "$@" ; }
-				_eapi2_src_unpack    () { _eapi0_src_unpack           "$@" ; }
-				_eapi2_src_prepare   () { true                             ; }
-				_eapi2_src_test      () { _eapi0_src_test             "$@" ; }
-				_eapi2_src_install   () { die "$FUNCNAME is not supported" ; }
-
-				for x in $default_phases ; do
-					eval "default_$x() { _eapi2_$x \"\$@\" ; }"
-				done
-
-				eval "default() { _eapi2_$phase_func \"\$@\" ; }"
-
-				case $eapi in
-					2|3)
-						;;
-					*)
-						eval "default_src_install() { _eapi4_src_install \"\$@\" ; }"
-						[[ $phase_func = src_install ]] && \
-							eval "default() { _eapi4_$phase_func \"\$@\" ; }"
-						;;
-				esac
-
-			else
-
-				for x in $default_phases ; do
-					eval "default_$x() {
-						die \"default_$x() is not supported in phase $phase_func\"
-					}"
-				done
-
-				eval "default() {
-					die \"default() is not supported with EAPI='$eapi' during phase $phase_func\"
-				}"
-
-			fi
-
-			;;
-	esac
-}
-
-# Set the given variables unless they have already been set (e.g. during emerge
-# invocation) to values different from those set in make.conf.
-set_unless_changed() {
-	if [[ $# -lt 1 ]]; then
-		die "${FUNCNAME}() requires at least 1 argument: VARIABLE=VALUE"
-	fi
-
-	local argument value variable
-	for argument in "$@"; do
-		if [[ ${argument} != *=* ]]; then
-			die "${FUNCNAME}(): Argument '${argument}' has incorrect syntax"
-		fi
-		variable="${argument%%=*}"
-		value="${argument#*=}"
-		if eval "[[ \${${variable}} == \$(env -u ${variable} portageq envvar ${variable}) ]]"; then
-			eval "${variable}=\"\${value}\""
-		fi
-	done
-}
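
# Usage sketch (variable and value hypothetical): the assignment is applied
# only when the current value still matches what "portageq envvar" reports
# with the variable stripped from the environment, i.e. the user has not
# overridden it for this invocation:
#
#   set_unless_changed "MAKEOPTS=-j1"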
-
-# Unset the given variables unless they have been set (e.g. during emerge
-# invocation) to values different from those set in make.conf.
-unset_unless_changed() {
-	if [[ $# -lt 1 ]]; then
-		die "${FUNCNAME}() requires at least 1 argument: VARIABLE"
-	fi
-
-	local variable
-	for variable in "$@"; do
-		if eval "[[ \${${variable}} == \$(env -u ${variable} portageq envvar ${variable}) ]]"; then
-			unset ${variable}
-		fi
-	done
-}
-
 PORTAGE_BASHRCS_SOURCED=0
 
 # @FUNCTION: source_all_bashrcs
@@ -1716,222 +377,46 @@ source_all_bashrcs() {
 		done
 	fi
 
-	# We assume if people are changing shopts in their bashrc they do so at their
-	# own peril.  This is the ONLY non-portage bit of code that can change shopts
-	# without a QA violation.
-	for x in "${PORTAGE_BASHRC}" "${PM_EBUILD_HOOK_DIR}"/${CATEGORY}/{${PN},${PN}:${SLOT},${P},${PF}}; do
-		if [ -r "${x}" ]; then
-			# If $- contains x, then tracing has already been enabled elsewhere
-			# for some reason.  We preserve its state so as not to interfere.
-			if [ "$PORTAGE_DEBUG" != "1" ] || [ "${-/x/}" != "$-" ]; then
-				source "${x}"
-			else
-				set -x
-				source "${x}"
-				set +x
-			fi
+	if [ -r "${PORTAGE_BASHRC}" ] ; then
+		if [ "$PORTAGE_DEBUG" != "1" ] || [ "${-/x/}" != "$-" ]; then
+			source "${PORTAGE_BASHRC}"
+		else
+			set -x
+			source "${PORTAGE_BASHRC}"
+			set +x
 		fi
-	done
-
-	[ ! -z "${OCC}" ] && export CC="${OCC}"
-	[ ! -z "${OCXX}" ] && export CXX="${OCXX}"
-}
-
-# Hardcoded bash lists are needed for backward compatibility with
-# <portage-2.1.4 since they assume that a newly installed version
-# of ebuild.sh will work for pkg_postinst, pkg_prerm, and pkg_postrm
-# when portage is upgrading itself.
-
-PORTAGE_READONLY_METADATA="DEFINED_PHASES DEPEND DESCRIPTION
-	EAPI HOMEPAGE INHERITED IUSE REQUIRED_USE KEYWORDS LICENSE
-	PDEPEND PROVIDE RDEPEND RESTRICT SLOT SRC_URI"
-
-PORTAGE_READONLY_VARS="D EBUILD EBUILD_PHASE \
-	EBUILD_SH_ARGS ECLASSDIR EMERGE_FROM FILESDIR MERGE_TYPE \
-	PM_EBUILD_HOOK_DIR \
-	PORTAGE_ACTUAL_DISTDIR PORTAGE_ARCHLIST PORTAGE_BASHRC  \
-	PORTAGE_BINPKG_FILE PORTAGE_BINPKG_TAR_OPTS PORTAGE_BINPKG_TMPFILE \
-	PORTAGE_BIN_PATH PORTAGE_BUILDDIR PORTAGE_BUNZIP2_COMMAND \
-	PORTAGE_BZIP2_COMMAND PORTAGE_COLORMAP PORTAGE_CONFIGROOT \
-	PORTAGE_DEBUG PORTAGE_DEPCACHEDIR PORTAGE_EBUILD_EXIT_FILE \
-	PORTAGE_GID PORTAGE_GRPNAME PORTAGE_INST_GID PORTAGE_INST_UID \
-	PORTAGE_IPC_DAEMON PORTAGE_IUSE PORTAGE_LOG_FILE \
-	PORTAGE_MUTABLE_FILTERED_VARS PORTAGE_PYM_PATH PORTAGE_PYTHON \
-	PORTAGE_READONLY_METADATA PORTAGE_READONLY_VARS \
-	PORTAGE_REPO_NAME PORTAGE_RESTRICT PORTAGE_SANDBOX_COMPAT_LEVEL \
-	PORTAGE_SAVED_READONLY_VARS PORTAGE_SIGPIPE_STATUS \
-	PORTAGE_TMPDIR PORTAGE_UPDATE_ENV PORTAGE_USERNAME \
-	PORTAGE_VERBOSE PORTAGE_WORKDIR_MODE PORTDIR PORTDIR_OVERLAY \
-	PROFILE_PATHS REPLACING_VERSIONS REPLACED_BY_VERSION T WORKDIR"
-
-PORTAGE_SAVED_READONLY_VARS="A CATEGORY P PF PN PR PV PVR"
-
-# Variables that portage sets but doesn't mark readonly.
-# In order to prevent changed values from causing unexpected
-# interference, they are filtered out of the environment when
-# it is saved or loaded (any mutations do not persist).
-PORTAGE_MUTABLE_FILTERED_VARS="AA HOSTNAME"
-
-# @FUNCTION: filter_readonly_variables
-# @DESCRIPTION: [--filter-sandbox] [--allow-extra-vars]
-# Read an environment from stdin and echo to stdout while filtering variables
-# with names that are known to cause interference:
-#
-#   * some specific variables for which bash does not allow assignment
-#   * some specific variables that affect portage or sandbox behavior
-#   * variable names that begin with a digit or that contain any
-#     non-alphanumeric characters that are not supported by bash
-#
-# --filter-sandbox causes all SANDBOX_* variables to be filtered, which
-# is only desired in certain cases, such as during preprocessing or when
-# saving environment.bz2 for a binary or installed package.
-#
-# --filter-features causes the special FEATURES variable to be filtered.
-# Generally, we want it to persist between phases since the user might
-# want to modify it via bashrc to enable things like splitdebug and
-# installsources for specific packages. They should be able to modify it
-# in pre_pkg_setup() and have it persist all the way through the install
-# phase. However, if FEATURES exist inside environment.bz2 then they
-# should be overridden by current settings.
-#
-# --filter-locale causes locale related variables such as LANG and LC_*
-# variables to be filtered. These variables should persist between phases,
-# in case they are modified by the ebuild. However, the current user
-# settings should be used when loading the environment from a binary or
-# installed package.
-#
-# --filter-path causes the PATH variable to be filtered. This variable
-# should persist between phases, in case it is modified by the ebuild.
-# However, old settings should be overridden when loading the
-# environment from a binary or installed package.
-#
-# --allow-extra-vars causes some extra vars to be allowed through, such
-# as ${PORTAGE_SAVED_READONLY_VARS} and ${PORTAGE_MUTABLE_FILTERED_VARS}.
-#
-# In bash-3.2_p20+ an attempt to assign BASH_*, FUNCNAME, GROUPS or any
-# readonly variable cause the shell to exit while executing the "source"
-# builtin command. To avoid this problem, this function filters those
-# variables out and discards them. See bug #190128.
-filter_readonly_variables() {
-	local x filtered_vars
-	local readonly_bash_vars="BASHOPTS BASHPID DIRSTACK EUID
-		FUNCNAME GROUPS PIPESTATUS PPID SHELLOPTS UID"
-	local bash_misc_vars="BASH BASH_.* COMP_WORDBREAKS HISTCMD
-		HISTFILE HOSTNAME HOSTTYPE IFS LINENO MACHTYPE OLDPWD
-		OPTERR OPTIND OSTYPE POSIXLY_CORRECT PS4 PWD RANDOM
-		SECONDS SHELL SHLVL"
-	local filtered_sandbox_vars="SANDBOX_ACTIVE SANDBOX_BASHRC
-		SANDBOX_DEBUG_LOG SANDBOX_DISABLED SANDBOX_LIB
-		SANDBOX_LOG SANDBOX_ON"
-	local misc_garbage_vars="_portage_filter_opts"
-	filtered_vars="$readonly_bash_vars $bash_misc_vars
-		$PORTAGE_READONLY_VARS $misc_garbage_vars"
-
-	# Don't filter/interfere with prefix variables unless they are
-	# supported by the current EAPI.
-	case "${EAPI:-0}" in
-		0|1|2)
-			;;
-		*)
-			filtered_vars+=" ED EPREFIX EROOT"
-			;;
-	esac
-
-	if has --filter-sandbox $* ; then
-		filtered_vars="${filtered_vars} SANDBOX_.*"
-	else
-		filtered_vars="${filtered_vars} ${filtered_sandbox_vars}"
-	fi
-	if has --filter-features $* ; then
-		filtered_vars="${filtered_vars} FEATURES PORTAGE_FEATURES"
-	fi
-	if has --filter-path $* ; then
-		filtered_vars+=" PATH"
-	fi
-	if has --filter-locale $* ; then
-		filtered_vars+=" LANG LC_ALL LC_COLLATE
-			LC_CTYPE LC_MESSAGES LC_MONETARY
-			LC_NUMERIC LC_PAPER LC_TIME"
 	fi
-	if ! has --allow-extra-vars $* ; then
-		filtered_vars="
-			${filtered_vars}
-			${PORTAGE_SAVED_READONLY_VARS}
-			${PORTAGE_MUTABLE_FILTERED_VARS}
-		"
-	fi
-
-	"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}"/filter-bash-environment.py "${filtered_vars}" || die "filter-bash-environment.py failed"
-}
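
# Usage sketch: the function filters an environment dump read from stdin,
# mirroring how preprocess_ebuild_env below invokes it:
#
#   filter_readonly_variables --filter-sandbox --filter-path \
#       < "${T}/environment" > "${T}/environment.filtered"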
 
-# @FUNCTION: preprocess_ebuild_env
-# @DESCRIPTION:
-# Filter any readonly variables from ${T}/environment, source it, and then
-# save it via save_ebuild_env(). This process should be sufficient to prevent
-# any stale variables or functions from an arbitrary environment from
-# interfering with the current environment. This is useful when an existing
-# environment needs to be loaded from a binary or installed package.
-preprocess_ebuild_env() {
-	local _portage_filter_opts="--filter-features --filter-locale --filter-path --filter-sandbox"
-
-	# If environment.raw is present, this is a signal from the python side,
-	# indicating that the environment may contain stale FEATURES and
-	# SANDBOX_{DENY,PREDICT,READ,WRITE} variables that should be filtered out.
-	# Otherwise, we don't need to filter the environment.
-	[ -f "${T}/environment.raw" ] || return 0
-
-	filter_readonly_variables $_portage_filter_opts < "${T}"/environment \
-		>> "$T/environment.filtered" || return $?
-	unset _portage_filter_opts
-	mv "${T}"/environment.filtered "${T}"/environment || return $?
-	rm -f "${T}/environment.success" || return $?
-	# WARNING: Code inside this subshell should avoid making assumptions
-	# about variables or functions after source "${T}"/environment has been
-	# called. Any variables that need to be relied upon should already be
-	# filtered out above.
-	(
-		export SANDBOX_ON=1
-		source "${T}/environment" || exit $?
-		# We have to temporarily disable sandbox since the
-		# SANDBOX_{DENY,READ,PREDICT,WRITE} values we've just loaded
-		# may be unusable (triggering spurious sandbox violations)
-		# until we've merged them with our current values.
-		export SANDBOX_ON=0
-
-		# It's remotely possible that save_ebuild_env() has been overridden
-		# by the above source command. To protect ourselves, we override it
-		# here with our own version. ${PORTAGE_BIN_PATH} is safe to use here
-		# because it's already filtered above.
-		source "${PORTAGE_BIN_PATH}/isolated-functions.sh" || exit $?
-
-		# Rely on save_ebuild_env() to filter out any remaining variables
-		# and functions that could interfere with the current environment.
-		save_ebuild_env || exit $?
-		>> "$T/environment.success" || exit $?
-	) > "${T}/environment.filtered"
-	local retval
-	if [ -e "${T}/environment.success" ] ; then
-		filter_readonly_variables --filter-features < \
-			"${T}/environment.filtered" > "${T}/environment"
-		retval=$?
-	else
-		retval=1
+	if [[ $EBUILD_PHASE != depend ]] ; then
+		# The user's bashrc is the ONLY non-portage bit of code that can
+		# change shopts without a QA violation.
+		for x in "${PM_EBUILD_HOOK_DIR}"/${CATEGORY}/{${PN},${PN}:${SLOT},${P},${PF}}; do
+			if [ -r "${x}" ]; then
+				# If $- contains x, then tracing has already been enabled
+				# elsewhere for some reason. We preserve its state so as
+				# not to interfere.
+				if [ "$PORTAGE_DEBUG" != "1" ] || [ "${-/x/}" != "$-" ]; then
+					source "${x}"
+				else
+					set -x
+					source "${x}"
+					set +x
+				fi
+			fi
+		done
 	fi
-	rm -f "${T}"/environment.{filtered,raw,success}
-	return ${retval}
+
+	[ ! -z "${OCC}" ] && export CC="${OCC}"
+	[ ! -z "${OCXX}" ] && export CXX="${OCXX}"
 }
 
 # === === === === === === === === === === === === === === === === === ===
 # === === === === === functions end, main part begins === === === === ===
-# === === === === === functions end, main part begins === === === === ===
-# === === === === === functions end, main part begins === === === === ===
 # === === === === === === === === === === === === === === === === === ===
 
 export SANDBOX_ON="1"
 export S=${WORKDIR}/${P}
 
-unset E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND
-
 # Turn off extended glob matching so that g++ doesn't get incorrectly matched.
 shopt -u extglob
 
@@ -1993,11 +478,12 @@ if ! has "$EBUILD_PHASE" clean cleanrm depend && \
 	# The environment may have been extracted from environment.bz2 or
 	# may have come from another version of ebuild.sh or something.
 	# In any case, preprocess it to prevent any potential interference.
+	# NOTE: export ${FOO}=... requires quoting, unlike normal exports
 	preprocess_ebuild_env || \
 		die "error processing environment"
 	# Colon separated SANDBOX_* variables need to be cumulative.
 	for x in SANDBOX_DENY SANDBOX_READ SANDBOX_PREDICT SANDBOX_WRITE ; do
-		export PORTAGE_${x}=${!x}
+		export PORTAGE_${x}="${!x}"
 	done
 	PORTAGE_SANDBOX_ON=${SANDBOX_ON}
 	export SANDBOX_ON=1
@@ -2011,13 +497,13 @@ if ! has "$EBUILD_PHASE" clean cleanrm depend && \
 	for x in SANDBOX_DENY SANDBOX_PREDICT SANDBOX_READ SANDBOX_WRITE ; do
 		y="PORTAGE_${x}"
 		if [ -z "${!x}" ] ; then
-			export ${x}=${!y}
+			export ${x}="${!y}"
 		elif [ -n "${!y}" ] && [ "${!y}" != "${!x}" ] ; then
 			# filter out dupes
-			export ${x}=$(printf "${!y}:${!x}" | tr ":" "\0" | \
-				sort -z -u | tr "\0" ":")
+			export ${x}="$(printf "${!y}:${!x}" | tr ":" "\0" | \
+				sort -z -u | tr "\0" ":")"
 		fi
-		export ${x}=${!x%:}
+		export ${x}="${!x%:}"
 		unset PORTAGE_${x}
 	done
 	unset x y
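
# Sketch of the dedup above (lists hypothetical): the colon lists are merged
# NUL-separated so that embedded spaces survive, then uniquified; the
# trailing colon is trimmed afterwards via ${!x%:}:
#
#   printf "/a:/b:/a" | tr ":" "\0" | sort -z -u | tr "\0" ":"   # -> "/a:/b:"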
@@ -2026,6 +512,10 @@ if ! has "$EBUILD_PHASE" clean cleanrm depend && \
 	[[ -n $EAPI ]] || EAPI=0
 fi
 
+if has "${EAPI:-0}" 4-python; then
+	shopt -s globstar
+fi
+
 if ! has "$EBUILD_PHASE" clean cleanrm ; then
 	if [[ $EBUILD_PHASE = depend || ! -f $T/environment || \
 		-f $PORTAGE_BUILDDIR/.ebuild_changed ]] || \
@@ -2044,7 +534,7 @@ if ! has "$EBUILD_PHASE" clean cleanrm ; then
 		# In order to ensure correct interaction between ebuilds and
 		# eclasses, they need to be unset before this process of
 		# interaction begins.
-		unset DEPEND RDEPEND PDEPEND INHERITED IUSE REQUIRED_USE \
+		unset EAPI DEPEND RDEPEND PDEPEND INHERITED IUSE REQUIRED_USE \
 			ECLASS E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND
 
 		if [[ $PORTAGE_DEBUG != 1 || ${-/x/} != $- ]] ; then
@@ -2061,7 +551,7 @@ if ! has "$EBUILD_PHASE" clean cleanrm ; then
 			rm "$PORTAGE_BUILDDIR/.ebuild_changed"
 		fi
 
-		[[ -n $EAPI ]] || EAPI=0
+		[ "${EAPI+set}" = set ] || EAPI=0
 
 		if has "$EAPI" 0 1 2 3 3_pre2 ; then
 			export RDEPEND=${RDEPEND-${DEPEND}}
@@ -2069,11 +559,11 @@ if ! has "$EBUILD_PHASE" clean cleanrm ; then
 		fi
 
 		# add in dependency info from eclasses
-		IUSE="${IUSE} ${E_IUSE}"
-		DEPEND="${DEPEND} ${E_DEPEND}"
-		RDEPEND="${RDEPEND} ${E_RDEPEND}"
-		PDEPEND="${PDEPEND} ${E_PDEPEND}"
-		REQUIRED_USE="${REQUIRED_USE} ${E_REQUIRED_USE}"
+		IUSE+="${IUSE:+ }${E_IUSE}"
+		DEPEND+="${DEPEND:+ }${E_DEPEND}"
+		RDEPEND+="${RDEPEND:+ }${E_RDEPEND}"
+		PDEPEND+="${PDEPEND:+ }${E_PDEPEND}"
+		REQUIRED_USE+="${REQUIRED_USE:+ }${E_REQUIRED_USE}"
 		
 		unset ECLASS E_IUSE E_REQUIRED_USE E_DEPEND E_RDEPEND E_PDEPEND \
 			__INHERITED_QA_CACHE
@@ -2110,29 +600,11 @@ if ! has "$EBUILD_PHASE" clean cleanrm ; then
 
 		if [[ $EBUILD_PHASE != depend ]] ; then
 
-			case "$EAPI" in
-				0|1|2|3)
-					_ebuild_helpers_path="$PORTAGE_BIN_PATH/ebuild-helpers"
-					;;
-				*)
-					_ebuild_helpers_path="$PORTAGE_BIN_PATH/ebuild-helpers/4:$PORTAGE_BIN_PATH/ebuild-helpers"
-					;;
-			esac
-
-			PATH=$_ebuild_helpers_path:$PREROOTPATH${PREROOTPATH:+:}/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin${ROOTPATH:+:}$ROOTPATH
-			unset _ebuild_helpers_path
-
-			# Use default ABI libdir in accordance with bug #355283.
-			x=LIBDIR_${DEFAULT_ABI}
-			[[ -n $DEFAULT_ABI && -n ${!x} ]] && x=${!x} || x=lib
-
 			if has distcc $FEATURES ; then
-				PATH="/usr/$x/distcc/bin:$PATH"
 				[[ -n $DISTCC_LOG ]] && addwrite "${DISTCC_LOG%/*}"
 			fi
 
 			if has ccache $FEATURES ; then
-				PATH="/usr/$x/ccache/bin:$PATH"
 
 				if [[ -n $CCACHE_DIR ]] ; then
 					addread "$CCACHE_DIR"
@@ -2142,8 +614,6 @@ if ! has "$EBUILD_PHASE" clean cleanrm ; then
 				[[ -n $CCACHE_SIZE ]] && ccache -M $CCACHE_SIZE &> /dev/null
 			fi
 
-			unset x
-
 			if [[ -n $QA_PREBUILT ]] ; then
 
 				# these ones support fnmatch patterns
@@ -2153,7 +623,7 @@ if ! has "$EBUILD_PHASE" clean cleanrm ; then
 
 				# these ones support regular expressions, so translate
 				# fnmatch patterns to regular expressions
-				for x in QA_DT_HASH QA_DT_NEEDED QA_PRESTRIPPED QA_SONAME ; do
+				for x in QA_DT_NEEDED QA_FLAGS_IGNORED QA_PRESTRIPPED QA_SONAME ; do
 					if [[ $(declare -p $x 2>/dev/null) = declare\ -a* ]] ; then
 						eval "$x=(\"\${$x[@]}\" ${QA_PREBUILT//\*/.*})"
 					else
@@ -2183,241 +653,60 @@ then
 	export DEBUGBUILD=1
 fi
 
-#a reasonable default for $S
-[[ -z ${S} ]] && export S=${WORKDIR}/${P}
+if [[ $EBUILD_PHASE = depend ]] ; then
+	export SANDBOX_ON="0"
+	set -f
+
+	if [ -n "${dbkey}" ] ; then
+		if [ ! -d "${dbkey%/*}" ]; then
+			install -d -g ${PORTAGE_GID} -m2775 "${dbkey%/*}"
+		fi
+		# Make it group writable. 666&~002==664
+		umask 002
+	fi
+
+	auxdbkeys="DEPEND RDEPEND SLOT SRC_URI RESTRICT HOMEPAGE LICENSE
+		DESCRIPTION KEYWORDS INHERITED IUSE REQUIRED_USE PDEPEND PROVIDE EAPI
+		PROPERTIES DEFINED_PHASES UNUSED_05 UNUSED_04
+		UNUSED_03 UNUSED_02 UNUSED_01"
 
-# Note: readonly variables interfere with preprocess_ebuild_env(), so
-# declare them only after it has already run.
-if [ "${EBUILD_PHASE}" != "depend" ] ; then
+	# The extra $(echo) commands remove newlines.
+	if [ -n "${dbkey}" ] ; then
+		> "${dbkey}"
+		for f in ${auxdbkeys} ; do
+			echo $(echo ${!f}) >> "${dbkey}" || exit $?
+		done
+	else
+		for f in ${auxdbkeys} ; do
+			echo $(echo ${!f}) 1>&9 || exit $?
+		done
+		exec 9>&-
+	fi
+	set +f
+else
+	# Note: readonly variables interfere with preprocess_ebuild_env(), so
+	# declare them only after it has already run.
 	declare -r $PORTAGE_READONLY_METADATA $PORTAGE_READONLY_VARS
 	case "$EAPI" in
 		0|1|2)
+			[[ " ${FEATURES} " == *" force-prefix "* ]] && \
+				declare -r ED EPREFIX EROOT
 			;;
 		*)
 			declare -r ED EPREFIX EROOT
 			;;
 	esac
-fi
-
-ebuild_main() {
-
-	# Subshell/helper die support (must export for the die helper).
-	# Since this function is typically executed in a subshell,
-	# setup EBUILD_MASTER_PID to refer to the current $BASHPID,
-	# which seems to give the best results when further
-	# nested subshells call die.
-	export EBUILD_MASTER_PID=$BASHPID
-	trap 'exit 1' SIGTERM
-
-	if [[ $EBUILD_PHASE != depend ]] ; then
-		# Force configure scripts that automatically detect ccache to
-		# respect FEATURES="-ccache".
-		has ccache $FEATURES || export CCACHE_DISABLE=1
-
-		local phase_func=$(_ebuild_arg_to_phase "$EAPI" "$EBUILD_PHASE")
-		[[ -n $phase_func ]] && _ebuild_phase_funcs "$EAPI" "$phase_func"
-		unset phase_func
-	fi
-
-	source_all_bashrcs
-
-	case ${EBUILD_SH_ARGS} in
-	nofetch)
-		ebuild_phase_with_hooks pkg_nofetch
-		;;
-	prerm|postrm|postinst|config|info)
-		if has "$EBUILD_SH_ARGS" config info && \
-			! declare -F "pkg_$EBUILD_SH_ARGS" >/dev/null ; then
-			ewarn  "pkg_${EBUILD_SH_ARGS}() is not defined: '${EBUILD##*/}'"
-		fi
-		export SANDBOX_ON="0"
-		if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
-			ebuild_phase_with_hooks pkg_${EBUILD_SH_ARGS}
-		else
-			set -x
-			ebuild_phase_with_hooks pkg_${EBUILD_SH_ARGS}
-			set +x
-		fi
-		if [[ $EBUILD_PHASE == postinst ]] && [[ -n $PORTAGE_UPDATE_ENV ]]; then
-			# Update environment.bz2 in case installation phases
-			# need to pass some variables to uninstallation phases.
-			save_ebuild_env --exclude-init-phases | \
-				filter_readonly_variables --filter-path \
-				--filter-sandbox --allow-extra-vars \
-				| ${PORTAGE_BZIP2_COMMAND} -c -f9 > "$PORTAGE_UPDATE_ENV"
-			assert "save_ebuild_env failed"
-		fi
-		;;
-	unpack|prepare|configure|compile|test|clean|install)
-		if [[ ${SANDBOX_DISABLED:-0} = 0 ]] ; then
-			export SANDBOX_ON="1"
-		else
-			export SANDBOX_ON="0"
-		fi
-
-		case "$EBUILD_SH_ARGS" in
-		configure|compile)
-
-			local x
-			for x in ASFLAGS CCACHE_DIR CCACHE_SIZE \
-				CFLAGS CXXFLAGS LDFLAGS LIBCFLAGS LIBCXXFLAGS ; do
-				[[ ${!x+set} = set ]] && export $x
-			done
-			unset x
-
-			has distcc $FEATURES && [[ -n $DISTCC_DIR ]] && \
-				[[ ${SANDBOX_WRITE/$DISTCC_DIR} = $SANDBOX_WRITE ]] && \
-				addwrite "$DISTCC_DIR"
-
-			x=LIBDIR_$ABI
-			[ -z "$PKG_CONFIG_PATH" -a -n "$ABI" -a -n "${!x}" ] && \
-				export PKG_CONFIG_PATH=/usr/${!x}/pkgconfig
-
-			if has noauto $FEATURES && \
-				[[ ! -f $PORTAGE_BUILDDIR/.unpacked ]] ; then
-				echo
-				echo "!!! We apparently haven't unpacked..." \
-					"This is probably not what you"
-				echo "!!! want to be doing... You are using" \
-					"FEATURES=noauto so I'll assume"
-				echo "!!! that you know what you are doing..." \
-					"You have 5 seconds to abort..."
-				echo
-
-				local x
-				for x in 1 2 3 4 5 6 7 8; do
-					LC_ALL=C sleep 0.25
-				done
-
-				sleep 3
-			fi
-
-			cd "$PORTAGE_BUILDDIR"
-			if [ ! -d build-info ] ; then
-				mkdir build-info
-				cp "$EBUILD" "build-info/$PF.ebuild"
-			fi
-
-			#our custom version of libtool uses $S and $D to fix
-			#invalid paths in .la files
-			export S D
-
-			;;
-		esac
-
-		if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
-			dyn_${EBUILD_SH_ARGS}
-		else
-			set -x
-			dyn_${EBUILD_SH_ARGS}
-			set +x
-		fi
-		export SANDBOX_ON="0"
-		;;
-	help|pretend|setup|preinst)
-		#pkg_setup needs to be out of the sandbox for tmp file creation;
-		#for example, awking and piping a file in /tmp requires a temp file to be created
-		#in /etc.  If pkg_setup is in the sandbox, both our lilo and apache ebuilds break.
-		export SANDBOX_ON="0"
-		if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
-			dyn_${EBUILD_SH_ARGS}
-		else
-			set -x
-			dyn_${EBUILD_SH_ARGS}
-			set +x
-		fi
-		;;
-	depend)
-		export SANDBOX_ON="0"
-		set -f
-
-		if [ -n "${dbkey}" ] ; then
-			if [ ! -d "${dbkey%/*}" ]; then
-				install -d -g ${PORTAGE_GID} -m2775 "${dbkey%/*}"
-			fi
-			# Make it group writable. 666&~002==664
-			umask 002
-		fi
-
-		auxdbkeys="DEPEND RDEPEND SLOT SRC_URI RESTRICT HOMEPAGE LICENSE
-			DESCRIPTION KEYWORDS INHERITED IUSE REQUIRED_USE PDEPEND PROVIDE EAPI
-			PROPERTIES DEFINED_PHASES UNUSED_05 UNUSED_04
-			UNUSED_03 UNUSED_02 UNUSED_01"
-
-		#the extra $(echo) commands remove newlines
-		[ -n "${EAPI}" ] || EAPI=0
 
-		if [ -n "${dbkey}" ] ; then
-			> "${dbkey}"
-			for f in ${auxdbkeys} ; do
-				echo $(echo ${!f}) >> "${dbkey}" || exit $?
-			done
-		else
-			for f in ${auxdbkeys} ; do
-				echo $(echo ${!f}) 1>&9 || exit $?
-			done
+	if [[ -n $EBUILD_SH_ARGS ]] ; then
+		(
+			# Don't allow subprocesses to inherit the pipe which
+			# emerge uses to monitor ebuild.sh.
 			exec 9>&-
-		fi
-		set +f
-		;;
-	_internal_test)
-		;;
-	*)
-		export SANDBOX_ON="1"
-		echo "Unrecognized EBUILD_SH_ARGS: '${EBUILD_SH_ARGS}'"
-		echo
-		dyn_help
-		exit 1
-		;;
-	esac
-}
-
-if [[ -s $SANDBOX_LOG ]] ; then
-	# We use SANDBOX_LOG to check for sandbox violations,
-	# so we ensure that there can't be a stale log to
-	# interfere with our logic.
-	x=
-	if [[ -n SANDBOX_ON ]] ; then
-		x=$SANDBOX_ON
-		export SANDBOX_ON=0
+			ebuild_main ${EBUILD_SH_ARGS}
+			exit 0
+		)
+		exit $?
 	fi
-
-	rm -f "$SANDBOX_LOG" || \
-		die "failed to remove stale sandbox log: '$SANDBOX_LOG'"
-
-	if [[ -n $x ]] ; then
-		export SANDBOX_ON=$x
-	fi
-	unset x
-fi
-
-if [[ $EBUILD_PHASE = depend ]] ; then
-	ebuild_main
-elif [[ -n $EBUILD_SH_ARGS ]] ; then
-	(
-		# Don't allow subprocesses to inherit the pipe which
-		# emerge uses to monitor ebuild.sh.
-		exec 9>&-
-
-		ebuild_main
-
-		# Save the env only for relevant phases.
-		if ! has "$EBUILD_SH_ARGS" clean help info nofetch ; then
-			umask 002
-			save_ebuild_env | filter_readonly_variables \
-				--filter-features > "$T/environment"
-			assert "save_ebuild_env failed"
-			chown portage:portage "$T/environment" &>/dev/null
-			chmod g+w "$T/environment" &>/dev/null
-		fi
-		[[ -n $PORTAGE_EBUILD_EXIT_FILE ]] && > "$PORTAGE_EBUILD_EXIT_FILE"
-		if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
-			[[ ! -s $SANDBOX_LOG ]]
-			"$PORTAGE_BIN_PATH"/ebuild-ipc exit $?
-		fi
-		exit 0
-	)
-	exit $?
 fi
 
 # Do not exit when ebuild.sh is sourced by other scripts.

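The depend phase above relies on shell word splitting to flatten metadata: in
the echo $(echo ${!f}) idiom, the unquoted inner substitution is split on
whitespace, so embedded newlines collapse to single spaces before the value is
written to ${dbkey} or fd 9. A minimal sketch of the idiom (the DEPEND value
is illustrative):

	DEPEND='dev-libs/foo
	>=dev-libs/bar-1.0'
	echo $(echo ${DEPEND})
	# prints: dev-libs/foo >=dev-libs/bar-1.0
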
diff --git a/portage_with_autodep/bin/egencache b/portage_with_autodep/bin/egencache
index 1b4265d..2f53b40 100755
--- a/portage_with_autodep/bin/egencache
+++ b/portage_with_autodep/bin/egencache
@@ -1,5 +1,5 @@
 #!/usr/bin/python
-# Copyright 2009-2011 Gentoo Foundation
+# Copyright 2009-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
@@ -42,7 +42,7 @@ from portage.manifest import guessManifestFileType
 from portage.util import cmp_sort_key, writemsg_level
 from portage import cpv_getkey
 from portage.dep import Atom, isjustname
-from portage.versions import pkgcmp, pkgsplit, vercmp
+from portage.versions import pkgsplit, vercmp
 
 try:
 	from xml.etree import ElementTree
@@ -87,6 +87,9 @@ def parse_args(args):
 	common.add_option("--portdir",
 		help="override the portage tree location",
 		dest="portdir")
+	common.add_option("--portdir-overlay",
+		help="override the PORTDIR_OVERLAY variable (requires that --repo is also specified)",
+		dest="portdir_overlay")
 	common.add_option("--tolerant",
 		action="store_true",
 		help="exit successfully if only minor errors occurred")
@@ -160,9 +163,17 @@ def parse_args(args):
 		parser.error("Not a directory: --config-root='%s'" % \
 			(options.config_root,))
 
-	if options.cache_dir is not None and not os.path.isdir(options.cache_dir):
-		parser.error("Not a directory: --cache-dir='%s'" % \
-			(options.cache_dir,))
+	if options.cache_dir is not None:
+		if not os.path.isdir(options.cache_dir):
+			parser.error("Not a directory: --cache-dir='%s'" % \
+				(options.cache_dir,))
+		if not os.access(options.cache_dir, os.W_OK):
+			parser.error("Write access denied: --cache-dir='%s'" % \
+				(options.cache_dir,))
+
+	if options.portdir_overlay is not None and \
+		options.repo is None:
+		parser.error("--portdir-overlay option requires --repo option")
 
 	for atom in args:
 		try:
@@ -188,7 +199,12 @@ def parse_args(args):
 class GenCache(object):
 	def __init__(self, portdb, cp_iter=None, max_jobs=None, max_load=None,
 		rsync=False):
+		# The caller must set portdb.porttrees in order to constrain
+		# findname, cp_list, and cpv_list to the desired tree.
+		tree = portdb.porttrees[0]
 		self._portdb = portdb
+		self._eclass_db = portdb.repositories.get_repo_for_location(tree).eclass_db
+		self._auxdbkeys = portdb._known_keys
 		# We can globally cleanse stale cache only if we
 		# iterate over every single cp.
 		self._global_cleanse = cp_iter is None
@@ -203,27 +219,67 @@ class GenCache(object):
 			consumer=self._metadata_callback,
 			max_jobs=max_jobs, max_load=max_load)
 		self.returncode = os.EX_OK
-		metadbmodule = portdb.settings.load_best_module("portdbapi.metadbmodule")
-		self._trg_cache = metadbmodule(portdb.porttrees[0],
-			"metadata/cache", portage.auxdbkeys[:])
+		conf = portdb.repositories.get_repo_for_location(tree)
+		self._trg_caches = tuple(conf.iter_pregenerated_caches(
+			self._auxdbkeys, force=True, readonly=False))
+		if not self._trg_caches:
+			raise Exception("cache formats '%s' aren't supported" %
+				(" ".join(conf.cache_formats),))
+
 		if rsync:
-			self._trg_cache.raise_stat_collision = True
-		try:
-			self._trg_cache.ec = \
-				portdb._repo_info[portdb.porttrees[0]].eclass_db
-		except AttributeError:
-			pass
+			for trg_cache in self._trg_caches:
+				if hasattr(trg_cache, 'raise_stat_collision'):
+					trg_cache.raise_stat_collision = True
+					# Make _metadata_callback write this cache first, in case
+					# it raises a StatCollision and triggers mtime
+					# modification.
+					self._trg_caches = tuple([trg_cache] +
+						[x for x in self._trg_caches if x is not trg_cache])
+
 		self._existing_nodes = set()
 
-	def _metadata_callback(self, cpv, ebuild_path, repo_path, metadata):
+	def _metadata_callback(self, cpv, repo_path, metadata,
+		ebuild_hash, eapi_supported):
 		self._existing_nodes.add(cpv)
 		self._cp_missing.discard(cpv_getkey(cpv))
-		if metadata is not None:
+
+		# Since we're supposed to be able to efficiently obtain the
+		# EAPI from _parse_eapi_ebuild_head, we don't write cache
+		# entries for unsupported EAPIs.
+		if metadata is not None and eapi_supported:
 			if metadata.get('EAPI') == '0':
 				del metadata['EAPI']
+			for trg_cache in self._trg_caches:
+				self._write_cache(trg_cache,
+					cpv, repo_path, metadata, ebuild_hash)
+
+	def _write_cache(self, trg_cache, cpv, repo_path, metadata, ebuild_hash):
+
+			if not hasattr(trg_cache, 'raise_stat_collision'):
+				# This cache does not avoid redundant writes automatically,
+				# so check for an identical existing entry before writing.
+				# This prevents unnecessary disk writes and can also prevent
+				# unnecessary rsync transfers.
+				try:
+					dest = trg_cache[cpv]
+				except (KeyError, CacheError):
+					pass
+				else:
+					if trg_cache.validate_entry(dest,
+						ebuild_hash, self._eclass_db):
+						identical = True
+						for k in self._auxdbkeys:
+							if dest.get(k, '') != metadata.get(k, ''):
+								identical = False
+								break
+						if identical:
+							return
+
 			try:
+				chf = trg_cache.validation_chf
+				metadata['_%s_' % chf] = getattr(ebuild_hash, chf)
 				try:
-					self._trg_cache[cpv] = metadata
+					trg_cache[cpv] = metadata
 				except StatCollision as sc:
 					# If the content of a cache entry changes and neither the
 					# file mtime nor size changes, it will prevent rsync from
@@ -231,24 +287,30 @@ class GenCache(object):
 					# exception from _setitem() if they detect this type of stat
 					# collision. These exceptions are handled by bumping the
 					# mtime on the ebuild (and the corresponding cache entry).
-					# See bug #139134.
+					# See bug #139134. It is convenient to include checks for
+					# redundant writes along with the internal StatCollision
+					# detection code, so for caches with the
+					# raise_stat_collision attribute, we do not need to
+					# explicitly check for redundant writes like we do for the
+					# other cache types above.
 					max_mtime = sc.mtime
-					for ec, (loc, ec_mtime) in metadata['_eclasses_'].items():
-						if max_mtime < ec_mtime:
-							max_mtime = ec_mtime
+					for ec, ec_hash in metadata['_eclasses_'].items():
+						if max_mtime < ec_hash.mtime:
+							max_mtime = ec_hash.mtime
 					if max_mtime == sc.mtime:
 						max_mtime += 1
 					max_mtime = long(max_mtime)
 					try:
-						os.utime(ebuild_path, (max_mtime, max_mtime))
+						os.utime(ebuild_hash.location, (max_mtime, max_mtime))
 					except OSError as e:
 						self.returncode |= 1
 						writemsg_level(
 							"%s writing target: %s\n" % (cpv, e),
 							level=logging.ERROR, noiselevel=-1)
 					else:
+						ebuild_hash.mtime = max_mtime
 						metadata['_mtime_'] = max_mtime
-						self._trg_cache[cpv] = metadata
+						trg_cache[cpv] = metadata
 						self._portdb.auxdb[repo_path][cpv] = metadata
 
 			except CacheError as ce:
@@ -287,9 +349,12 @@ class GenCache(object):
 			sys.exit(received_signal[0])
 
 		self.returncode |= self._regen.returncode
-		cp_missing = self._cp_missing
 
-		trg_cache = self._trg_cache
+		for trg_cache in self._trg_caches:
+			self._cleanse_cache(trg_cache)
+
+	def _cleanse_cache(self, trg_cache):
+		cp_missing = self._cp_missing
 		dead_nodes = set()
 		if self._global_cleanse:
 			try:
@@ -443,12 +508,12 @@ class GenUseLocalDesc(object):
 				errors='backslashreplace')
 			output.write(_unicode_decode('\n'))
 		else:
-			output.write(_unicode_decode('''
-# This file is deprecated as per GLEP 56 in favor of metadata.xml. Please add
-# your descriptions to your package's metadata.xml ONLY.
-# * generated automatically using egencache *
+			output.write(textwrap.dedent(_unicode_decode('''\
+				# This file is deprecated as per GLEP 56 in favor of metadata.xml. Please add
+				# your descriptions to your package's metadata.xml ONLY.
+				# * generated automatically using egencache *
 
-'''.lstrip()))
+				''')))
 
 		# The cmp function no longer exists in python3, so we'll
 		# implement our own here under a slightly different name
@@ -465,10 +530,20 @@ class GenUseLocalDesc(object):
 					return 1
 			return (a > b) - (a < b)
 
+		class _MetadataTreeBuilder(ElementTree.TreeBuilder):
+			"""
+			Implements doctype() as required in Python >=2.7 to avoid
+			deprecation warnings
+			"""
+			def doctype(self, name, pubid, system):
+				pass
+
 		for cp in self._portdb.cp_all():
 			metadata_path = os.path.join(repo_path, cp, 'metadata.xml')
 			try:
-				metadata = ElementTree.parse(metadata_path)
+				metadata = ElementTree.parse(metadata_path,
+					parser=ElementTree.XMLParser(
+					target=_MetadataTreeBuilder()))
 			except IOError:
 				pass
 			except (ExpatError, EnvironmentError) as e:
@@ -495,7 +570,7 @@ class GenUseLocalDesc(object):
 								return cmp_func(atomb.operator, atoma.operator)
 							# Version matching
 							elif atoma.cpv != atomb.cpv:
-								return pkgcmp(pkgsplit(atoma.cpv), pkgsplit(atomb.cpv))
+								return vercmp(atoma.version, atomb.version)
 							# Versions match, let's fallback to operator matching
 							else:
 								return cmp_func(ops.get(atoma.operator, -1),
@@ -620,12 +695,12 @@ class GenChangeLogs(object):
 			self.returncode |= 2
 			return
 
-		output.write(_unicode_decode('''
-# ChangeLog for %s
-# Copyright 1999-%s Gentoo Foundation; Distributed under the GPL v2
-# $Header: $
+		output.write(textwrap.dedent(_unicode_decode('''\
+			# ChangeLog for %s
+			# Copyright 1999-%s Gentoo Foundation; Distributed under the GPL v2
+			# $Header: $
 
-''' % (cp, time.strftime('%Y'))).lstrip())
+			''' % (cp, time.strftime('%Y')))))
 
 		# now grab all the commits
 		commits = self.grab(['git', 'rev-list', 'HEAD', '--', '.']).split()
@@ -755,8 +830,6 @@ def egencache_main(args):
 	parser, options, atoms = parse_args(args)
 
 	config_root = options.config_root
-	if config_root is None:
-		config_root = '/'
 
 	# The calling environment is ignored, so the program is
 	# completely controlled by commandline arguments.
@@ -764,6 +837,8 @@ def egencache_main(args):
 
 	if options.repo is None:
 		env['PORTDIR_OVERLAY'] = ''
+	elif options.portdir_overlay:
+		env['PORTDIR_OVERLAY'] = options.portdir_overlay
 
 	if options.cache_dir is not None:
 		env['PORTAGE_DEPCACHEDIR'] = options.cache_dir
@@ -772,7 +847,7 @@ def egencache_main(args):
 		env['PORTDIR'] = options.portdir
 
 	settings = portage.config(config_root=config_root,
-		target_root='/', local_config=False, env=env)
+		local_config=False, env=env)
 
 	default_opts = None
 	if not options.ignore_default_opts:
@@ -781,14 +856,11 @@ def egencache_main(args):
 	if default_opts:
 		parser, options, args = parse_args(default_opts + args)
 
-		if options.config_root is not None:
-			config_root = options.config_root
-
 		if options.cache_dir is not None:
 			env['PORTAGE_DEPCACHEDIR'] = options.cache_dir
 
 		settings = portage.config(config_root=config_root,
-			target_root='/', local_config=False, env=env)
+			local_config=False, env=env)
 
 	if not options.update and not options.update_use_local_desc \
 			and not options.update_changelogs:
@@ -796,14 +868,27 @@ def egencache_main(args):
 		return 1
 
 	if options.update and 'metadata-transfer' not in settings.features:
-		writemsg_level("ecachegen: warning: " + \
-			"automatically enabling FEATURES=metadata-transfer\n",
-			level=logging.WARNING, noiselevel=-1)
 		settings.features.add('metadata-transfer')
 
 	settings.lock()
 
 	portdb = portage.portdbapi(mysettings=settings)
+
+	if options.update:
+		if options.cache_dir is not None:
+			# already validated earlier
+			pass
+		else:
+			# We check write access after the portdbapi constructor
+			# has had an opportunity to create it. This ensures that
+			# we don't use the cache in the "volatile" mode which is
+			# undesirable for egencache.
+			if not os.access(settings["PORTAGE_DEPCACHEDIR"], os.W_OK):
+				writemsg_level("ecachegen: error: " + \
+					"write access denied: %s\n" % (settings["PORTAGE_DEPCACHEDIR"],),
+					level=logging.ERROR, noiselevel=-1)
+				return 1
+
 	if options.repo is not None:
 		repo_path = portdb.getRepositoryPath(options.repo)
 		if repo_path is None:
@@ -813,6 +898,8 @@ def egencache_main(args):
 
 		# Limit ebuilds to the specified repo.
 		portdb.porttrees = [repo_path]
+	else:
+		portdb.porttrees = [portdb.porttree_root]
 
 	ret = [os.EX_OK]
 

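The StatCollision handling above compensates for rsync's default quick check,
which compares only file size and mtime: a cache entry rewritten with
identical length and timestamp would never be transferred, so the ebuild and
cache-entry mtimes are bumped instead. A shell illustration of the two rsync
comparison modes (the paths are hypothetical):

	# default quick check: same size and mtime means "unchanged"
	rsync -a metadata/cache/ mirror::gentoo/metadata/cache/
	# a full content comparison would catch such an entry, at much
	# higher cost
	rsync -a --checksum metadata/cache/ mirror::gentoo/metadata/cache/
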
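The new --portdir-overlay option pairs with --repo so that metadata for a
single overlay can be regenerated without editing the host's make.conf;
parse_args() above rejects --portdir-overlay on its own. A hedged invocation
sketch (the repository name and path are illustrative):

	egencache --update --repo=myoverlay \
		--portdir-overlay=/var/lib/overlays/myoverlay --jobs=4
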
diff --git a/portage_with_autodep/bin/emaint b/portage_with_autodep/bin/emaint
index fdd01ed..1bee0fe 100755
--- a/portage_with_autodep/bin/emaint
+++ b/portage_with_autodep/bin/emaint
@@ -40,15 +40,15 @@ class WorldHandler(object):
 		self.okay = []
 		from portage._sets import load_default_config
 		setconfig = load_default_config(portage.settings,
-			portage.db[portage.settings["ROOT"]])
+			portage.db[portage.settings['EROOT']])
 		self._sets = setconfig.getSets()
 
 	def _check_world(self, onProgress):
 		categories = set(portage.settings.categories)
-		myroot = portage.settings["ROOT"]
-		self.world_file = os.path.join(portage.settings["EROOT"], portage.const.WORLD_FILE)
+		eroot = portage.settings['EROOT']
+		self.world_file = os.path.join(eroot, portage.const.WORLD_FILE)
 		self.found = os.access(self.world_file, os.R_OK)
-		vardb = portage.db[myroot]["vartree"].dbapi
+		vardb = portage.db[eroot]["vartree"].dbapi
 
 		from portage._sets import SETPREFIX
 		sets = self._sets
@@ -120,8 +120,8 @@ class BinhostHandler(object):
 	name = staticmethod(name)
 
 	def __init__(self):
-		myroot = portage.settings["ROOT"]
-		self._bintree = portage.db[myroot]["bintree"]
+		eroot = portage.settings['EROOT']
+		self._bintree = portage.db[eroot]["bintree"]
 		self._bintree.populate()
 		self._pkgindex_file = self._bintree._pkgindex_file
 		self._pkgindex = self._bintree._load_pkgindex()
@@ -403,8 +403,8 @@ class MoveInstalled(MoveHandler):
 		return "moveinst"
 	name = staticmethod(name)
 	def __init__(self):
-		myroot = portage.settings["ROOT"]
-		MoveHandler.__init__(self, portage.db[myroot]["vartree"], portage.db[myroot]["porttree"])
+		eroot = portage.settings['EROOT']
+		MoveHandler.__init__(self, portage.db[eroot]["vartree"], portage.db[eroot]["porttree"])
 
 class MoveBinary(MoveHandler):
 
@@ -414,8 +414,8 @@ class MoveBinary(MoveHandler):
 		return "movebin"
 	name = staticmethod(name)
 	def __init__(self):
-		myroot = portage.settings["ROOT"]
-		MoveHandler.__init__(self, portage.db[myroot]["bintree"], portage.db[myroot]["porttree"])
+		eroot = portage.settings['EROOT']
+		MoveHandler.__init__(self, portage.db[eroot]["bintree"], portage.db[eroot]['porttree'])
 
 class VdbKeyHandler(object):
 	def name():
@@ -423,7 +423,7 @@ class VdbKeyHandler(object):
 	name = staticmethod(name)
 
 	def __init__(self):
-		self.list = portage.db["/"]["vartree"].dbapi.cpv_all()
+		self.list = portage.db[portage.settings["EROOT"]]["vartree"].dbapi.cpv_all()
 		self.missing = []
 		self.keys = ["HOMEPAGE", "SRC_URI", "KEYWORDS", "DESCRIPTION"]
 		

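The emaint handlers above now key portage.db by EROOT rather than ROOT;
broadly, EROOT is ROOT with EPREFIX appended, so the two differ on prefix
installs. A quick way to compare the values (portageq envvar accepts several
variable names at once):

	portageq envvar ROOT EPREFIX EROOT
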
diff --git a/portage_with_autodep/bin/emerge-webrsync b/portage_with_autodep/bin/emerge-webrsync
index d933871..bfd9aa2 100755
--- a/portage_with_autodep/bin/emerge-webrsync
+++ b/portage_with_autodep/bin/emerge-webrsync
@@ -12,7 +12,7 @@
 
 #
 # gpg key import
-# KEY_ID=0x239C75C4
+# KEY_ID=0x96D8BF6D
 # gpg --homedir /etc/portage/gnupg --keyserver subkeys.pgp.net --recv-keys $KEY_ID
 # gpg --homedir /etc/portage/gnupg --edit-key $KEY_ID trust
 #
@@ -27,11 +27,19 @@ wecho() { echo "${argv0}: warning: $*" 1>&2 ; }
 eecho() { echo "${argv0}: error: $*" 1>&2 ; }
 
 argv0=$0
-if ! type -P portageq > /dev/null ; then
+
+# Use portageq from the same directory/prefix as the current script, so
+# that we don't have to rely on PATH including the current EPREFIX.
+scriptpath=${BASH_SOURCE[0]}
+if [ -x "${scriptpath%/*}/portageq" ]; then
+	portageq=${scriptpath%/*}/portageq
+elif type -P portageq > /dev/null ; then
+	portageq=portageq
+else
 	eecho "could not find 'portageq'; aborting"
 	exit 1
 fi
-eval $(portageq envvar -v FEATURES FETCHCOMMAND GENTOO_MIRRORS \
+eval $("${portageq}" envvar -v FEATURES FETCHCOMMAND GENTOO_MIRRORS \
 	PORTAGE_BIN_PATH PORTAGE_GPG_DIR \
 	PORTAGE_NICENESS PORTAGE_RSYNC_EXTRA_OPTS PORTAGE_TMPDIR PORTDIR \
 	SYNC http_proxy ftp_proxy)
@@ -157,6 +165,7 @@ check_file_signature() {
 			gpg --homedir "${PORTAGE_GPG_DIR}" --verify "$signature" "$file" && r=0
 		else
 			eecho "cannot check signature: gpg binary not found"
+			exit 1
 		fi
 	else
 		r=0
@@ -211,6 +220,8 @@ sync_local() {
 		emerge --metadata
 	fi
 	[ -x /etc/portage/bin/post_sync ] && /etc/portage/bin/post_sync
+	# --quiet suppresses output if there are no relevant news items
+	has news ${FEATURES} && emerge --check-news --quiet
 	return 0
 }
 

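The lookup added above prefers a portageq that sits next to the running
script before falling back to PATH, and ${scriptpath%/*} strips the final
path component to get the script's directory. A minimal sketch (the path is
illustrative):

	scriptpath=/usr/lib/portage/bin/emerge-webrsync
	echo "${scriptpath%/*}"             # /usr/lib/portage/bin
	[ -x "${scriptpath%/*}/portageq" ] && echo "sibling portageq found"
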
diff --git a/portage_with_autodep/bin/emirrordist b/portage_with_autodep/bin/emirrordist
new file mode 100755
index 0000000..8d93de9
--- /dev/null
+++ b/portage_with_autodep/bin/emirrordist
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+import portage
+portage._internal_caller = True
+portage._disable_legacy_globals()
+from portage._emirrordist.main import emirrordist_main
+
+if __name__ == "__main__":
+	sys.exit(emirrordist_main(sys.argv[1:]))

diff --git a/portage_with_autodep/bin/etc-update b/portage_with_autodep/bin/etc-update
index 42518ad..1edc91f 100755
--- a/portage_with_autodep/bin/etc-update
+++ b/portage_with_autodep/bin/etc-update
@@ -1,8 +1,9 @@
 #!/bin/bash
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 # Author Brandon Low <lostlogic@gentoo.org>
+# Mike Frysinger <vapier@gentoo.org>
 #
 # Previous version (from which I've borrowed a few bits) by:
 # Jochem Kossen <j.kossen@home.nl>
@@ -11,9 +12,7 @@
 
 cd /
 
-if type -P gsed >/dev/null ; then
-	sed() { gsed "$@"; }
-fi
+type -P gsed >/dev/null && sed() { gsed "$@"; }
 
 get_config() {
 	# the sed here does:
@@ -25,54 +24,92 @@ get_config() {
 	# If there's more than one of the same configuration item, then
 	# the store to the hold space clobbers previous value so the last
 	# setting takes precedence.
-	local item=$1
-	eval echo $(sed -n \
+	local match=$1
+	eval $(sed -n -r \
 		-e 's:[[:space:]]*#.*$::' \
-		-e "/^[[:space:]]*$item[[:space:]]*=/{s:[^=]*=[[:space:]]*\([\"']\{0,1\}\)\(.*\)\1:\2:;h}" \
+		-e "/^[[:space:]]*${match}[[:space:]]*=/{s:^([^=]*)=[[:space:]]*([\"']{0,1})(.*)\2:\1=\2\3\2:;H}" \
 		-e '${g;p}' \
 		"${PORTAGE_CONFIGROOT}"etc/etc-update.conf)
 }
 
+cmd_var_is_valid() {
+	# return true if the first whitespace-separated token contained
+	# in "${1}" is an executable file, false otherwise
+	[[ -x $(type -P ${1%%[[:space:]]*}) ]]
+}
+
 diff_command() {
 	local cmd=${diff_command//%file1/$1}
 	${cmd//%file2/$2}
 }
 
+# Usage: do_mv_ln [options] <src> <dst>
+# Files have to be the last two args, and have to be
+# files so we can handle symlinked targets sanely.
+do_mv_ln() {
+	local opts=( ${@:1:$(( $# - 2 ))} )
+	local src=${@:$(( $# - 1 )):1}
+	local dst=${@:$(( $# - 0 )):1}
+
+	if [[ -L ${dst} ]] ; then #330221
+		local lfile=$(readlink "${dst}")
+		[[ ${lfile} == /* ]] || lfile="${dst%/*}/${lfile}"
+		echo " Target is a symlink; replacing ${lfile}"
+		dst=${lfile}
+	fi
+
+	mv "${opts[@]}" "${src}" "${dst}"
+}
+
 scan() {
 	echo "Scanning Configuration files..."
-	rm -rf ${TMP}/files > /dev/null 2>&1
-	mkdir ${TMP}/files || die "Failed mkdir command!" 1
+	rm -rf "${TMP}"/files > /dev/null 2>&1
+	mkdir "${TMP}"/files || die "Failed mkdir command!"
 	count=0
 	input=0
 	local find_opts
-	local my_basename
-
-	for path in ${CONFIG_PROTECT} ; do
-		path="${ROOT}${path}"
-		# Do not traverse hidden directories such as .svn or .git.
-		find_opts="-name .* -type d -prune -o -name ._cfg????_*"
-		if [ ! -d "${path}" ]; then
-			[ ! -f "${path}" ] && continue
-			my_basename="${path##*/}"
+	local path
+
+	for path in ${SCAN_PATHS} ; do
+		path="${EROOT%/}${path}"
+
+		if [[ ! -d ${path} ]] ; then
+			[[ ! -f ${path} ]] && continue
+			local my_basename="${path##*/}"
 			path="${path%/*}"
-			find_opts="-maxdepth 1 -name ._cfg????_${my_basename}"
+			find_opts=( -maxdepth 1 -name "._cfg????_${my_basename}" )
+		else
+			# Do not traverse hidden directories such as .svn or .git.
+			find_opts=( -name '.*' -type d -prune -o -name '._cfg????_*' )
 		fi
+		find_opts+=( ! -name '.*~' ! -iname '.*.bak' -print )
 
-		ofile=""
-		# The below set -f turns off file name globbing in the ${find_opts} expansion.
-		for file in $(set -f ; find ${path}/ ${find_opts} \
-		       ! -name '.*~' ! -iname '.*.bak' -print |
-			   sed -e "s:\(^.*/\)\(\._cfg[0-9]*_\)\(.*$\):\1\2\3\%\1%\2\%\3:" |
-			   sort -t'%' -k2,2 -k4,4 -k3,3 | LANG=POSIX LC_ALL=POSIX cut -f1 -d'%'); do
+		if [ ! -w "${path}" ] ; then
+			[ -e "${path}" ] || continue
+			die "Need write access to ${path}"
+		fi
 
-			rpath=$(echo "${file/\/\///}" | sed -e "s:/[^/]*$::")
-			rfile=$(echo "${file/\/\///}" | sed -e "s:^.*/::")
+		local file ofile b=$'\001'
+		for file in $(find "${path}"/ "${find_opts[@]}" |
+		              sed \
+						-e 's://*:/:g' \
+						-e "s:\(^.*/\)\(\._cfg[0-9]*_\)\(.*$\):\1\2\3$b\1$b\2$b\3:" |
+		              sort -t"$b" -k2,2 -k4,4 -k3,3 |
+		              LC_ALL=C cut -f1 -d"$b")
+		do
+			local rpath rfile cfg_file live_file
+			rpath=${file%/*}
+			rfile=${file##*/}
+			cfg_file="${rpath}/${rfile}"
+			live_file="${rpath}/${rfile:10}"
+
+			local mpath
 			for mpath in ${CONFIG_PROTECT_MASK}; do
-				mpath="${ROOT}${mpath}"
-				mpath=$(echo "${mpath/\/\///}")
-				if [[ "${rpath}" == "${mpath}"* ]]; then
-					mv ${rpath}/${rfile} ${rpath}/${rfile:10}
-					break
+				mpath="${EROOT%/}${mpath}"
+				if [[ "${rpath}" == "${mpath}"* ]] ; then
+					echo "Updating masked file: ${live_file}"
+					mv "${cfg_file}" "${live_file}"
+					continue 2
 				fi
 			done
 			if [[ ! -f ${file} ]] ; then
@@ -81,45 +118,88 @@ scan() {
 			fi
 
 			if [[ "${ofile:10}" != "${rfile:10}" ]] ||
-			   [[ ${opath} != ${rpath} ]]; then
+			   [[ ${opath} != ${rpath} ]]
+			then
 				MATCHES=0
-				if [[ "${EU_AUTOMERGE}" == "yes" ]]; then
-					if [ ! -e "${rpath}/${rfile}" ] || [ ! -e "${rpath}/${rfile:10}" ]; then
+				if [[ ${eu_automerge} == "yes" ]] ; then
+					if [[ ! -e ${cfg_file} || ! -e ${live_file} ]] ; then
 						MATCHES=0
 					else
-						diff -Bbua ${rpath}/${rfile} ${rpath}/${rfile:10} | egrep '^[+-]' | egrep -v '^[+-][\t ]*#|^--- |^\+\+\+ ' | egrep -qv '^[-+][\t ]*$'
-						MATCHES=$?
+						diff -Bbua "${cfg_file}" "${live_file}" | \
+							sed -n -r \
+								-e '/^[+-]/{/^([+-][\t ]*(#|$)|-{3} |\+{3} )/d;q1}'
+						: $(( MATCHES = ($? == 0) ))
 					fi
-				elif [[ -z $(diff -Nua ${rpath}/${rfile} ${rpath}/${rfile:10}|
-							  grep "^[+-][^+-]"|grep -v '# .Header:.*') ]]; then
-					MATCHES=1
+
+				else
+					diff -Nbua "${cfg_file}" "${live_file}" |
+						sed -n \
+							-e '/# .Header:/d' \
+							-e '/^[+-][^+-]/q1'
+					: $(( MATCHES = ($? == 0) ))
 				fi
-				if [[ "${MATCHES}" == "1" ]]; then
-					echo "Automerging trivial changes in: ${rpath}/${rfile:10}"
-					mv ${rpath}/${rfile} ${rpath}/${rfile:10}
+
+				if [[ ${MATCHES} == 1 ]] ; then
+					echo "Automerging trivial changes in: ${live_file}"
+					do_mv_ln "${cfg_file}" "${live_file}"
 					continue
 				else
-					count=${count}+1
-					echo "${rpath}/${rfile:10}" > ${TMP}/files/${count}
-					echo "${rpath}/${rfile}" >> ${TMP}/files/${count}
+					: $(( ++count ))
+					echo "${live_file}" > "${TMP}"/files/${count}
+					echo "${cfg_file}" >> "${TMP}"/files/${count}
 					ofile="${rfile}"
 					opath="${rpath}"
 					continue
 				fi
 			fi
 
-			if [[ -z $(diff -Nua ${rpath}/${rfile} ${rpath}/${ofile}|
-					  grep "^[+-][^+-]"|grep -v '# .Header:.*') ]]; then
-				mv ${rpath}/${rfile} ${rpath}/${ofile}
-				continue
-			else
-				echo "${rpath}/${rfile}" >> ${TMP}/files/${count}
+			if ! diff -Nbua "${cfg_file}" "${rpath}/${ofile}" |
+				sed -n \
+					-e '/# .Header:/d' \
+					-e '/^[+-][^+-]/q1'
+			then
+				echo "${cfg_file}" >> "${TMP}"/files/${count}
 				ofile="${rfile}"
 				opath="${rpath}"
+			else
+				mv "${cfg_file}" "${rpath}/${ofile}"
+				continue
 			fi
 		done
 	done
+}
 
+parse_automode_flag() {
+	case $1 in
+	-9)
+		local reply
+		read -p "Are you sure that you want to delete all updates (type YES): " reply
+		if [[ ${reply} != "YES" ]] ; then
+			echo "Did not get a 'YES', so ignoring request"
+			return 1
+		else
+			parse_automode_flag -7
+			export rm_opts=""
+		fi
+		;;
+	-7)
+		input=0
+		export DELETE_ALL="yes"
+		;;
+	-5)
+		parse_automode_flag -3
+		export mv_opts=" ${mv_opts} "
+		mv_opts="${mv_opts// -i / }"
+		;;
+	-3)
+		input=0
+		export OVERWRITE_ALL="yes"
+		;;
+	*)
+		return 1
+		;;
+	esac
+	return 0
 }
 
 sel_file() {
@@ -128,88 +208,77 @@ sel_file() {
 	      [[ ${input} == -1 ]] || \
 	      [[ ${input} == -3 ]]
 	do
-		local numfiles=$(ls ${TMP}/files|wc -l)
-		local numwidth=${#numfiles}
-		for file in $(ls ${TMP}/files|sort -n); do
-			if [[ ${isfirst} == 0 ]] ; then
-				isfirst=${file}
-			fi
-			numshow=$(printf "%${numwidth}i${PAR} " ${file})
-			numupdates=$(( $(wc -l <${TMP}/files/${file}) - 1 ))
-			echo -n "${numshow}"
-			if [[ ${mode} == 0 ]] ; then
-				echo "$(head -n1 ${TMP}/files/${file}) (${numupdates})"
-			else
-				head -n1 ${TMP}/files/${file}
-			fi
-		done > ${TMP}/menuitems
+		local allfiles=( $(cd "${TMP}"/files/ && printf '%s\n' * | sort -n) )
+		local isfirst=${allfiles[0]}
 
-		if [ "${OVERWRITE_ALL}" == "yes" ]; then
-			input=0
-		elif [ "${DELETE_ALL}" == "yes" ]; then
+		# Optimize: no point in building the whole file list if
+		# we're not actually going to talk to the user.
+		if [[ ${OVERWRITE_ALL} == "yes" || ${DELETE_ALL} == "yes" ]] ; then
 			input=0
 		else
-			[[ $CLEAR_TERM == yes ]] && clear
-			if [[ ${mode} == 0 ]] ; then
-				echo "The following is the list of files which need updating, each
-configuration file is followed by a list of possible replacement files."
-			else
-				local my_title="Please select a file to update"
-			fi
+			local numfiles=${#allfiles[@]}
+			local numwidth=${#numfiles}
+			local file fullfile line
+			for file in "${allfiles[@]}" ; do
+				fullfile="${TMP}/files/${file}"
+				line=$(head -n1 "${fullfile}")
+				printf '%*i%s %s' ${numwidth} ${file} "${PAR}" "${line}"
+				if [[ ${mode} == 0 ]] ; then
+					local numupdates=$(( $(wc -l <"${fullfile}") - 1 ))
+					echo " (${numupdates})"
+				else
+					echo
+				fi
+			done > "${TMP}"/menuitems
+
+			clear
 
 			if [[ ${mode} == 0 ]] ; then
-				cat ${TMP}/menuitems
-				echo    "Please select a file to edit by entering the corresponding number."
-				echo    "              (don't use -3, -5, -7 or -9 if you're unsure what to do)"
-				echo    "              (-1 to exit) (-3 to auto merge all remaining files)"
-				echo    "                           (-5 to auto-merge AND not use 'mv -i')"
-				echo    "                           (-7 to discard all updates)"
-				echo -n "                           (-9 to discard all updates AND not use 'rm -i'): "
+				cat <<-EOF
+					The following is the list of files which need updating; each
+					configuration file is followed by a list of possible replacement files.
+					$(<"${TMP}"/menuitems)
+					Please select a file to edit by entering the corresponding number.
+					              (don't use -3, -5, -7 or -9 if you're unsure what to do)
+					              (-1 to exit) (${_3_HELP_TEXT})
+					                           (${_5_HELP_TEXT})
+					                           (${_7_HELP_TEXT})
+				EOF
+				printf "                           (${_9_HELP_TEXT}): "
 				input=$(read_int)
 			else
-				dialog --title "${title}" --menu "${my_title}" \
-					0 0 0 $(echo -e "-1 Exit\n$(<${TMP}/menuitems)") \
-					2> ${TMP}/input || die "User termination!" 0
-				input=$(<${TMP}/input)
+				dialog \
+					--title "${title}" \
+					--menu "Please select a file to update"	\
+					0 0 0 $(<"${TMP}"/menuitems) \
+					2> "${TMP}"/input \
+					|| die "$(<"${TMP}"/input)\n\nUser termination!" 0
+				input=$(<"${TMP}"/input)
 			fi
-			if [[ ${input} == -9 ]]; then
-				read -p "Are you sure that you want to delete all updates (type YES):" reply
-				if [[ ${reply} != "YES" ]]; then
-					continue
-				else
-					input=-7
-					export rm_opts=""
-				fi
-			fi
-			if [[ ${input} == -7 ]]; then
-				input=0
-				export DELETE_ALL="yes"
-			fi
-			if [[ ${input} == -5 ]] ; then
-				input=-3
-				export mv_opts=" ${mv_opts} "
-				mv_opts="${mv_opts// -i / }"
-			fi
-			if [[ ${input} == -3 ]] ; then
-				input=0
-				export OVERWRITE_ALL="yes"
+			: ${input:=0}
+
+			if [[ ${input} != 0 ]] ; then
+				parse_automode_flag ${input} || continue
 			fi
 		fi # -3 automerge
-		if [[ -z ${input} ]] || [[ ${input} == 0 ]] ; then
+		if [[ ${input} == 0 ]] ; then
 			input=${isfirst}
 		fi
 	done
 }
 
 user_special() {
-	if [ -r ${PORTAGE_CONFIGROOT}etc/etc-update.special ]; then
-		if [ -z "$1" ]; then
-			echo "ERROR: user_special() called without arguments"
+	local special="${PORTAGE_CONFIGROOT}etc/etc-update.special"
+
+	if [[ -r ${special} ]] ; then
+		if [[ -z $1 ]] ; then
+			error "user_special() called without arguments"
 			return 1
 		fi
-		while read -r pat; do
-			echo ${1} | grep "${pat}" > /dev/null && return 0
-		done < ${PORTAGE_CONFIGROOT}etc/etc-update.special
+		local pat
+		while read -r pat ; do
+			echo "$1" | grep -q "${pat}" && return 0
+		done < "${special}"
 	fi
 	return 1
 }
@@ -219,12 +288,12 @@ read_int() {
 	# read.  This is a workaround for odd behavior of bash when an attempt is
 	# made to store a value such as "1y" into an integer-only variable.
 	local my_input
-	while true; do
+	while : ; do
 		read my_input
 		# failed integer conversions will break a loop unless they're enclosed
 		# in a subshell.
-		echo "${my_input}" | ( declare -i x; read x) 2>/dev/null && break
-		echo -n "Value '$my_input' is not valid. Please enter an integer value:" >&2
+		echo "${my_input}" | (declare -i x; read x) 2>/dev/null && break
+		printf 'Value "%s" is not valid. Please enter an integer value: ' "${my_input}" >&2
 	done
 	echo ${my_input}
 }
@@ -233,141 +302,147 @@ do_file() {
 	interactive_echo() { [ "${OVERWRITE_ALL}" != "yes" ] && [ "${DELETE_ALL}" != "yes" ] && echo; }
 	interactive_echo
 	local -i my_input
-	local -i fcount=0
-	until (( $(wc -l < ${TMP}/files/${input}) < 2 )); do
-		my_input=0
-		if (( $(wc -l < ${TMP}/files/${input}) == 2 )); then
+	local -i linecnt
+	local fullfile="${TMP}/files/${input}"
+	local ofile=$(head -n1 "${fullfile}")
+
+	# Walk through all the pending updates for this one file.
+	linecnt=$(wc -l <"${fullfile}")
+	while (( linecnt > 1 )) ; do
+		if (( linecnt == 2 )) ; then
+			# Only one update ... keeps things simple.
 			my_input=1
+		else
+			my_input=0
 		fi
-		until (( ${my_input} > 0 )) && (( ${my_input} < $(wc -l < ${TMP}/files/${input}) )); do
-			fcount=0
 
-			if [ "${OVERWRITE_ALL}" == "yes" ]; then
-				my_input=0
-			elif [ "${DELETE_ALL}" == "yes" ]; then
-				my_input=0
-			else
-				for line in $(<${TMP}/files/${input}); do
-					if (( ${fcount} > 0 )); then
-						echo -n "${fcount}${PAR} "
-						echo "${line}"
-					else
-						if [[ ${mode} == 0 ]] ; then
-							echo "Below are the new config files for ${line}:"
-						else
-							local my_title="Please select a file to process for ${line}"
-						fi
-					fi
-					fcount=${fcount}+1
-				done > ${TMP}/menuitems
+		# Optimize: no point in scanning the file list when we know
+		# we're just going to consume all the ones available.
+		if [[ ${OVERWRITE_ALL} == "yes" || ${DELETE_ALL} == "yes" ]] ; then
+			my_input=1
+		fi
 
-				if [[ ${mode} == 0 ]] ; then
-					cat ${TMP}/menuitems
-					echo -n "Please select a file to process (-1 to exit this file): "
-					my_input=$(read_int)
-				else
-					dialog --title "${title}" --menu "${my_title}" \
-						0 0 0 $(echo -e "$(<${TMP}/menuitems)\n${fcount} Exit") \
-						2> ${TMP}/input || die "User termination!" 0
-					my_input=$(<${TMP}/input)
+		# Figure out which file they wish to operate on.
+		while (( my_input <= 0 || my_input >= linecnt )) ; do
+			local fcount=0
+			for line in $(<"${fullfile}"); do
+				if (( fcount > 0 )); then
+					printf '%i%s %s\n' ${fcount} "${PAR}" "${line}"
 				fi
-			fi # OVERWRITE_ALL
+				: $(( ++fcount ))
+			done > "${TMP}"/menuitems
+
+			if [[ ${mode} == 0 ]] ; then
+				echo "Below are the new config files for ${ofile}:"
+				cat "${TMP}"/menuitems
+				echo -n "Please select a file to process (-1 to exit this file): "
+				my_input=$(read_int)
+			else
+				dialog \
+					--title "${title}" \
+					--menu "Please select a file to process for ${ofile}" \
+					0 0 0 $(<"${TMP}"/menuitems) \
+					2> "${TMP}"/input \
+					|| die "$(<"${TMP}"/input)\n\nUser termination!" 0
+				my_input=$(<"${TMP}"/input)
+			fi
 
 			if [[ ${my_input} == 0 ]] ; then
+				# Auto select the first file.
 				my_input=1
 			elif [[ ${my_input} == -1 ]] ; then
 				input=0
 				return
-			elif [[ ${my_input} == ${fcount} ]] ; then
-				break
 			fi
 		done
-		if [[ ${my_input} == ${fcount} ]] ; then
-			break
-		fi
-
-		fcount=${my_input}+1
-
-		file=$(sed -e "${fcount}p;d" ${TMP}/files/${input})
-		ofile=$(head -n1 ${TMP}/files/${input})
 
+		# First line is the old file while the rest are the config files.
+		: $(( ++my_input ))
+		local file=$(sed -n -e "${my_input}p" "${fullfile}")
 		do_cfg "${file}" "${ofile}"
 
-		sed -e "${fcount}!p;d" ${TMP}/files/${input} > ${TMP}/files/sed
-		mv ${TMP}/files/sed ${TMP}/files/${input}
+		sed -i -e "${my_input}d" "${fullfile}"
 
-		if [[ ${my_input} == -1 ]] ; then
-			break
-		fi
+		: $(( --linecnt ))
 	done
+
 	interactive_echo
-	rm ${TMP}/files/${input}
-	count=${count}-1
+	rm "${fullfile}"
+	: $(( --count ))
 }
 
-do_cfg() {
+show_diff() {
+	clear
+	local file1=$1 file2=$2
+	if [[ ${using_editor} == 0 ]] ; then
+		(
+			echo "Showing differences between ${file1} and ${file2}"
+			diff_command "${file1}" "${file2}"
+		) | ${pager}
+	else
+		echo "Beginning of differences between ${file1} and ${file2}"
+		diff_command "${file1}" "${file2}"
+		echo "End of differences between ${file1} and ${file2}"
+	fi
+}
 
-	local file="${1}"
-	local ofile="${2}"
+do_cfg() {
+	local file=$1
+	local ofile=$2
 	local -i my_input=0
 
-	until (( ${my_input} == -1 )) || [ ! -f ${file} ]; do
+	until (( my_input == -1 )) || [ ! -f "${file}" ] ; do
 		if [[ "${OVERWRITE_ALL}" == "yes" ]] && ! user_special "${ofile}"; then
 			my_input=1
 		elif [[ "${DELETE_ALL}" == "yes" ]] && ! user_special "${ofile}"; then
 			my_input=2
 		else
-			[[ $CLEAR_TERM == yes ]] && clear
-			if [ "${using_editor}" == 0 ]; then
-				(
-					echo "Showing differences between ${ofile} and ${file}"
-					diff_command "${ofile}" "${file}"
-				) | ${pager}
-			else
-				echo "Beginning of differences between ${ofile} and ${file}"
-				diff_command "${ofile}" "${file}"
-				echo "End of differences between ${ofile} and ${file}"
-			fi
-			if [ -L "${file}" ]; then
-				echo
-				echo "-------------------------------------------------------------"
-				echo "NOTE: File is a symlink to another file. REPLACE recommended."
-				echo "      The original file may simply have moved. Please review."
-				echo "-------------------------------------------------------------"
-				echo
+			show_diff "${ofile}" "${file}"
+			if [[ -L ${file} ]] ; then
+				cat <<-EOF
+
+					-------------------------------------------------------------
+					NOTE: File is a symlink to another file. REPLACE recommended.
+					      The original file may simply have moved. Please review.
+					-------------------------------------------------------------
+
+				EOF
 			fi
-			echo -n "File: ${file}
-1) Replace original with update
-2) Delete update, keeping original as is
-3) Interactively merge original with update
-4) Show differences again
-5) Save update as example config
-Please select from the menu above (-1 to ignore this update): "
+			cat <<-EOF
+
+				File: ${file}
+				1) Replace original with update
+				2) Delete update, keeping original as is
+				3) Interactively merge original with update
+				4) Show differences again
+				5) Save update as example config
+			EOF
+			printf 'Please select from the menu above (-1 to ignore this update): '
 			my_input=$(read_int)
 		fi
 
 		case ${my_input} in
-			1) echo "Replacing ${ofile} with ${file}"
-			   mv ${mv_opts} ${file} ${ofile}
-			   [ -n "${OVERWRITE_ALL}" ] && my_input=-1
-			   continue
-			   ;;
-			2) echo "Deleting ${file}"
-			   rm ${rm_opts} ${file}
-			   [ -n "${DELETE_ALL}" ] && my_input=-1
-			   continue
-			   ;;
-			3) do_merge "${file}" "${ofile}"
-			   my_input=${?}
-#			   [ ${my_input} == 255 ] && my_input=-1
-			   continue
-			   ;;
-			4) continue
-			   ;;
-			5) do_distconf "${file}" "${ofile}"
-			   ;;
-			*) continue
-			   ;;
+			1)	echo "Replacing ${ofile} with ${file}"
+				do_mv_ln ${mv_opts} "${file}" "${ofile}"
+				[ -n "${OVERWRITE_ALL}" ] && my_input=-1
+				continue
+				;;
+			2)	echo "Deleting ${file}"
+				rm ${rm_opts} "${file}"
+				[ -n "${DELETE_ALL}" ] && my_input=-1
+				continue
+				;;
+			3)	do_merge "${file}" "${ofile}"
+				my_input=${?}
+#				[ ${my_input} == 255 ] && my_input=-1
+				continue
+				;;
+			4)	continue
+				;;
+			5)	do_distconf "${file}" "${ofile}"
+				;;
+			*)	continue
+				;;
 		esac
 	done
 }
@@ -393,57 +468,48 @@ do_merge() {
 	# need to make sure the full /path/to/ exists ahead of time
 	mkdir -p "${mfile%/*}"
 
-	until (( ${my_input} == -1 )); do
+	until (( my_input == -1 )); do
 		echo "Merging ${file} and ${ofile}"
 		$(echo "${merge_command}" |
 		 sed -e "s:%merged:${mfile}:g" \
 		 	 -e "s:%orig:${ofile}:g" \
 			 -e "s:%new:${file}:g")
-		until (( ${my_input} == -1 )); do
-			echo -n "1) Replace ${ofile} with merged file
-2) Show differences between merged file and original
-3) Remerge original with update
-4) Edit merged file
-5) Return to the previous menu
-Please select from the menu above (-1 to exit, losing this merge): "
+		until (( my_input == -1 )); do
+			cat <<-EOF
+				1) Replace ${ofile} with merged file
+				2) Show differences between merged file and original
+				3) Remerge original with update
+				4) Edit merged file
+				5) Return to the previous menu
+			EOF
+			printf 'Please select from the menu above (-1 to exit, losing this merge): '
 			my_input=$(read_int)
 			case ${my_input} in
-				1) echo "Replacing ${ofile} with ${mfile}"
-				   if  [[ ${USERLAND} == BSD ]] ; then
-				       chown "$(stat -f %Su:%Sg "${ofile}")" "${mfile}"
-				       chmod $(stat -f %Mp%Lp "${ofile}") "${mfile}"
-				   else
-				       chown --reference="${ofile}" "${mfile}"
-				       chmod --reference="${ofile}" "${mfile}"
-				   fi
-				   mv ${mv_opts} "${mfile}" "${ofile}"
-				   rm ${rm_opts} "${file}"
-				   return 255
-				   ;;
-				2)
-					[[ $CLEAR_TERM == yes ]] && clear
-					if [ "${using_editor}" == 0 ]; then
-						(
-							echo "Showing differences between ${ofile} and ${mfile}"
-							diff_command "${ofile}" "${mfile}"
-						) | ${pager}
+				1)	echo "Replacing ${ofile} with ${mfile}"
+					if [[ ${USERLAND} == BSD ]] ; then
+						chown "$(stat -f %Su:%Sg "${ofile}")" "${mfile}"
+						chmod $(stat -f %Mp%Lp "${ofile}") "${mfile}"
 					else
-						echo "Beginning of differences between ${ofile} and ${mfile}"
-						diff_command "${ofile}" "${mfile}"
-						echo "End of differences between ${ofile} and ${mfile}"
+						chown --reference="${ofile}" "${mfile}"
+						chmod --reference="${ofile}" "${mfile}"
 					fi
-				   continue
-				   ;;
-				3) break
-				   ;;
-				4) ${EDITOR:-nano -w} "${mfile}"
-				   continue
-					 ;;
-				5) rm ${rm_opts} "${mfile}"
-				   return 0
-				   ;;
-				*) continue
-				   ;;
+					do_mv_ln ${mv_opts} "${mfile}" "${ofile}"
+					rm ${rm_opts} "${file}"
+					return 255
+					;;
+				2)	show_diff "${ofile}" "${mfile}"
+					continue
+					;;
+				3)	break
+					;;
+				4)	${EDITOR:-nano -w} "${mfile}"
+					continue
+					;;
+				5)	rm ${rm_opts} "${mfile}"
+					return 0
+					;;
+				*)	continue
+					;;
 			esac
 		done
 	done
@@ -455,21 +521,15 @@ do_distconf() {
 	# search for any previously saved distribution config
 	# files and number the current one accordingly
 
-	local file="${1}"
-	local ofile="${2}"
+	local file=$1 ofile=$2
 	local -i count
-	local -i fill
 	local suffix
 	local efile
 
-	for ((count = 0; count <= 9999; count++)); do
-		suffix=".dist_"
-		for ((fill = 4 - ${#count}; fill > 0; fill--)); do
-			suffix+="0"
-		done
-		suffix+="${count}"
+	for (( count = 0; count <= 9999; ++count )) ; do
+		suffix=$(printf ".dist_%04i" ${count})
 		efile="${ofile}${suffix}"
-		if [[ ! -f ${efile} ]]; then
+		if [[ ! -f ${efile} ]] ; then
 			mv ${mv_opts} "${file}" "${efile}"
 			break
 		elif diff_command "${file}" "${efile}" &> /dev/null; then
@@ -480,35 +540,51 @@ do_distconf() {
 	done
 }
 
+error() { echo "etc-update: ERROR: $*" 1>&2 ; return 1 ; }
 die() {
 	trap SIGTERM
 	trap SIGINT
+	local msg=$1 exitcode=${2:-1}
 
-	if [ "$2" -eq 0 ]; then
-		echo "Exiting: ${1}"
+	if [ ${exitcode} -eq 0 ] ; then
+		printf 'Exiting: %b\n' "${msg}"
 		scan > /dev/null
 		[ ${count} -gt 0 ] && echo "NOTE: ${count} updates remaining"
 	else
-		echo "ERROR: ${1}"
+		error "${msg}"
 	fi
 
 	rm -rf "${TMP}"
-	exit ${2}
+	exit ${exitcode}
 }
 
+_3_HELP_TEXT="-3 to auto merge all files"
+_5_HELP_TEXT="-5 to auto-merge AND not use 'mv -i'"
+_7_HELP_TEXT="-7 to discard all updates"
+_9_HELP_TEXT="-9 to discard all updates AND not use 'rm -i'"
 usage() {
 	cat <<-EOF
 	etc-update: Handle configuration file updates
 
-	Usage: etc-update [options]
+	Usage: etc-update [options] [paths to scan]
+
+	If no paths are specified, then \${CONFIG_PROTECT} will be used.
 
 	Options:
 	  -d, --debug    Enable shell debugging
 	  -h, --help     Show help and run away
+	  -p, --preen    Automerge trivial changes only and quit
+	  -v, --verbose  Show settings and such along the way
 	  -V, --version  Show version and trundle away
+
+	  --automode <mode>
+	             ${_3_HELP_TEXT}
+	             ${_5_HELP_TEXT}
+	             ${_7_HELP_TEXT}
+	             ${_9_HELP_TEXT}
 	EOF
 
-	[[ -n ${*:2} ]] && printf "\nError: %s\n" "${*:2}" 1>&2
+	[[ $# -gt 1 ]] && printf "\nError: %s\n" "${*:2}" 1>&2
 
 	exit ${1:-0}
 }
@@ -517,98 +593,121 @@ usage() {
 # Run the script
 #
 
+declare -i count=0
+declare input=0
+declare title="Gentoo's etc-update tool!"
+
+PREEN=false
 SET_X=false
+VERBOSE=false
 while [[ -n $1 ]] ; do
 	case $1 in
 		-d|--debug)   SET_X=true;;
 		-h|--help)    usage;;
-		-V|--version) emerge --version ; exit 0;;
-		*)            usage 1 "Invalid option '$1'";;
+		-p|--preen)   PREEN=true;;
+		-v|--verbose) VERBOSE=true;;
+		-V|--version) emerge --version; exit 0;;
+		--automode)   parse_automode_flag $2 && shift || usage 1 "Invalid mode '$2'";;
+		-*)           usage 1 "Invalid option '$1'";;
+		*)            break;;
 	esac
 	shift
 done
 ${SET_X} && set -x
 
-type portageq > /dev/null || exit $?
-eval $(portageq envvar -v CONFIG_PROTECT \
-	CONFIG_PROTECT_MASK PORTAGE_CONFIGROOT PORTAGE_TMPDIR ROOT USERLAND)
+type portageq >/dev/null || die "missing portageq"
+portage_vars=(
+	CONFIG_PROTECT{,_MASK}
+	PORTAGE_CONFIGROOT
+	PORTAGE_INST_{G,U}ID
+	PORTAGE_TMPDIR
+	EROOT
+	USERLAND
+	NOCOLOR
+)
+eval $(portageq envvar -v ${portage_vars[@]})
 export PORTAGE_TMPDIR
+SCAN_PATHS=${*:-${CONFIG_PROTECT}}
 
 TMP="${PORTAGE_TMPDIR}/etc-update-$$"
-trap "die terminated 1" SIGTERM
-trap "die interrupted 1" SIGINT
+trap "die terminated" SIGTERM
+trap "die interrupted" SIGINT
 
-[ -w ${PORTAGE_CONFIGROOT}etc ] || die "Need write access to ${PORTAGE_CONFIGROOT}etc" 1
-#echo $PORTAGE_TMPDIR
-#echo $CONFIG_PROTECT
-#echo $CONFIG_PROTECT_MASK
-#export PORTAGE_TMPDIR=$(/usr/lib/portage/bin/portageq envvar PORTAGE_TMPDIR)
-
-rm -rf "${TMP}" 2> /dev/null
-mkdir "${TMP}" || die "failed to create temp dir" 1
+rm -rf "${TMP}" 2>/dev/null
+mkdir "${TMP}" || die "failed to create temp dir"
 # make sure we have a secure directory to work in
-chmod 0700 "${TMP}" || die "failed to set perms on temp dir" 1
-chown ${UID:-0}:${GID:-0} "${TMP}" || die "failed to set ownership on temp dir" 1
-
-# I need the CONFIG_PROTECT value
-#CONFIG_PROTECT=$(/usr/lib/portage/bin/portageq envvar CONFIG_PROTECT)
-#CONFIG_PROTECT_MASK=$(/usr/lib/portage/bin/portageq envvar CONFIG_PROTECT_MASK)
-
-# load etc-config's configuration
-CLEAR_TERM=$(get_config clear_term)
-EU_AUTOMERGE=$(get_config eu_automerge)
-rm_opts=$(get_config rm_opts)
-mv_opts=$(get_config mv_opts)
-cp_opts=$(get_config cp_opts)
-pager=$(get_config pager)
-diff_command=$(get_config diff_command)
-using_editor=$(get_config using_editor)
-merge_command=$(get_config merge_command)
-declare -i mode=$(get_config mode)
-[[ -z ${mode} ]] && mode=0
-[[ -z ${pager} ]] && pager="cat"
-
-if [ "${using_editor}" == 0 ]; then
+chmod 0700 "${TMP}" || die "failed to set perms on temp dir"
+chown ${PORTAGE_INST_UID:-0}:${PORTAGE_INST_GID:-0} "${TMP}" || \
+	die "failed to set ownership on temp dir"
+
+# Get all the user settings from etc-update.conf
+cfg_vars=(
+	clear_term
+	eu_automerge
+	rm_opts
+	mv_opts
+	pager
+	diff_command
+	using_editor
+	merge_command
+	mode
+)
+# default them all to ""
+eval ${cfg_vars[@]/%/=}
+# then extract them all from the conf in one shot
+# (ugly var at end is due to printf appending a '|' to last item)
+get_config "($(printf '%s|' "${cfg_vars[@]}")NOVARFOROLDMEN)"
+
+# finally setup any specific defaults
+: ${mode:="0"}
+if ! cmd_var_is_valid "${pager}" ; then
+	pager=${PAGER}
+	cmd_var_is_valid "${pager}" || pager=cat
+fi
+
+[[ ${clear_term} == "yes" ]] || clear() { :; }
+
+if [[ ${using_editor} == "0" ]] ; then
 	# Sanity check to make sure diff exists and works
 	echo > "${TMP}"/.diff-test-1
 	echo > "${TMP}"/.diff-test-2
-	
+
 	if ! diff_command "${TMP}"/.diff-test-1 "${TMP}"/.diff-test-2 ; then
-		die "'${diff_command}' does not seem to work, aborting" 1
+		die "'${diff_command}' does not seem to work, aborting"
 	fi
 else
-	if ! type ${diff_command%% *} >/dev/null; then
-		die "'${diff_command}' does not seem to work, aborting" 1
+	# NOTE: cmd_var_is_valid doesn't work with diff_command="eval emacs..."
+	# because it uses type -P.
+	if ! type ${diff_command%%[[:space:]]*} >/dev/null; then
+		die "'${diff_command}' does not seem to work, aborting"
 	fi
 fi
 
-if [[ ${mode} == "1" ]] ; then
-	if ! type dialog >/dev/null || ! dialog --help >/dev/null ; then
-		die "mode=1 and 'dialog' not found or not executable, aborting" 1
-	fi
-fi
-
-#echo "rm_opts: $rm_opts, mv_opts: $mv_opts, cp_opts: $cp_opts"
-#echo "pager: $pager, diff_command: $diff_command, merge_command: $merge_command"
-
-if (( ${mode} == 0 )); then
+if [[ ${mode} == "0" ]] ; then
 	PAR=")"
 else
 	PAR=""
+	if ! type dialog >/dev/null || ! dialog --help >/dev/null ; then
+		die "mode=1 and 'dialog' not found or not executable, aborting"
+	fi
 fi
 
-declare -i count=0
-declare input=0
-declare title="Gentoo's etc-update tool!"
+if ${VERBOSE} ; then
+	for v in ${portage_vars[@]} ${cfg_vars[@]} TMP SCAN_PATHS ; do
+		echo "${v}=${!v}"
+	done
+fi
 
 scan
 
-until (( ${input} == -1 )); do
-	if (( ${count} == 0 )); then
+${PREEN} && exit 0
+
+until (( input == -1 )); do
+	if (( count == 0 )); then
 		die "Nothing left to do; exiting. :)" 0
 	fi
 	sel_file
-	if (( ${input} != -1 )); then
+	if (( input != -1 )); then
 		do_file
 	fi
 done

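get_config() above turns matching assignments from etc-update.conf directly
into shell assignments: sed gathers every match in the hold space and the
surrounding eval executes them in file order, so when the same variable
appears twice the last setting wins. A small illustration of that eval
behavior (the assignments are hypothetical):

	eval $(printf '%s\n' 'pager="less"' 'pager="cat"')
	echo "${pager}"	# prints: cat
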
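The reworked option handling also makes etc-update scriptable: explicit paths
can replace the CONFIG_PROTECT list, and --automode reuses the interactive
-3/-5/-7/-9 codes non-interactively. Hedged usage sketches:

	etc-update /etc/portage      # scan only this tree
	etc-update --preen           # automerge trivial changes, then quit
	etc-update --automode -5     # auto-merge everything, without 'mv -i'
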
diff --git a/portage_with_autodep/bin/fixpackages b/portage_with_autodep/bin/fixpackages
index 5e1df70..dc43ed2 100755
--- a/portage_with_autodep/bin/fixpackages
+++ b/portage_with_autodep/bin/fixpackages
@@ -1,11 +1,12 @@
 #!/usr/bin/python
-# Copyright 1999-2006 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
 
-import os,sys
-os.environ["PORTAGE_CALLER"]="fixpackages"
+import os
+import sys
+
 try:
 	import portage
 except ImportError:
@@ -38,7 +39,7 @@ except (OSError, ValueError) as e:
 	portage.writemsg("!!! %s\n" % str(e))
 	del e
 
-_global_updates(mytrees, mtimedb["updates"])
+_global_updates(mytrees, mtimedb["updates"], if_mtime_changed=False)
 
 print()
 print("Done.")

diff --git a/portage_with_autodep/bin/glsa-check b/portage_with_autodep/bin/glsa-check
index 2f2d555..a840c32 100755
--- a/portage_with_autodep/bin/glsa-check
+++ b/portage_with_autodep/bin/glsa-check
@@ -103,8 +103,9 @@ elif mode == "list" and not params:
 # delay this for speed increase
 from portage.glsa import *
 
-vardb = portage.db[portage.settings["ROOT"]]["vartree"].dbapi
-portdb = portage.db["/"]["porttree"].dbapi
+eroot = portage.settings['EROOT']
+vardb = portage.db[eroot]["vartree"].dbapi
+portdb = portage.db[eroot]["porttree"].dbapi
 
 # build glsa lists
 completelist = get_glsa_list(portage.settings)

diff --git a/portage_with_autodep/bin/helper-functions.sh b/portage_with_autodep/bin/helper-functions.sh
new file mode 100755
index 0000000..afe14af
--- /dev/null
+++ b/portage_with_autodep/bin/helper-functions.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# For routines we want to use in ebuild-helpers/ but don't want to
+# expose to the general ebuild environment.
+
+source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/isolated-functions.sh
+
+#
+# API functions for doing parallel processing
+#
+numjobs() {
+	# Copied from eutils.eclass:makeopts_jobs()
+	local jobs=$(echo " ${MAKEOPTS} " | \
+		sed -r -n 's:.*[[:space:]](-j|--jobs[=[:space:]])[[:space:]]*([0-9]+).*:\2:p')
+	echo ${jobs:-1}
+}
+
+multijob_init() {
+	# Set up a pipe for children to write their pids to when they finish.
+	mj_control_pipe=$(mktemp -t multijob.XXXXXX)
+	rm "${mj_control_pipe}"
+	mkfifo "${mj_control_pipe}"
+	exec {mj_control_fd}<>${mj_control_pipe}
+	rm -f "${mj_control_pipe}"
+
+	# See how many children we can fork based on the user's settings.
+	mj_max_jobs=$(numjobs)
+	mj_num_jobs=0
+}
+
+multijob_child_init() {
+	trap 'echo ${BASHPID} $? >&'${mj_control_fd} EXIT
+	trap 'exit 1' INT TERM
+}
+
+multijob_finish_one() {
+	local pid ret
+	read -r -u ${mj_control_fd} pid ret
+	: $(( --mj_num_jobs ))
+	return ${ret}
+}
+
+multijob_finish() {
+	local ret=0
+	while [[ ${mj_num_jobs} -gt 0 ]] ; do
+		multijob_finish_one
+		: $(( ret |= $? ))
+	done
+	# Let bash clean up its internal child tracking state.
+	wait
+	return ${ret}
+}
+
+multijob_post_fork() {
+	: $(( ++mj_num_jobs ))
+	if [[ ${mj_num_jobs} -ge ${mj_max_jobs} ]] ; then
+		multijob_finish_one
+	fi
+	return $?
+}

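helper-functions.sh above gives the ebuild helpers a small fork/join API:
multijob_init creates an unlinked fifo and sizes the pool from MAKEOPTS via
numjobs(), each child's EXIT trap reports its BASHPID and exit status on the
shared fd, multijob_post_fork blocks once the pool is full, and
multijob_finish reaps the remaining children and ORs their statuses. A hedged
usage sketch (the files array and compress_one are hypothetical):

	source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}"/helper-functions.sh

	multijob_init
	for f in "${files[@]}" ; do
		(
			multijob_child_init
			compress_one "${f}"	# hypothetical per-file worker
		) &
		multijob_post_fork || echo "a finished child failed" >&2
	done
	multijob_finish || echo "at least one child failed" >&2
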
diff --git a/portage_with_autodep/bin/install.py b/portage_with_autodep/bin/install.py
new file mode 100755
index 0000000..2c6dfbe
--- /dev/null
+++ b/portage_with_autodep/bin/install.py
@@ -0,0 +1,253 @@
+#!/usr/bin/python
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import stat
+import sys
+import subprocess
+import traceback
+
+import portage
+from portage.util._argparse import ArgumentParser
+from portage.util.movefile import _copyxattr
+from portage.exception import OperationNotSupported
+
+# Change back to original cwd _after_ all imports (bug #469338).
+os.chdir(os.environ["__PORTAGE_HELPER_CWD"])
+
+def parse_args(args):
+	"""
+	Parse the command line arguments using portage's ArgumentParser (an optparse shim provides python 2.6 compatibility)
+	Args:
+	  args: a list of the white space delimited command line
+	Returns:
+	  tuple of the Namespace of parsed options, and a list of ordered parameters
+	"""
+	parser = ArgumentParser(add_help=False)
+
+	parser.add_argument(
+		"-b",
+		action="store_true",
+		dest="shortopt_b"
+	)
+	parser.add_argument(
+		"--backup",
+		action="store",
+		dest="backup"
+		)
+	parser.add_argument(
+		"-c",
+		action="store_true",
+		dest="shortopt_c"
+	)
+	parser.add_argument(
+		"--compare",
+		"-C",
+		action="store_true",
+		dest="compare"
+	)
+	parser.add_argument(
+		"--directory",
+			"-d",
+		action="store_true",
+		dest="directory"
+	)
+	parser.add_argument(
+		"-D",
+		action="store_true",
+		dest="shortopt_D"
+	)
+	parser.add_argument(
+		"--owner",
+		"-o",
+		action="store", 
+		dest="owner"
+	)
+	parser.add_argument(
+		"--group",
+		"-g",
+		action="store",
+		dest="group"
+	)
+	parser.add_argument(
+		"--mode",
+		"-m",
+		action="store",
+		dest="mode"
+	)
+	parser.add_argument(
+		"--preserve-timestamps",
+		"-p",
+		action="store_true",
+		dest="preserve_timestamps"
+	)
+	parser.add_argument(
+		"--strip",
+		"-s",
+		action="store_true",
+		dest="strip"
+	)
+	parser.add_argument(
+		"--strip-program",
+		action="store",
+		dest="strip_program"
+	)
+	parser.add_argument(
+		"--suffix",
+		"-S",
+		action="store",
+		dest="suffix"
+	)
+	parser.add_argument(
+		"--target-directory",
+		"-t",
+		action="store",
+		dest="target_directory"
+	)
+	parser.add_argument(
+		"--no-target-directory",
+		"-T",
+		action="store_true",
+		dest="no_target_directory"
+	)
+	parser.add_argument(
+		"--context",
+		"-Z",
+		action="store",
+		dest="context"
+	)
+	parser.add_argument(
+		"--verbose",
+		"-v",
+		action="store_true",
+		dest="verbose"
+	)
+	parser.add_argument(
+		"--help",
+		action="store_true",
+		dest="help"
+	)
+	parser.add_argument(
+		"--version",
+		action="store_true",
+		dest="version"
+	)
+
+	# Use parse_known_args for maximum compatibility with
+	# getopt handling of non-option file arguments. Note
+	# that parser.add_argument("files", nargs='+') would
+	# be subtly incompatible because it requires that all
+	# of the file arguments be grouped sequentially. Also
+	# note that we have to explicitly call add_argument
+	# for known options in order for argparse to correctly
+	# separate option arguments from file arguments in all
+	# cases (it also allows for optparse compatibility).
+	parsed_args = parser.parse_known_args(args)
+
+	opts  = parsed_args[0]
+	files = parsed_args[1]
+	files = [f for f in files if f != "--"]	# filter out "--"
+
+	return (opts, files)
+
+
+def copy_xattrs(opts, files):
+	"""
+	Copy the extended attributes using portage.util.movefile._copyxattr
+	Args:
+	  opts:  Namespace of the parsed command line options
+	  files: list of ordered command line parameters which should be files/directories
+	Returns:
+	  system exit code
+	"""
+	if opts.directory or not files:
+		return os.EX_OK
+
+	if opts.target_directory is None:
+		source, target = files[:-1], files[-1]
+		target_is_directory = os.path.isdir(target)
+	else:
+		source, target = files, opts.target_directory
+		target_is_directory = True
+
+	exclude = os.environ.get("PORTAGE_XATTR_EXCLUDE", "security.* system.nfs4_acl")
+
+	try:
+		if target_is_directory:
+			for s in source:
+				abs_path = os.path.join(target, os.path.basename(s))
+				_copyxattr(s, abs_path, exclude=exclude)
+		else:
+			_copyxattr(source[0], target, exclude=exclude)
+		return os.EX_OK
+
+	except OperationNotSupported:
+		traceback.print_exc()
+		return os.EX_OSERR
+
+
+def Which(filename, path=None, exclude=None):
+	"""
+	Find the absolute path of 'filename' in a given search 'path'
+	Args:
+	  filename: basename of the file
+	  path: colon delimited search path
+	  exclude: path of file to exclude
+	"""
+	if path is None:
+		path = os.environ.get('PATH', '')
+
+	if exclude is not None:
+		st = os.stat(exclude)
+		exclude = (st.st_ino, st.st_dev)
+
+	for p in path.split(':'):
+		p = os.path.join(p, filename)
+		if os.access(p, os.X_OK):
+			try:
+				st = os.stat(p)
+			except OSError:
+				# file disappeared?
+				pass
+			else:
+				if stat.S_ISREG(st.st_mode) and \
+					(exclude is None or exclude != (st.st_ino, st.st_dev)):
+					return p
+
+	return None
+
+
+def main(args):
+	opts, files = parse_args(args)
+	install_binary = Which('install', exclude=os.environ["__PORTAGE_HELPER_PATH"])
+	if install_binary is None:
+		sys.stderr.write("install: command not found\n")
+		return 127
+
+	cmdline = [install_binary]
+	cmdline += args
+
+	if sys.hexversion >= 0x3000000:
+		# We can't trust that the filesystem encoding (locale dependent)
+		# correctly matches the arguments, so use surrogateescape to
+		# pass through the original argv bytes for Python 3.
+		fs_encoding = sys.getfilesystemencoding()
+		cmdline = [x.encode(fs_encoding, 'surrogateescape') for x in cmdline]
+		files = [x.encode(fs_encoding, 'surrogateescape') for x in files]
+		if opts.target_directory is not None:
+			opts.target_directory = \
+				opts.target_directory.encode(fs_encoding, 'surrogateescape')
+
+	returncode = subprocess.call(cmdline)
+	if returncode == os.EX_OK:
+		returncode = copy_xattrs(opts, files)
+		if returncode != os.EX_OK:
+			portage.util.writemsg("!!! install: copy_xattrs failed with the "
+				"following arguments: %s\n" %
+				" ".join(portage._shell_quote(x) for x in args), noiselevel=-1)
+	return returncode
+
+
+if __name__ == "__main__":
+	sys.exit(main(sys.argv[1:]))

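A sketch of how this xattr-preserving wrapper is driven (the file name and the
helper path are illustrative; the two __PORTAGE_* variables are exactly the
ones the script reads from its environment above):

	# The caller must export these: the cwd to chdir back to after imports,
	# and the wrapper's own path so Which() skips it when locating install(1).
	export __PORTAGE_HELPER_CWD="${PWD}"
	export __PORTAGE_HELPER_PATH="/usr/lib/portage/bin/ebuild-helpers/xattr/install"
	python /usr/lib/portage/bin/install.py -m 0644 libfoo.so.1 "${D}"/usr/lib/
	# On success the real install(1) has run, and extended attributes on
	# libfoo.so.1 not matching PORTAGE_XATTR_EXCLUDE were copied to the
	# installed file.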
diff --git a/portage_with_autodep/bin/isolated-functions.sh b/portage_with_autodep/bin/isolated-functions.sh
old mode 100644
new mode 100755
index 65bb1d5..dbf988b
--- a/portage_with_autodep/bin/isolated-functions.sh
+++ b/portage_with_autodep/bin/isolated-functions.sh
@@ -173,8 +173,8 @@ die() {
 		| while read -r n ; do eerror "  ${n#RETAIN-LEADING-SPACE}" ; done
 	eerror
 	fi
-	eerror "If you need support, post the output of 'emerge --info =$CATEGORY/$PF',"
-	eerror "the complete build log and the output of 'emerge -pqv =$CATEGORY/$PF'."
+	eerror "If you need support, post the output of \`emerge --info '=$CATEGORY/$PF'\`,"
+	eerror "the complete build log and the output of \`emerge -pqv '=$CATEGORY/$PF'\`."
 	if [[ -n ${EBUILD_OVERLAY_ECLASSES} ]] ; then
 		eerror "This ebuild used the following eclasses from overlays:"
 		local x
@@ -203,7 +203,12 @@ die() {
 		fi
 	fi
 
-	if [[ "${EBUILD_PHASE/depend}" == "${EBUILD_PHASE}" ]] ; then
+	# Only call die hooks here if we are executed via ebuild.sh or
+	# misc-functions.sh, since those are the only cases where the environment
+	# contains the hook functions. When necessary (like for helpers_die), die
+	# hooks are automatically called later by a misc-functions.sh invocation.
+	if has ${BASH_SOURCE[$main_index]##*/} ebuild.sh misc-functions.sh && \
+		[[ ${EBUILD_PHASE} != depend ]] ; then
 		local x
 		for x in $EBUILD_DEATH_HOOKS; do
 			${x} "$@" >&2 1>&2
@@ -211,8 +216,15 @@ die() {
 		> "$PORTAGE_BUILDDIR/.die_hooks"
 	fi
 
-	[[ -n ${PORTAGE_LOG_FILE} ]] \
-		&& eerror "The complete build log is located at '${PORTAGE_LOG_FILE}'."
+	if [[ -n ${PORTAGE_LOG_FILE} ]] ; then
+		eerror "The complete build log is located at '${PORTAGE_LOG_FILE}'."
+		if [[ ${PORTAGE_LOG_FILE} != ${T}/* ]] ; then
+			# Display path to symlink in ${T}, as requested in bug #412865.
+			local log_ext=log
+			[[ ${PORTAGE_LOG_FILE} != *.log ]] && log_ext+=.${PORTAGE_LOG_FILE##*.}
+			eerror "For convenience, a symlink to the build log is located at '${T}/build.${log_ext}'."
+		fi
+	fi
 	if [ -f "${T}/environment" ] ; then
 		eerror "The ebuild environment file is located at '${T}/environment'."
 	elif [ -d "${T}" ] ; then
@@ -222,6 +234,7 @@ die() {
 		} > "${T}/die.env"
 		eerror "The ebuild environment file is located at '${T}/die.env'."
 	fi
+	eerror "Working directory: '$(pwd)'"
 	eerror "S: '${S}'"
 
 	[[ -n $PORTAGE_EBUILD_EXIT_FILE ]] && > "$PORTAGE_EBUILD_EXIT_FILE"
@@ -392,57 +405,6 @@ eend() {
 	return ${retval}
 }
 
-KV_major() {
-	[[ -z $1 ]] && return 1
-
-	local KV=$@
-	echo "${KV%%.*}"
-}
-
-KV_minor() {
-	[[ -z $1 ]] && return 1
-
-	local KV=$@
-	KV=${KV#*.}
-	echo "${KV%%.*}"
-}
-
-KV_micro() {
-	[[ -z $1 ]] && return 1
-
-	local KV=$@
-	KV=${KV#*.*.}
-	echo "${KV%%[^[:digit:]]*}"
-}
-
-KV_to_int() {
-	[[ -z $1 ]] && return 1
-
-	local KV_MAJOR=$(KV_major "$1")
-	local KV_MINOR=$(KV_minor "$1")
-	local KV_MICRO=$(KV_micro "$1")
-	local KV_int=$(( KV_MAJOR * 65536 + KV_MINOR * 256 + KV_MICRO ))
-
-	# We make version 2.2.0 the minimum version we will handle as
-	# a sanity check ... if its less, we fail ...
-	if [[ ${KV_int} -ge 131584 ]] ; then
-		echo "${KV_int}"
-		return 0
-	fi
-
-	return 1
-}
-
-_RC_GET_KV_CACHE=""
-get_KV() {
-	[[ -z ${_RC_GET_KV_CACHE} ]] \
-		&& _RC_GET_KV_CACHE=$(uname -r)
-
-	echo $(KV_to_int "${_RC_GET_KV_CACHE}")
-
-	return $?
-}
-
 unset_colors() {
 	COLS=80
 	ENDCOL=
@@ -457,7 +419,13 @@ unset_colors() {
 
 set_colors() {
 	COLS=${COLUMNS:-0}      # bash's internal COLUMNS variable
-	(( COLS == 0 )) && COLS=$(set -- $(stty size 2>/dev/null) ; echo $2)
+	# Avoid wasteful stty calls during the "depend" phases.
+	# If stdout is a pipe, the parent process can export COLUMNS
+	# if it's relevant. Use an extra subshell for stty calls, in
+	# order to redirect "/dev/tty: No such device or address"
+	# error from bash to /dev/null.
+	[[ $COLS == 0 && $EBUILD_PHASE != depend ]] && \
+		COLS=$(set -- $( ( stty size </dev/tty ) 2>/dev/null || echo 24 80 ) ; echo $2)
 	(( COLS > 0 )) || (( COLS = 80 ))
 
 	# Now, ${ENDCOL} will move us to the end of the
@@ -471,8 +439,8 @@ set_colors() {
 		BAD=$'\e[31;01m'
 		HILITE=$'\e[36;01m'
 		BRACKET=$'\e[34;01m'
+		NORMAL=$'\e[0m'
 	fi
-	NORMAL=$'\e[0m'
 }
 
 RC_ENDCOL="yes"
@@ -536,95 +504,4 @@ has() {
 	return 1
 }
 
-# @FUNCTION: save_ebuild_env
-# @DESCRIPTION:
-# echo the current environment to stdout, filtering out redundant info.
-#
-# --exclude-init-phases causes pkg_nofetch and src_* phase functions to
-# be excluded from the output. These function are not needed for installation
-# or removal of the packages, and can therefore be safely excluded.
-#
-save_ebuild_env() {
-	(
-
-		if has --exclude-init-phases $* ; then
-			unset S _E_DOCDESTTREE_ _E_EXEDESTTREE_
-			if [[ -n $PYTHONPATH ]] ; then
-				export PYTHONPATH=${PYTHONPATH/${PORTAGE_PYM_PATH}:}
-				[[ -z $PYTHONPATH ]] && unset PYTHONPATH
-			fi
-		fi
-
-		# misc variables inherited from the calling environment
-		unset COLORTERM DISPLAY EDITOR LESS LESSOPEN LOGNAME LS_COLORS PAGER \
-			TERM TERMCAP USER ftp_proxy http_proxy no_proxy
-
-		# other variables inherited from the calling environment
-		unset CVS_RSH ECHANGELOG_USER GPG_AGENT_INFO \
-		SSH_AGENT_PID SSH_AUTH_SOCK STY WINDOW XAUTHORITY
-
-		# CCACHE and DISTCC config
-		unset ${!CCACHE_*} ${!DISTCC_*}
-
-		# There's no need to bloat environment.bz2 with internally defined
-		# functions and variables, so filter them out if possible.
-
-		for x in pkg_setup pkg_nofetch src_unpack src_prepare src_configure \
-			src_compile src_test src_install pkg_preinst pkg_postinst \
-			pkg_prerm pkg_postrm ; do
-			unset -f default_$x _eapi{0,1,2,3,4}_$x
-		done
-		unset x
-
-		unset -f assert assert_sigpipe_ok dump_trace die diefunc \
-			quiet_mode vecho elog_base eqawarn elog \
-			esyslog einfo einfon ewarn eerror ebegin _eend eend KV_major \
-			KV_minor KV_micro KV_to_int get_KV unset_colors set_colors has \
-			has_phase_defined_up_to \
-			hasg hasgq hasv hasq qa_source qa_call \
-			addread addwrite adddeny addpredict _sb_append_var \
-			lchown lchgrp esyslog use usev useq has_version portageq \
-			best_version use_with use_enable register_die_hook \
-			keepdir unpack strip_duplicate_slashes econf einstall \
-			dyn_setup dyn_unpack dyn_clean into insinto exeinto docinto \
-			insopts diropts exeopts libopts docompress \
-			abort_handler abort_prepare abort_configure abort_compile \
-			abort_test abort_install dyn_prepare dyn_configure \
-			dyn_compile dyn_test dyn_install \
-			dyn_preinst dyn_help debug-print debug-print-function \
-			debug-print-section inherit EXPORT_FUNCTIONS remove_path_entry \
-			save_ebuild_env filter_readonly_variables preprocess_ebuild_env \
-			set_unless_changed unset_unless_changed source_all_bashrcs \
-			ebuild_main ebuild_phase ebuild_phase_with_hooks \
-			_ebuild_arg_to_phase _ebuild_phase_funcs default \
-			_pipestatus \
-			${QA_INTERCEPTORS}
-
-		# portage config variables and variables set directly by portage
-		unset ACCEPT_LICENSE BAD BRACKET BUILD_PREFIX COLS \
-			DISTCC_DIR DISTDIR DOC_SYMLINKS_DIR \
-			EBUILD_FORCE_TEST EBUILD_MASTER_PID \
-			ECLASS_DEPTH ENDCOL FAKEROOTKEY \
-			GOOD HILITE HOME \
-			LAST_E_CMD LAST_E_LEN LD_PRELOAD MISC_FUNCTIONS_ARGS MOPREFIX \
-			NOCOLOR NORMAL PKGDIR PKGUSE PKG_LOGDIR PKG_TMPDIR \
-			PORTAGE_BASHRCS_SOURCED PORTAGE_NONFATAL PORTAGE_QUIET \
-			PORTAGE_SANDBOX_DENY PORTAGE_SANDBOX_PREDICT \
-			PORTAGE_SANDBOX_READ PORTAGE_SANDBOX_WRITE PREROOTPATH \
-			QA_INTERCEPTORS \
-			RC_DEFAULT_INDENT RC_DOT_PATTERN RC_ENDCOL RC_INDENTATION  \
-			ROOT ROOTPATH RPMDIR TEMP TMP TMPDIR USE_EXPAND \
-			WARN XARGS _RC_GET_KV_CACHE
-
-		# user config variables
-		unset DOC_SYMLINKS_DIR INSTALL_MASK PKG_INSTALL_MASK
-
-		declare -p
-		declare -fp
-		if [[ ${BASH_VERSINFO[0]} == 3 ]]; then
-			export
-		fi
-	)
-}
-
 true

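To make the die-hook condition above concrete, here is a minimal sketch of how
a hook is registered (my_death_hook is a hypothetical name; register_die_hook
and EBUILD_DEATH_HOOKS are the interfaces die() consults):

	my_death_hook() {
		# die() invokes each registered hook with its own arguments.
		eerror "build died in phase '${EBUILD_PHASE}': $*"
	}
	register_die_hook my_death_hook	# appends the name to EBUILD_DEATH_HOOKS

With the patched die(), such hooks now fire only when the environment was set
up by ebuild.sh or misc-functions.sh, and never during the depend phase.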
diff --git a/portage_with_autodep/bin/lock-helper.py b/portage_with_autodep/bin/lock-helper.py
index 5f3ea9f..dfb8876 100755
--- a/portage_with_autodep/bin/lock-helper.py
+++ b/portage_with_autodep/bin/lock-helper.py
@@ -1,15 +1,16 @@
 #!/usr/bin/python
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import os
 import sys
 sys.path.insert(0, os.environ['PORTAGE_PYM_PATH'])
 import portage
+portage._disable_legacy_globals()
 
 def main(args):
 
-	if args and sys.hexversion < 0x3000000 and not isinstance(args[0], unicode):
+	if args and isinstance(args[0], bytes):
 		for i, x in enumerate(args):
 			args[i] = portage._unicode_decode(x, errors='strict')
 

diff --git a/portage_with_autodep/bin/misc-functions.sh b/portage_with_autodep/bin/misc-functions.sh
index 8c191ff..564af85 100755
--- a/portage_with_autodep/bin/misc-functions.sh
+++ b/portage_with_autodep/bin/misc-functions.sh
@@ -17,7 +17,9 @@ shift $#
 source "${PORTAGE_BIN_PATH:-/usr/lib/portage/bin}/ebuild.sh"
 
 install_symlink_html_docs() {
-	cd "${D}" || die "cd failed"
+	[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+		case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+	cd "${ED}" || die "cd failed"
 	#symlink the html documentation (if DOC_SYMLINKS_DIR is set in make.conf)
 	if [ -n "${DOC_SYMLINKS_DIR}" ] ; then
 		local mydocdir docdir
@@ -64,11 +66,13 @@ canonicalize() {
 prepcompress() {
 	local -a include exclude incl_d incl_f
 	local f g i real_f real_d
+	[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+		case "$EAPI" in 0|1|2) local ED=${D} ;; esac
 
 	# Canonicalize path names and check for their existence.
-	real_d=$(canonicalize "${D}")
+	real_d=$(canonicalize "${ED}")
 	for (( i = 0; i < ${#PORTAGE_DOCOMPRESS[@]}; i++ )); do
-		real_f=$(canonicalize "${D}${PORTAGE_DOCOMPRESS[i]}")
+		real_f=$(canonicalize "${ED}${PORTAGE_DOCOMPRESS[i]}")
 		f=${real_f#"${real_d}"}
 		if [[ ${real_f} != "${f}" ]] && [[ -d ${real_f} || -f ${real_f} ]]
 		then
@@ -79,7 +83,7 @@ prepcompress() {
 		fi
 	done
 	for (( i = 0; i < ${#PORTAGE_DOCOMPRESS_SKIP[@]}; i++ )); do
-		real_f=$(canonicalize "${D}${PORTAGE_DOCOMPRESS_SKIP[i]}")
+		real_f=$(canonicalize "${ED}${PORTAGE_DOCOMPRESS_SKIP[i]}")
 		f=${real_f#"${real_d}"}
 		if [[ ${real_f} != "${f}" ]] && [[ -d ${real_f} || -f ${real_f} ]]
 		then
@@ -128,7 +132,7 @@ prepcompress() {
 
 	# Split the include list into directories and files
 	for f in "${include[@]}"; do
-		if [[ -d ${D}${f} ]]; then
+		if [[ -d ${ED}${f} ]]; then
 			incl_d[${#incl_d[@]}]=${f}
 		else
 			incl_f[${#incl_f[@]}]=${f}
@@ -138,15 +142,101 @@ prepcompress() {
 	# Queue up for compression.
 	# ecompress{,dir} doesn't like to be called with empty argument lists.
 	[[ ${#incl_d[@]} -gt 0 ]] && ecompressdir --queue "${incl_d[@]}"
-	[[ ${#incl_f[@]} -gt 0 ]] && ecompress --queue "${incl_f[@]/#/${D}}"
+	[[ ${#incl_f[@]} -gt 0 ]] && ecompress --queue "${incl_f[@]/#/${ED}}"
 	[[ ${#exclude[@]} -gt 0 ]] && ecompressdir --ignore "${exclude[@]}"
 	return 0
 }
 
 install_qa_check() {
-	local f x
+	local f i qa_var x
+	[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+		case "$EAPI" in 0|1|2) local EPREFIX= ED=${D} ;; esac
+
+	cd "${ED}" || die "cd failed"
+
+	# Merge QA_FLAGS_IGNORED and QA_DT_HASH into a single array, since
+	# QA_DT_HASH is deprecated.
+	qa_var="QA_FLAGS_IGNORED_${ARCH/-/_}"
+	eval "[[ -n \${!qa_var} ]] && QA_FLAGS_IGNORED=(\"\${${qa_var}[@]}\")"
+	if [[ ${#QA_FLAGS_IGNORED[@]} -eq 1 ]] ; then
+		local shopts=$-
+		set -o noglob
+		QA_FLAGS_IGNORED=(${QA_FLAGS_IGNORED})
+		set +o noglob
+		set -${shopts}
+	fi
+
+	qa_var="QA_DT_HASH_${ARCH/-/_}"
+	eval "[[ -n \${!qa_var} ]] && QA_DT_HASH=(\"\${${qa_var}[@]}\")"
+	if [[ ${#QA_DT_HASH[@]} -eq 1 ]] ; then
+		local shopts=$-
+		set -o noglob
+		QA_DT_HASH=(${QA_DT_HASH})
+		set +o noglob
+		set -${shopts}
+	fi
+
+	if [[ -n ${QA_DT_HASH} ]] ; then
+		QA_FLAGS_IGNORED=("${QA_FLAGS_IGNORED[@]}" "${QA_DT_HASH[@]}")
+		unset QA_DT_HASH
+	fi
 
-	cd "${D}" || die "cd failed"
+	# Merge QA_STRICT_FLAGS_IGNORED and QA_STRICT_DT_HASH, since
+	# QA_STRICT_DT_HASH is deprecated
+	if [ "${QA_STRICT_FLAGS_IGNORED-unset}" = unset ] && \
+		[ "${QA_STRICT_DT_HASH-unset}" != unset ] ; then
+		QA_STRICT_FLAGS_IGNORED=1
+		unset QA_STRICT_DT_HASH
+	fi
+
+	# Check for files built without respecting *FLAGS. Note that
+	# -frecord-gcc-switches must be in all *FLAGS variables, in
+	# order to avoid false positive results here.
+	# NOTE: This check must execute before prepall/prepstrip, since
+	# prepstrip strips the .GCC.command.line sections.
+	if type -P scanelf > /dev/null && ! has binchecks ${RESTRICT} && \
+		[[ "${CFLAGS}" == *-frecord-gcc-switches* ]] && \
+		[[ "${CXXFLAGS}" == *-frecord-gcc-switches* ]] && \
+		[[ "${FFLAGS}" == *-frecord-gcc-switches* ]] && \
+		[[ "${FCFLAGS}" == *-frecord-gcc-switches* ]] ; then
+		rm -f "${T}"/scanelf-ignored-CFLAGS.log
+		for x in $(scanelf -qyRF '%k %p' -k \!.GCC.command.line "${ED}" | \
+			sed -e "s:\!.GCC.command.line ::") ; do
+			# Separate out file types that are known to support
+			# .GCC.command.line sections, using the `file` command
+			# similar to how prepstrip uses it.
+			f=$(file "${x}") || continue
+			[[ -z ${f} ]] && continue
+			if [[ ${f} == *"SB executable"* ||
+				${f} == *"SB shared object"* ]] ; then
+				echo "${x}" >> "${T}"/scanelf-ignored-CFLAGS.log
+			fi
+		done
+
+		if [[ -f "${T}"/scanelf-ignored-CFLAGS.log ]] ; then
+
+			if [ "${QA_STRICT_FLAGS_IGNORED-unset}" = unset ] ; then
+				for x in "${QA_FLAGS_IGNORED[@]}" ; do
+					sed -e "s#^${x#/}\$##" -i "${T}"/scanelf-ignored-CFLAGS.log
+				done
+			fi
+			# Filter anything under /usr/lib/debug/ in order to avoid
+			# duplicate warnings for splitdebug files.
+			sed -e "s#^usr/lib/debug/.*##" -e "/^\$/d" -e "s#^#/#" \
+				-i "${T}"/scanelf-ignored-CFLAGS.log
+			f=$(<"${T}"/scanelf-ignored-CFLAGS.log)
+			if [[ -n ${f} ]] ; then
+				vecho -ne '\n'
+				eqawarn "${BAD}QA Notice: Files built without respecting CFLAGS have been detected${NORMAL}"
+				eqawarn " Please include the following list of files in your report:"
+				eqawarn "${f}"
+				vecho -ne '\n'
+				sleep 1
+			else
+				rm -f "${T}"/scanelf-ignored-CFLAGS.log
+			fi
+		fi
+	fi
 
 	export STRIP_MASK
 	prepall
@@ -154,9 +244,12 @@ install_qa_check() {
 	ecompressdir --dequeue
 	ecompress --dequeue
 
+	# Prefix specific checks
+	[[ ${ED} != ${D} ]] && install_qa_check_prefix
+
 	f=
 	for x in etc/app-defaults usr/man usr/info usr/X11R6 usr/doc usr/locale ; do
-		[[ -d $D/$x ]] && f+="  $x\n"
+		[[ -d ${ED}/$x ]] && f+="  $x\n"
 	done
 
 	if [[ -n $f ]] ; then
@@ -165,18 +258,30 @@ install_qa_check() {
 		eqawarn "$f"
 	fi
 
+	if [[ -d ${ED}/etc/udev/rules.d ]] ; then
+		f=
+		for x in $(ls "${ED}/etc/udev/rules.d") ; do
+			f+="  etc/udev/rules.d/$x\n"
+		done
+		if [[ -n $f ]] ; then
+			eqawarn "QA Notice: udev rules should be installed in /lib/udev/rules.d:"
+			eqawarn
+			eqawarn "$f"
+		fi
+	fi
+
 	# Now we look for all world writable files.
-	local i
-	for i in $(find "${D}/" -type f -perm -2); do
-		vecho "QA Security Notice:"
-		vecho "- ${i:${#D}:${#i}} will be a world writable file."
+	local unsafe_files=$(find "${ED}" -type f -perm -2 | sed -e "s:^${ED}:- :")
+	if [[ -n ${unsafe_files} ]] ; then
+		vecho "QA Security Notice: world writable file(s):"
+		vecho "${unsafe_files}"
 		vecho "- This may or may not be a security problem, most of the time it is one."
 		vecho "- Please double check that $PF really needs a world writeable bit and file bugs accordingly."
 		sleep 1
-	done
+	fi
 
 	if type -P scanelf > /dev/null && ! has binchecks ${RESTRICT}; then
-		local qa_var insecure_rpath=0 tmp_quiet=${PORTAGE_QUIET}
+		local insecure_rpath=0 tmp_quiet=${PORTAGE_QUIET}
 		local x
 
 		# display warnings when using stricter because we die afterwards
@@ -196,7 +301,7 @@ install_qa_check() {
 		if [[ -n "${ROOT}" && "${ROOT}" != "/" ]]; then
 			forbidden_dirs+=" ${ROOT}"
 		fi
-		local dir l rpath_files=$(scanelf -F '%F:%r' -qBR "${D}")
+		local dir l rpath_files=$(scanelf -F '%F:%r' -qBR "${ED}")
 		f=""
 		for dir in ${forbidden_dirs}; do
 			for l in $(echo "${rpath_files}" | grep -E ":${dir}|::|: "); do
@@ -210,7 +315,7 @@ install_qa_check() {
 
 		# Reject set*id binaries with $ORIGIN in RPATH #260331
 		x=$(
-			find "${D}" -type f \( -perm -u+s -o -perm -g+s \) -print0 | \
+			find "${ED}" -type f \( -perm -u+s -o -perm -g+s \) -print0 | \
 			xargs -0 scanelf -qyRF '%r %p' | grep '$ORIGIN'
 		)
 
@@ -236,7 +341,7 @@ install_qa_check() {
 		[[ -n ${!qa_var} ]] && QA_TEXTRELS=${!qa_var}
 		[[ -n ${QA_STRICT_TEXTRELS} ]] && QA_TEXTRELS=""
 		export QA_TEXTRELS="${QA_TEXTRELS} lib*/modules/*.ko"
-		f=$(scanelf -qyRF '%t %p' "${D}" | grep -v 'usr/lib/debug/')
+		f=$(scanelf -qyRF '%t %p' "${ED}" | grep -v 'usr/lib/debug/')
 		if [[ -n ${f} ]] ; then
 			scanelf -qyRAF '%T %p' "${PORTAGE_BUILDDIR}"/ &> "${T}"/scanelf-textrel.log
 			vecho -ne '\n'
@@ -276,7 +381,7 @@ install_qa_check() {
 					[[ -n ${QA_STRICT_WX_LOAD} ]] && QA_WX_LOAD=""
 					export QA_EXECSTACK="${QA_EXECSTACK} lib*/modules/*.ko"
 					export QA_WX_LOAD="${QA_WX_LOAD} lib*/modules/*.ko"
-					f=$(scanelf -qyRAF '%e %p' "${D}" | grep -v 'usr/lib/debug/')
+					f=$(scanelf -qyRAF '%e %p' "${ED}" | grep -v 'usr/lib/debug/')
 					;;
 			esac
 			;;
@@ -300,26 +405,15 @@ install_qa_check() {
 		fi
 
 		# Check for files built without respecting LDFLAGS
-		if [[ "${LDFLAGS}" == *,--hash-style=gnu* ]] && [[ "${PN}" != *-bin ]] ; then
-			qa_var="QA_DT_HASH_${ARCH/-/_}"
-			eval "[[ -n \${!qa_var} ]] && QA_DT_HASH=(\"\${${qa_var}[@]}\")"
-			f=$(scanelf -qyRF '%k %p' -k .hash "${D}" | sed -e "s:\.hash ::")
+		if [[ "${LDFLAGS}" == *,--hash-style=gnu* ]] && \
+			! has binchecks ${RESTRICT} ; then 
+			f=$(scanelf -qyRF '%k %p' -k .hash "${ED}" | sed -e "s:\.hash ::")
 			if [[ -n ${f} ]] ; then
 				echo "${f}" > "${T}"/scanelf-ignored-LDFLAGS.log
-				if [ "${QA_STRICT_DT_HASH-unset}" == unset ] ; then
-					if [[ ${#QA_DT_HASH[@]} -gt 1 ]] ; then
-						for x in "${QA_DT_HASH[@]}" ; do
-							sed -e "s#^${x#/}\$##" -i "${T}"/scanelf-ignored-LDFLAGS.log
-						done
-					else
-						local shopts=$-
-						set -o noglob
-						for x in ${QA_DT_HASH} ; do
-							sed -e "s#^${x#/}\$##" -i "${T}"/scanelf-ignored-LDFLAGS.log
-						done
-						set +o noglob
-						set -${shopts}
-					fi
+				if [ "${QA_STRICT_FLAGS_IGNORED-unset}" = unset ] ; then
+					for x in "${QA_FLAGS_IGNORED[@]}" ; do
+						sed -e "s#^${x#/}\$##" -i "${T}"/scanelf-ignored-LDFLAGS.log
+					done
 				fi
 				# Filter anything under /usr/lib/debug/ in order to avoid
 				# duplicate warnings for splitdebug files.
@@ -339,39 +433,6 @@ install_qa_check() {
 			fi
 		fi
 
-		# Save NEEDED information after removing self-contained providers
-		rm -f "$PORTAGE_BUILDDIR"/build-info/NEEDED{,.ELF.2}
-		scanelf -qyRF '%a;%p;%S;%r;%n' "${D}" | { while IFS= read -r l; do
-			arch=${l%%;*}; l=${l#*;}
-			obj="/${l%%;*}"; l=${l#*;}
-			soname=${l%%;*}; l=${l#*;}
-			rpath=${l%%;*}; l=${l#*;}; [ "${rpath}" = "  -  " ] && rpath=""
-			needed=${l%%;*}; l=${l#*;}
-			if [ -z "${rpath}" -o -n "${rpath//*ORIGIN*}" ]; then
-				# object doesn't contain $ORIGIN in its runpath attribute
-				echo "${obj} ${needed}"	>> "${PORTAGE_BUILDDIR}"/build-info/NEEDED
-				echo "${arch:3};${obj};${soname};${rpath};${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.ELF.2
-			else
-				dir=${obj%/*}
-				# replace $ORIGIN with the dirname of the current object for the lookup
-				opath=$(echo :${rpath}: | sed -e "s#.*:\(.*\)\$ORIGIN\(.*\):.*#\1${dir}\2#")
-				sneeded=$(echo ${needed} | tr , ' ')
-				rneeded=""
-				for lib in ${sneeded}; do
-					found=0
-					for path in ${opath//:/ }; do
-						[ -e "${D}/${path}/${lib}" ] && found=1 && break
-					done
-					[ "${found}" -eq 0 ] && rneeded="${rneeded},${lib}"
-				done
-				rneeded=${rneeded:1}
-				if [ -n "${rneeded}" ]; then
-					echo "${obj} ${rneeded}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED
-					echo "${arch:3};${obj};${soname};${rpath};${rneeded}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.ELF.2
-				fi
-			fi
-		done }
-
 		if [[ ${insecure_rpath} -eq 1 ]] ; then
 			die "Aborting due to serious QA concerns with RUNPATH/RPATH"
 		elif [[ -n ${die_msg} ]] && has stricter ${FEATURES} ; then
@@ -381,7 +442,7 @@ install_qa_check() {
 		# Check for shared libraries lacking SONAMEs
 		qa_var="QA_SONAME_${ARCH/-/_}"
 		eval "[[ -n \${!qa_var} ]] && QA_SONAME=(\"\${${qa_var}[@]}\")"
-		f=$(scanelf -ByF '%S %p' "${D}"{,usr/}lib*/lib*.so* | gawk '$2 == "" { print }' | sed -e "s:^[[:space:]]${D}:/:")
+		f=$(scanelf -ByF '%S %p' "${ED}"{,usr/}lib*/lib*.so* | gawk '$2 == "" { print }' | sed -e "s:^[[:space:]]${ED}:/:")
 		if [[ -n ${f} ]] ; then
 			echo "${f}" > "${T}"/scanelf-missing-SONAME.log
 			if [[ "${QA_STRICT_SONAME-unset}" == unset ]] ; then
@@ -415,7 +476,7 @@ install_qa_check() {
 		# Check for shared libraries lacking NEEDED entries
 		qa_var="QA_DT_NEEDED_${ARCH/-/_}"
 		eval "[[ -n \${!qa_var} ]] && QA_DT_NEEDED=(\"\${${qa_var}[@]}\")"
-		f=$(scanelf -ByF '%n %p' "${D}"{,usr/}lib*/lib*.so* | gawk '$2 == "" { print }' | sed -e "s:^[[:space:]]${D}:/:")
+		f=$(scanelf -ByF '%n %p' "${ED}"{,usr/}lib*/lib*.so* | gawk '$2 == "" { print }' | sed -e "s:^[[:space:]]${ED}:/:")
 		if [[ -n ${f} ]] ; then
 			echo "${f}" > "${T}"/scanelf-missing-NEEDED.log
 			if [[ "${QA_STRICT_DT_NEEDED-unset}" == unset ]] ; then
@@ -449,7 +510,35 @@ install_qa_check() {
 		PORTAGE_QUIET=${tmp_quiet}
 	fi
 
-	local unsafe_files=$(find "${D}" -type f '(' -perm -2002 -o -perm -4002 ')')
+	# Create NEEDED.ELF.2 regardless of RESTRICT=binchecks, since this info is
+	# too useful not to have (it's required for things like preserve-libs), and
+	# it's tempting for ebuild authors to set RESTRICT=binchecks for packages
+	# containing pre-built binaries.
+	if type -P scanelf > /dev/null ; then
+		# Save NEEDED information after removing self-contained providers
+		rm -f "$PORTAGE_BUILDDIR"/build-info/NEEDED{,.ELF.2}
+		scanelf -qyRF '%a;%p;%S;%r;%n' "${D}" | { while IFS= read -r l; do
+			arch=${l%%;*}; l=${l#*;}
+			obj="/${l%%;*}"; l=${l#*;}
+			soname=${l%%;*}; l=${l#*;}
+			rpath=${l%%;*}; l=${l#*;}; [ "${rpath}" = "  -  " ] && rpath=""
+			needed=${l%%;*}; l=${l#*;}
+			echo "${obj} ${needed}"	>> "${PORTAGE_BUILDDIR}"/build-info/NEEDED
+			echo "${arch:3};${obj};${soname};${rpath};${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.ELF.2
+		done }
+
+		[ -n "${QA_SONAME_NO_SYMLINK}" ] && \
+			echo "${QA_SONAME_NO_SYMLINK}" > \
+			"${PORTAGE_BUILDDIR}"/build-info/QA_SONAME_NO_SYMLINK
+
+		if has binchecks ${RESTRICT} && \
+			[ -s "${PORTAGE_BUILDDIR}/build-info/NEEDED.ELF.2" ] ; then
+			eqawarn "QA Notice: RESTRICT=binchecks prevented checks on these ELF files:"
+			eqawarn "$(while read -r x; do x=${x#*;} ; x=${x%%;*} ; echo "${x#${EPREFIX}}" ; done < "${PORTAGE_BUILDDIR}"/build-info/NEEDED.ELF.2)"
+		fi
+	fi
+
+	local unsafe_files=$(find "${ED}" -type f '(' -perm -2002 -o -perm -4002 ')' | sed -e "s:^${ED}:/:")
 	if [[ -n ${unsafe_files} ]] ; then
 		eqawarn "QA Notice: Unsafe files detected (set*id and world writable)"
 		eqawarn "${unsafe_files}"
@@ -469,8 +558,8 @@ install_qa_check() {
 	# Sanity check syntax errors in init.d scripts
 	local d
 	for d in /etc/conf.d /etc/init.d ; do
-		[[ -d ${D}/${d} ]] || continue
-		for i in "${D}"/${d}/* ; do
+		[[ -d ${ED}/${d} ]] || continue
+		for i in "${ED}"/${d}/* ; do
 			[[ -L ${i} ]] && continue
 			# if empty conf.d/init.d dir exists (baselayout), then i will be "/etc/conf.d/*" and not exist
 			[[ ! -e ${i} ]] && continue
@@ -478,20 +567,27 @@ install_qa_check() {
 		done
 	done
 
+	# Look for leaking LDFLAGS into pkg-config files
+	f=$(egrep -sH '^Libs.*-Wl,(-O[012]|--hash-style)' "${ED}"/usr/*/pkgconfig/*.pc)
+	if [[ -n ${f} ]] ; then
+		eqawarn "QA Notice: pkg-config files with wrong LDFLAGS detected:"
+		eqawarn "${f//${D}}"
+	fi
+
 	# this should help to ensure that all (most?) shared libraries are executable
 	# and that all libtool scripts / static libraries are not executable
 	local j
-	for i in "${D}"opt/*/lib{,32,64} \
-	         "${D}"lib{,32,64}       \
-	         "${D}"usr/lib{,32,64}   \
-	         "${D}"usr/X11R6/lib{,32,64} ; do
+	for i in "${ED}"opt/*/lib{,32,64} \
+	         "${ED}"lib{,32,64}       \
+	         "${ED}"usr/lib{,32,64}   \
+	         "${ED}"usr/X11R6/lib{,32,64} ; do
 		[[ ! -d ${i} ]] && continue
 
 		for j in "${i}"/*.so.* "${i}"/*.so ; do
 			[[ ! -e ${j} ]] && continue
 			[[ -L ${j} ]] && continue
 			[[ -x ${j} ]] && continue
-			vecho "making executable: ${j#${D}}"
+			vecho "making executable: ${j#${ED}}"
 			chmod +x "${j}"
 		done
 
@@ -499,7 +595,7 @@ install_qa_check() {
 			[[ ! -e ${j} ]] && continue
 			[[ -L ${j} ]] && continue
 			[[ ! -x ${j} ]] && continue
-			vecho "removing executable bit: ${j#${D}}"
+			vecho "removing executable bit: ${j#${ED}}"
 			chmod -x "${j}"
 		done
 
@@ -523,7 +619,7 @@ install_qa_check() {
 	# http://bugs.gentoo.org/4411
 	abort="no"
 	local a s
-	for a in "${D}"usr/lib*/*.a ; do
+	for a in "${ED}"usr/lib*/*.a ; do
 		s=${a%.a}.so
 		if [[ ! -e ${s} ]] ; then
 			s=${s%usr/*}${s##*/usr/}
@@ -537,7 +633,7 @@ install_qa_check() {
 	[[ ${abort} == "yes" ]] && die "add those ldscripts"
 
 	# Make sure people don't store libtool files or static libs in /lib
-	f=$(ls "${D}"lib*/*.{a,la} 2>/dev/null)
+	f=$(ls "${ED}"lib*/*.{a,la} 2>/dev/null)
 	if [[ -n ${f} ]] ; then
 		vecho -ne '\n'
 		eqawarn "QA Notice: Excessive files found in the / partition"
@@ -548,9 +644,9 @@ install_qa_check() {
 
 	# Verify that the libtool files don't contain bogus $D entries.
 	local abort=no gentoo_bug=no always_overflow=no
-	for a in "${D}"usr/lib*/*.la ; do
+	for a in "${ED}"usr/lib*/*.la ; do
 		s=${a##*/}
-		if grep -qs "${D}" "${a}" ; then
+		if grep -qs "${ED}" "${a}" ; then
 			vecho -ne '\n'
 			eqawarn "QA Notice: ${s} appears to contain PORTAGE_TMPDIR paths"
 			abort="yes"
@@ -621,8 +717,8 @@ install_qa_check() {
 				#esac
 				if [[ $always_overflow = yes ]] ; then
 					eerror
-					eerror "QA Notice: Package has poor programming practices which may compile"
-					eerror "           fine but exhibit random runtime failures."
+					eerror "QA Notice: Package triggers severe warnings which indicate that it"
+					eerror "           may exhibit random runtime failures."
 					eerror
 					eerror "${f}"
 					eerror
@@ -631,8 +727,8 @@ install_qa_check() {
 					eerror
 				else
 					vecho -ne '\n'
-					eqawarn "QA Notice: Package has poor programming practices which may compile"
-					eqawarn "           fine but exhibit random runtime failures."
+					eqawarn "QA Notice: Package triggers severe warnings which indicate that it"
+					eqawarn "           may exhibit random runtime failures."
 					eqawarn "${f}"
 					vecho -ne '\n'
 				fi
@@ -658,8 +754,8 @@ install_qa_check() {
 
 			if [[ $gentoo_bug = yes ]] ; then
 				eerror
-				eerror "QA Notice: Package has poor programming practices which may compile"
-				eerror "           but will almost certainly crash on 64bit architectures."
+				eerror "QA Notice: Package triggers severe warnings which indicate that it"
+				eerror "           will almost certainly crash on 64bit architectures."
 				eerror
 				eerror "${f}"
 				eerror
@@ -668,8 +764,8 @@ install_qa_check() {
 				eerror
 			else
 				vecho -ne '\n'
-				eqawarn "QA Notice: Package has poor programming practices which may compile"
-				eqawarn "           but will almost certainly crash on 64bit architectures."
+				eqawarn "QA Notice: Package triggers severe warnings which indicate that it"
+				eqawarn "           will almost certainly crash on 64bit architectures."
 				eqawarn "${f}"
 				vecho -ne '\n'
 			fi
@@ -678,7 +774,7 @@ install_qa_check() {
 		if [[ ${abort} == "yes" ]] ; then
 			if [[ $gentoo_bug = yes || $always_overflow = yes ]] ; then
 				die "install aborted due to" \
-					"poor programming practices shown above"
+					"severe warnings shown above"
 			else
 				echo "Please do not file a Gentoo bug and instead" \
 				"report the above QA issues directly to the upstream" \
@@ -686,13 +782,13 @@ install_qa_check() {
 				while read -r line ; do eqawarn "${line}" ; done
 				eqawarn "Homepage: ${HOMEPAGE}"
 				has stricter ${FEATURES} && die "install aborted due to" \
-					"poor programming practices shown above"
+					"severe warnings shown above"
 			fi
 		fi
 	fi
 
 	# Portage regenerates this on the installed system.
-	rm -f "${D}"/usr/share/info/dir{,.gz,.bz2}
+	rm -f "${ED}"/usr/share/info/dir{,.gz,.bz2} || die "rm failed!"
 
 	if has multilib-strict ${FEATURES} && \
 	   [[ -x /usr/bin/file && -x /usr/bin/find ]] && \
@@ -701,15 +797,15 @@ install_qa_check() {
 		local abort=no dir file firstrun=yes
 		MULTILIB_STRICT_EXEMPT=$(echo ${MULTILIB_STRICT_EXEMPT} | sed -e 's:\([(|)]\):\\\1:g')
 		for dir in ${MULTILIB_STRICT_DIRS} ; do
-			[[ -d ${D}/${dir} ]] || continue
-			for file in $(find ${D}/${dir} -type f | grep -v "^${D}/${dir}/${MULTILIB_STRICT_EXEMPT}"); do
+			[[ -d ${ED}/${dir} ]] || continue
+			for file in $(find ${ED}/${dir} -type f | grep -v "^${ED}/${dir}/${MULTILIB_STRICT_EXEMPT}"); do
 				if file ${file} | egrep -q "${MULTILIB_STRICT_DENY}" ; then
 					if [[ ${firstrun} == yes ]] ; then
 						echo "Files matching a file type that is not allowed:"
 						firstrun=no
 					fi
 					abort=yes
-					echo "   ${file#${D}//}"
+					echo "   ${file#${ED}//}"
 				fi
 			done
 		done
@@ -718,7 +814,7 @@ install_qa_check() {
 
 	# ensure packages don't install systemd units automagically
 	if ! has systemd ${INHERITED} && \
-		[[ -d "${D}"/lib/systemd/system ]]
+		[[ -d "${ED}"/lib/systemd/system ]]
 	then
 		eqawarn "QA Notice: package installs systemd unit files (/lib/systemd/system)"
 		eqawarn "           but does not inherit systemd.eclass."
@@ -727,6 +823,124 @@ install_qa_check() {
 	fi
 }
 
+install_qa_check_prefix() {
+	if [[ -d ${ED}/${D} ]] ; then
+		find "${ED}/${D}" | \
+		while read i ; do
+			eqawarn "QA Notice: /${i##${ED}/${D}} installed in \${ED}/\${D}"
+		done
+		die "Aborting due to QA concerns: files installed in ${ED}/${D}"
+	fi
+
+	if [[ -d ${ED}/${EPREFIX} ]] ; then
+		find "${ED}/${EPREFIX}/" | \
+		while read i ; do
+			eqawarn "QA Notice: ${i#${D}} double prefix"
+		done
+		die "Aborting due to QA concerns: double prefix files installed"
+	fi
+
+	if [[ -d ${D} ]] ; then
+		INSTALLTOD=$(find ${D%/} | egrep -v "^${ED}" | sed -e "s|^${D%/}||" | awk '{if (length($0) <= length("'"${EPREFIX}"'")) { if (substr("'"${EPREFIX}"'", 1, length($0)) != $0) {print $0;} } else if (substr($0, 1, length("'"${EPREFIX}"'")) != "'"${EPREFIX}"'") {print $0;} }')
+		if [[ -n ${INSTALLTOD} ]] ; then
+			eqawarn "QA Notice: the following files are outside of the prefix:"
+			eqawarn "${INSTALLTOD}"
+			die "Aborting due to QA concerns: there are files installed outside the prefix"
+		fi
+	fi
+
+	# all further checks rely on ${ED} existing
+	[[ -d ${ED} ]] || return
+
+	# this does not really belong here, but it's closely tied to
+	# the code below; many runscripts generate positives here, and we
+	# know they don't work (bug #196294) so as long as that one
+	# remains an issue, simply remove them as they won't work anyway.
+	# Keep etc/init.d/functions.sh from being thrown away, though.
+	if [[ ( -d "${ED}"/etc/conf.d || -d "${ED}"/etc/init.d ) && ! -f "${ED}"/etc/init.d/functions.sh ]] ; then
+		ewarn "removed /etc/init.d and /etc/conf.d directories until bug #196294 has been resolved"
+		rm -Rf "${ED}"/etc/{conf,init}.d
+	fi
+
+	# check shebangs, bug #282539
+	rm -f "${T}"/non-prefix-shebangs-errs
+	local WHITELIST=" /usr/bin/env "
+	# this is hell expensive, but how else?
+	find "${ED}" -executable \! -type d -print0 \
+			| xargs -0 grep -H -n -m1 "^#!" \
+			| while read f ;
+	do
+		local fn=${f%%:*}
+		local pos=${f#*:} ; pos=${pos%:*}
+		local line=${f##*:}
+		# shebang always appears on the first line ;)
+		[[ ${pos} != 1 ]] && continue
+		local oldIFS=${IFS}
+		IFS=$'\r'$'\n'$'\t'" "
+		line=( ${line#"#!"} )
+		IFS=${oldIFS}
+		[[ ${WHITELIST} == *" ${line[0]} "* ]] && continue
+		local fp=${fn#${D}} ; fp=/${fp%/*}
+		# line[0] can be an absolutised path, bug #342929
+		local eprefix=$(canonicalize ${EPREFIX})
+		local rf=${fn}
+		# in case we deal with a symlink, make sure we don't replace it
+		# with a real file (sed -i does that)
+		if [[ -L ${fn} ]] ; then
+			rf=$(readlink ${fn})
+			[[ ${rf} != /* ]] && rf=${fn%/*}/${rf}
+			# ignore symlinks pointing to outside prefix
+			# as seen in sys-devel/native-cctools
+			[[ $(canonicalize "/${rf#${D}}") != ${eprefix}/* ]] && continue
+		fi
+		# does the shebang start with ${EPREFIX}, and does it exist?
+		if [[ ${line[0]} == ${EPREFIX}/* || ${line[0]} == ${eprefix}/* ]] ; then
+			if [[ ! -e ${ROOT%/}${line[0]} && ! -e ${D%/}${line[0]} ]] ; then
+				# hmm, refers explicitly to $EPREFIX, but doesn't exist,
+				# if it's in PATH that's wrong in any case
+				if [[ ":${PATH}:" == *":${fp}:"* ]] ; then
+					echo "${fn#${D}}:${line[0]} (explicit EPREFIX but target not found)" \
+						>> "${T}"/non-prefix-shebangs-errs
+				else
+					eqawarn "${fn#${D}} has explicit EPREFIX in shebang but target not found (${line[0]})"
+				fi
+			fi
+			continue
+		fi
+		# unprefixed shebang, is the script directly in $PATH?
+		if [[ ":${PATH}:" == *":${fp}:"* ]] ; then
+			if [[ -e ${EROOT}${line[0]} || -e ${ED}${line[0]} ]] ; then
+				# is it unprefixed, but we can just fix it because a
+				# prefixed variant exists
+				eqawarn "prefixing shebang of ${fn#${D}}"
+				# statement is made idempotent on purpose, because
+				# symlinks may point to the same target, and hence the
+				# same real file may be sedded multiple times since we
+				# read the shebangs in one go upfront for performance
+				# reasons
+				sed -i -e '1s:^#! \?'"${line[0]}"':#!'"${EPREFIX}"${line[0]}':' "${rf}"
+				continue
+			else
+				# this is definitely wrong: script in $PATH and invalid shebang
+				echo "${fn#${D}}:${line[0]} (script ${fn##*/} installed in PATH but interpreter ${line[0]} not found)" \
+					>> "${T}"/non-prefix-shebangs-errs
+			fi
+		else
+			# unprefixed/invalid shebang, but outside $PATH, this may be
+			# intended (e.g. config.guess) so remain silent by default
+			has stricter ${FEATURES} && \
+				eqawarn "invalid shebang in ${fn#${D}}: ${line[0]}"
+		fi
+	done
+	if [[ -e "${T}"/non-prefix-shebangs-errs ]] ; then
+		eqawarn "QA Notice: the following files use invalid (possible non-prefixed) shebangs:"
+		while read line ; do
+			eqawarn "  ${line}"
+		done < "${T}"/non-prefix-shebangs-errs
+		rm -f "${T}"/non-prefix-shebangs-errs
+		die "Aborting due to QA concerns: invalid shebangs found"
+	fi
+}
 
 install_mask() {
 	local root="$1"
@@ -758,6 +972,9 @@ preinst_mask() {
 		 return 1
 	fi
 
+	[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+		case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+
 	# Make sure $PWD is not ${D} so that we don't leave gmon.out files
 	# in there in case any tools were built with -pg in CFLAGS.
 	cd "${T}"
@@ -770,11 +987,11 @@ preinst_mask() {
 		fi
 	done
 
-	install_mask "${D}" "${INSTALL_MASK}"
+	install_mask "${ED}" "${INSTALL_MASK}"
 
 	# remove share dir if unnecessary
 	if has nodoc $FEATURES || has noman $FEATURES || has noinfo $FEATURES; then
-		rmdir "${D}usr/share" &> /dev/null
+		rmdir "${ED}usr/share" &> /dev/null
 	fi
 }
 
@@ -783,29 +1000,33 @@ preinst_sfperms() {
 		 eerror "${FUNCNAME}: D is unset"
 		 return 1
 	fi
+
+	[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+		case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+
 	# Smart FileSystem Permissions
 	if has sfperms $FEATURES; then
 		local i
-		find "${D}" -type f -perm -4000 -print0 | \
+		find "${ED}" -type f -perm -4000 -print0 | \
 		while read -r -d $'\0' i ; do
 			if [ -n "$(find "$i" -perm -2000)" ] ; then
-				ebegin ">>> SetUID and SetGID: [chmod o-r] /${i#${D}}"
+				ebegin ">>> SetUID and SetGID: [chmod o-r] /${i#${ED}}"
 				chmod o-r "$i"
 				eend $?
 			else
-				ebegin ">>> SetUID: [chmod go-r] /${i#${D}}"
+				ebegin ">>> SetUID: [chmod go-r] /${i#${ED}}"
 				chmod go-r "$i"
 				eend $?
 			fi
 		done
-		find "${D}" -type f -perm -2000 -print0 | \
+		find "${ED}" -type f -perm -2000 -print0 | \
 		while read -r -d $'\0' i ; do
 			if [ -n "$(find "$i" -perm -4000)" ] ; then
 				# This case is already handled
 				# by the SetUID check above.
 				true
 			else
-				ebegin ">>> SetGID: [chmod o-r] /${i#${D}}"
+				ebegin ">>> SetGID: [chmod o-r] /${i#${ED}}"
 				chmod o-r "$i"
 				eend $?
 			fi
@@ -818,6 +1039,10 @@ preinst_suid_scan() {
 		 eerror "${FUNCNAME}: D is unset"
 		 return 1
 	fi
+
+	[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+		case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+
 	# total suid control.
 	if has suidctl $FEATURES; then
 		local i sfconf x
@@ -826,10 +1051,10 @@ preinst_suid_scan() {
 		# to files outside of the sandbox, but this
 		# can easily be bypassed using the addwrite() function
 		addwrite "${sfconf}"
-		vecho ">>> Performing suid scan in ${D}"
-		for i in $(find "${D}" -type f \( -perm -4000 -o -perm -2000 \) ); do
+		vecho ">>> Performing suid scan in ${ED}"
+		for i in $(find "${ED}" -type f \( -perm -4000 -o -perm -2000 \) ); do
 			if [ -s "${sfconf}" ]; then
-				install_path=/${i#${D}}
+				install_path=/${i#${ED}}
 				if grep -q "^${install_path}\$" "${sfconf}" ; then
 					vecho "- ${install_path} is an approved suid file"
 				else
@@ -839,7 +1064,7 @@ preinst_suid_scan() {
 					chmod ugo-s "${i}"
 					grep "^#${install_path}$" "${sfconf}" > /dev/null || {
 						vecho ">>> Appending commented out entry to ${sfconf} for ${PF}"
-						echo "## ${ls_ret%${D}*}${install_path}" >> "${sfconf}"
+						echo "## ${ls_ret%${ED}*}${install_path}" >> "${sfconf}"
 						echo "#${install_path}" >> "${sfconf}"
 						# no delwrite() eh?
 						# delwrite ${sconf}
@@ -861,13 +1086,15 @@ preinst_selinux_labels() {
 		# SELinux file labeling (needs to always be last in dyn_preinst)
 		# only attempt to label if setfiles is executable
 		# and 'context' is available on selinuxfs.
-		if [ -f /selinux/context -a -x /usr/sbin/setfiles -a -x /usr/sbin/selinuxconfig ]; then
+		if [ -f /selinux/context -o -f /sys/fs/selinux/context ] && \
+			[ -x /usr/sbin/setfiles -a -x /usr/sbin/selinuxconfig ]; then
 			vecho ">>> Setting SELinux security labels"
 			(
 				eval "$(/usr/sbin/selinuxconfig)" || \
 					die "Failed to determine SELinux policy paths.";
 	
-				addwrite /selinux/context;
+				addwrite /selinux/context
+				addwrite /sys/fs/selinux/context
 	
 				/usr/sbin/setfiles "${file_contexts_path}" -r "${D}" "${D}"
 			) || die "Failed to set SELinux security labels."
@@ -880,10 +1107,30 @@ preinst_selinux_labels() {
 }
 
 dyn_package() {
+	local PROOT
+
+	[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+		case "$EAPI" in 0|1|2) local EPREFIX= ED=${D} ;; esac
+
 	# Make sure $PWD is not ${D} so that we don't leave gmon.out files
 	# in there in case any tools were built with -pg in CFLAGS.
+
 	cd "${T}"
-	install_mask "${PORTAGE_BUILDDIR}/image" "${PKG_INSTALL_MASK}"
+
+	if [[ -n ${PKG_INSTALL_MASK} ]] ; then
+		PROOT=${T}/packaging/
+	# make a temporary copy of ${D} so that any binpkg-specific
+	# modifications we make do not influence the actual installed image.
+		rm -rf "${PROOT}" || die "failed removing stale package tree"
+		cp -pPR $(cp --help | grep -qs -e-l && echo -l) \
+			"${D}" "${PROOT}" \
+			|| die "failed creating packaging tree"
+
+		install_mask "${PROOT%/}${EPREFIX}/" "${PKG_INSTALL_MASK}"
+	else
+		PROOT=${D}
+	fi
+
 	local tar_options=""
 	[[ $PORTAGE_VERBOSE = 1 ]] && tar_options+=" -v"
 	# Sandbox is disabled in case the user wants to use a symlink
@@ -892,7 +1139,7 @@ dyn_package() {
 	[ -z "${PORTAGE_BINPKG_TMPFILE}" ] && \
 		die "PORTAGE_BINPKG_TMPFILE is unset"
 	mkdir -p "${PORTAGE_BINPKG_TMPFILE%/*}" || die "mkdir failed"
-	tar $tar_options -cf - $PORTAGE_BINPKG_TAR_OPTS -C "${D}" . | \
+	tar $tar_options -cf - $PORTAGE_BINPKG_TAR_OPTS -C "${PROOT}" . | \
 		$PORTAGE_BZIP2_COMMAND -c > "$PORTAGE_BINPKG_TMPFILE"
 	assert "failed to pack binary package: '$PORTAGE_BINPKG_TMPFILE'"
 	PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
@@ -913,6 +1160,9 @@ dyn_package() {
 	[ -n "${md5_hash}" ] && \
 		echo ${md5_hash} > "${PORTAGE_BUILDDIR}"/build-info/BINPKGMD5
 	vecho ">>> Done."
+
+	# cleanup our temp tree
+	[[ -n ${PKG_INSTALL_MASK} ]] && rm -rf "${PROOT}"
 	cd "${PORTAGE_BUILDDIR}"
 	>> "$PORTAGE_BUILDDIR/.packaged" || \
 		die "Failed to create $PORTAGE_BUILDDIR/.packaged"
@@ -957,10 +1207,14 @@ __END1__
 }
 
 dyn_rpm() {
+
+	[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+		case "$EAPI" in 0|1|2) local EPREFIX= ;; esac
+
 	cd "${T}" || die "cd failed"
 	local machine_name=$(uname -m)
-	local dest_dir=/usr/src/rpm/RPMS/${machine_name}
-	addwrite /usr/src/rpm
+	local dest_dir=${EPREFIX}/usr/src/rpm/RPMS/${machine_name}
+	addwrite ${EPREFIX}/usr/src/rpm
 	addwrite "${RPMDIR}"
 	dyn_spec
 	rpmbuild -bb --clean --rmsource "${PF}.spec" || die "Failed to integrate rpm spec file"
@@ -985,6 +1239,21 @@ success_hooks() {
 	done
 }
 
+install_hooks() {
+	local hooks_dir="${PORTAGE_CONFIGROOT}etc/portage/hooks/install"
+	local fp
+	local ret=0
+	shopt -s nullglob
+	for fp in "${hooks_dir}"/*; do
+		if [ -x "$fp" ]; then
+			"$fp"
+			ret=$(( $ret | $? ))
+		fi
+	done
+	shopt -u nullglob
+	return $ret
+}
+
 if [ -n "${MISC_FUNCTIONS_ARGS}" ]; then
 	source_all_bashrcs
 	[ "$PORTAGE_DEBUG" == "1" ] && set -x

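As an illustration of the new install_hooks() above, a hypothetical hook script
(the file name and what it logs are made up; the contract in the code is only
that executable files under ${PORTAGE_CONFIGROOT}etc/portage/hooks/install are
run in glob order, with non-zero exits OR'ed into the return value):

	#!/bin/bash
	# Saved as /etc/portage/hooks/install/50-log-image and marked executable.
	# Runs as a child of misc-functions.sh, so exported ebuild variables
	# such as ${CATEGORY}, ${PF} and ${T} are assumed to be visible.
	echo "install phase finished for ${CATEGORY}/${PF}" >> "${T}/install-hooks.log"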
diff --git a/portage_with_autodep/bin/phase-functions.sh b/portage_with_autodep/bin/phase-functions.sh
new file mode 100755
index 0000000..ce251ce
--- /dev/null
+++ b/portage_with_autodep/bin/phase-functions.sh
@@ -0,0 +1,1001 @@
+#!/bin/bash
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Hardcoded bash lists are needed for backward compatibility with
+# <portage-2.1.4 since they assume that a newly installed version
+# of ebuild.sh will work for pkg_postinst, pkg_prerm, and pkg_postrm
+# when portage is upgrading itself.
+
+PORTAGE_READONLY_METADATA="DEFINED_PHASES DEPEND DESCRIPTION
+	EAPI HOMEPAGE INHERITED IUSE REQUIRED_USE KEYWORDS LICENSE
+	PDEPEND PROVIDE RDEPEND REPOSITORY RESTRICT SLOT SRC_URI"
+
+PORTAGE_READONLY_VARS="D EBUILD EBUILD_PHASE \
+	EBUILD_SH_ARGS ECLASSDIR EMERGE_FROM FILESDIR MERGE_TYPE \
+	PM_EBUILD_HOOK_DIR \
+	PORTAGE_ACTUAL_DISTDIR PORTAGE_ARCHLIST PORTAGE_BASHRC  \
+	PORTAGE_BINPKG_FILE PORTAGE_BINPKG_TAR_OPTS PORTAGE_BINPKG_TMPFILE \
+	PORTAGE_BIN_PATH PORTAGE_BUILDDIR PORTAGE_BUNZIP2_COMMAND \
+	PORTAGE_BZIP2_COMMAND PORTAGE_COLORMAP PORTAGE_CONFIGROOT \
+	PORTAGE_DEBUG PORTAGE_DEPCACHEDIR PORTAGE_EBUILD_EXIT_FILE \
+	PORTAGE_GID PORTAGE_GRPNAME PORTAGE_INST_GID PORTAGE_INST_UID \
+	PORTAGE_IPC_DAEMON PORTAGE_IUSE PORTAGE_LOG_FILE \
+	PORTAGE_MUTABLE_FILTERED_VARS PORTAGE_OVERRIDE_EPREFIX \
+	PORTAGE_PYM_PATH PORTAGE_PYTHON \
+	PORTAGE_READONLY_METADATA PORTAGE_READONLY_VARS \
+	PORTAGE_REPO_NAME PORTAGE_RESTRICT \
+	PORTAGE_SAVED_READONLY_VARS PORTAGE_SIGPIPE_STATUS \
+	PORTAGE_TMPDIR PORTAGE_UPDATE_ENV PORTAGE_USERNAME \
+	PORTAGE_VERBOSE PORTAGE_WORKDIR_MODE PORTDIR PORTDIR_OVERLAY \
+	PROFILE_PATHS REPLACING_VERSIONS REPLACED_BY_VERSION T WORKDIR \
+	__PORTAGE_TEST_HARDLINK_LOCKS"
+
+PORTAGE_SAVED_READONLY_VARS="A CATEGORY P PF PN PR PV PVR"
+
+# Variables that portage sets but doesn't mark readonly.
+# In order to prevent changed values from causing unexpected
+# interference, they are filtered out of the environment when
+# it is saved or loaded (any mutations do not persist).
+PORTAGE_MUTABLE_FILTERED_VARS="AA HOSTNAME"
+
+# @FUNCTION: filter_readonly_variables
+# @DESCRIPTION: [--filter-sandbox] [--allow-extra-vars]
+# Read an environment from stdin and echo to stdout while filtering variables
+# with names that are known to cause interference:
+#
+#   * some specific variables for which bash does not allow assignment
+#   * some specific variables that affect portage or sandbox behavior
+#   * variable names that begin with a digit or that contain any
+#     non-alphanumeric characters that are not supported by bash
+#
+# --filter-sandbox causes all SANDBOX_* variables to be filtered, which
+# is only desired in certain cases, such as during preprocessing or when
+# saving environment.bz2 for a binary or installed package.
+#
+# --filter-features causes the special FEATURES variable to be filtered.
+# Generally, we want it to persist between phases since the user might
+# want to modify it via bashrc to enable things like splitdebug and
+# installsources for specific packages. They should be able to modify it
+# in pre_pkg_setup() and have it persist all the way through the install
+# phase. However, if FEATURES exists inside environment.bz2 then it
+# should be overridden by the current settings.
+#
+# --filter-locale causes locale related variables such as LANG and LC_*
+# variables to be filtered. These variables should persist between phases,
+# in case they are modified by the ebuild. However, the current user
+# settings should be used when loading the environment from a binary or
+# installed package.
+#
+# --filter-path causes the PATH variable to be filtered. This variable
+# should persist between phases, in case it is modified by the ebuild.
+# However, old settings should be overridden when loading the
+# environment from a binary or installed package.
+#
+# --allow-extra-vars causes some extra vars to be allowed through, such
+# as ${PORTAGE_SAVED_READONLY_VARS} and ${PORTAGE_MUTABLE_FILTERED_VARS}.
+# This is enabled automatically if EMERGE_FROM=binary, since it preserves
+# variables from when the package was originally built.
+#
+# In bash-3.2_p20+ an attempt to assign BASH_*, FUNCNAME, GROUPS or any
+# readonly variable causes the shell to exit while executing the "source"
+# builtin command. To avoid this problem, this function filters those
+# variables out and discards them. See bug #190128.
+filter_readonly_variables() {
+	local x filtered_vars
+	local readonly_bash_vars="BASHOPTS BASHPID DIRSTACK EUID
+		FUNCNAME GROUPS PIPESTATUS PPID SHELLOPTS UID"
+	local bash_misc_vars="BASH BASH_.* COLUMNS COMP_WORDBREAKS HISTCMD
+		HISTFILE HOSTNAME HOSTTYPE IFS LINENO MACHTYPE OLDPWD
+		OPTERR OPTIND OSTYPE POSIXLY_CORRECT PS4 PWD RANDOM
+		SECONDS SHELL SHLVL _"
+	local filtered_sandbox_vars="SANDBOX_ACTIVE SANDBOX_BASHRC
+		SANDBOX_DEBUG_LOG SANDBOX_DISABLED SANDBOX_LIB
+		SANDBOX_LOG SANDBOX_ON"
+	# Untrusted due to possible application of package renames to binpkgs
+	local binpkg_untrusted_vars="CATEGORY P PF PN PR PV PVR"
+	local misc_garbage_vars="_portage_filter_opts"
+	filtered_vars="$readonly_bash_vars $bash_misc_vars
+		$PORTAGE_READONLY_VARS $misc_garbage_vars"
+
+	# Don't filter/interfere with prefix variables unless they are
+	# supported by the current EAPI.
+	case "${EAPI:-0}" in
+		0|1|2)
+			[[ " ${FEATURES} " == *" force-prefix "* ]] && \
+				filtered_vars+=" ED EPREFIX EROOT"
+			;;
+		*)
+			filtered_vars+=" ED EPREFIX EROOT"
+			;;
+	esac
+
+	if has --filter-sandbox $* ; then
+		filtered_vars="${filtered_vars} SANDBOX_.*"
+	else
+		filtered_vars="${filtered_vars} ${filtered_sandbox_vars}"
+	fi
+	if has --filter-features $* ; then
+		filtered_vars="${filtered_vars} FEATURES PORTAGE_FEATURES"
+	fi
+	if has --filter-path $* ; then
+		filtered_vars+=" PATH"
+	fi
+	if has --filter-locale $* ; then
+		filtered_vars+=" LANG LC_ALL LC_COLLATE
+			LC_CTYPE LC_MESSAGES LC_MONETARY
+			LC_NUMERIC LC_PAPER LC_TIME"
+	fi
+	if ! has --allow-extra-vars $* ; then
+		if [ "${EMERGE_FROM}" = binary ] ; then
+			# preserve additional variables from build time,
+			# while excluding untrusted variables
+			filtered_vars+=" ${binpkg_untrusted_vars}"
+		else
+			filtered_vars+=" ${PORTAGE_SAVED_READONLY_VARS}"
+			filtered_vars+=" ${PORTAGE_MUTABLE_FILTERED_VARS}"
+		fi
+	fi
+
+	"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}"/filter-bash-environment.py "${filtered_vars}" || die "filter-bash-environment.py failed"
+}
+
+# @FUNCTION: preprocess_ebuild_env
+# @DESCRIPTION:
+# Filter any readonly variables from ${T}/environment, source it, and then
+# save it via save_ebuild_env(). This process should be sufficient to prevent
+# any stale variables or functions from an arbitrary environment from
+# interfering with the current environment. This is useful when an existing
+# environment needs to be loaded from a binary or installed package.
+preprocess_ebuild_env() {
+	local _portage_filter_opts="--filter-features --filter-locale --filter-path --filter-sandbox"
+
+	# If environment.raw is present, this is a signal from the python side,
+	# indicating that the environment may contain stale FEATURES and
+	# SANDBOX_{DENY,PREDICT,READ,WRITE} variables that should be filtered out.
+	# Otherwise, we don't need to filter the environment.
+	[ -f "${T}/environment.raw" ] || return 0
+
+	filter_readonly_variables $_portage_filter_opts < "${T}"/environment \
+		>> "$T/environment.filtered" || return $?
+	unset _portage_filter_opts
+	mv "${T}"/environment.filtered "${T}"/environment || return $?
+	rm -f "${T}/environment.success" || return $?
+	# WARNING: Code inside this subshell should avoid making assumptions
+	# about variables or functions after source "${T}"/environment has been
+	# called. Any variables that need to be relied upon should already be
+	# filtered out above.
+	(
+		export SANDBOX_ON=1
+		source "${T}/environment" || exit $?
+		# We have to temporarily disable sandbox since the
+		# SANDBOX_{DENY,READ,PREDICT,WRITE} values we've just loaded
+		# may be unusable (triggering spurious sandbox violations)
+		# until we've merged them with our current values.
+		export SANDBOX_ON=0
+
+		# It's remotely possible that save_ebuild_env() has been overridden
+		# by the above source command. To protect ourselves, we override it
+		# here with our own version. ${PORTAGE_BIN_PATH} is safe to use here
+		# because it's already filtered above.
+		source "${PORTAGE_BIN_PATH}/save-ebuild-env.sh" || exit $?
+
+		# Rely on save_ebuild_env() to filter out any remaining variables
+		# and functions that could interfere with the current environment.
+		save_ebuild_env || exit $?
+		>> "$T/environment.success" || exit $?
+	) > "${T}/environment.filtered"
+	local retval
+	if [ -e "${T}/environment.success" ] ; then
+		filter_readonly_variables --filter-features < \
+			"${T}/environment.filtered" > "${T}/environment"
+		retval=$?
+	else
+		retval=1
+	fi
+	rm -f "${T}"/environment.{filtered,raw,success}
+	return ${retval}
+}
+
+ebuild_phase() {
+	declare -F "$1" >/dev/null && qa_call $1
+}
+
+ebuild_phase_with_hooks() {
+	local x phase_name=${1}
+	for x in {pre_,,post_}${phase_name} ; do
+		ebuild_phase ${x}
+	done
+}
+
+dyn_pretend() {
+	if [[ -e $PORTAGE_BUILDDIR/.pretended ]] ; then
+		vecho ">>> It appears that '$PF' is already pretended; skipping."
+		vecho ">>> Remove '$PORTAGE_BUILDDIR/.pretended' to force pretend."
+		return 0
+	fi
+	ebuild_phase pre_pkg_pretend
+	ebuild_phase pkg_pretend
+	>> "$PORTAGE_BUILDDIR/.pretended" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.pretended"
+	ebuild_phase post_pkg_pretend
+}
+
+dyn_setup() {
+	if [[ -e $PORTAGE_BUILDDIR/.setuped ]] ; then
+		vecho ">>> It appears that '$PF' is already setup; skipping."
+		vecho ">>> Remove '$PORTAGE_BUILDDIR/.setuped' to force setup."
+		return 0
+	fi
+	ebuild_phase pre_pkg_setup
+	ebuild_phase pkg_setup
+	>> "$PORTAGE_BUILDDIR/.setuped" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.setuped"
+	ebuild_phase post_pkg_setup
+}
+
+dyn_unpack() {
+	if [[ -f ${PORTAGE_BUILDDIR}/.unpacked ]] ; then
+		vecho ">>> WORKDIR is up-to-date, keeping..."
+		return 0
+	fi
+	if [ ! -d "${WORKDIR}" ]; then
+		install -m${PORTAGE_WORKDIR_MODE:-0700} -d "${WORKDIR}" || die "Failed to create dir '${WORKDIR}'"
+	fi
+	cd "${WORKDIR}" || die "Directory change failed: \`cd '${WORKDIR}'\`"
+	ebuild_phase pre_src_unpack
+	vecho ">>> Unpacking source..."
+	ebuild_phase src_unpack
+	>> "$PORTAGE_BUILDDIR/.unpacked" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.unpacked"
+	vecho ">>> Source unpacked in ${WORKDIR}"
+	ebuild_phase post_src_unpack
+}
+
+dyn_clean() {
+	if [ -z "${PORTAGE_BUILDDIR}" ]; then
+		echo "Aborting clean phase because PORTAGE_BUILDDIR is unset!"
+		return 1
+	elif [ ! -d "${PORTAGE_BUILDDIR}" ] ; then
+		return 0
+	fi
+	if has chflags $FEATURES ; then
+		chflags -R noschg,nouchg,nosappnd,nouappnd "${PORTAGE_BUILDDIR}"
+		chflags -R nosunlnk,nouunlnk "${PORTAGE_BUILDDIR}" 2>/dev/null
+	fi
+
+	rm -rf "${PORTAGE_BUILDDIR}/image" "${PORTAGE_BUILDDIR}/homedir"
+	rm -f "${PORTAGE_BUILDDIR}/.installed"
+
+	if [[ $EMERGE_FROM = binary ]] || \
+		! has keeptemp $FEATURES && ! has keepwork $FEATURES ; then
+		rm -rf "${T}"
+	fi
+
+	if [[ $EMERGE_FROM = binary ]] || ! has keepwork $FEATURES; then
+		rm -f "$PORTAGE_BUILDDIR"/.{ebuild_changed,logid,pretended,setuped,unpacked,prepared} \
+			"$PORTAGE_BUILDDIR"/.{configured,compiled,tested,packaged} \
+			"$PORTAGE_BUILDDIR"/.die_hooks \
+			"$PORTAGE_BUILDDIR"/.ipc_{in,out,lock} \
+			"$PORTAGE_BUILDDIR"/.exit_status
+
+		rm -rf "${PORTAGE_BUILDDIR}/build-info"
+		rm -rf "${WORKDIR}"
+	fi
+
+	if [ -f "${PORTAGE_BUILDDIR}/.unpacked" ]; then
+		find "${PORTAGE_BUILDDIR}" -type d ! -regex "^${WORKDIR}" | sort -r | tr "\n" "\0" | $XARGS -0 rmdir &>/dev/null
+	fi
+
+	# Do not bind this to the doebuild-defined DISTDIR; don't trust doebuild,
+	# and if mistakes are made it'll result in wiping the user's distfiles
+	# directory (bad).
+	rm -rf "${PORTAGE_BUILDDIR}/distdir"
+
+	# Some kernels, such as Solaris, return EINVAL when an attempt
+	# is made to remove the current working directory.
+	cd "$PORTAGE_BUILDDIR"/../..
+	rmdir "$PORTAGE_BUILDDIR" 2>/dev/null
+
+	true
+}
+
+abort_handler() {
+	local msg
+	if [ "$2" != "fail" ]; then
+		msg="${EBUILD}: ${1} aborted; exiting."
+	else
+		msg="${EBUILD}: ${1} failed; exiting."
+	fi
+	echo
+	echo "$msg"
+	echo
+	eval ${3}
+	#unset signal handler
+	trap - SIGINT SIGQUIT
+}
+
+abort_prepare() {
+	abort_handler src_prepare $1
+	rm -f "$PORTAGE_BUILDDIR/.prepared"
+	exit 1
+}
+
+abort_configure() {
+	abort_handler src_configure $1
+	rm -f "$PORTAGE_BUILDDIR/.configured"
+	exit 1
+}
+
+abort_compile() {
+	abort_handler "src_compile" $1
+	rm -f "${PORTAGE_BUILDDIR}/.compiled"
+	exit 1
+}
+
+abort_test() {
+	abort_handler "dyn_test" $1
+	rm -f "${PORTAGE_BUILDDIR}/.tested"
+	exit 1
+}
+
+abort_install() {
+	abort_handler "src_install" $1
+	rm -rf "${PORTAGE_BUILDDIR}/image"
+	exit 1
+}
+
+has_phase_defined_up_to() {
+	local phase
+	for phase in unpack prepare configure compile install; do
+		has ${phase} ${DEFINED_PHASES} && return 0
+		[[ ${phase} == $1 ]] && return 1
+	done
+	# We shouldn't actually get here
+	return 1
+}
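+
+# Worked example (hypothetical value): with DEFINED_PHASES="compile install",
+# has_phase_defined_up_to configure returns 1 (the loop reaches "configure"
+# before any defined phase), while has_phase_defined_up_to install returns 0
+# as soon as "compile" is seen. The dyn_* functions below use this to decide
+# whether falling back to ${WORKDIR} is acceptable when ${S} is missing.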
+
+dyn_prepare() {
+
+	if [[ -e $PORTAGE_BUILDDIR/.prepared ]] ; then
+		vecho ">>> It appears that '$PF' is already prepared; skipping."
+		vecho ">>> Remove '$PORTAGE_BUILDDIR/.prepared' to force prepare."
+		return 0
+	fi
+
+	if [[ -d $S ]] ; then
+		cd "${S}"
+	elif has $EAPI 0 1 2 3 3_pre2 ; then
+		cd "${WORKDIR}"
+	elif [[ -z ${A} ]] && ! has_phase_defined_up_to prepare; then
+		cd "${WORKDIR}"
+	else
+		die "The source directory '${S}' doesn't exist"
+	fi
+
+	trap abort_prepare SIGINT SIGQUIT
+
+	ebuild_phase pre_src_prepare
+	vecho ">>> Preparing source in $PWD ..."
+	ebuild_phase src_prepare
+	>> "$PORTAGE_BUILDDIR/.prepared" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.prepared"
+	vecho ">>> Source prepared."
+	ebuild_phase post_src_prepare
+
+	trap - SIGINT SIGQUIT
+}
+
+dyn_configure() {
+
+	if [[ -e $PORTAGE_BUILDDIR/.configured ]] ; then
+		vecho ">>> It appears that '$PF' is already configured; skipping."
+		vecho ">>> Remove '$PORTAGE_BUILDDIR/.configured' to force configuration."
+		return 0
+	fi
+
+	if [[ -d $S ]] ; then
+		cd "${S}"
+	elif has $EAPI 0 1 2 3 3_pre2 ; then
+		cd "${WORKDIR}"
+	elif [[ -z ${A} ]] && ! has_phase_defined_up_to configure; then
+		cd "${WORKDIR}"
+	else
+		die "The source directory '${S}' doesn't exist"
+	fi
+
+	trap abort_configure SIGINT SIGQUIT
+
+	ebuild_phase pre_src_configure
+
+	vecho ">>> Configuring source in $PWD ..."
+	ebuild_phase src_configure
+	>> "$PORTAGE_BUILDDIR/.configured" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.configured"
+	vecho ">>> Source configured."
+
+	ebuild_phase post_src_configure
+
+	trap - SIGINT SIGQUIT
+}
+
+dyn_compile() {
+
+	if [[ -e $PORTAGE_BUILDDIR/.compiled ]] ; then
+		vecho ">>> It appears that '${PF}' is already compiled; skipping."
+		vecho ">>> Remove '$PORTAGE_BUILDDIR/.compiled' to force compilation."
+		return 0
+	fi
+
+	if [[ -d $S ]] ; then
+		cd "${S}"
+	elif has $EAPI 0 1 2 3 3_pre2 ; then
+		cd "${WORKDIR}"
+	elif [[ -z ${A} ]] && ! has_phase_defined_up_to compile; then
+		cd "${WORKDIR}"
+	else
+		die "The source directory '${S}' doesn't exist"
+	fi
+
+	trap abort_compile SIGINT SIGQUIT
+
+	if has distcc $FEATURES && has distcc-pump $FEATURES ; then
+		if [[ -z $INCLUDE_SERVER_PORT ]] || [[ ! -w $INCLUDE_SERVER_PORT ]] ; then
+			eval $(pump --startup)
+			trap "pump --shutdown" EXIT
+		fi
+	fi
+
+	ebuild_phase pre_src_compile
+
+	vecho ">>> Compiling source in $PWD ..."
+	ebuild_phase src_compile
+	>> "$PORTAGE_BUILDDIR/.compiled" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.compiled"
+	vecho ">>> Source compiled."
+
+	ebuild_phase post_src_compile
+
+	trap - SIGINT SIGQUIT
+}
+
+dyn_test() {
+
+	if [[ -e $PORTAGE_BUILDDIR/.tested ]] ; then
+		vecho ">>> It appears that ${PN} has already been tested; skipping."
+		vecho ">>> Remove '${PORTAGE_BUILDDIR}/.tested' to force test."
+		return
+	fi
+
+	if [ "${EBUILD_FORCE_TEST}" == "1" ] ; then
+		# If USE came from ${T}/environment then it might not have USE=test
+		# like it's supposed to here.
+		! has test ${USE} && export USE="${USE} test"
+	fi
+
+	trap "abort_test" SIGINT SIGQUIT
+	if [ -d "${S}" ]; then
+		cd "${S}"
+	else
+		cd "${WORKDIR}"
+	fi
+
+	if ! has test $FEATURES && [ "${EBUILD_FORCE_TEST}" != "1" ]; then
+		vecho ">>> Test phase [not enabled]: ${CATEGORY}/${PF}"
+	elif has test $RESTRICT; then
+		einfo "Skipping make test/check due to ebuild restriction."
+		vecho ">>> Test phase [explicitly disabled]: ${CATEGORY}/${PF}"
+	else
+		local save_sp=${SANDBOX_PREDICT}
+		addpredict /
+		ebuild_phase pre_src_test
+		ebuild_phase src_test
+		>> "$PORTAGE_BUILDDIR/.tested" || \
+			die "Failed to create $PORTAGE_BUILDDIR/.tested"
+		ebuild_phase post_src_test
+		SANDBOX_PREDICT=${save_sp}
+	fi
+
+	trap - SIGINT SIGQUIT
+}
+
+dyn_install() {
+	[ -z "$PORTAGE_BUILDDIR" ] && die "${FUNCNAME}: PORTAGE_BUILDDIR is unset"
+	if has noauto $FEATURES ; then
+		rm -f "${PORTAGE_BUILDDIR}/.installed"
+	elif [[ -e $PORTAGE_BUILDDIR/.installed ]] ; then
+		vecho ">>> It appears that '${PF}' is already installed; skipping."
+		vecho ">>> Remove '${PORTAGE_BUILDDIR}/.installed' to force install."
+		return 0
+	fi
+	trap "abort_install" SIGINT SIGQUIT
+	ebuild_phase pre_src_install
+
+	_x=${ED}
+	[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+		case "$EAPI" in 0|1|2) _x=${D} ;; esac
+	rm -rf "${D}"
+	mkdir -p "${_x}"
+	unset _x
+
+	if [[ -d $S ]] ; then
+		cd "${S}"
+	elif has $EAPI 0 1 2 3 3_pre2 ; then
+		cd "${WORKDIR}"
+	elif [[ -z ${A} ]] && ! has_phase_defined_up_to install; then
+		cd "${WORKDIR}"
+	else
+		die "The source directory '${S}' doesn't exist"
+	fi
+
+	vecho
+	vecho ">>> Install ${PF} into ${D} category ${CATEGORY}"
+	#our custom version of libtool uses $S and $D to fix
+	#invalid paths in .la files
+	export S D
+
+	# Reset exeinto(), docinto(), insinto(), and into() state variables
+	# in case the user is running the install phase multiple times
+	# consecutively via the ebuild command.
+	export DESTTREE=/usr
+	export INSDESTTREE=""
+	export _E_EXEDESTTREE_=""
+	export _E_DOCDESTTREE_=""
+
+	ebuild_phase src_install
+	>> "$PORTAGE_BUILDDIR/.installed" || \
+		die "Failed to create $PORTAGE_BUILDDIR/.installed"
+	vecho ">>> Completed installing ${PF} into ${D}"
+	vecho
+	ebuild_phase post_src_install
+
+	cd "${PORTAGE_BUILDDIR}"/build-info
+	set -f
+	local f x
+	IFS=$' \t\n\r'
+	for f in CATEGORY DEFINED_PHASES FEATURES INHERITED IUSE \
+		PF PKGUSE SLOT KEYWORDS HOMEPAGE DESCRIPTION ; do
+		x=$(echo -n ${!f})
+		[[ -n $x ]] && echo "$x" > $f
+	done
+	if [[ $CATEGORY != virtual ]] ; then
+		for f in ASFLAGS CBUILD CC CFLAGS CHOST CTARGET CXX \
+			CXXFLAGS EXTRA_ECONF EXTRA_EINSTALL EXTRA_MAKE \
+			LDFLAGS LIBCFLAGS LIBCXXFLAGS ; do
+			x=$(echo -n ${!f})
+			[[ -n $x ]] && echo "$x" > $f
+		done
+	fi
+	echo "${USE}"       > USE
+	echo "${EAPI:-0}"   > EAPI
+
+	# Save EPREFIX, since it makes it easy to use chpathtool to
+	# adjust the content of a binary package so that it will
+	# work in a different EPREFIX from the one it was built for.
+	case "${EAPI:-0}" in
+		0|1|2)
+			[[ " ${FEATURES} " == *" force-prefix "* ]] && \
+				[ -n "${EPREFIX}" ] && echo "${EPREFIX}" > EPREFIX
+			;;
+		*)
+			[ -n "${EPREFIX}" ] && echo "${EPREFIX}" > EPREFIX
+			;;
+	esac
+
+	set +f
+
+	# local variables can leak into the saved environment.
+	unset f
+
+	save_ebuild_env --exclude-init-phases | filter_readonly_variables \
+		--filter-path --filter-sandbox --allow-extra-vars > environment
+	assert "save_ebuild_env failed"
+
+	${PORTAGE_BZIP2_COMMAND} -f9 environment
+
+	cp "${EBUILD}" "${PF}.ebuild"
+	[ -n "${PORTAGE_REPO_NAME}" ]  && echo "${PORTAGE_REPO_NAME}" > repository
+	if has nostrip ${FEATURES} ${RESTRICT} || has strip ${RESTRICT}
+	then
+		>> DEBUGBUILD
+	fi
+	trap - SIGINT SIGQUIT
+}
+
+dyn_preinst() {
+	if [ -z "${D}" ]; then
+		eerror "${FUNCNAME}: D is unset"
+		return 1
+	fi
+	ebuild_phase_with_hooks pkg_preinst
+}
+
+dyn_help() {
+	echo
+	echo "Portage"
+	echo "Copyright 1999-2010 Gentoo Foundation"
+	echo
+	echo "How to use the ebuild command:"
+	echo
+	echo "The first argument to ebuild should be an existing .ebuild file."
+	echo
+	echo "One or more of the following options can then be specified.  If more"
+	echo "than one option is specified, each will be executed in order."
+	echo
+	echo "  help        : show this help screen"
+	echo "  pretend     : execute package specific pretend actions"
+	echo "  setup       : execute package specific setup actions"
+	echo "  fetch       : download source archive(s) and patches"
+	echo "  digest      : create a manifest file for the package"
+	echo "  manifest    : create a manifest file for the package"
+	echo "  unpack      : unpack sources (auto-dependencies if needed)"
+	echo "  prepare     : prepare sources (auto-dependencies if needed)"
+	echo "  configure   : configure sources (auto-fetch/unpack if needed)"
+	echo "  compile     : compile sources (auto-fetch/unpack/configure if needed)"
+	echo "  test        : test package (auto-fetch/unpack/configure/compile if needed)"
+	echo "  preinst     : execute pre-install instructions"
+	echo "  postinst    : execute post-install instructions"
+	echo "  install     : install the package to the temporary install directory"
+	echo "  qmerge      : merge image into live filesystem, recording files in db"
+	echo "  merge       : do fetch, unpack, compile, install and qmerge"
+	echo "  prerm       : execute pre-removal instructions"
+	echo "  postrm      : execute post-removal instructions"
+	echo "  unmerge     : remove package from live filesystem"
+	echo "  config      : execute package specific configuration actions"
+	echo "  package     : create a tarball package in ${PKGDIR}/All"
+	echo "  rpm         : build a RedHat RPM package"
+	echo "  clean       : clean up all source and temporary files"
+	echo
+	echo "The following settings will be used for the ebuild process:"
+	echo
+	echo "  package     : ${PF}"
+	echo "  slot        : ${SLOT}"
+	echo "  category    : ${CATEGORY}"
+	echo "  description : ${DESCRIPTION}"
+	echo "  system      : ${CHOST}"
+	echo "  c flags     : ${CFLAGS}"
+	echo "  c++ flags   : ${CXXFLAGS}"
+	echo "  make flags  : ${MAKEOPTS}"
+	echo -n "  build mode  : "
+	if has nostrip ${FEATURES} ${RESTRICT} || has strip ${RESTRICT} ;
+	then
+		echo "debug (large)"
+	else
+		echo "production (stripped)"
+	fi
+	echo "  merge to    : ${ROOT}"
+	echo
+	if [ -n "$USE" ]; then
+		echo "Additionally, support for the following optional features will be enabled:"
+		echo
+		echo "  ${USE}"
+	fi
+	echo
+}
+
+# @FUNCTION: _ebuild_arg_to_phase
+# @DESCRIPTION:
+# Translate a known ebuild(1) argument into the precise
+# name of its corresponding ebuild phase.
+_ebuild_arg_to_phase() {
+	[ $# -ne 2 ] && die "expected exactly 2 args, got $#: $*"
+	local eapi=$1
+	local arg=$2
+	local phase_func=""
+
+	case "$arg" in
+		pretend)
+			! has $eapi 0 1 2 3 3_pre2 && \
+				phase_func=pkg_pretend
+			;;
+		setup)
+			phase_func=pkg_setup
+			;;
+		nofetch)
+			phase_func=pkg_nofetch
+			;;
+		unpack)
+			phase_func=src_unpack
+			;;
+		prepare)
+			! has $eapi 0 1 && \
+				phase_func=src_prepare
+			;;
+		configure)
+			! has $eapi 0 1 && \
+				phase_func=src_configure
+			;;
+		compile)
+			phase_func=src_compile
+			;;
+		test)
+			phase_func=src_test
+			;;
+		install)
+			phase_func=src_install
+			;;
+		preinst)
+			phase_func=pkg_preinst
+			;;
+		postinst)
+			phase_func=pkg_postinst
+			;;
+		prerm)
+			phase_func=pkg_prerm
+			;;
+		postrm)
+			phase_func=pkg_postrm
+			;;
+	esac
+
+	[[ -z $phase_func ]] && return 1
+	echo "$phase_func"
+	return 0
+}
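+
+# For example (illustrative): _ebuild_arg_to_phase 4 prepare prints
+# "src_prepare" and returns 0, whereas _ebuild_arg_to_phase 0 prepare
+# prints nothing and returns 1, since src_prepare does not exist in
+# EAPI 0/1.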
+
+_ebuild_phase_funcs() {
+	[ $# -ne 2 ] && die "expected exactly 2 args, got $#: $*"
+	local eapi=$1
+	local phase_func=$2
+	local default_phases="pkg_nofetch src_unpack src_prepare src_configure
+		src_compile src_install src_test"
+	local x y default_func=""
+
+	for x in pkg_nofetch src_unpack src_test ; do
+		declare -F $x >/dev/null || \
+			eval "$x() { _eapi0_$x \"\$@\" ; }"
+	done
+
+	case $eapi in
+
+		0|1)
+
+			if ! declare -F src_compile >/dev/null ; then
+				case $eapi in
+					0)
+						src_compile() { _eapi0_src_compile "$@" ; }
+						;;
+					*)
+						src_compile() { _eapi1_src_compile "$@" ; }
+						;;
+				esac
+			fi
+
+			for x in $default_phases ; do
+				eval "default_$x() {
+					die \"default_$x() is not supported with EAPI='$eapi' during phase $phase_func\"
+				}"
+			done
+
+			eval "default() {
+				die \"default() is not supported with EAPI='$eapi' during phase $phase_func\"
+			}"
+
+			;;
+
+		*)
+
+			declare -F src_configure >/dev/null || \
+				src_configure() { _eapi2_src_configure "$@" ; }
+
+			declare -F src_compile >/dev/null || \
+				src_compile() { _eapi2_src_compile "$@" ; }
+
+			has $eapi 2 3 3_pre2 || declare -F src_install >/dev/null || \
+				src_install() { _eapi4_src_install "$@" ; }
+
+			if has $phase_func $default_phases ; then
+
+				_eapi2_pkg_nofetch   () { _eapi0_pkg_nofetch          "$@" ; }
+				_eapi2_src_unpack    () { _eapi0_src_unpack           "$@" ; }
+				_eapi2_src_prepare   () { true                             ; }
+				_eapi2_src_test      () { _eapi0_src_test             "$@" ; }
+				_eapi2_src_install   () { die "$FUNCNAME is not supported" ; }
+
+				for x in $default_phases ; do
+					eval "default_$x() { _eapi2_$x \"\$@\" ; }"
+				done
+
+				eval "default() { _eapi2_$phase_func \"\$@\" ; }"
+
+				case $eapi in
+					2|3)
+						;;
+					*)
+						eval "default_src_install() { _eapi4_src_install \"\$@\" ; }"
+						[[ $phase_func = src_install ]] && \
+							eval "default() { _eapi4_$phase_func \"\$@\" ; }"
+						;;
+				esac
+
+			else
+
+				for x in $default_phases ; do
+					eval "default_$x() {
+						die \"default_$x() is not supported in phase $default_func\"
+					}"
+				done
+
+				eval "default() {
+					die \"default() is not supported with EAPI='$eapi' during phase $phase_func\"
+				}"
+
+			fi
+
+			;;
+	esac
+}
+
+ebuild_main() {
+
+	# Subshell/helper die support (must export for the die helper).
+	# Since this function is typically executed in a subshell,
+	# set up EBUILD_MASTER_PID to refer to the current $BASHPID,
+	# which seems to give the best results when further
+	# nested subshells call die.
+	export EBUILD_MASTER_PID=$BASHPID
+	trap 'exit 1' SIGTERM
+
+	#a reasonable default for $S
+	[[ -z ${S} ]] && export S=${WORKDIR}/${P}
+
+	if [[ -s $SANDBOX_LOG ]] ; then
+		# We use SANDBOX_LOG to check for sandbox violations,
+		# so we ensure that there can't be a stale log to
+		# interfere with our logic.
+		local x=
+		if [[ -n $SANDBOX_ON ]] ; then
+			x=$SANDBOX_ON
+			export SANDBOX_ON=0
+		fi
+
+		rm -f "$SANDBOX_LOG" || \
+			die "failed to remove stale sandbox log: '$SANDBOX_LOG'"
+
+		if [[ -n $x ]] ; then
+			export SANDBOX_ON=$x
+		fi
+		unset x
+	fi
+
+	# Force configure scripts that automatically detect ccache to
+	# respect FEATURES="-ccache".
+	has ccache $FEATURES || export CCACHE_DISABLE=1
+
+	local phase_func=$(_ebuild_arg_to_phase "$EAPI" "$EBUILD_PHASE")
+	[[ -n $phase_func ]] && _ebuild_phase_funcs "$EAPI" "$phase_func"
+	unset phase_func
+
+	source_all_bashrcs
+
+	case ${1} in
+	nofetch)
+		ebuild_phase_with_hooks pkg_nofetch
+		;;
+	prerm|postrm|postinst|config|info)
+		if has "${1}" config info && \
+			! declare -F "pkg_${1}" >/dev/null ; then
+			ewarn  "pkg_${1}() is not defined: '${EBUILD##*/}'"
+		fi
+		export SANDBOX_ON="0"
+		if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
+			ebuild_phase_with_hooks pkg_${1}
+		else
+			set -x
+			ebuild_phase_with_hooks pkg_${1}
+			set +x
+		fi
+		if [[ $EBUILD_PHASE == postinst ]] && [[ -n $PORTAGE_UPDATE_ENV ]]; then
+			# Update environment.bz2 in case installation phases
+			# need to pass some variables to uninstallation phases.
+			save_ebuild_env --exclude-init-phases | \
+				filter_readonly_variables --filter-path \
+				--filter-sandbox --allow-extra-vars \
+				| ${PORTAGE_BZIP2_COMMAND} -c -f9 > "$PORTAGE_UPDATE_ENV"
+			assert "save_ebuild_env failed"
+		fi
+		;;
+	unpack|prepare|configure|compile|test|clean|install)
+		if [[ ${SANDBOX_DISABLED:-0} = 0 ]] ; then
+			export SANDBOX_ON="1"
+		else
+			export SANDBOX_ON="0"
+		fi
+
+		case "${1}" in
+		configure|compile)
+
+			local x
+			for x in ASFLAGS CCACHE_DIR CCACHE_SIZE \
+				CFLAGS CXXFLAGS LDFLAGS LIBCFLAGS LIBCXXFLAGS ; do
+				[[ ${!x+set} = set ]] && export $x
+			done
+			unset x
+
+			has distcc $FEATURES && [[ -n $DISTCC_DIR ]] && \
+				[[ ${SANDBOX_WRITE/$DISTCC_DIR} = $SANDBOX_WRITE ]] && \
+				addwrite "$DISTCC_DIR"
+
+			x=LIBDIR_$ABI
+			[ -z "$PKG_CONFIG_PATH" -a -n "$ABI" -a -n "${!x}" ] && \
+				export PKG_CONFIG_PATH=/usr/${!x}/pkgconfig
+
+			if has noauto $FEATURES && \
+				[[ ! -f $PORTAGE_BUILDDIR/.unpacked ]] ; then
+				echo
+				echo "!!! We apparently haven't unpacked..." \
+					"This is probably not what you"
+				echo "!!! want to be doing... You are using" \
+					"FEATURES=noauto so I'll assume"
+				echo "!!! that you know what you are doing..." \
+					"You have 5 seconds to abort..."
+				echo
+
+				# Give the user the advertised 5 seconds to abort:
+				# eight 0.25s fractional sleeps (LC_ALL=C keeps the
+				# decimal separator locale-independent) plus 3 full
+				# seconds.
+				local x
+				for x in 1 2 3 4 5 6 7 8; do
+					LC_ALL=C sleep 0.25
+				done
+
+				sleep 3
+			fi
+
+			cd "$PORTAGE_BUILDDIR"
+			if [ ! -d build-info ] ; then
+				mkdir build-info
+				cp "$EBUILD" "build-info/$PF.ebuild"
+			fi
+
+			#our custom version of libtool uses $S and $D to fix
+			#invalid paths in .la files
+			export S D
+
+			;;
+		esac
+
+		if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
+			dyn_${1}
+		else
+			set -x
+			dyn_${1}
+			set +x
+		fi
+		export SANDBOX_ON="0"
+		;;
+	help|pretend|setup|preinst)
+		#pkg_setup needs to be out of the sandbox for tmp file creation;
+		#for example, awking and piping a file in /tmp requires a temp file to be created
+		#in /etc.  If pkg_setup is in the sandbox, both our lilo and apache ebuilds break.
+		export SANDBOX_ON="0"
+		if [ "${PORTAGE_DEBUG}" != "1" ] || [ "${-/x/}" != "$-" ]; then
+			dyn_${1}
+		else
+			set -x
+			dyn_${1}
+			set +x
+		fi
+		;;
+	_internal_test)
+		;;
+	*)
+		export SANDBOX_ON="1"
+		echo "Unrecognized arg '${1}'"
+		echo
+		dyn_help
+		exit 1
+		;;
+	esac
+
+	# Save the env only for relevant phases.
+	if ! has "${1}" clean help info nofetch ; then
+		umask 002
+		save_ebuild_env | filter_readonly_variables \
+			--filter-features > "$T/environment"
+		assert "save_ebuild_env failed"
+		chown portage:portage "$T/environment" &>/dev/null
+		chmod g+w "$T/environment" &>/dev/null
+	fi
+	[[ -n $PORTAGE_EBUILD_EXIT_FILE ]] && > "$PORTAGE_EBUILD_EXIT_FILE"
+	if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
+		[[ ! -s $SANDBOX_LOG ]]
+		"$PORTAGE_BIN_PATH"/ebuild-ipc exit $?
+	fi
+}

diff --git a/portage_with_autodep/bin/phase-helpers.sh b/portage_with_autodep/bin/phase-helpers.sh
new file mode 100755
index 0000000..946520b
--- /dev/null
+++ b/portage_with_autodep/bin/phase-helpers.sh
@@ -0,0 +1,663 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+export DESTTREE=/usr
+export INSDESTTREE=""
+export _E_EXEDESTTREE_=""
+export _E_DOCDESTTREE_=""
+export INSOPTIONS="-m0644"
+export EXEOPTIONS="-m0755"
+export LIBOPTIONS="-m0644"
+export DIROPTIONS="-m0755"
+export MOPREFIX=${PN}
+declare -a PORTAGE_DOCOMPRESS=( /usr/share/{doc,info,man} )
+declare -a PORTAGE_DOCOMPRESS_SKIP=( /usr/share/doc/${PF}/html )
+
+into() {
+	if [ "$1" == "/" ]; then
+		export DESTTREE=""
+	else
+		export DESTTREE=$1
+		[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+			case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+		if [ ! -d "${ED}${DESTTREE}" ]; then
+			install -d "${ED}${DESTTREE}"
+			local ret=$?
+			if [[ $ret -ne 0 ]] ; then
+				helpers_die "${FUNCNAME[0]} failed"
+				return $ret
+			fi
+		fi
+	fi
+}
+
+insinto() {
+	if [ "$1" == "/" ]; then
+		export INSDESTTREE=""
+	else
+		export INSDESTTREE=$1
+		[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+			case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+		if [ ! -d "${ED}${INSDESTTREE}" ]; then
+			install -d "${ED}${INSDESTTREE}"
+			local ret=$?
+			if [[ $ret -ne 0 ]] ; then
+				helpers_die "${FUNCNAME[0]} failed"
+				return $ret
+			fi
+		fi
+	fi
+}
+
+exeinto() {
+	if [ "$1" == "/" ]; then
+		export _E_EXEDESTTREE_=""
+	else
+		export _E_EXEDESTTREE_="$1"
+		[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+			case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+		if [ ! -d "${ED}${_E_EXEDESTTREE_}" ]; then
+			install -d "${ED}${_E_EXEDESTTREE_}"
+			local ret=$?
+			if [[ $ret -ne 0 ]] ; then
+				helpers_die "${FUNCNAME[0]} failed"
+				return $ret
+			fi
+		fi
+	fi
+}
+
+docinto() {
+	if [ "$1" == "/" ]; then
+		export _E_DOCDESTTREE_=""
+	else
+		export _E_DOCDESTTREE_="$1"
+		[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+			case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+		if [ ! -d "${ED}usr/share/doc/${PF}/${_E_DOCDESTTREE_}" ]; then
+			install -d "${ED}usr/share/doc/${PF}/${_E_DOCDESTTREE_}"
+			local ret=$?
+			if [[ $ret -ne 0 ]] ; then
+				helpers_die "${FUNCNAME[0]} failed"
+				return $ret
+			fi
+		fi
+	fi
+}
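+
+# Typical src_install usage (illustrative; doins and friends live in
+# the ebuild-helpers directory):
+#
+#   insinto /etc/portage
+#   doins "${FILESDIR}/repos.conf"    # copied to ${ED}/etc/portage/
+#
+# The *into functions only record the destination (creating it under
+# ${ED} if needed); the do* helpers read the exported variable back.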
+
+insopts() {
+	export INSOPTIONS="$@"
+
+	# `install` should never be called with '-s' ...
+	has -s ${INSOPTIONS} && die "Never call insopts() with -s"
+}
+
+diropts() {
+	export DIROPTIONS="$@"
+}
+
+exeopts() {
+	export EXEOPTIONS="$@"
+
+	# `install` should never be called with '-s' ...
+	has -s ${EXEOPTIONS} && die "Never call exeopts() with -s"
+}
+
+libopts() {
+	export LIBOPTIONS="$@"
+
+	# `install` should never be called with '-s' ...
+	has -s ${LIBOPTIONS} && die "Never call libopts() with -s"
+}
+
+docompress() {
+	has "${EAPI}" 0 1 2 3 && die "'docompress' not supported in this EAPI"
+
+	local f g
+	if [[ $1 = "-x" ]]; then
+		shift
+		for f; do
+			f=$(strip_duplicate_slashes "${f}"); f=${f%/}
+			[[ ${f:0:1} = / ]] || f="/${f}"
+			for g in "${PORTAGE_DOCOMPRESS_SKIP[@]}"; do
+				[[ ${f} = "${g}" ]] && continue 2
+			done
+			PORTAGE_DOCOMPRESS_SKIP[${#PORTAGE_DOCOMPRESS_SKIP[@]}]=${f}
+		done
+	else
+		for f; do
+			f=$(strip_duplicate_slashes "${f}"); f=${f%/}
+			[[ ${f:0:1} = / ]] || f="/${f}"
+			for g in "${PORTAGE_DOCOMPRESS[@]}"; do
+				[[ ${f} = "${g}" ]] && continue 2
+			done
+			PORTAGE_DOCOMPRESS[${#PORTAGE_DOCOMPRESS[@]}]=${f}
+		done
+	fi
+}
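+
+# Example (illustrative): exclude a directory from the compression
+# pass that runs after src_install:
+#
+#   docompress -x /usr/share/doc/${PF}/examples
+#
+# The path is normalized (duplicate/trailing slashes stripped, leading
+# slash added) before being appended to PORTAGE_DOCOMPRESS_SKIP.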
+
+# adds ".keep" files so that dirs aren't auto-cleaned
+keepdir() {
+	dodir "$@"
+	local x
+	[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+		case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+	if [ "$1" == "-R" ] || [ "$1" == "-r" ]; then
+		shift
+		find "$@" -type d -printf "${ED}%p/.keep_${CATEGORY}_${PN}-${SLOT}\n" \
+			| tr "\n" "\0" | \
+			while read -r -d $'\0' ; do
+				>> "$REPLY" || \
+					die "Failed to recursively create .keep files"
+			done
+	else
+		for x in "$@"; do
+			>> "${ED}${x}/.keep_${CATEGORY}_${PN}-${SLOT}" || \
+				die "Failed to create .keep in ${ED}${x}"
+		done
+	fi
+}
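+
+# Example (illustrative): preserve an otherwise-empty runtime
+# directory so it is not pruned when the package is merged:
+#
+#   keepdir /var/lib/${PN}
+#
+# This drops an empty .keep_${CATEGORY}_${PN}-${SLOT} file into the
+# directory under ${ED}.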
+
+
+useq() {
+	has $EBUILD_PHASE prerm postrm || eqawarn \
+		"QA Notice: The 'useq' function is deprecated (replaced by 'use')"
+	use ${1}
+}
+
+usev() {
+	if use ${1}; then
+		echo "${1#!}"
+		return 0
+	fi
+	return 1
+}
+
+use() {
+	local u=$1
+	local found=0
+
+	# if we got something like '!flag', then invert the return value
+	if [[ ${u:0:1} == "!" ]] ; then
+		u=${u:1}
+		found=1
+	fi
+
+	if [[ $EBUILD_PHASE = depend ]] ; then
+		# TODO: Add a registration interface for eclasses to register
+		# any number of phase hooks, so that global scope eclass
+		# initialization can be migrated to phase hooks in new EAPIs.
+		# Example: add_phase_hook before pkg_setup $ECLASS_pre_pkg_setup
+		#if [[ -n $EAPI ]] && ! has "$EAPI" 0 1 2 3 ; then
+		#	die "use() called during invalid phase: $EBUILD_PHASE"
+		#fi
+		true
+
+	# Make sure we have this USE flag in IUSE
+	elif [[ -n $PORTAGE_IUSE && -n $EBUILD_PHASE ]] ; then
+		[[ $u =~ $PORTAGE_IUSE ]] || \
+			eqawarn "QA Notice: USE Flag '${u}' not" \
+				"in IUSE for ${CATEGORY}/${PF}"
+	fi
+
+	if has ${u} ${USE} ; then
+		return ${found}
+	else
+		return $((!found))
+	fi
+}
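+
+# Examples (assuming USE="gtk" and both flags in IUSE):
+#
+#   use gtk  && echo enabled    # prints "enabled"; use returns 0
+#   use !qt  && echo no-qt      # prints "no-qt"; '!' inverts the result
+#   usev gtk                    # prints "gtk" and returns 0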
+
+use_with() {
+	if [ -z "$1" ]; then
+		echo "!!! use_with() called without a parameter." >&2
+		echo "!!! use_with <USEFLAG> [<flagname> [value]]" >&2
+		return 1
+	fi
+
+	if ! has "${EAPI:-0}" 0 1 2 3 ; then
+		local UW_SUFFIX=${3+=$3}
+	else
+		local UW_SUFFIX=${3:+=$3}
+	fi
+	local UWORD=${2:-$1}
+
+	if use $1; then
+		echo "--with-${UWORD}${UW_SUFFIX}"
+	else
+		echo "--without-${UWORD}"
+	fi
+	return 0
+}
+
+use_enable() {
+	if [ -z "$1" ]; then
+		echo "!!! use_enable() called without a parameter." >&2
+		echo "!!! use_enable <USEFLAG> [<flagname> [value]]" >&2
+		return 1
+	fi
+
+	if ! has "${EAPI:-0}" 0 1 2 3 ; then
+		local UE_SUFFIX=${3+=$3}
+	else
+		local UE_SUFFIX=${3:+=$3}
+	fi
+	local UWORD=${2:-$1}
+
+	if use $1; then
+		echo "--enable-${UWORD}${UE_SUFFIX}"
+	else
+		echo "--disable-${UWORD}"
+	fi
+	return 0
+}
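+
+# Examples (assuming EAPI 4, USE="ssl", and "ssl" in IUSE):
+#
+#   use_with ssl             # --with-ssl
+#   use_with ssl openssl     # --with-openssl
+#   use_enable ssl ssl ""    # --enable-ssl=   (${3+=$3} appends '='
+#                            # even when the value is empty)
+#
+# With the flag unset, the matching --without-*/--disable-* form is
+# printed instead.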
+
+unpack() {
+	local srcdir
+	local x
+	local y
+	local myfail
+	local eapi=${EAPI:-0}
+	[ -z "$*" ] && die "Nothing passed to the 'unpack' command"
+
+	for x in "$@"; do
+		vecho ">>> Unpacking ${x} to ${PWD}"
+		y=${x%.*}
+		y=${y##*.}
+
+		if [[ ${x} == "./"* ]] ; then
+			srcdir=""
+		elif [[ ${x} == ${DISTDIR%/}/* ]] ; then
+			die "Arguments to unpack() cannot begin with \${DISTDIR}."
+		elif [[ ${x} == "/"* ]] ; then
+			die "Arguments to unpack() cannot be absolute"
+		else
+			srcdir="${DISTDIR}/"
+		fi
+		[[ ! -s ${srcdir}${x} ]] && die "${x} does not exist"
+
+		_unpack_tar() {
+			if [ "${y}" == "tar" ]; then
+				$1 -c -- "$srcdir$x" | tar xof -
+				assert_sigpipe_ok "$myfail"
+			else
+				local cwd_dest=${x##*/}
+				cwd_dest=${cwd_dest%.*}
+				$1 -c -- "${srcdir}${x}" > "${cwd_dest}" || die "$myfail"
+			fi
+		}
+
+		myfail="failure unpacking ${x}"
+		case "${x##*.}" in
+			tar)
+				tar xof "$srcdir$x" || die "$myfail"
+				;;
+			tgz)
+				tar xozf "$srcdir$x" || die "$myfail"
+				;;
+			tbz|tbz2)
+				${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -c -- "$srcdir$x" | tar xof -
+				assert_sigpipe_ok "$myfail"
+				;;
+			ZIP|zip|jar)
+				# unzip will interactively prompt under some error conditions,
+				# as reported in bug #336285
+				( set +x ; while true ; do echo n || break ; done ) | \
+				unzip -qo "${srcdir}${x}" || die "$myfail"
+				;;
+			gz|Z|z)
+				_unpack_tar "gzip -d"
+				;;
+			bz2|bz)
+				_unpack_tar "${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d}"
+				;;
+			7Z|7z)
+				local my_output
+				my_output="$(7z x -y "${srcdir}${x}")"
+				if [ $? -ne 0 ]; then
+					echo "${my_output}" >&2
+					die "$myfail"
+				fi
+				;;
+			RAR|rar)
+				unrar x -idq -o+ "${srcdir}${x}" || die "$myfail"
+				;;
+			LHa|LHA|lha|lzh)
+				lha xfq "${srcdir}${x}" || die "$myfail"
+				;;
+			a)
+				ar x "${srcdir}${x}" || die "$myfail"
+				;;
+			deb)
+			# Unpacking .deb archives cannot always be done with
+			# `ar`; on AIX, for instance, it doesn't work. If
+			# deb2targz is installed, prefer it over `ar` for that
+			# reason. We just make sure deb2targz is installed on
+			# AIX.
+				if type -P deb2targz > /dev/null; then
+					y=${x##*/}
+					local created_symlink=0
+					if [ ! "$srcdir$x" -ef "$y" ] ; then
+						# deb2targz always extracts into the same directory as
+						# the source file, so create a symlink in the current
+						# working directory if necessary.
+						ln -sf "$srcdir$x" "$y" || die "$myfail"
+						created_symlink=1
+					fi
+					deb2targz "$y" || die "$myfail"
+					if [ $created_symlink = 1 ] ; then
+						# Clean up the symlink so the ebuild
+						# doesn't inadvertently install it.
+						rm -f "$y"
+					fi
+					mv -f "${y%.deb}".tar.gz data.tar.gz || die "$myfail"
+				else
+					ar x "$srcdir$x" || die "$myfail"
+				fi
+				;;
+			lzma)
+				_unpack_tar "lzma -d"
+				;;
+			xz)
+				if has $eapi 0 1 2 ; then
+					vecho "unpack ${x}: file format not recognized. Ignoring."
+				else
+					_unpack_tar "xz -d"
+				fi
+				;;
+			*)
+				vecho "unpack ${x}: file format not recognized. Ignoring."
+				;;
+		esac
+	done
+	# Do not chmod '.' since it's probably ${WORKDIR} and PORTAGE_WORKDIR_MODE
+	# should be preserved.
+	find . -mindepth 1 -maxdepth 1 ! -type l -print0 | \
+		${XARGS} -0 chmod -fR a+rX,u+w,g-w,o-w
+}
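+
+# Typical call from src_unpack (illustrative):
+#
+#   src_unpack() {
+#       unpack ${A}                     # all archives listed in SRC_URI
+#       unpack ./local-patches.tar.gz   # a file relative to ${PWD}
+#   }
+#
+# Bare names are looked up under ${DISTDIR}; absolute paths and
+# explicit ${DISTDIR} prefixes are rejected above.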
+
+econf() {
+	local x
+
+	[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+		case "$EAPI" in 0|1|2) local EPREFIX= ;; esac
+
+	_hasg() {
+		local x s=$1
+		shift
+		for x ; do [[ ${x} == ${s} ]] && echo "${x}" && return 0 ; done
+		return 1
+	}
+
+	_hasgq() { _hasg "$@" >/dev/null ; }
+
+	local phase_func=$(_ebuild_arg_to_phase "$EAPI" "$EBUILD_PHASE")
+	if [[ -n $phase_func ]] ; then
+		if has "$EAPI" 0 1 ; then
+			[[ $phase_func != src_compile ]] && \
+				eqawarn "QA Notice: econf called in" \
+					"$phase_func instead of src_compile"
+		else
+			[[ $phase_func != src_configure ]] && \
+				eqawarn "QA Notice: econf called in" \
+					"$phase_func instead of src_configure"
+		fi
+	fi
+
+	: ${ECONF_SOURCE:=.}
+	if [ -x "${ECONF_SOURCE}/configure" ]; then
+		if [[ -n $CONFIG_SHELL && \
+			"$(head -n1 "$ECONF_SOURCE/configure")" =~ ^'#!'[[:space:]]*/bin/sh([[:space:]]|$) ]] ; then
+			sed -e "1s:^#![[:space:]]*/bin/sh:#!$CONFIG_SHELL:" -i "$ECONF_SOURCE/configure" || \
+				die "Substition of shebang in '$ECONF_SOURCE/configure' failed"
+		fi
+		if [ -e "${EPREFIX}"/usr/share/gnuconfig/ ]; then
+			find "${WORKDIR}" -type f '(' \
+			-name config.guess -o -name config.sub ')' -print0 | \
+			while read -r -d $'\0' x ; do
+				vecho " * econf: updating ${x/${WORKDIR}\/} with ${EPREFIX}/usr/share/gnuconfig/${x##*/}"
+				cp -f "${EPREFIX}"/usr/share/gnuconfig/"${x##*/}" "${x}"
+			done
+		fi
+
+		# EAPI=4 adds --disable-dependency-tracking to econf
+		if ! has "$EAPI" 0 1 2 3 3_pre2 && \
+			"${ECONF_SOURCE}/configure" --help 2>/dev/null | \
+			grep -q disable-dependency-tracking ; then
+			set -- --disable-dependency-tracking "$@"
+		fi
+
+		# if the profile defines a location to install libs to aside from default, pass it on.
+		# if the ebuild passes in --libdir, they're responsible for the conf_libdir fun.
+		local CONF_LIBDIR LIBDIR_VAR="LIBDIR_${ABI}"
+		if [[ -n ${ABI} && -n ${!LIBDIR_VAR} ]] ; then
+			CONF_LIBDIR=${!LIBDIR_VAR}
+		fi
+		if [[ -n ${CONF_LIBDIR} ]] && ! _hasgq --libdir=\* "$@" ; then
+			export CONF_PREFIX=$(_hasg --exec-prefix=\* "$@")
+			[[ -z ${CONF_PREFIX} ]] && CONF_PREFIX=$(_hasg --prefix=\* "$@")
+			: ${CONF_PREFIX:=${EPREFIX}/usr}
+			CONF_PREFIX=${CONF_PREFIX#*=}
+			[[ ${CONF_PREFIX} != /* ]] && CONF_PREFIX="/${CONF_PREFIX}"
+			[[ ${CONF_LIBDIR} != /* ]] && CONF_LIBDIR="/${CONF_LIBDIR}"
+			set -- --libdir="$(strip_duplicate_slashes ${CONF_PREFIX}${CONF_LIBDIR})" "$@"
+		fi
+
+		set -- \
+			--prefix="${EPREFIX}"/usr \
+			${CBUILD:+--build=${CBUILD}} \
+			--host=${CHOST} \
+			${CTARGET:+--target=${CTARGET}} \
+			--mandir="${EPREFIX}"/usr/share/man \
+			--infodir="${EPREFIX}"/usr/share/info \
+			--datadir="${EPREFIX}"/usr/share \
+			--sysconfdir="${EPREFIX}"/etc \
+			--localstatedir="${EPREFIX}"/var/lib \
+			"$@" \
+			${EXTRA_ECONF}
+		vecho "${ECONF_SOURCE}/configure" "$@"
+
+		if ! "${ECONF_SOURCE}/configure" "$@" ; then
+
+			if [ -s config.log ]; then
+				echo
+				echo "!!! Please attach the following file when seeking support:"
+				echo "!!! ${PWD}/config.log"
+			fi
+			die "econf failed"
+		fi
+	elif [ -f "${ECONF_SOURCE}/configure" ]; then
+		die "configure is not executable"
+	else
+		die "no configure script found"
+	fi
+}
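+
+# A representative invocation (hypothetical values; EPREFIX empty,
+# CBUILD/CTARGET unset, CHOST=x86_64-pc-linux-gnu): "econf --enable-nls"
+# runs approximately
+#
+#   ./configure --prefix=/usr --host=x86_64-pc-linux-gnu \
+#       --mandir=/usr/share/man --infodir=/usr/share/info \
+#       --datadir=/usr/share --sysconfdir=/etc \
+#       --localstatedir=/var/lib --enable-nls
+#
+# with --disable-dependency-tracking added for EAPI >= 4 when the
+# configure script advertises that option, and ${EXTRA_ECONF} appended
+# last so users can override anything.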
+
+einstall() {
+	# CONF_PREFIX is only set if they didn't pass in libdir above.
+	local LOCAL_EXTRA_EINSTALL="${EXTRA_EINSTALL}"
+	[[ " ${FEATURES} " == *" force-prefix "* ]] || \
+		case "$EAPI" in 0|1|2) local ED=${D} ;; esac
+	LIBDIR_VAR="LIBDIR_${ABI}"
+	if [ -n "${ABI}" -a -n "${!LIBDIR_VAR}" ]; then
+		CONF_LIBDIR="${!LIBDIR_VAR}"
+	fi
+	unset LIBDIR_VAR
+	if [ -n "${CONF_LIBDIR}" ] && [ "${CONF_PREFIX:+set}" = set ]; then
+		EI_DESTLIBDIR="${D}/${CONF_PREFIX}/${CONF_LIBDIR}"
+		EI_DESTLIBDIR="$(strip_duplicate_slashes ${EI_DESTLIBDIR})"
+		LOCAL_EXTRA_EINSTALL="libdir=${EI_DESTLIBDIR} ${LOCAL_EXTRA_EINSTALL}"
+		unset EI_DESTLIBDIR
+	fi
+
+	if [ -f ./[mM]akefile -o -f ./GNUmakefile ] ; then
+		if [ "${PORTAGE_DEBUG}" == "1" ]; then
+			${MAKE:-make} -n prefix="${ED}usr" \
+				datadir="${ED}usr/share" \
+				infodir="${ED}usr/share/info" \
+				localstatedir="${ED}var/lib" \
+				mandir="${ED}usr/share/man" \
+				sysconfdir="${ED}etc" \
+				${LOCAL_EXTRA_EINSTALL} \
+				${MAKEOPTS} ${EXTRA_EMAKE} -j1 \
+				"$@" install
+		fi
+		${MAKE:-make} prefix="${ED}usr" \
+			datadir="${ED}usr/share" \
+			infodir="${ED}usr/share/info" \
+			localstatedir="${ED}var/lib" \
+			mandir="${ED}usr/share/man" \
+			sysconfdir="${ED}etc" \
+			${LOCAL_EXTRA_EINSTALL} \
+			${MAKEOPTS} ${EXTRA_EMAKE} -j1 \
+			"$@" install || die "einstall failed"
+	else
+		die "no Makefile found"
+	fi
+}
+
+_eapi0_pkg_nofetch() {
+	[ -z "${SRC_URI}" ] && return
+
+	elog "The following are listed in SRC_URI for ${PN}:"
+	local x
+	for x in $(echo ${SRC_URI}); do
+		elog "   ${x}"
+	done
+}
+
+_eapi0_src_unpack() {
+	[[ -n ${A} ]] && unpack ${A}
+}
+
+_eapi0_src_compile() {
+	if [ -x ./configure ] ; then
+		econf
+	fi
+	_eapi2_src_compile
+}
+
+_eapi0_src_test() {
+	# Since we don't want emake's automatic die
+	# support (EAPI 4 and later), and we also don't
+	# want the warning messages that it produces if
+	# we call it in 'nonfatal' mode, we use emake_cmd
+	# to emulate the desired parts of emake behavior.
+	local emake_cmd="${MAKE:-make} ${MAKEOPTS} ${EXTRA_EMAKE}"
+	if $emake_cmd -j1 check -n &> /dev/null; then
+		vecho ">>> Test phase [check]: ${CATEGORY}/${PF}"
+		$emake_cmd -j1 check || \
+			die "Make check failed. See above for details."
+	elif $emake_cmd -j1 test -n &> /dev/null; then
+		vecho ">>> Test phase [test]: ${CATEGORY}/${PF}"
+		$emake_cmd -j1 test || \
+			die "Make test failed. See above for details."
+	else
+		vecho ">>> Test phase [none]: ${CATEGORY}/${PF}"
+	fi
+}
+
+_eapi1_src_compile() {
+	_eapi2_src_configure
+	_eapi2_src_compile
+}
+
+_eapi2_src_configure() {
+	if [[ -x ${ECONF_SOURCE:-.}/configure ]] ; then
+		econf
+	fi
+}
+
+_eapi2_src_compile() {
+	if [ -f Makefile ] || [ -f GNUmakefile ] || [ -f makefile ]; then
+		emake || die "emake failed"
+	fi
+}
+
+_eapi4_src_install() {
+	if [[ -f Makefile || -f GNUmakefile || -f makefile ]] ; then
+		emake DESTDIR="${D}" install
+	fi
+
+	if ! declare -p DOCS &>/dev/null ; then
+		local d
+		for d in README* ChangeLog AUTHORS NEWS TODO CHANGES \
+				THANKS BUGS FAQ CREDITS CHANGELOG ; do
+			[[ -s "${d}" ]] && dodoc "${d}"
+		done
+	elif [[ $(declare -p DOCS) == "declare -a "* ]] ; then
+		dodoc "${DOCS[@]}"
+	else
+		dodoc ${DOCS}
+	fi
+}
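+
+# DOCS handling by example (illustrative snippets):
+#
+#   DOCS=( README.md docs/manual.txt )   # array  -> dodoc "${DOCS[@]}"
+#   DOCS="README NEWS"                   # scalar -> dodoc ${DOCS}
+#
+# With DOCS unset entirely, the default candidates above (README*,
+# ChangeLog, AUTHORS, ...) are installed, skipping empty files.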
+
+# @FUNCTION: has_version
+# @USAGE: <DEPEND ATOM>
+# @DESCRIPTION:
+# Return true if the given package is installed, false otherwise.
+# Callers may override the ROOT variable in order to match packages from an
+# alternative ROOT.
+has_version() {
+
+	local eroot
+	case "$EAPI" in
+		0|1|2)
+			[[ " ${FEATURES} " == *" force-prefix "* ]] && \
+				eroot=${ROOT%/}${EPREFIX}/ || eroot=${ROOT}
+			;;
+		*)
+			eroot=${ROOT%/}${EPREFIX}/
+			;;
+	esac
+	if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
+		"$PORTAGE_BIN_PATH"/ebuild-ipc has_version "${eroot}" "$1"
+	else
+		PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
+		"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" has_version "${eroot}" "$1"
+	fi
+	local retval=$?
+	case "${retval}" in
+		0|1)
+			return ${retval}
+			;;
+		*)
+			die "unexpected portageq exit code: ${retval}"
+			;;
+	esac
+}
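+
+# Example (hypothetical atom): conditional logic in pkg_postinst:
+#
+#   if has_version ">=dev-lang/python-2.7" ; then
+#       elog "Found Python 2.7 or newer"
+#   fi
+#
+# Exit status 0 means installed, 1 means not; any other portageq exit
+# code aborts via die.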
+
+# @FUNCTION: best_version
+# @USAGE: <DEPEND ATOM>
+# @DESCRIPTION:
+# Returns the best/most-current match.
+# Callers may override the ROOT variable in order to match packages from an
+# alternative ROOT.
+best_version() {
+
+	local eroot
+	case "$EAPI" in
+		0|1|2)
+			[[ " ${FEATURES} " == *" force-prefix "* ]] && \
+				eroot=${ROOT%/}${EPREFIX}/ || eroot=${ROOT}
+			;;
+		*)
+			eroot=${ROOT%/}${EPREFIX}/
+			;;
+	esac
+	if [[ -n $PORTAGE_IPC_DAEMON ]] ; then
+		"$PORTAGE_BIN_PATH"/ebuild-ipc best_version "${eroot}" "$1"
+	else
+		PYTHONPATH=${PORTAGE_PYM_PATH}${PYTHONPATH:+:}${PYTHONPATH} \
+		"${PORTAGE_PYTHON:-/usr/bin/python}" "${PORTAGE_BIN_PATH}/portageq" best_version "${eroot}" "$1"
+	fi
+	local retval=$?
+	case "${retval}" in
+		0|1)
+			return ${retval}
+			;;
+		*)
+			die "unexpected portageq exit code: ${retval}"
+			;;
+	esac
+}
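+
+# Example (hypothetical atom): pick up whichever version is installed:
+#
+#   local png=$(best_version "media-libs/libpng")
+#   [[ -n ${png} ]] && elog "Building against ${png}"
+#
+# Prints the highest installed category/package-version match, or an
+# empty line when nothing matches.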

diff --git a/portage_with_autodep/bin/portageq b/portage_with_autodep/bin/portageq
index 57a7c39..280fe94 100755
--- a/portage_with_autodep/bin/portageq
+++ b/portage_with_autodep/bin/portageq
@@ -1,5 +1,5 @@
 #!/usr/bin/python -O
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
@@ -42,12 +42,15 @@ except ImportError:
 del pym_path
 
 from portage import os
+from portage.eapi import eapi_has_repo_deps
 from portage.util import writemsg, writemsg_stdout
+from portage.output import colormap
 portage.proxy.lazyimport.lazyimport(globals(),
 	'subprocess',
 	'_emerge.Package:Package',
 	'_emerge.RootConfig:RootConfig',
 	'portage.dbapi._expand_new_virt:expand_new_virt',
+	'portage._sets.base:InternalPackageSet',
 )
 
 def eval_atom_use(atom):
@@ -78,17 +81,18 @@ def eval_atom_use(atom):
 #
 
 def has_version(argv):
-	"""<root> <category/package>
+	"""<eroot> <category/package>
 	Return code 0 if it's available, 1 otherwise.
 	"""
 	if (len(argv) < 2):
 		print("ERROR: insufficient parameters!")
-		sys.exit(2)
+		return 2
 
 	warnings = []
 
+	allow_repo = atom_validate_strict is False or eapi_has_repo_deps(eapi)
 	try:
-		atom = portage.dep.Atom(argv[1])
+		atom = portage.dep.Atom(argv[1], allow_repo=allow_repo)
 	except portage.exception.InvalidAtom:
 		if atom_validate_strict:
 			portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1],
@@ -99,7 +103,7 @@ def has_version(argv):
 	else:
 		if atom_validate_strict:
 			try:
-				atom = portage.dep.Atom(argv[1], eapi=eapi)
+				atom = portage.dep.Atom(argv[1], allow_repo=allow_repo, eapi=eapi)
 			except portage.exception.InvalidAtom as e:
 				warnings.append(
 					portage._unicode_decode("QA Notice: %s: %s") % \
@@ -112,11 +116,11 @@ def has_version(argv):
 	try:
 		mylist = portage.db[argv[0]]["vartree"].dbapi.match(atom)
 		if mylist:
-			sys.exit(0)
+			return 0
 		else:
-			sys.exit(1)
+			return 1
 	except KeyError:
-		sys.exit(1)
+		return 1
 	except portage.exception.InvalidAtom:
 		portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1],
 			noiselevel=-1)
@@ -125,17 +129,18 @@ has_version.uses_root = True
 
 
 def best_version(argv):
-	"""<root> <category/package>
+	"""<eroot> <category/package>
 	Returns category/package-version (without .ebuild).
 	"""
 	if (len(argv) < 2):
 		print("ERROR: insufficient parameters!")
-		sys.exit(2)
+		return 2
 
 	warnings = []
 
+	allow_repo = atom_validate_strict is False or eapi_has_repo_deps(eapi)
 	try:
-		atom = portage.dep.Atom(argv[1])
+		atom = portage.dep.Atom(argv[1], allow_repo=allow_repo)
 	except portage.exception.InvalidAtom:
 		if atom_validate_strict:
 			portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1],
@@ -146,7 +151,7 @@ def best_version(argv):
 	else:
 		if atom_validate_strict:
 			try:
-				atom = portage.dep.Atom(argv[1], eapi=eapi)
+				atom = portage.dep.Atom(argv[1], allow_repo=allow_repo, eapi=eapi)
 			except portage.exception.InvalidAtom as e:
 				warnings.append(
 					portage._unicode_decode("QA Notice: %s: %s") % \
@@ -160,31 +165,31 @@ def best_version(argv):
 		mylist = portage.db[argv[0]]["vartree"].dbapi.match(atom)
 		print(portage.best(mylist))
 	except KeyError:
-		sys.exit(1)
+		return 1
 best_version.uses_root = True
 
 
 def mass_best_version(argv):
-	"""<root> [<category/package>]+
+	"""<eroot> [<category/package>]+
 	Returns category/package-version (without .ebuild).
 	"""
 	if (len(argv) < 2):
 		print("ERROR: insufficient parameters!")
-		sys.exit(2)
+		return 2
 	try:
 		for pack in argv[1:]:
 			mylist=portage.db[argv[0]]["vartree"].dbapi.match(pack)
 			print(pack+":"+portage.best(mylist))
 	except KeyError:
-		sys.exit(1)
+		return 1
 mass_best_version.uses_root = True
 
 def metadata(argv):
 	if (len(argv) < 4):
 		print("ERROR: insufficient parameters!", file=sys.stderr)
-		sys.exit(2)
+		return 2
 
-	root, pkgtype, pkgspec = argv[0:3]
+	eroot, pkgtype, pkgspec = argv[0:3]
 	metakeys = argv[3:]
 	type_map = {
 		"ebuild":"porttree",
@@ -192,20 +197,20 @@ def metadata(argv):
 		"installed":"vartree"}
 	if pkgtype not in type_map:
 		print("Unrecognized package type: '%s'" % pkgtype, file=sys.stderr)
-		sys.exit(1)
+		return 1
 	trees = portage.db
-	if os.path.realpath(root) == os.path.realpath(portage.settings["ROOT"]):
-		root = portage.settings["ROOT"] # contains the normalized $ROOT
+	repo = portage.dep.dep_getrepo(pkgspec)
+	pkgspec = portage.dep.remove_slot(pkgspec)
 	try:
-			values = trees[root][type_map[pkgtype]].dbapi.aux_get(
-				pkgspec, metakeys)
+			values = trees[eroot][type_map[pkgtype]].dbapi.aux_get(
+				pkgspec, metakeys, myrepo=repo)
 			writemsg_stdout(''.join('%s\n' % x for x in values), noiselevel=-1)
 	except KeyError:
 		print("Package not found: '%s'" % pkgspec, file=sys.stderr)
-		sys.exit(1)
+		return 1
 
 metadata.__doc__ = """
-<root> <pkgtype> <category/package> [<key>]+
+<eroot> <pkgtype> <category/package> [<key>]+
 Returns metadata values for the specified package.
 Available keys: %s
 """  % ','.join(sorted(x for x in portage.auxdbkeys \
@@ -214,10 +219,10 @@ if not x.startswith('UNUSED_')))
 metadata.uses_root = True
 
 def contents(argv):
-	"""<root> <category/package>
+	"""<eroot> <category/package>
 	List the files that are installed for a given package, with
 	one file listed on each line. All file names will begin with
-	<root>.
+	<eroot>.
 	"""
 	if len(argv) != 2:
 		print("ERROR: expected 2 parameters, got %d!" % len(argv))
@@ -236,11 +241,11 @@ def contents(argv):
 contents.uses_root = True
 
 def owners(argv):
-	"""<root> [<filename>]+
+	"""<eroot> [<filename>]+
 	Given a list of files, print the packages that own the files and which
 	files belong to each package. Files owned by a package are listed on
 	the lines below it, indented by a single tab character (\\t). All file
-	paths must either start with <root> or be a basename alone.
+	paths must either start with <eroot> or be a basename alone.
 	Returns 1 if no owners could be found, and 0 otherwise.
 	"""
 	if len(argv) < 2:
@@ -249,9 +254,9 @@ def owners(argv):
 		return 2
 
 	from portage import catsplit, dblink
-	settings = portage.settings
-	root = settings["ROOT"]
-	vardb = portage.db[root]["vartree"].dbapi
+	eroot = argv[0]
+	vardb = portage.db[eroot]["vartree"].dbapi
+	root = portage.settings['ROOT']
 
 	cwd = None
 	try:
@@ -272,8 +277,8 @@ def owners(argv):
 				return 2
 			f = os.path.join(cwd, f)
 			f = portage.normalize_path(f)
-		if not is_basename and not f.startswith(root):
-			sys.stderr.write("ERROR: file paths must begin with <root>!\n")
+		if not is_basename and not f.startswith(eroot):
+			sys.stderr.write("ERROR: file paths must begin with <eroot>!\n")
 			sys.stderr.flush()
 			return 2
 		if is_basename:
@@ -317,9 +322,9 @@ def owners(argv):
 owners.uses_root = True
 
 def is_protected(argv):
-	"""<root> <filename>
+	"""<eroot> <filename>
 	Given a single filename, return code 0 if it's protected, 1 otherwise.
-	The filename must begin with <root>.
+	The filename must begin with <eroot>.
 	"""
 	if len(argv) != 2:
 		sys.stderr.write("ERROR: expected 2 parameters, got %d!\n" % len(argv))
@@ -345,7 +350,7 @@ def is_protected(argv):
 		f = portage.normalize_path(f)
 
 	if not f.startswith(root):
-		err.write("ERROR: file paths must begin with <root>!\n")
+		err.write("ERROR: file paths must begin with <eroot>!\n")
 		err.flush()
 		return 2
 
@@ -364,9 +369,9 @@ def is_protected(argv):
 is_protected.uses_root = True
 
 def filter_protected(argv):
-	"""<root>
+	"""<eroot>
 	Read filenames from stdin and write them to stdout if they are protected.
-	All filenames are delimited by \\n and must begin with <root>.
+	All filenames are delimited by \\n and must begin with <eroot>.
 	"""
 	if len(argv) != 1:
 		sys.stderr.write("ERROR: expected 1 parameter, got %d!\n" % len(argv))
@@ -406,7 +411,7 @@ def filter_protected(argv):
 			f = portage.normalize_path(f)
 
 		if not f.startswith(root):
-			err.write("ERROR: file paths must begin with <root>!\n")
+			err.write("ERROR: file paths must begin with <eroot>!\n")
 			err.flush()
 			errors += 1
 			continue
@@ -424,7 +429,7 @@ def filter_protected(argv):
 filter_protected.uses_root = True
 
 def best_visible(argv):
-	"""<root> [pkgtype] <atom>
+	"""<eroot> [pkgtype] <atom>
 	Returns category/package-version (without .ebuild).
 	The pkgtype argument defaults to "ebuild" if unspecified,
 	otherwise it must be one of ebuild, binary, or installed.
@@ -450,7 +455,8 @@ def best_visible(argv):
 			noiselevel=-1)
 		return 2
 
-	db = portage.db[portage.settings["ROOT"]][type_map[pkgtype]].dbapi
+	eroot = argv[0]
+	db = portage.db[eroot][type_map[pkgtype]].dbapi
 
 	try:
 		atom = portage.dep_expand(atom, mydb=db, settings=portage.settings)
@@ -460,43 +466,80 @@ def best_visible(argv):
 		return 2
 
 	root_config = RootConfig(portage.settings,
-		portage.db[portage.settings["ROOT"]], None)
+		portage.db[eroot], None)
 
-	try:
+	if hasattr(db, "xmatch"):
+		cpv_list = db.xmatch("match-all-cpv-only", atom)
+	else:
+		cpv_list = db.match(atom)
+
+	if cpv_list:
 		# reversed, for descending order
-		for cpv in reversed(db.match(atom)):
-			metadata = dict(zip(Package.metadata_keys,
-				db.aux_get(cpv, Package.metadata_keys, myrepo=atom.repo)))
-			pkg = Package(built=(pkgtype != "ebuild"), cpv=cpv,
-				installed=(pkgtype=="installed"), metadata=metadata,
-				root_config=root_config, type_name=pkgtype)
-			if pkg.visible:
-				writemsg_stdout("%s\n" % (pkg.cpv,), noiselevel=-1)
-				return os.EX_OK
-	except KeyError:
-		pass
+		cpv_list.reverse()
+		# verify match, since the atom may match the package
+		# for a given cpv from one repo but not another, and
+		# we can use match-all-cpv-only to avoid redundant
+		# metadata access.
+		atom_set = InternalPackageSet(initial_atoms=(atom,))
+
+		if atom.repo is None and hasattr(db, "getRepositories"):
+			repo_list = db.getRepositories()
+		else:
+			repo_list = [atom.repo]
+
+		for cpv in cpv_list:
+			for repo in repo_list:
+				try:
+					metadata = dict(zip(Package.metadata_keys,
+						db.aux_get(cpv, Package.metadata_keys, myrepo=repo)))
+				except KeyError:
+					continue
+				pkg = Package(built=(pkgtype != "ebuild"), cpv=cpv,
+					installed=(pkgtype=="installed"), metadata=metadata,
+					root_config=root_config, type_name=pkgtype)
+				if not atom_set.findAtomForPackage(pkg):
+					continue
+
+				if pkg.visible:
+					writemsg_stdout("%s\n" % (pkg.cpv,), noiselevel=-1)
+					return os.EX_OK
+
+	# No package found, write out an empty line.
+	writemsg_stdout("\n", noiselevel=-1)
+
 	return 1
 best_visible.uses_root = True
 
 
 def mass_best_visible(argv):
-	"""<root> [<category/package>]+
+	"""<root> [<type>] [<category/package>]+
 	Returns category/package-version (without .ebuild).
+	The pkgtype argument defaults to "ebuild" if unspecified,
+	otherwise it must be one of ebuild, binary, or installed.
 	"""
+	type_map = {
+		"ebuild":"porttree",
+		"binary":"bintree",
+		"installed":"vartree"}
+
 	if (len(argv) < 2):
 		print("ERROR: insufficient parameters!")
-		sys.exit(2)
+		return 2
 	try:
-		for pack in argv[1:]:
-			mylist=portage.db[argv[0]]["porttree"].dbapi.match(pack)
-			print(pack+":"+portage.best(mylist))
+		root = argv.pop(0)
+		pkgtype = "ebuild"
+		if argv[0] in type_map:
+			pkgtype = argv.pop(0)
+		for pack in argv:
+			writemsg_stdout("%s:" % pack, noiselevel=-1)
+			best_visible([root, pkgtype, pack])
 	except KeyError:
-		sys.exit(1)
+		return 1
 mass_best_visible.uses_root = True
 
 
 def all_best_visible(argv):
-	"""<root>
+	"""<eroot>
 	Returns all best_visible packages (without .ebuild).
 	"""
 	if len(argv) < 1:
@@ -513,30 +556,57 @@ all_best_visible.uses_root = True
 
 
 def match(argv):
-	"""<root> <atom>
+	"""<eroot> <atom>
 	Returns a \\n separated list of category/package-version.
 	When given an empty string, all installed packages will
 	be listed.
 	"""
 	if len(argv) != 2:
 		print("ERROR: expected 2 parameters, got %d!" % len(argv))
-		sys.exit(2)
+		return 2
 	root, atom = argv
-	if atom:
-		if atom_validate_strict and not portage.isvalidatom(atom):
-			portage.writemsg("ERROR: Invalid atom: '%s'\n" % atom,
-				noiselevel=-1)
-			return 2
-		results = portage.db[root]["vartree"].dbapi.match(atom)
-	else:
-		results = portage.db[root]["vartree"].dbapi.cpv_all()
+	if not atom:
+		atom = "*/*"
+
+	vardb = portage.db[root]["vartree"].dbapi
+	try:
+		atom = portage.dep.Atom(atom, allow_wildcard=True, allow_repo=True)
+	except portage.exception.InvalidAtom:
+		# maybe it's valid but missing category
+		atom = portage.dep_expand(atom, mydb=vardb, settings=vardb.settings)
+
+	if atom.extended_syntax:
+		if atom == "*/*":
+			results = vardb.cpv_all()
+		else:
+			results = []
+			require_metadata = atom.slot or atom.repo
+			for cpv in vardb.cpv_all():
+
+				if not portage.dep.extended_cp_match(
+					atom.cp, portage.cpv_getkey(cpv)):
+					continue
+
+				if require_metadata:
+					slot, repo = vardb.aux_get(cpv, ["SLOT", "repository"])
+
+					if atom.slot is not None and atom.slot != slot:
+						continue
+
+					if atom.repo is not None and atom.repo != repo:
+						continue
+
+				results.append(cpv)
+
 		results.sort()
+	else:
+		results = vardb.match(atom)
 	for cpv in results:
 		print(cpv)
 match.uses_root = True
 
 def expand_virtual(argv):
-	"""<root> <atom>
+	"""<eroot> <atom>
 	Returns a \\n separated list of atoms expanded from a
 	given virtual atom (GLEP 37 virtuals only),
 	excluding blocker atoms. Satisfied
@@ -630,6 +700,13 @@ def distdir(argv):
 	print(portage.settings["DISTDIR"])
 
 
+def colormap(argv):
+	"""
+	Display the color.map as environment variables.
+	"""
+	print(portage.output.colormap())
+
+
 def envvar(argv):
 	"""<variable>+
 	Returns a specific environment variable as exists prior to ebuild.sh.
@@ -641,7 +718,7 @@ def envvar(argv):
 
 	if len(argv) == 0:
 		print("ERROR: insufficient parameters!")
-		sys.exit(2)
+		return 2
 
 	for arg in argv:
 		if verbose:
@@ -650,29 +727,33 @@ def envvar(argv):
 			print(portage.settings[arg])
 
 def get_repos(argv):
-	"""<root>
-	Returns all repos with names (repo_name file) argv[0] = $ROOT
+	"""<eroot>
+	Returns all repos with names (repo_name file) argv[0] = $EROOT
 	"""
 	if len(argv) < 1:
 		print("ERROR: insufficient parameters!")
-		sys.exit(2)
+		return 2
 	print(" ".join(portage.db[argv[0]]["porttree"].dbapi.getRepositories()))
 
+get_repos.uses_root = True
+
 def get_repo_path(argv):
-	"""<root> <repo_id>+
-	Returns the path to the repo named argv[1], argv[0] = $ROOT
+	"""<eroot> <repo_id>+
+	Returns the path to the repo named argv[1], argv[0] = $EROOT
 	"""
 	if len(argv) < 2:
 		print("ERROR: insufficient parameters!")
-		sys.exit(2)
+		return 2
 	for arg in argv[1:]:
 		path = portage.db[argv[0]]["porttree"].dbapi.getRepositoryPath(arg)
 		if path is None:
 			path = ""
 		print(path)
 
+get_repo_path.uses_root = True
+
 def list_preserved_libs(argv):
-	"""<root>
+	"""<eroot>
 	Print a list of libraries preserved during a package update in the form
 	package: path. Returns 1 if no preserved libraries could be found,
 	0 otherwise.
@@ -680,7 +761,7 @@ def list_preserved_libs(argv):
 
 	if len(argv) != 1:
 		print("ERROR: wrong number of arguments")
-		sys.exit(2)
+		return 2
 	mylibs = portage.db[argv[0]]["vartree"].dbapi._plib_registry.getPreservedLibs()
 	rValue = 1
 	msg = []
@@ -788,16 +869,22 @@ def main():
 			sys.stderr.write("Run portageq with --help for info\n")
 			sys.stderr.flush()
 			sys.exit(os.EX_USAGE)
-		os.environ["ROOT"] = sys.argv[2]
+		eprefix = portage.const.EPREFIX
+		eroot = portage.util.normalize_path(sys.argv[2])
+		if eprefix:
+			root = eroot[:1-len(eprefix)]
+		else:
+			root = eroot
+		os.environ["ROOT"] = root
 
 	args = sys.argv[2:]
-	if args and sys.hexversion < 0x3000000 and not isinstance(args[0], unicode):
+	if args and isinstance(args[0], bytes):
 		for i in range(len(args)):
 			args[i] = portage._unicode_decode(args[i])
 
 	try:
 		if uses_root:
-			args[0] = portage.settings["ROOT"]
+			args[0] = portage.settings['EROOT']
 		retval = function(args)
 		if retval:
 			sys.exit(retval)

diff --git a/portage_with_autodep/bin/quickpkg b/portage_with_autodep/bin/quickpkg
index 09723f5..d908c03 100755
--- a/portage_with_autodep/bin/quickpkg
+++ b/portage_with_autodep/bin/quickpkg
@@ -21,9 +21,9 @@ except ImportError:
 from portage import os
 from portage import xpak
 from portage.dbapi.dep_expand import dep_expand
-from portage.dep import use_reduce
-from portage.exception import InvalidAtom, InvalidData, InvalidDependString, \
-	PackageSetNotFound, PermissionDenied
+from portage.dep import Atom, extended_cp_match, use_reduce
+from portage.exception import (AmbiguousPackageName, InvalidAtom, InvalidData,
+	InvalidDependString, PackageSetNotFound, PermissionDenied)
 from portage.util import ConfigProtect, ensure_dirs, shlex_split
 from portage.dbapi.vartree import dblink, tar_contents
 from portage.checksum import perform_md5
@@ -31,8 +31,9 @@ from portage._sets import load_default_config, SETPREFIX
 
 def quickpkg_atom(options, infos, arg, eout):
 	settings = portage.settings
-	root = portage.settings["ROOT"]
-	trees = portage.db[root]
+	root = portage.settings['ROOT']
+	eroot = portage.settings['EROOT']
+	trees = portage.db[eroot]
 	vartree = trees["vartree"]
 	vardb = vartree.dbapi
 	bintree = trees["bintree"]
@@ -43,7 +44,7 @@ def quickpkg_atom(options, infos, arg, eout):
 
 	try:
 		atom = dep_expand(arg, mydb=vardb, settings=vartree.settings)
-	except ValueError as e:
+	except AmbiguousPackageName as e:
 		# Multiple matches thrown from cpv_expand
 		eout.eerror("Please use a more specific atom: %s" % \
 			" ".join(e.args[0]))
@@ -65,10 +66,7 @@ def quickpkg_atom(options, infos, arg, eout):
 	for cpv in matches:
 		excluded_config_files = []
 		bintree.prevent_collision(cpv)
-		cat, pkg = portage.catsplit(cpv)
-		dblnk = dblink(cat, pkg, root,
-			vartree.settings, treetype="vartree",
-			vartree=vartree)
+		dblnk = vardb._dblink(cpv)
 		have_lock = False
 		try:
 			dblnk.lockdb()
@@ -101,7 +99,7 @@ def quickpkg_atom(options, infos, arg, eout):
 			contents = dblnk.getcontents()
 			protect = None
 			if not include_config:
-				confprot = ConfigProtect(root,
+				confprot = ConfigProtect(eroot,
 					shlex_split(settings.get("CONFIG_PROTECT", "")),
 					shlex_split(settings.get("CONFIG_PROTECT_MASK", "")))
 				def protect(filename):
@@ -161,8 +159,8 @@ def quickpkg_atom(options, infos, arg, eout):
 		infos["missing"].append(arg)
 
 def quickpkg_set(options, infos, arg, eout):
-	root = portage.settings["ROOT"]
-	trees = portage.db[root]
+	eroot = portage.settings['EROOT']
+	trees = portage.db[eroot]
 	vartree = trees["vartree"]
 
 	settings = vartree.settings
@@ -187,9 +185,43 @@ def quickpkg_set(options, infos, arg, eout):
 	for atom in atoms:
 		quickpkg_atom(options, infos, atom, eout)
 
+
+def quickpkg_extended_atom(options, infos, atom, eout):
+	eroot = portage.settings['EROOT']
+	trees = portage.db[eroot]
+	vartree = trees["vartree"]
+	vardb = vartree.dbapi
+
+	require_metadata = atom.slot or atom.repo
+	atoms = []
+	for cpv in vardb.cpv_all():
+		cpv_atom = Atom("=%s" % cpv)
+
+		if atom == "*/*":
+			atoms.append(cpv_atom)
+			continue
+
+		if not extended_cp_match(atom.cp, cpv_atom.cp):
+			continue
+
+		if require_metadata:
+			slot, repo = vardb.aux_get(cpv, ["SLOT", "repository"])
+
+			if atom.slot and atom.slot != slot:
+				continue
+
+			if atom.repo and atom.repo != repo:
+				continue
+
+		atoms.append(cpv_atom)
+
+	for atom in atoms:
+		quickpkg_atom(options, infos, atom, eout)
+
+
 def quickpkg_main(options, args, eout):
-	root = portage.settings["ROOT"]
-	trees = portage.db[root]
+	eroot = portage.settings['EROOT']
+	trees = portage.db[eroot]
 	bintree = trees["bintree"]
 
 	try:
@@ -207,8 +239,17 @@ def quickpkg_main(options, args, eout):
 	for arg in args:
 		if arg[0] == SETPREFIX:
 			quickpkg_set(options, infos, arg, eout)
-		else:
+			continue
+		try:
+			atom = Atom(arg, allow_wildcard=True, allow_repo=True)
+		except (InvalidAtom, InvalidData):
+			# maybe it's valid but missing category (requires dep_expand)
 			quickpkg_atom(options, infos, arg, eout)
+		else:
+			if atom.extended_syntax:
+				quickpkg_extended_atom(options, infos, atom, eout)
+			else:
+				quickpkg_atom(options, infos, atom, eout)
 
 	if not infos["successes"]:
 		eout.eerror("No packages found")

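The quickpkg_main() dispatch above tries to parse each argument as a wildcard-capable Atom and only falls back to dep_expand() when that fails; wildcard atoms such as */* or app-*/gcc* are then filtered through portage.dep.extended_cp_match. A rough stand-in for that matcher, assuming "*" matches any run of characters within a single category or package component:

    import re

    def extended_cp_match_sketch(extended_cp, other_cp):
        # assumption: "*" matches within one component, never across "/"
        pattern = "^" + re.escape(extended_cp).replace(r"\*", "[^/]*") + "$"
        return re.match(pattern, other_cp) is not None

    print(extended_cp_match_sketch("app-*/gcc*", "app-shells/gcc-config"))  # True
    print(extended_cp_match_sketch("*/*", "sys-apps/portage"))              # True
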
diff --git a/portage_with_autodep/bin/regenworld b/portage_with_autodep/bin/regenworld
index 6b5af4c..3199fdf 100755
--- a/portage_with_autodep/bin/regenworld
+++ b/portage_with_autodep/bin/regenworld
@@ -47,7 +47,6 @@ def isunwanted(pkgline):
 		__uniqlist__.append(pkgline)
 		return True
 
-root = portage.settings['ROOT']
 eroot = portage.settings['EROOT']
 world_file = os.path.join(eroot, portage.WORLD_FILE)
 
@@ -78,7 +77,7 @@ for mykey in syslist:
 	# drop the asterix
 	mykey = mykey[1:]
 	#print("candidate:",mykey)
-	mylist = portage.db[root]["vartree"].dbapi.match(mykey)
+	mylist = portage.db[eroot]["vartree"].dbapi.match(mykey)
 	if mylist:
 		mykey=portage.cpv_getkey(mylist[0])
 		if mykey not in realsyslist:
@@ -87,7 +86,7 @@ for mykey in syslist:
 for mykey in biglist:
 	#print("checking:",mykey)
 	try:
-		mylist = portage.db[root]["vartree"].dbapi.match(mykey)
+		mylist = portage.db[eroot]["vartree"].dbapi.match(mykey)
 	except (portage.exception.InvalidAtom, KeyError):
 		if "--debug" in sys.argv:
 			print("* ignoring broken log entry for %s (likely injected)" % mykey)

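regenworld now keys portage.db by EROOT and expands each system-set atom against the installed-package database. A minimal interactive sketch of that lookup, assuming a standard portage installation (the matched versions naturally differ per system):

    import portage

    eroot = portage.settings['EROOT']
    vardb = portage.db[eroot]["vartree"].dbapi
    # match() expands an atom to the installed cpvs that satisfy it
    for cpv in vardb.match("sys-apps/portage"):
        print(cpv, "->", portage.cpv_getkey(cpv))
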
diff --git a/portage_with_autodep/bin/repoman b/portage_with_autodep/bin/repoman
index f1fbc24..fd87847 100755
--- a/portage_with_autodep/bin/repoman
+++ b/portage_with_autodep/bin/repoman
@@ -1,5 +1,5 @@
 #!/usr/bin/python -O
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 # Next to do: dep syntax checking in mask files
@@ -18,8 +18,10 @@ import optparse
 import re
 import signal
 import stat
+import subprocess
 import sys
 import tempfile
+import textwrap
 import time
 import platform
 
@@ -43,7 +45,7 @@ portage.dep._internal_warnings = True
 try:
 	import xml.etree.ElementTree
 	from xml.parsers.expat import ExpatError
-except ImportError:
+except (ImportError, SystemError):
 	msg = ["Please enable python's \"xml\" USE flag in order to use repoman."]
 	from portage.output import EOutput
 	out = EOutput()
@@ -67,16 +69,16 @@ from portage import cvstree, normalize_path
 from portage import util
 from portage.exception import (FileNotFound, MissingParameter,
 	ParseError, PermissionDenied)
-from portage.manifest import Manifest
+from portage.manifest import _prohibited_filename_chars_re as \
+	disallowed_filename_chars_re
 from portage.process import find_binary, spawn
 from portage.output import bold, create_color_func, \
 	green, nocolor, red
 from portage.output import ConsoleStyleFile, StyleWriter
-from portage.util import cmp_sort_key, writemsg_level
+from portage.util import writemsg_level
+from portage.util._desktop_entry import validate_desktop_entry
 from portage.package.ebuild.digestgen import digestgen
-from portage.eapi import eapi_has_slot_deps, \
-	eapi_has_use_deps, eapi_has_strong_blocks, eapi_has_iuse_defaults, \
-	eapi_has_required_use, eapi_has_use_dep_defaults
+from portage.eapi import eapi_has_iuse_defaults, eapi_has_required_use
 
 if sys.hexversion >= 0x3000000:
 	basestring = str
@@ -86,7 +88,6 @@ util.initialize_logger()
 # 14 is the length of DESCRIPTION=""
 max_desc_len = 100
 allowed_filename_chars="a-zA-Z0-9._-+:"
-disallowed_filename_chars_re = re.compile(r'[^a-zA-Z0-9._\-+:]')
 pv_toolong_re = re.compile(r'[0-9]{19,}')
 bad = create_color_func("BAD")
 
@@ -96,8 +97,8 @@ os.umask(0o22)
 # behave incrementally.
 repoman_incrementals = tuple(x for x in \
 	portage.const.INCREMENTALS if x != 'ACCEPT_KEYWORDS')
-repoman_settings = portage.config(local_config=False)
-repoman_settings.lock()
+config_root = os.environ.get("PORTAGE_CONFIGROOT")
+repoman_settings = portage.config(config_root=config_root, local_config=False)
 
 if repoman_settings.get("NOCOLOR", "").lower() in ("yes", "true") or \
 	repoman_settings.get('TERM') == 'dumb' or \
@@ -156,7 +157,7 @@ def ParseArgs(argv, qahelp):
 	  (opts, args), just like a call to parser.parse_args()
 	"""
 
-	if argv and sys.hexversion < 0x3000000 and not isinstance(argv[0], unicode):
+	if argv and isinstance(argv[0], bytes):
 		argv = [portage._unicode_decode(x) for x in argv]
 
 	modes = {
@@ -188,12 +189,21 @@ def ParseArgs(argv, qahelp):
 	parser.add_option('-M', '--commitmsgfile', dest='commitmsgfile',
 		help='specify a path to a file that contains a commit message')
 
+	parser.add_option('--digest',
+		type='choice', choices=('y', 'n'), metavar='<y|n>',
+		help='Automatically update Manifest digests for modified files')
+
 	parser.add_option('-p', '--pretend', dest='pretend', default=False,
 		action='store_true', help='don\'t commit or fix anything; just show what would be done')
 	
 	parser.add_option('-q', '--quiet', dest="quiet", action="count", default=0,
 		help='do not print unnecessary messages')
 
+	parser.add_option(
+		'--echangelog', type='choice', choices=('y', 'n', 'force'), metavar="<y|n|force>",
+		help='for commit mode, call echangelog if ChangeLog is unmodified (or '
+		'regardless of modification if \'force\' is specified)')
+
 	parser.add_option('-f', '--force', dest='force', default=False, action='store_true',
 		help='Commit with QA violations')
 
@@ -209,9 +219,18 @@ def ParseArgs(argv, qahelp):
 	parser.add_option('-x', '--xmlparse', dest='xml_parse', action='store_true',
 		default=False, help='forces the metadata.xml parse check to be carried out')
 
+	parser.add_option(
+		'--if-modified', type='choice', choices=('y', 'n'), default='n',
+		metavar="<y|n>",
+		help='only check packages that have uncommitted modifications')
+
 	parser.add_option('-i', '--ignore-arches', dest='ignore_arches', action='store_true',
 		default=False, help='ignore arch-specific failures (where arch != host)')
 
+	parser.add_option("--ignore-default-opts",
+		action="store_true",
+		help="do not use the REPOMAN_DEFAULT_OPTS environment variable")
+
 	parser.add_option('-I', '--ignore-masked', dest='ignore_masked', action='store_true',
 		default=False, help='ignore masked packages (not allowed with commit mode)')
 
@@ -241,6 +260,11 @@ def ParseArgs(argv, qahelp):
 
 	opts, args = parser.parse_args(argv[1:])
 
+	if not opts.ignore_default_opts:
+		default_opts = repoman_settings.get("REPOMAN_DEFAULT_OPTS", "").split()
+		if default_opts:
+			opts, args = parser.parse_args(default_opts + sys.argv[1:])
+
 	if opts.mode == 'help':
 		parser.print_help(short=False)
 
@@ -285,8 +309,8 @@ qahelp={
 	"ebuild.notadded":"Ebuilds that exist but have not been added to cvs",
 	"ebuild.patches":"PATCHES variable should be a bash array to ensure white space safety",
 	"changelog.notadded":"ChangeLogs that exist but have not been added to cvs",
-	"dependency.unknown" : "Ebuild has a dependency that refers to an unknown package (which may be provided by an overlay)",
-	"file.executable":"Ebuilds, digests, metadata.xml, Manifest, and ChangeLog do note need the executable bit",
+	"dependency.unknown" : "Ebuild has a dependency that refers to an unknown package (which may be valid if it is a blocker for a renamed/removed package, or is an alternative choice provided by an overlay)",
+	"file.executable":"Ebuilds, digests, metadata.xml, Manifest, and ChangeLog do not need the executable bit",
 	"file.size":"Files in the files directory must be under 20 KiB",
 	"file.size.fatal":"Files in the files directory must be under 60 KiB",
 	"file.name":"File/dir name must be composed of only the following chars: %s " % allowed_filename_chars,
@@ -303,7 +327,7 @@ qahelp={
 	"LICENSE.virtual":"Virtuals that have a non-empty LICENSE variable",
 	"DESCRIPTION.missing":"Ebuilds that have a missing or empty DESCRIPTION variable",
 	"DESCRIPTION.toolong":"DESCRIPTION is over %d characters" % max_desc_len,
-	"EAPI.definition":"EAPI is defined after an inherit call (must be defined before)",
+	"EAPI.definition":"EAPI definition does not conform to PMS section 7.3.1 (first non-comment, non-blank line)",
 	"EAPI.deprecated":"Ebuilds that use features that are deprecated in the current EAPI",
 	"EAPI.incompatible":"Ebuilds that use features that are only available with a different EAPI",
 	"EAPI.unsupported":"Ebuilds that have an unsupported EAPI version (you must upgrade portage)",
@@ -355,8 +379,6 @@ qahelp={
 	"digest.assumed":"Existing digest must be assumed correct (Package level only)",
 	"digest.missing":"Some files listed in SRC_URI aren't referenced in the Manifest",
 	"digest.unused":"Some files listed in the Manifest aren't referenced in SRC_URI",
-	"ebuild.nostable":"There are no ebuilds that are marked as stable for your ARCH",
-	"ebuild.allmasked":"All ebuilds are masked for this package (Package level only)",
 	"ebuild.majorsyn":"This ebuild has a major syntax error that may cause the ebuild to fail partially or fully",
 	"ebuild.minorsyn":"This ebuild has a minor syntax error that contravenes gentoo coding style",
 	"ebuild.badheader":"This ebuild has a malformed header",
@@ -381,8 +403,6 @@ qawarnings = set((
 "digest.assumed",
 "digest.unused",
 "ebuild.notadded",
-"ebuild.nostable",
-"ebuild.allmasked",
 "ebuild.nesteddie",
 "desktop.invalid",
 "DEPEND.badmasked","RDEPEND.badmasked","PDEPEND.badmasked",
@@ -401,7 +421,6 @@ qawarnings = set((
 "RDEPEND.implicit",
 "RDEPEND.suspect",
 "RESTRICT.invalid",
-"SRC_URI.mirror",
 "ebuild.minorsyn",
 "ebuild.badheader",
 "ebuild.patches",
@@ -414,7 +433,6 @@ qawarnings = set((
 "portage.internal",
 "usage.obsolete",
 "upstream.workaround",
-"virtual.oldstyle",
 "LIVEVCS.stable",
 "LIVEVCS.unmasked",
 ))
@@ -524,14 +542,13 @@ else:
 	else:
 		vcs = None
 
-# Note: We don't use ChangeLogs in distributed SCMs.
-# It will be generated on server side from scm log,
-# before package moves to the rsync server.
-# This is needed because we try to avoid merge collisions.
-check_changelog = vcs in ('cvs', 'svn')
+if options.if_modified == "y" and vcs is None:
+	logging.info("Not in a version controlled repository; "
+		"disabling --if-modified.")
+	options.if_modified = "n"
 
 # Disable copyright/mtime check if vcs does not preserve mtime (bug #324075).
-vcs_preserves_mtime = vcs not in ('git',)
+vcs_preserves_mtime = vcs in ('cvs',)
 
 vcs_local_opts = repoman_settings.get("REPOMAN_VCS_LOCAL_OPTS", "").split()
 vcs_global_opts = repoman_settings.get("REPOMAN_VCS_GLOBAL_OPTS")
@@ -542,50 +559,115 @@ if vcs_global_opts is None:
 		vcs_global_opts = ""
 vcs_global_opts = vcs_global_opts.split()
 
-if vcs == "cvs" and \
-	"commit" == options.mode and \
-	"RMD160" not in portage.checksum.hashorigin_map:
-	from portage.util import grablines
-	repo_lines = grablines("./CVS/Repository")
-	if repo_lines and \
-		"gentoo-x86" == repo_lines[0].strip().split(os.path.sep)[0]:
-		msg = "Please install " \
-		"pycrypto or enable python's ssl USE flag in order " \
-		"to enable RMD160 hash support. See bug #198398 for " \
-		"more information."
-		prefix = bad(" * ")
-		from textwrap import wrap
-		for line in wrap(msg, 70):
-			print(prefix + line)
-		sys.exit(1)
-	del repo_lines
-
 if options.mode == 'commit' and not options.pretend and not vcs:
 	logging.info("Not in a version controlled repository; enabling pretend mode.")
 	options.pretend = True
 
 # Ensure that PORTDIR_OVERLAY contains the repository corresponding to $PWD.
-repoman_settings = portage.config(local_config=False)
 repoman_settings['PORTDIR_OVERLAY'] = "%s %s" % \
-	(repoman_settings.get('PORTDIR_OVERLAY', ''), portdir_overlay)
+	(repoman_settings.get('PORTDIR_OVERLAY', ''),
+	portage._shell_quote(portdir_overlay))
 # We have to call the config constructor again so
 # that config.repositories is initialized correctly.
-repoman_settings = portage.config(local_config=False, env=dict(os.environ,
-	PORTDIR_OVERLAY=repoman_settings['PORTDIR_OVERLAY']))
+repoman_settings = portage.config(config_root=config_root, local_config=False,
+	env=dict(os.environ, PORTDIR_OVERLAY=repoman_settings['PORTDIR_OVERLAY']))
 
-root = '/'
+root = repoman_settings['EROOT']
 trees = {
-	root : {'porttree' : portage.portagetree(root, settings=repoman_settings)}
+	root : {'porttree' : portage.portagetree(settings=repoman_settings)}
 }
 portdb = trees[root]['porttree'].dbapi
 
 # Constrain dependency resolution to the master(s)
 # that are specified in layout.conf.
-portdir_overlay = os.path.realpath(portdir_overlay)
-repo_info = portdb._repo_info[portdir_overlay]
-portdb.porttrees = list(repo_info.eclass_db.porttrees)
+repodir = os.path.realpath(portdir_overlay)
+repo_config = repoman_settings.repositories.get_repo_for_location(repodir)
+portdb.porttrees = list(repo_config.eclass_db.porttrees)
 portdir = portdb.porttrees[0]
 
+if repo_config.allow_provide_virtual:
+	qawarnings.add("virtual.oldstyle")
+
+if repo_config.sign_commit:
+	if vcs == 'git':
+		# NOTE: It's possible to use --gpg-sign=key_id to specify the key in
+		# the commit arguments. If key_id is unspecified, then it must be
+		# configured by `git config user.signingkey key_id`.
+		vcs_local_opts.append("--gpg-sign")
+
+# In order to disable manifest signatures, repos may set
+# "sign-manifests = false" in metadata/layout.conf. This
+# can be used to prevent merge conflicts like those that
+# thin-manifests is designed to prevent.
+sign_manifests = "sign" in repoman_settings.features and \
+	repo_config.sign_manifest
+
+manifest_hashes = repo_config.manifest_hashes
+if manifest_hashes is None:
+	manifest_hashes = portage.const.MANIFEST2_HASH_DEFAULTS
+
+if options.mode in ("commit", "fix", "manifest"):
+	if portage.const.MANIFEST2_REQUIRED_HASH not in manifest_hashes:
+		msg = ("The 'manifest-hashes' setting in the '%s' repository's "
+		"metadata/layout.conf does not contain the '%s' hash which "
+		"is required by this portage version. You will have to "
+		"upgrade portage if you want to generate valid manifests for "
+		"this repository.") % \
+		(repo_config.name, portage.const.MANIFEST2_REQUIRED_HASH)
+		for line in textwrap.wrap(msg, 70):
+			logging.error(line)
+		sys.exit(1)
+
+	unsupported_hashes = manifest_hashes.difference(
+		portage.const.MANIFEST2_HASH_FUNCTIONS)
+	if unsupported_hashes:
+		msg = ("The 'manifest-hashes' setting in the '%s' repository's "
+		"metadata/layout.conf contains one or more hash types '%s' "
+		"which are not supported by this portage version. You will "
+		"have to upgrade portage if you want to generate valid "
+		"manifests for this repository.") % \
+		(repo_config.name, " ".join(sorted(unsupported_hashes)))
+		for line in textwrap.wrap(msg, 70):
+			logging.error(line)
+		sys.exit(1)
+
+if "commit" == options.mode and \
+	repo_config.name == "gentoo" and \
+	"RMD160" in manifest_hashes and \
+	"RMD160" not in portage.checksum.hashorigin_map:
+	msg = "Please install " \
+	"pycrypto or enable python's ssl USE flag in order " \
+	"to enable RMD160 hash support. See bug #198398 for " \
+	"more information."
+	prefix = bad(" * ")
+	for line in textwrap.wrap(msg, 70):
+		print(prefix + line)
+	sys.exit(1)
+
+if options.echangelog is None and repo_config.update_changelog:
+	options.echangelog = 'y'
+
+if vcs is None:
+	options.echangelog = 'n'
+
+# The --echangelog option causes automatic ChangeLog generation,
+# which invalidates changelog.ebuildadded and changelog.missing
+# checks.
+# Note: Some don't use ChangeLogs in distributed SCMs.
+# It will be generated on server side from scm log,
+# before package moves to the rsync server.
+# This is needed because they try to avoid merge collisions.
+# Gentoo's Council decided to always use the ChangeLog file.
+# TODO: shouldn't this just be switched on the repo, instead of the VCS?
+check_changelog = options.echangelog not in ('y', 'force') and vcs in ('cvs', 'svn')
+
+if 'digest' in repoman_settings.features and options.digest != 'n':
+	options.digest = 'y'
+
+logging.debug("vcs: %s" % (vcs,))
+logging.debug("repo config: %s" % (repo_config,))
+logging.debug("options: %s" % (options,))
+
 # Generate an appropriate PORTDIR_OVERLAY value for passing into the
 # profile-specific config constructor calls.
 env = os.environ.copy()
@@ -601,19 +683,19 @@ logging.info('PORTDIR_OVERLAY = "%s"' % env['PORTDIR_OVERLAY'])
 env['FEATURES'] = env.get('FEATURES', '') + ' -unknown-features-warn'
 
 categories = []
-for path in set([portdir, portdir_overlay]):
+for path in repo_config.eclass_db.porttrees:
 	categories.extend(portage.util.grabfile(
 		os.path.join(path, 'profiles', 'categories')))
-repoman_settings.categories = tuple(sorted(
-	portage.util.stack_lists([categories], incremental=1)))
-del categories
+repoman_settings.categories = frozenset(
+	portage.util.stack_lists([categories], incremental=1))
+categories = repoman_settings.categories
 
 portdb.settings = repoman_settings
 root_config = RootConfig(repoman_settings, trees[root], None)
 # We really only need to cache the metadata that's necessary for visibility
 # filtering. Anything else can be discarded to reduce memory consumption.
 portdb._aux_cache_keys.clear()
-portdb._aux_cache_keys.update(["EAPI", "KEYWORDS", "SLOT"])
+portdb._aux_cache_keys.update(["EAPI", "IUSE", "KEYWORDS", "repository", "SLOT"])
 
 reposplit = myreporoot.split(os.path.sep)
 repolevel = len(reposplit)
@@ -628,11 +710,13 @@ if options.mode == 'commit' and repolevel not in [1,2,3]:
 	print(red("***"))
 	err("Unable to identify level we're commiting from for %s" % '/'.join(reposplit))
 
-startdir = normalize_path(mydir)
-repodir = startdir
-for x in range(0, repolevel - 1):
-	repodir = os.path.dirname(repodir)
-repodir = os.path.realpath(repodir)
+# Make startdir relative to the canonical repodir, so that we can pass
+# it to digestgen and it won't have to be canonicalized again.
+if repolevel == 1:
+	startdir = repodir
+else:
+	startdir = normalize_path(mydir)
+	startdir = os.path.join(repodir, *startdir.split(os.sep)[-2-repolevel+3:])
 
 def caterror(mycat):
 	err(mycat+" is not an official category.  Skipping QA checks in this directory.\nPlease ensure that you add "+catdir+" to "+repodir+"/profiles/categories\nif it is a new category.")
@@ -792,7 +876,7 @@ scanlist=[]
 if repolevel==2:
 	#we are inside a category directory
 	catdir=reposplit[-1]
-	if catdir not in repoman_settings.categories:
+	if catdir not in categories:
 		caterror(catdir)
 	mydirlist=os.listdir(startdir)
 	for x in mydirlist:
@@ -802,7 +886,7 @@ if repolevel==2:
 			scanlist.append(catdir+"/"+x)
 	repo_subdir = catdir + os.sep
 elif repolevel==1:
-	for x in repoman_settings.categories:
+	for x in categories:
 		if not os.path.isdir(startdir+"/"+x):
 			continue
 		for y in os.listdir(startdir+"/"+x):
@@ -813,7 +897,7 @@ elif repolevel==1:
 	repo_subdir = ""
 elif repolevel==3:
 	catdir = reposplit[-2]
-	if catdir not in repoman_settings.categories:
+	if catdir not in categories:
 		caterror(catdir)
 	scanlist.append(catdir+"/"+reposplit[-1])
 	repo_subdir = scanlist[-1] + os.sep
@@ -828,6 +912,53 @@ scanlist.sort()
 
 logging.debug("Found the following packages to scan:\n%s" % '\n'.join(scanlist))
 
+def vcs_files_to_cps(vcs_file_iter):
+	"""
+	Iterate over the given modified file paths returned from the vcs,
+	and return a frozenset containing category/pn strings for each
+	modified package.
+	"""
+
+	modified_cps = []
+
+	if repolevel == 3:
+		if reposplit[-2] in categories and \
+			next(vcs_file_iter, None) is not None:
+			modified_cps.append("/".join(reposplit[-2:]))
+
+	elif repolevel == 2:
+		category = reposplit[-1]
+		if category in categories:
+			for filename in vcs_file_iter:
+				f_split = filename.split(os.sep)
+				# ['.', pn,...]
+				if len(f_split) > 2:
+					modified_cps.append(category + "/" + f_split[1])
+
+	else:
+		# repolevel == 1
+		for filename in vcs_file_iter:
+			f_split = filename.split(os.sep)
+			# ['.', category, pn,...]
+			if len(f_split) > 3 and f_split[1] in categories:
+				modified_cps.append("/".join(f_split[1:3]))
+
+	return frozenset(modified_cps)
+
+def git_supports_gpg_sign():
+	status, cmd_output = \
+		subprocess_getstatusoutput("git --version")
+	cmd_output = cmd_output.split()
+	if cmd_output:
+		version = re.match(r'^(\d+)\.(\d+)\.(\d+)', cmd_output[-1])
+		if version is not None:
+			version = [int(x) for x in version.groups()]
+			if version[0] > 1 or \
+				(version[0] == 1 and version[1] > 7) or \
+				(version[0] == 1 and version[1] == 7 and version[2] >= 9):
+				return True
+	return False
+
 def dev_keywords(profiles):
 	"""
 	Create a set of KEYWORDS values that exist in 'dev'
@@ -896,7 +1027,7 @@ def fetch_metadata_dtd():
 	Fetch metadata.dtd if it doesn't exist or the ctime is older than
 	metadata_dtd_ctime_interval.
 	@rtype: bool
-	@returns: True if successful, otherwise False
+	@return: True if successful, otherwise False
 	"""
 
 	must_fetch = True
@@ -995,25 +1126,51 @@ if vcs == "cvs":
 	mycvstree = cvstree.getentries("./", recursive=1)
 	mychanged = cvstree.findchanged(mycvstree, recursive=1, basedir="./")
 	mynew = cvstree.findnew(mycvstree, recursive=1, basedir="./")
-if vcs == "svn":
-	svnstatus = os.popen("svn status").readlines()
+	if options.if_modified == "y":
+		myremoved = cvstree.findremoved(mycvstree, recursive=1, basedir="./")
+
+elif vcs == "svn":
+	with os.popen("svn status") as f:
+		svnstatus = f.readlines()
 	mychanged = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem and elem[:1] in "MR" ]
 	mynew     = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("A") ]
+	if options.if_modified == "y":
+		myremoved = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("D")]
+
 elif vcs == "git":
-	mychanged = os.popen("git diff-index --name-only --relative --diff-filter=M HEAD").readlines()
+	with os.popen("git diff-index --name-only "
+		"--relative --diff-filter=M HEAD") as f:
+		mychanged = f.readlines()
 	mychanged = ["./" + elem[:-1] for elem in mychanged]
 
-	mynew = os.popen("git diff-index --name-only --relative --diff-filter=A HEAD").readlines()
+	with os.popen("git diff-index --name-only "
+		"--relative --diff-filter=A HEAD") as f:
+		mynew = f.readlines()
 	mynew = ["./" + elem[:-1] for elem in mynew]
+	if options.if_modified == "y":
+		with os.popen("git diff-index --name-only "
+			"--relative --diff-filter=D HEAD") as f:
+			myremoved = f.readlines()
+		myremoved = ["./" + elem[:-1] for elem in myremoved]
+
 elif vcs == "bzr":
-	bzrstatus = os.popen("bzr status -S .").readlines()
+	with os.popen("bzr status -S .") as f:
+		bzrstatus = f.readlines()
 	mychanged = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and elem[1:2] == "M" ]
 	mynew     = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and ( elem[1:2] == "NK" or elem[0:1] == "R" ) ]
+	if options.if_modified == "y":
+		myremoved = [ "./" + elem.split()[-3:-2][0].split('/')[-1:][0] for elem in bzrstatus if elem and ( elem[1:2] == "K" or elem[0:1] == "R" ) ]
+
 elif vcs == "hg":
-	mychanged = os.popen("hg status --no-status --modified .").readlines()
+	with os.popen("hg status --no-status --modified .") as f:
+		mychanged = f.readlines()
 	mychanged = ["./" + elem.rstrip() for elem in mychanged]
 	mynew = os.popen("hg status --no-status --added .").readlines()
 	mynew = ["./" + elem.rstrip() for elem in mynew]
+	if options.if_modified == "y":
+		with os.popen("hg status --no-status --removed .") as f:
+			myremoved = f.readlines()
+		myremoved = ["./" + elem.rstrip() for elem in myremoved]
 
 if vcs:
 	new_ebuilds.update(x for x in mynew if x.endswith(".ebuild"))
@@ -1021,9 +1178,18 @@ if vcs:
 	modified_changelogs.update(x for x in chain(mychanged, mynew) \
 		if os.path.basename(x) == "ChangeLog")
 
+def vcs_new_changed(relative_path):
+	for x in chain(mychanged, mynew):
+		if x == relative_path:
+			return True
+	return False
+
 have_pmasked = False
 have_dev_keywords = False
 dofail = 0
+
+# NOTE: match-all caches are not shared due to potential
+# differences between profiles in _get_implicit_iuse.
 arch_caches={}
 arch_xmatch_caches = {}
 shared_xmatch_caches = {"cp-list":{}}
@@ -1037,7 +1203,10 @@ check_ebuild_notadded = not \
 # Build a regex from thirdpartymirrors for the SRC_URI.mirror check.
 thirdpartymirrors = []
 for v in repoman_settings.thirdpartymirrors().values():
-	thirdpartymirrors.extend(v)
+	for v in v:
+		if not v.endswith("/"):
+			v += "/"
+		thirdpartymirrors.append(v)
 
 class _MetadataTreeBuilder(xml.etree.ElementTree.TreeBuilder):
 	"""
@@ -1056,9 +1225,16 @@ except FileNotFound:
 	# disable for non-gentoo repoman users who may not have herds.
 	herd_base = None
 
-for x in scanlist:
+effective_scanlist = scanlist
+if options.if_modified == "y":
+	effective_scanlist = sorted(vcs_files_to_cps(
+		chain(mychanged, mynew, myremoved)))
+
+for x in effective_scanlist:
 	#ebuilds and digests added to cvs respectively.
 	logging.info("checking package %s" % x)
+	# save memory by discarding xmatch caches from previous package(s)
+	arch_xmatch_caches.clear()
 	eadded=[]
 	catdir,pkgdir=x.split("/")
 	checkdir=repodir+"/"+x
@@ -1071,8 +1247,7 @@ for x in scanlist:
 	generated_manifest = False
 
 	if options.mode == "manifest" or \
-	  (options.mode != 'manifest-check' and \
-	  'digest' in repoman_settings.features) or \
+	  (options.mode != 'manifest-check' and options.digest == 'y') or \
 	  options.mode in ('commit', 'fix') and not options.pretend:
 		auto_assumed = set()
 		fetchlist_dict = portage.FetchlistDict(checkdir,
@@ -1081,7 +1256,9 @@ for x in scanlist:
 			portage._doebuild_manifest_exempt_depend += 1
 			try:
 				distdir = repoman_settings['DISTDIR']
-				mf = portage.manifest.Manifest(checkdir, distdir,
+				mf = repoman_settings.repositories.get_repo_for_location(
+					os.path.dirname(os.path.dirname(checkdir)))
+				mf = mf.load_manifest(checkdir, distdir,
 					fetchlist_dict=fetchlist_dict)
 				mf.create(requiredDistfiles=None,
 					assumeDistHashesAlways=True)
@@ -1175,17 +1352,6 @@ for x in scanlist:
 			pkgs[pf] = Package(cpv=cpv, metadata=myaux,
 				root_config=root_config, type_name="ebuild")
 
-	# Sort ebuilds in ascending order for the KEYWORDS.dropped check.
-	pkgsplits = {}
-	for i in range(len(ebuildlist)):
-		ebuild_split = portage.pkgsplit(ebuildlist[i])
-		pkgsplits[ebuild_split] = ebuildlist[i]
-		ebuildlist[i] = ebuild_split
-	ebuildlist.sort(key=cmp_sort_key(portage.pkgcmp))
-	for i in range(len(ebuildlist)):
-		ebuildlist[i] = pkgsplits[ebuildlist[i]]
-	del pkgsplits
-
 	slot_keywords = {}
 
 	if len(pkgs) != len(ebuildlist):
@@ -1197,20 +1363,34 @@ for x in scanlist:
 		can_force = False
 		continue
 
+	# Sort ebuilds in ascending order for the KEYWORDS.dropped check.
+	ebuildlist = sorted(pkgs.values())
+	ebuildlist = [pkg.pf for pkg in ebuildlist]
+
 	for y in checkdirlist:
 		m = disallowed_filename_chars_re.search(y.strip(os.sep))
 		if m is not None:
+			y_relative = os.path.join(checkdir_relative, y)
+			if vcs is not None and not vcs_new_changed(y_relative):
+				# If the file isn't in the VCS new or changed set, then
+				# assume that it's an irrelevant temporary file (Manifest
+				# entries are not generated for file names containing
+				# prohibited characters). See bug #406877.
+				m = None
+		if m is not None:
 			stats["file.name"] += 1
 			fails["file.name"].append("%s/%s: char '%s'" % \
 				(checkdir, y, m.group(0)))
 
 		if not (y in ("ChangeLog", "metadata.xml") or y.endswith(".ebuild")):
 			continue
+		f = None
 		try:
 			line = 1
-			for l in io.open(_unicode_encode(os.path.join(checkdir, y),
+			f = io.open(_unicode_encode(os.path.join(checkdir, y),
 				encoding=_encodings['fs'], errors='strict'),
-				mode='r', encoding=_encodings['repo.content']):
+				mode='r', encoding=_encodings['repo.content'])
+			for l in f:
 				line +=1
 		except UnicodeDecodeError as ue:
 			stats["file.UTF8"] += 1
@@ -1220,6 +1400,9 @@ for x in scanlist:
 			if l2 != 0:
 				s = s[s.rfind("\n") + 1:]
 			fails["file.UTF8"].append("%s/%s: line %i, just after: '%s'" % (checkdir, y, line, s))
+		finally:
+			if f is not None:
+				f.close()
 
 	if vcs in ("git", "hg") and check_ebuild_notadded:
 		if vcs == "git":
@@ -1286,7 +1469,9 @@ for x in scanlist:
 				raise
 			continue
 
-	mf = Manifest(checkdir, repoman_settings["DISTDIR"])
+	mf = repoman_settings.repositories.get_repo_for_location(
+		os.path.dirname(os.path.dirname(checkdir)))
+	mf = mf.load_manifest(checkdir, repoman_settings["DISTDIR"])
 	mydigests=mf.getTypeDigests("DIST")
 
 	fetchlist_dict = portage.FetchlistDict(checkdir, repoman_settings, portdb)
@@ -1361,20 +1546,27 @@ for x in scanlist:
 			m = disallowed_filename_chars_re.search(
 				os.path.basename(y.rstrip(os.sep)))
 			if m is not None:
+				y_relative = os.path.join(checkdir_relative, "files", y)
+				if vcs is not None and not vcs_new_changed(y_relative):
+					# If the file isn't in the VCS new or changed set, then
+					# assume that it's an irrelevant temporary file (Manifest
+					# entries are not generated for file names containing
+					# prohibited characters). See bug #406877.
+					m = None
+			if m is not None:
 				stats["file.name"] += 1
 				fails["file.name"].append("%s/files/%s: char '%s'" % \
 					(checkdir, y, m.group(0)))
 
 			if desktop_file_validate and desktop_pattern.match(y):
-				status, cmd_output = subprocess_getstatusoutput(
-					"'%s' '%s'" % (desktop_file_validate, full_path))
-				if os.WIFEXITED(status) and os.WEXITSTATUS(status) != os.EX_OK:
+				cmd_output = validate_desktop_entry(full_path)
+				if cmd_output:
 					# Note: in the future we may want to grab the
 					# warnings in addition to the errors. We're
 					# just doing errors now since we don't want
 					# to generate too much noise at first.
 					error_re = re.compile(r'.*\s*error:\s*(.*)')
-					for line in cmd_output.splitlines():
+					for line in cmd_output:
 						error_match = error_re.match(line)
 						if error_match is None:
 							continue
@@ -1446,7 +1638,6 @@ for x in scanlist:
 	changelog_path = os.path.join(checkdir_relative, "ChangeLog")
 	changelog_modified = changelog_path in modified_changelogs
 
-	allmasked = True
 	# detect unused local USE-descriptions
 	used_useflags = set()
 
@@ -1599,7 +1790,7 @@ for x in scanlist:
 		Ebuilds that inherit a "Live" eclass (darcs,subversion,git,cvs,etc..) should
 		not be allowed to be marked stable
 		"""
-		if live_ebuild:
+		if live_ebuild and repo_config.name == "gentoo":
 			bad_stable_keywords = []
 			for keyword in keywords:
 				if not keyword.startswith("~") and \
@@ -1629,13 +1820,12 @@ for x in scanlist:
 					arches.append([keyword, keyword[1:], [keyword[1:], keyword]])
 				else:
 					arches.append([keyword, keyword, [keyword]])
-					allmasked = False
 			if not arches:
 				# Use an empty profile for checking dependencies of
 				# packages that have empty KEYWORDS.
 				arches.append(['**', '**', ['**']])
 
-		unknown_pkgs = {}
+		unknown_pkgs = set()
 		baddepsyntax = False
 		badlicsyntax = False
 		badprovsyntax = False
@@ -1672,11 +1862,13 @@ for x in scanlist:
 					if atom == "||":
 						continue
 
-					if not atom.blocker and \
-						not portdb.cp_list(atom.cp) and \
+					# Skip dependency.unknown for blockers, so that we
+					# don't encourage people to remove necessary blockers,
+					# as discussed in bug #382407.
+					if atom.blocker is None and \
+						not portdb.xmatch("match-all", atom) and \
 						not atom.cp.startswith("virtual/"):
-						unknown_pkgs.setdefault(atom.cp, set()).add(
-							(mytype, atom.unevaluated_atom))
+						unknown_pkgs.add((mytype, atom.unevaluated_atom))
 
 					is_blocker = atom.blocker
 
@@ -1766,12 +1958,12 @@ for x in scanlist:
 		#keyword checks
 		myuse = myaux["KEYWORDS"].split()
 		for mykey in myuse:
-			myskey=mykey[:]
-			if myskey[0]=="-":
-				myskey=myskey[1:]
-			if myskey[0]=="~":
-				myskey=myskey[1:]
-			if mykey!="-*":
+			if mykey not in ("-*", "*", "~*"):
+				myskey = mykey
+				if myskey[:1] == "-":
+					myskey = myskey[1:]
+				if myskey[:1] == "~":
+					myskey = myskey[1:]
 				if myskey not in kwlist:
 					stats["KEYWORDS.invalid"] += 1
 					fails["KEYWORDS.invalid"].append(x+"/"+y+".ebuild: %s" % mykey)
@@ -1858,9 +2050,11 @@ for x in scanlist:
 					dep_settings = portage.config(
 						config_profile_path=prof.abs_path,
 						config_incrementals=repoman_incrementals,
+						config_root=config_root,
 						local_config=False,
 						_unmatched_removal=options.unmatched_removal,
 						env=env)
+					dep_settings.categories = repoman_settings.categories
 					if options.without_mask:
 						dep_settings._mask_manager = \
 							copy.deepcopy(dep_settings._mask_manager)
@@ -1876,7 +2070,7 @@ for x in scanlist:
 					xcache.update(shared_xmatch_caches)
 					arch_xmatch_caches[xmatch_cache_key] = xcache
 
-				trees["/"]["porttree"].settings = dep_settings
+				trees[root]["porttree"].settings = dep_settings
 				portdb.settings = dep_settings
 				portdb.xcache = xcache
 				# for package.use.mask support inside dep_check
@@ -1887,7 +2081,7 @@ for x in scanlist:
 
 				if not baddepsyntax:
 					ismasked = not ebuild_archs or \
-						pkg.cpv not in portdb.xmatch("list-visible", pkg.cp)
+						pkg.cpv not in portdb.xmatch("match-visible", pkg.cp)
 					if ismasked:
 						if not have_pmasked:
 							have_pmasked = bool(dep_settings._getMaskAtom(
@@ -1921,12 +2115,16 @@ for x in scanlist:
 
 						if success:
 							if atoms:
+
+								# Don't bother with dependency.unknown for
+								# cases in which *DEPEND.bad is triggered.
 								for atom in atoms:
+									# dep_check returns all blockers and they
+									# aren't counted for *DEPEND.bad, so we
+									# ignore them here.
 									if not atom.blocker:
-										# Don't bother with dependency.unknown
-										# for cases in which *DEPEND.bad is
-										# triggered.
-										unknown_pkgs.pop(atom.cp, None)
+										unknown_pkgs.discard(
+											(mytype, atom.unevaluated_atom))
 
 								if not prof.sub_path:
 									# old-style virtuals currently aren't
@@ -1957,25 +2155,14 @@ for x in scanlist:
 								prof, repr(atoms)))
 
 		if not baddepsyntax and unknown_pkgs:
-			all_unknown = set()
-			all_unknown.update(*unknown_pkgs.values())
 			type_map = {}
-			for mytype, atom in all_unknown:
+			for mytype, atom in unknown_pkgs:
 				type_map.setdefault(mytype, set()).add(atom)
 			for mytype, atoms in type_map.items():
 				stats["dependency.unknown"] += 1
 				fails["dependency.unknown"].append("%s: %s: %s" %
 					(relative_path, mytype, ", ".join(sorted(atoms))))
 
-	# Check for 'all unstable' or 'all masked' -- ACCEPT_KEYWORDS is stripped
-	# XXX -- Needs to be implemented in dep code. Can't determine ~arch nicely.
-	#if not portage.portdb.xmatch("bestmatch-visible",x):
-	#    stats["ebuild.nostable"]+=1
-	#    fails["ebuild.nostable"].append(x)
-	if ebuildlist and allmasked and repolevel == 3:
-		stats["ebuild.allmasked"]+=1
-		fails["ebuild.allmasked"].append(x)
-
 	# check if there are unused local USE-descriptions in metadata.xml
 	# (unless there are any invalids, to avoid noise)
 	if allvalid:
@@ -1985,6 +2172,9 @@ for x in scanlist:
 				"%s/metadata.xml: unused local USE-description: '%s'" % \
 				(x, myflag))
 
+if options.if_modified == "y" and len(effective_scanlist) < 1:
+	logging.warn("--if-modified is enabled, but no modified packages were found!")
+
 if options.mode == "manifest":
 	sys.exit(dofail)
 
@@ -2099,7 +2289,8 @@ else:
 			err("Error retrieving CVS tree; exiting.")
 	if vcs == "svn":
 		try:
-			svnstatus=os.popen("svn status --no-ignore").readlines()
+			with os.popen("svn status --no-ignore") as f:
+				svnstatus = f.readlines()
 			myunadded = [ "./"+elem.rstrip().split()[1] for elem in svnstatus if elem.startswith("?") or elem.startswith("I") ]
 		except SystemExit as e:
 			raise  # TODO propagate this
@@ -2112,20 +2303,23 @@ else:
 		myf.close()
 	if vcs == "bzr":
 		try:
-			bzrstatus=os.popen("bzr status -S .").readlines()
+			with os.popen("bzr status -S .") as f:
+				bzrstatus = f.readlines()
 			myunadded = [ "./"+elem.rstrip().split()[1].split('/')[-1:][0] for elem in bzrstatus if elem.startswith("?") or elem[0:2] == " D" ]
 		except SystemExit as e:
 			raise  # TODO propagate this
 		except:
 			err("Error retrieving bzr info; exiting.")
 	if vcs == "hg":
-		myunadded = os.popen("hg status --no-status --unknown .").readlines()
+		with os.popen("hg status --no-status --unknown .") as f:
+			myunadded = f.readlines()
 		myunadded = ["./" + elem.rstrip() for elem in myunadded]
 		
 		# Mercurial doesn't handle manually deleted files as removed from
 		# the repository, so the user need to remove them before commit,
 		# using "hg remove [FILES]"
-		mydeleted = os.popen("hg status --no-status --deleted .").readlines()
+		with os.popen("hg status --no-status --deleted .") as f:
+			mydeleted = f.readlines()
 		mydeleted = ["./" + elem.rstrip() for elem in mydeleted]
 
 
@@ -2141,36 +2335,6 @@ else:
 				myautoadd+=[myunadded[x]]
 				del myunadded[x]
 
-	if myautoadd:
-		print(">>> Auto-Adding missing Manifest(s)...")
-		if options.pretend:
-			if vcs == "cvs":
-				print("(cvs add "+" ".join(myautoadd)+")")
-			elif vcs == "svn":
-				print("(svn add "+" ".join(myautoadd)+")")
-			elif vcs == "git":
-				print("(git add "+" ".join(myautoadd)+")")
-			elif vcs == "bzr":
-				print("(bzr add "+" ".join(myautoadd)+")")
-			elif vcs == "hg":
-				print("(hg add "+" ".join(myautoadd)+")")
-			retval=0
-		else:
-			if vcs == "cvs":
-				retval=os.system("cvs add "+" ".join(myautoadd))
-			elif vcs == "svn":
-				retval=os.system("svn add "+" ".join(myautoadd))
-			elif vcs == "git":
-				retval=os.system("git add "+" ".join(myautoadd))
-			elif vcs == "bzr":
-				retval=os.system("bzr add "+" ".join(myautoadd))
-			elif vcs == "hg":
-				retval=os.system("hg add "+" ".join(myautoadd))
-		if retval:
-			writemsg_level("!!! Exiting on %s (shell) error code: %s\n" % \
-				(vcs, retval), level=logging.ERROR, noiselevel=-1)
-			sys.exit(retval)
-
 	if myunadded:
 		print(red("!!! The following files are in your local tree but are not added to the master"))
 		print(red("!!! tree. Please remove them from the local tree or add them to the master tree."))
@@ -2200,28 +2364,37 @@ else:
 
 
 	if vcs == "svn":
-		svnstatus = os.popen("svn status").readlines()
+		with os.popen("svn status") as f:
+			svnstatus = f.readlines()
 		mychanged = [ "./" + elem.split()[-1:][0] for elem in svnstatus if (elem[:1] in "MR" or elem[1:2] in "M")]
 		mynew     = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("A")]
 		myremoved = [ "./" + elem.split()[-1:][0] for elem in svnstatus if elem.startswith("D")]
 
 		# Subversion expands keywords specified in svn:keywords properties.
-		props = os.popen("svn propget -R svn:keywords").readlines()
+		with os.popen("svn propget -R svn:keywords") as f:
+			props = f.readlines()
 		expansion = dict(("./" + prop.split(" - ")[0], prop.split(" - ")[1].split()) \
 			for prop in props if " - " in prop)
 
 	elif vcs == "git":
-		mychanged = os.popen("git diff-index --name-only --relative --diff-filter=M HEAD").readlines()
+		with os.popen("git diff-index --name-only "
+			"--relative --diff-filter=M HEAD") as f:
+			mychanged = f.readlines()
 		mychanged = ["./" + elem[:-1] for elem in mychanged]
 
-		mynew = os.popen("git diff-index --name-only --relative --diff-filter=A HEAD").readlines()
+		with os.popen("git diff-index --name-only "
+			"--relative --diff-filter=A HEAD") as f:
+			mynew = f.readlines()
 		mynew = ["./" + elem[:-1] for elem in mynew]
 
-		myremoved = os.popen("git diff-index --name-only --relative --diff-filter=D HEAD").readlines()
+		with os.popen("git diff-index --name-only "
+			"--relative --diff-filter=D HEAD") as f:
+			myremoved = f.readlines()
 		myremoved = ["./" + elem[:-1] for elem in myremoved]
 
 	if vcs == "bzr":
-		bzrstatus = os.popen("bzr status -S .").readlines()
+		with os.popen("bzr status -S .") as f:
+			bzrstatus = f.readlines()
 		mychanged = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and elem[1:2] == "M" ]
 		mynew     = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem and ( elem[1:2] in "NK" or elem[0:1] == "R" ) ]
 		myremoved = [ "./" + elem.split()[-1:][0].split('/')[-1:][0] for elem in bzrstatus if elem.startswith("-") ]
@@ -2229,11 +2402,16 @@ else:
 		# Bazaar expands nothing.
 
 	if vcs == "hg":
-		mychanged = os.popen("hg status --no-status --modified .").readlines()
+		with os.popen("hg status --no-status --modified .") as f:
+			mychanged = f.readlines()
 		mychanged = ["./" + elem.rstrip() for elem in mychanged]
-		mynew = os.popen("hg status --no-status --added .").readlines()
+
+		with os.popen("hg status --no-status --added .") as f:
+			mynew = f.readlines()
 		mynew = ["./" + elem.rstrip() for elem in mynew]
-		myremoved = os.popen("hg status --no-status --removed .").readlines()
+
+		with os.popen("hg status --no-status --removed .") as f:
+			myremoved = f.readlines()
 		myremoved = ["./" + elem.rstrip() for elem in myremoved]
 
 	if vcs:
@@ -2253,20 +2431,143 @@ else:
 			mymanifests.add(f)
 		else:
 			myupdates.add(f)
-	if vcs in ('git', 'hg'):
-		myupdates.difference_update(myremoved)
+	myupdates.difference_update(myremoved)
 	myupdates = list(myupdates)
 	mymanifests = list(mymanifests)
 	myheaders = []
 	mydirty = []
 
+	commitmessage = options.commitmsg
+	if options.commitmsgfile:
+		try:
+			f = io.open(_unicode_encode(options.commitmsgfile,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['content'], errors='replace')
+			commitmessage = f.read()
+			f.close()
+			del f
+		except (IOError, OSError) as e:
+			if e.errno == errno.ENOENT:
+				portage.writemsg("!!! File Not Found: --commitmsgfile='%s'\n" % options.commitmsgfile)
+			else:
+				raise
+		# We've read the content so the file is no longer needed.
+		commitmessagefile = None
+	if not commitmessage or not commitmessage.strip():
+		try:
+			editor = os.environ.get("EDITOR")
+			if editor and utilities.editor_is_executable(editor):
+				commitmessage = utilities.get_commit_message_with_editor(
+					editor, message=qa_output)
+			else:
+				commitmessage = utilities.get_commit_message_with_stdin()
+		except KeyboardInterrupt:
+			exithandler()
+		if not commitmessage or not commitmessage.strip():
+			print("* no commit message?  aborting commit.")
+			sys.exit(1)
+	commitmessage = commitmessage.rstrip()
+	changelog_msg = commitmessage
+	portage_version = getattr(portage, "VERSION", None)
+	if portage_version is None:
+		sys.stderr.write("Failed to insert portage version in message!\n")
+		sys.stderr.flush()
+		portage_version = "Unknown"
+	unameout = platform.system() + " "
+	if platform.system() in ["Darwin", "SunOS"]:
+		unameout += platform.processor()
+	else:
+		unameout += platform.machine()
+	commitmessage += "\n\n(Portage version: %s/%s/%s" % \
+		(portage_version, vcs, unameout)
+	if options.force:
+		commitmessage += ", RepoMan options: --force"
+	commitmessage += ")"
+
+	if options.echangelog in ('y', 'force'):
+		logging.info("checking for unmodified ChangeLog files")
+		committer_name = utilities.get_committer_name(env=repoman_settings)
+		for x in sorted(vcs_files_to_cps(
+			chain(myupdates, mymanifests, myremoved))):
+			catdir, pkgdir = x.split("/")
+			checkdir = repodir + "/" + x
+			checkdir_relative = ""
+			if repolevel < 3:
+				checkdir_relative = os.path.join(pkgdir, checkdir_relative)
+			if repolevel < 2:
+				checkdir_relative = os.path.join(catdir, checkdir_relative)
+			checkdir_relative = os.path.join(".", checkdir_relative)
+
+			changelog_path = os.path.join(checkdir_relative, "ChangeLog")
+			changelog_modified = changelog_path in modified_changelogs
+			if changelog_modified and options.echangelog != 'force':
+				continue
+
+			# get changes for this package
+			cdrlen = len(checkdir_relative)
+			clnew = [elem[cdrlen:] for elem in mynew if elem.startswith(checkdir_relative)]
+			clremoved = [elem[cdrlen:] for elem in myremoved if elem.startswith(checkdir_relative)]
+			clchanged = [elem[cdrlen:] for elem in mychanged if elem.startswith(checkdir_relative)]
+
+			# Skip ChangeLog generation if only the Manifest was modified,
+			# as discussed in bug #398009.
+			nontrivial_cl_files = set()
+			nontrivial_cl_files.update(clnew, clremoved, clchanged)
+			nontrivial_cl_files.difference_update(['Manifest'])
+			if not nontrivial_cl_files and options.echangelog != 'force':
+				continue
+
+			new_changelog = utilities.UpdateChangeLog(checkdir_relative,
+				committer_name, changelog_msg,
+				os.path.join(repodir, 'skel.ChangeLog'),
+				catdir, pkgdir,
+				new=clnew, removed=clremoved, changed=clchanged,
+				pretend=options.pretend)
+			if new_changelog is None:
+				writemsg_level("!!! Updating the ChangeLog failed\n", \
+					level=logging.ERROR, noiselevel=-1)
+				sys.exit(1)
+
+			# if the ChangeLog was just created, add it to vcs
+			if new_changelog:
+				myautoadd.append(changelog_path)
+				# myautoadd is appended to myupdates below
+			else:
+				myupdates.append(changelog_path)
+
+	if myautoadd:
+		print(">>> Auto-Adding missing Manifest/ChangeLog file(s)...")
+		add_cmd = [vcs, "add"]
+		add_cmd += myautoadd
+		if options.pretend:
+			portage.writemsg_stdout("(%s)\n" % " ".join(add_cmd),
+				noiselevel=-1)
+		else:
+			if not (sys.hexversion >= 0x3000000 and sys.hexversion < 0x3020000):
+				# Python 3.1 produces the following TypeError if raw bytes are
+				# passed to subprocess.call():
+				#   File "/usr/lib/python3.1/subprocess.py", line 646, in __init__
+				#     errread, errwrite)
+				#   File "/usr/lib/python3.1/subprocess.py", line 1157, in _execute_child
+				#     raise child_exception
+				# TypeError: expected an object with the buffer interface
+				add_cmd = [_unicode_encode(arg) for arg in add_cmd]
+			retcode = subprocess.call(add_cmd)
+			if retcode != os.EX_OK:
+				logging.error(
+					"Exiting on %s error code: %s\n" % (vcs, retcode))
+				sys.exit(retcode)
+
+		myupdates += myautoadd
+
 	print("* %s files being committed..." % green(str(len(myupdates))), end=' ')
+
 	if vcs not in ('cvs', 'svn'):
 		# With git, bzr and hg, there's never any keyword expansion, so
 		# there's no need to regenerate manifests and all files will be
 		# committed in one big commit at the end.
 		print()
-	else:
+	elif not repo_config.thin_manifest:
 		if vcs == 'cvs':
 			headerstring = "'\$(Header|Id).*\$'"
 		elif vcs == "svn":
@@ -2315,60 +2616,15 @@ else:
 	logging.info("myupdates: %s", myupdates)
 	logging.info("myheaders: %s", myheaders)
 
-	commitmessage = options.commitmsg
-	if options.commitmsgfile:
-		try:
-			f = io.open(_unicode_encode(options.commitmsgfile,
-			encoding=_encodings['fs'], errors='strict'),
-			mode='r', encoding=_encodings['content'], errors='replace')
-			commitmessage = f.read()
-			f.close()
-			del f
-		except (IOError, OSError) as e:
-			if e.errno == errno.ENOENT:
-				portage.writemsg("!!! File Not Found: --commitmsgfile='%s'\n" % options.commitmsgfile)
-			else:
-				raise
-		# We've read the content so the file is no longer needed.
-		commitmessagefile = None
-	if not commitmessage or not commitmessage.strip():
-		try:
-			editor = os.environ.get("EDITOR")
-			if editor and utilities.editor_is_executable(editor):
-				commitmessage = utilities.get_commit_message_with_editor(
-					editor, message=qa_output)
-			else:
-				commitmessage = utilities.get_commit_message_with_stdin()
-		except KeyboardInterrupt:
-			exithandler()
-		if not commitmessage or not commitmessage.strip():
-			print("* no commit message?  aborting commit.")
-			sys.exit(1)
-	commitmessage = commitmessage.rstrip()
-	portage_version = getattr(portage, "VERSION", None)
-	if portage_version is None:
-		sys.stderr.write("Failed to insert portage version in message!\n")
-		sys.stderr.flush()
-		portage_version = "Unknown"
-	unameout = platform.system() + " "
-	if platform.system() in ["Darwin", "SunOS"]:
-		unameout += platform.processor()
-	else:
-		unameout += platform.machine()
-	commitmessage += "\n\n(Portage version: %s/%s/%s" % \
-		(portage_version, vcs, unameout)
-	if options.force:
-		commitmessage += ", RepoMan options: --force"
-	commitmessage += ")"
-
 	if options.ask and userquery('Commit changes?', True) != 'Yes':
 		print("* aborting commit.")
-		sys.exit(1)
+		sys.exit(128 + signal.SIGINT)
 
-	if vcs in ('cvs', 'svn') and (myupdates or myremoved):
+	# Handle the case where committed files have keywords which
+	# will change and need a priming commit before the Manifest
+	# can be committed.
+	if (myupdates or myremoved) and myheaders:
 		myfiles = myupdates + myremoved
-		if not myheaders and "sign" not in repoman_settings.features:
-			myfiles += mymanifests
 		fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
 		mymsg = os.fdopen(fd, "wb")
 		mymsg.write(_unicode_encode(commitmessage))
@@ -2469,121 +2725,25 @@ else:
 			portage.util.write_atomic(x, b''.join(mylines),
 				mode='wb')
 
-	manifest_commit_required = True
-	if vcs in ('cvs', 'svn') and (myupdates or myremoved):
-		myfiles = myupdates + myremoved
-		for x in range(len(myfiles)-1, -1, -1):
-			if myfiles[x].count("/") < 4-repolevel:
-				del myfiles[x]
-		mydone=[]
-		if repolevel==3:   # In a package dir
-			repoman_settings["O"] = startdir
-			digestgen(mysettings=repoman_settings, myportdb=portdb)
-		elif repolevel==2: # In a category dir
-			for x in myfiles:
-				xs=x.split("/")
-				if len(xs) < 4-repolevel:
-					continue
-				if xs[0]==".":
-					xs=xs[1:]
-				if xs[0] in mydone:
-					continue
-				mydone.append(xs[0])
-				repoman_settings["O"] = os.path.join(startdir, xs[0])
-				if not os.path.isdir(repoman_settings["O"]):
-					continue
-				digestgen(mysettings=repoman_settings, myportdb=portdb)
-		elif repolevel==1: # repo-cvsroot
-			print(green("RepoMan sez:"), "\"You're rather crazy... doing the entire repository.\"\n")
-			for x in myfiles:
-				xs=x.split("/")
-				if len(xs) < 4-repolevel:
-					continue
-				if xs[0]==".":
-					xs=xs[1:]
-				if "/".join(xs[:2]) in mydone:
-					continue
-				mydone.append("/".join(xs[:2]))
-				repoman_settings["O"] = os.path.join(startdir, xs[0], xs[1])
-				if not os.path.isdir(repoman_settings["O"]):
-					continue
-				digestgen(mysettings=repoman_settings, myportdb=portdb)
-		else:
-			print(red("I'm confused... I don't know where I am!"))
-			sys.exit(1)
-
-		# Force an unsigned commit when more than one Manifest needs to be signed.
-		if repolevel < 3 and "sign" in repoman_settings.features:
-
-			fd, commitmessagefile = tempfile.mkstemp(".repoman.msg")
-			mymsg = os.fdopen(fd, "wb")
-			mymsg.write(_unicode_encode(commitmessage))
-			mymsg.write(b"\n (Unsigned Manifest commit)")
-			mymsg.close()
+	if repolevel == 1:
+		print(green("RepoMan sez:"), "\"You're rather crazy... "
+			"doing the entire repository.\"\n")
 
-			commit_cmd = [vcs]
-			commit_cmd.extend(vcs_global_opts)
-			commit_cmd.append("commit")
-			commit_cmd.extend(vcs_local_opts)
-			commit_cmd.extend(["-F", commitmessagefile])
-			commit_cmd.extend(f.lstrip("./") for f in mymanifests)
+	if vcs in ('cvs', 'svn') and (myupdates or myremoved):
 
-			try:
-				if options.pretend:
-					print("(%s)" % (" ".join(commit_cmd),))
-				else:
-					retval = spawn(commit_cmd, env=os.environ)
-					if retval:
-						writemsg_level(("!!! Exiting on %s (shell) " + \
-							"error code: %s\n") % (vcs, retval),
-							level=logging.ERROR, noiselevel=-1)
-						sys.exit(retval)
-			finally:
-				try:
-					os.unlink(commitmessagefile)
-				except OSError:
-					pass
-			manifest_commit_required = False
+		for x in sorted(vcs_files_to_cps(
+			chain(myupdates, myremoved, mymanifests))):
+			repoman_settings["O"] = os.path.join(repodir, x)
+			digestgen(mysettings=repoman_settings, myportdb=portdb)
 
 	signed = False
-	if "sign" in repoman_settings.features:
+	if sign_manifests:
 		signed = True
-		myfiles = myupdates + myremoved + mymanifests
 		try:
-			if repolevel==3:   # In a package dir
-				repoman_settings["O"] = "."
+			for x in sorted(vcs_files_to_cps(
+				chain(myupdates, myremoved, mymanifests))):
+				repoman_settings["O"] = os.path.join(repodir, x)
 				gpgsign(os.path.join(repoman_settings["O"], "Manifest"))
-			elif repolevel==2: # In a category dir
-				mydone=[]
-				for x in myfiles:
-					xs=x.split("/")
-					if len(xs) < 4-repolevel:
-						continue
-					if xs[0]==".":
-						xs=xs[1:]
-					if xs[0] in mydone:
-						continue
-					mydone.append(xs[0])
-					repoman_settings["O"] = os.path.join(".", xs[0])
-					if not os.path.isdir(repoman_settings["O"]):
-						continue
-					gpgsign(os.path.join(repoman_settings["O"], "Manifest"))
-			elif repolevel==1: # repo-cvsroot
-				print(green("RepoMan sez:"), "\"You're rather crazy... doing the entire repository.\"\n")
-				mydone=[]
-				for x in myfiles:
-					xs=x.split("/")
-					if len(xs) < 4-repolevel:
-						continue
-					if xs[0]==".":
-						xs=xs[1:]
-					if "/".join(xs[:2]) in mydone:
-						continue
-					mydone.append("/".join(xs[:2]))
-					repoman_settings["O"] = os.path.join(".", xs[0], xs[1])
-					if not os.path.isdir(repoman_settings["O"]):
-						continue
-					gpgsign(os.path.join(repoman_settings["O"], "Manifest"))
 		except portage.exception.PortageException as e:
 			portage.writemsg("!!! %s\n" % str(e))
 			portage.writemsg("!!! Disabled FEATURES='sign'\n")
@@ -2610,10 +2770,13 @@ else:
 					level=logging.ERROR, noiselevel=-1)
 				sys.exit(retval)
 
-	if vcs in ['git', 'bzr', 'hg'] or manifest_commit_required or signed:
+	if True:
 
 		myfiles = mymanifests[:]
-		if vcs in ['git', 'bzr', 'hg']:
+		# If there are no header (SVN/CVS keywords) changes in
+		# the files, this Manifest commit must include the
+		# other (yet uncommitted) files.
+		if not myheaders:
 			myfiles += myupdates
 			myfiles += myremoved
 		myfiles.sort()
@@ -2652,6 +2815,13 @@ else:
 			else:
 				retval = spawn(commit_cmd, env=os.environ)
 				if retval != os.EX_OK:
+
+					if repo_config.sign_commit and vcs == 'git' and \
+						not git_supports_gpg_sign():
+						# Inform user that newer git is needed (bug #403323).
+						logging.error(
+							"Git >=1.7.9 is required for signed commits!")
+
 					writemsg_level(("!!! Exiting on %s (shell) " + \
 						"error code: %s\n") % (vcs, retval),
 						level=logging.ERROR, noiselevel=-1)

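Among the helpers added to repoman above, git_supports_gpg_sign() gates the --gpg-sign option on >=git-1.7.9 (bug #403323). The same version test reads more directly as a tuple comparison; a condensed sketch, assuming `git --version` output ends in an X.Y.Z token (the _sketch name is illustrative):

    import re
    import subprocess

    def git_supports_gpg_sign_sketch():
        # "git version 1.7.9.5" -> take the last token, parse X.Y.Z,
        # and compare as a tuple against the minimum (1, 7, 9)
        out = subprocess.check_output(["git", "--version"]).decode().split()
        m = re.match(r"(\d+)\.(\d+)\.(\d+)", out[-1]) if out else None
        return m is not None and tuple(int(x) for x in m.groups()) >= (1, 7, 9)
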
diff --git a/portage_with_autodep/bin/save-ebuild-env.sh b/portage_with_autodep/bin/save-ebuild-env.sh
new file mode 100755
index 0000000..23e7aaa
--- /dev/null
+++ b/portage_with_autodep/bin/save-ebuild-env.sh
@@ -0,0 +1,100 @@
+#!/bin/bash
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# @FUNCTION: save_ebuild_env
+# @DESCRIPTION:
+# echo the current environment to stdout, filtering out redundant info.
+#
+# --exclude-init-phases causes pkg_nofetch and src_* phase functions to
+# be excluded from the output. These function are not needed for installation
+# or removal of the packages, and can therefore be safely excluded.
+#
+save_ebuild_env() {
+	(
+	if has --exclude-init-phases $* ; then
+		unset S _E_DOCDESTTREE_ _E_EXEDESTTREE_ \
+			PORTAGE_DOCOMPRESS PORTAGE_DOCOMPRESS_SKIP
+		if [[ -n $PYTHONPATH &&
+			${PYTHONPATH%%:*} -ef $PORTAGE_PYM_PATH ]] ; then
+			if [[ $PYTHONPATH == *:* ]] ; then
+				export PYTHONPATH=${PYTHONPATH#*:}
+			else
+				unset PYTHONPATH
+			fi
+		fi
+	fi
+
+	# misc variables inherited from the calling environment
+	unset COLORTERM DISPLAY EDITOR LESS LESSOPEN LOGNAME LS_COLORS PAGER \
+		TERM TERMCAP USER ftp_proxy http_proxy no_proxy
+
+	# other variables inherited from the calling environment
+	unset CVS_RSH ECHANGELOG_USER GPG_AGENT_INFO \
+	SSH_AGENT_PID SSH_AUTH_SOCK STY WINDOW XAUTHORITY
+
+	# CCACHE and DISTCC config
+	unset ${!CCACHE_*} ${!DISTCC_*}
+
+	# There's no need to bloat environment.bz2 with internally defined
+	# functions and variables, so filter them out if possible.
+
+	for x in pkg_setup pkg_nofetch src_unpack src_prepare src_configure \
+		src_compile src_test src_install pkg_preinst pkg_postinst \
+		pkg_prerm pkg_postrm ; do
+		unset -f default_$x _eapi{0,1,2,3,4}_$x
+	done
+	unset x
+
+	unset -f assert assert_sigpipe_ok dump_trace die diefunc \
+		quiet_mode vecho elog_base eqawarn elog \
+		esyslog einfo einfon ewarn eerror ebegin _eend eend KV_major \
+		KV_minor KV_micro KV_to_int get_KV unset_colors set_colors has \
+		has_phase_defined_up_to \
+		hasv hasq qa_source qa_call \
+		addread addwrite adddeny addpredict _sb_append_var \
+		use usev useq has_version portageq \
+		best_version use_with use_enable register_die_hook \
+		keepdir unpack strip_duplicate_slashes econf einstall \
+		dyn_setup dyn_unpack dyn_clean into insinto exeinto docinto \
+		insopts diropts exeopts libopts docompress \
+		abort_handler abort_prepare abort_configure abort_compile \
+		abort_test abort_install dyn_prepare dyn_configure \
+		dyn_compile dyn_test dyn_install \
+		dyn_preinst dyn_pretend dyn_help debug-print debug-print-function \
+		debug-print-section helpers_die inherit EXPORT_FUNCTIONS \
+		nonfatal register_success_hook remove_path_entry \
+		save_ebuild_env filter_readonly_variables preprocess_ebuild_env \
+		set_unless_changed unset_unless_changed source_all_bashrcs \
+		ebuild_main ebuild_phase ebuild_phase_with_hooks \
+		_ebuild_arg_to_phase _ebuild_phase_funcs default \
+		_hasg _hasgq _unpack_tar \
+		${QA_INTERCEPTORS}
+
+	# portage config variables and variables set directly by portage
+	unset ACCEPT_LICENSE BAD BRACKET BUILD_PREFIX COLS \
+		DISTCC_DIR DISTDIR DOC_SYMLINKS_DIR \
+		EBUILD_FORCE_TEST EBUILD_MASTER_PID \
+		ECLASS_DEPTH ENDCOL FAKEROOTKEY \
+		GOOD HILITE HOME \
+		LAST_E_CMD LAST_E_LEN LD_PRELOAD MISC_FUNCTIONS_ARGS MOPREFIX \
+		NOCOLOR NORMAL PKGDIR PKGUSE PKG_LOGDIR PKG_TMPDIR \
+		PORTAGE_BASHRCS_SOURCED PORTAGE_COMPRESS_EXCLUDE_SUFFIXES \
+		PORTAGE_NONFATAL PORTAGE_QUIET \
+		PORTAGE_SANDBOX_DENY PORTAGE_SANDBOX_PREDICT \
+		PORTAGE_SANDBOX_READ PORTAGE_SANDBOX_WRITE PREROOTPATH \
+		QA_INTERCEPTORS \
+		RC_DEFAULT_INDENT RC_DOT_PATTERN RC_ENDCOL RC_INDENTATION  \
+		ROOT ROOTPATH RPMDIR TEMP TMP TMPDIR USE_EXPAND \
+		WARN XARGS _RC_GET_KV_CACHE
+
+	# user config variables
+	unset DOC_SYMLINKS_DIR INSTALL_MASK PKG_INSTALL_MASK
+
+	declare -p
+	declare -fp
+	if [[ ${BASH_VERSINFO[0]} == 3 ]]; then
+		export
+	fi
+	)
+}

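save_ebuild_env works by blacklist: it strips known session variables, internal helper functions, and portage-internal settings, then dumps whatever remains with declare -p/declare -fp. The same filtering idea expressed in Python, as an illustrative sketch (the variable set here is abbreviated, not the authoritative list from the script above):

import os

SESSION_VARS = {
	"COLORTERM", "DISPLAY", "EDITOR", "LESS", "LOGNAME",
	"PAGER", "TERM", "USER",  # abbreviated; see the unset lists above
}

def filtered_env():
	"""Copy of the environment minus session-specific variables."""
	return {k: v for k, v in os.environ.items()
		if k not in SESSION_VARS
		and not k.startswith(("CCACHE_", "DISTCC_"))}
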
diff --git a/portage_with_autodep/bin/xattr-helper.py b/portage_with_autodep/bin/xattr-helper.py
new file mode 100755
index 0000000..6d99521
--- /dev/null
+++ b/portage_with_autodep/bin/xattr-helper.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Dump and restore extended attributes.
+
+We use a format like that used by getfattr --dump.  This is meant for shell
+helpers to save/restore.  If you're looking for a python/portage API, see
+portage.util.movefile._copyxattr instead.
+
+https://en.wikipedia.org/wiki/Extended_file_attributes
+"""
+
+import array
+import os
+import re
+import sys
+
+from portage.util._argparse import ArgumentParser
+
+if hasattr(os, "getxattr"):
+
+	class xattr(object):
+		get = os.getxattr
+		set = os.setxattr
+		list = os.listxattr
+
+else:
+	import xattr
+
+
+_UNQUOTE_RE = re.compile(br'\\[0-7]{3}')
+_FS_ENCODING = sys.getfilesystemencoding()
+
+
+if sys.hexversion < 0x3000000:
+
+	def octal_quote_byte(b):
+		return b'\\%03o' % ord(b)
+
+	def unicode_encode(s):
+		if isinstance(s, unicode):
+			s = s.encode(_FS_ENCODING)
+		return s
+else:
+
+	def octal_quote_byte(b):
+		return ('\\%03o' % ord(b)).encode('ascii')
+
+	def unicode_encode(s):
+		if isinstance(s, str):
+			s = s.encode(_FS_ENCODING)
+		return s
+
+
+def quote(s, quote_chars):
+	"""Convert all |quote_chars| in |s| to escape sequences
+
+	This is normally used to escape any embedded quotation marks.
+	"""
+	quote_re = re.compile(b'[' + quote_chars + b']')
+	result = []
+	pos = 0
+	s_len = len(s)
+
+	while pos < s_len:
+		m = quote_re.search(s, pos=pos)
+		if m is None:
+			result.append(s[pos:])
+			pos = s_len
+		else:
+			start = m.start()
+			result.append(s[pos:start])
+			result.append(octal_quote_byte(s[start:start+1]))
+			pos = start + 1
+
+	return b''.join(result)
+
+
+def unquote(s):
+	"""Process all escape sequences in |s|"""
+	result = []
+	pos = 0
+	s_len = len(s)
+
+	while pos < s_len:
+		m = _UNQUOTE_RE.search(s, pos=pos)
+		if m is None:
+			result.append(s[pos:])
+			pos = s_len
+		else:
+			start = m.start()
+			result.append(s[pos:start])
+			pos = start + 4
+			a = array.array('B')
+			a.append(int(s[start + 1:pos], 8))
+			try:
+				# Python >= 3.2
+				result.append(a.tobytes())
+			except AttributeError:
+				result.append(a.tostring())
+
+	return b''.join(result)
+
+
+def dump_xattrs(pathnames, file_out):
+	"""Dump the xattr data for |pathnames| to |file_out|"""
+	# NOTE: Always quote backslashes, in order to ensure that they are
+# not interpreted as escape sequences when they are processed by unquote.
+	quote_chars = b'\n\r\\\\'
+
+	for pathname in pathnames:
+		attrs = xattr.list(pathname)
+		if not attrs:
+			continue
+
+		file_out.write(b'# file: %s\n' % quote(pathname, quote_chars))
+		for attr in attrs:
+			attr = unicode_encode(attr)
+			value = xattr.get(pathname, attr)
+			file_out.write(b'%s="%s"\n' % (
+				quote(attr, b'=' + quote_chars),
+				quote(value, b'\0"' + quote_chars)))
+
+
+def restore_xattrs(file_in):
+	"""Read |file_in| and restore xattrs content from it
+
+	This expects textual data in the format written by dump_xattrs.
+	"""
+	pathname = None
+	for i, line in enumerate(file_in):
+		if line.startswith(b'# file: '):
+			pathname = unquote(line.rstrip(b'\n')[8:])
+		else:
+			parts = line.split(b'=', 1)
+			if len(parts) == 2:
+				if pathname is None:
+					raise ValueError('line %d: missing pathname' % (i + 1,))
+				attr = unquote(parts[0])
+				# strip trailing newline and quotes
+				value = unquote(parts[1].rstrip(b'\n')[1:-1])
+				xattr.set(pathname, attr, value)
+			elif line.strip():
+				raise ValueError('line %d: malformed entry' % (i + 1,))
+
+
+def main(argv):
+
+	parser = ArgumentParser(description=__doc__)
+	parser.add_argument('paths', nargs='*', default=[])
+
+	actions = parser.add_argument_group('Actions')
+	actions.add_argument('--dump',
+		action='store_true',
+		help='Dump the values of all extended '
+			'attributes associated with null-separated'
+			' paths read from stdin.')
+	actions.add_argument('--restore',
+		action='store_true',
+		help='Restore extended attributes using'
+			' a dump read from stdin.')
+
+	options = parser.parse_args(argv)
+
+	if sys.hexversion >= 0x3000000:
+		file_in = sys.stdin.buffer.raw
+	else:
+		file_in = sys.stdin
+	if not options.paths:
+		options.paths += [x for x in file_in.read().split(b'\0') if x]
+
+	if options.dump:
+		if sys.hexversion >= 0x3000000:
+			file_out = sys.stdout.buffer
+		else:
+			file_out = sys.stdout
+		dump_xattrs(options.paths, file_out)
+
+	elif options.restore:
+		restore_xattrs(file_in)
+
+	else:
+		parser.error('missing action!')
+
+	return os.EX_OK
+
+
+if __name__ == '__main__':
+	sys.exit(main(sys.argv[1:]))

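The helper above keeps the dump line-oriented by rewriting dangerous bytes as \NNN octal escapes, as getfattr --dump does, and reverses them on restore. A self-contained round-trip check of that scheme (re-implementing the two helpers for Python 3 rather than importing the script):

import re

def quote(s, quote_chars):
	# replace each dangerous byte with its \NNN octal escape
	return re.sub(b'[' + quote_chars + b']',
		lambda m: ('\\%03o' % ord(m.group())).encode('ascii'), s)

def unquote(s):
	# turn each \NNN escape back into the original byte
	return re.sub(br'\\[0-7]{3}',
		lambda m: bytes([int(m.group()[1:], 8)]), s)

name = b'user.comment\nwith "newline"'
escaped = quote(name, b'\n\r"\\\\')
assert b'\n' not in escaped
assert unquote(escaped) == name
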
diff --git a/portage_with_autodep/bin/xpak-helper.py b/portage_with_autodep/bin/xpak-helper.py
index 4766d99..ef74920 100755
--- a/portage_with_autodep/bin/xpak-helper.py
+++ b/portage_with_autodep/bin/xpak-helper.py
@@ -1,5 +1,5 @@
 #!/usr/bin/python
-# Copyright 2009 Gentoo Foundation
+# Copyright 2009-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import optparse
@@ -36,7 +36,7 @@ def command_recompose(args):
 
 def main(argv):
 
-	if argv and sys.hexversion < 0x3000000 and not isinstance(argv[0], unicode):
+	if argv and isinstance(argv[0], bytes):
 		for i, x in enumerate(argv):
 			argv[i] = portage._unicode_decode(x, errors='strict')
 

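The xpak-helper hunk above replaces the Python-2-only unicode check with a direct bytes test, so byte arguments get decoded on both Python 2 and 3. The same normalization as a standalone sketch (the real code uses portage._unicode_decode; plain str.decode stands in here):

import sys

def normalize_argv(argv):
	"""Decode any bytes arguments so downstream code sees only text."""
	return [x.decode('utf8', 'strict') if isinstance(x, bytes) else x
		for x in argv]

argv = normalize_argv(sys.argv[1:])
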
diff --git a/portage_with_autodep/integration_with_portage.patch b/portage_with_autodep/integration_with_portage.patch
new file mode 100644
index 0000000..794c156
--- /dev/null
+++ b/portage_with_autodep/integration_with_portage.patch
@@ -0,0 +1,920 @@
+diff -urN /usr/lib/portage/pym/_emerge/EbuildBuild.py ./pym/_emerge/EbuildBuild.py
+--- /usr/lib/portage/pym/_emerge/EbuildBuild.py	2012-05-28 16:20:40.737712559 +0600
++++ ./pym/_emerge/EbuildBuild.py	2012-06-01 21:37:22.799844102 +0600
+@@ -9,6 +9,8 @@
+ from _emerge.EbuildMerge import EbuildMerge
+ from _emerge.EbuildFetchonly import EbuildFetchonly
+ from _emerge.EbuildBuildDir import EbuildBuildDir
++from _emerge.EventsAnalyser import EventsAnalyser, FilterProcGenerator
++from _emerge.EventsLogger import EventsLogger
+ from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+ from portage.util import writemsg
+ import portage
+@@ -21,7 +23,7 @@
+ class EbuildBuild(CompositeTask):
+ 
+ 	__slots__ = ("args_set", "config_pool", "find_blockers",
+-		"ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
++		"ldpath_mtimes", "logger", "logserver", "opts", "pkg", "pkg_count",
+ 		"prefetcher", "settings", "world_atom") + \
+ 		("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
+ 
+@@ -251,8 +253,54 @@
+ 
+ 		build = EbuildExecuter(background=self.background, pkg=pkg,
+ 			scheduler=scheduler, settings=settings)
++
++		build.addStartListener(self._build_start)
++		build.addExitListener(self._build_stop)
++
+ 		self._start_task(build, self._build_exit)
+ 
++	def _build_start(self,phase):
++		if  "depcheck" in self.settings["FEATURES"] or \
++			"depcheckstrict" in self.settings["FEATURES"]:
++			# Let's start a log-listening server
++			temp_path=self.settings.get("T",self.settings["PORTAGE_TMPDIR"])
++			
++			if "depcheckstrict" not in self.settings["FEATURES"]:
++				# use default filter_proc
++				self.logserver=EventsLogger(socket_dir=temp_path)
++			else:
++				portage.util.writemsg("Getting list of allowed files..." + \
++					"This may take some time\n")
++				filter_gen=FilterProcGenerator(self.pkg.cpv, self.settings)
++				filter_proc=filter_gen.get_filter_proc()
++				self.logserver=EventsLogger(socket_dir=temp_path, 
++					filter_proc=filter_proc)
++	
++			self.logserver.start()
++			
++			# Copy socket path to LOG_SOCKET environment variable
++			env=self.settings.configdict["pkg"]
++			env['LOG_SOCKET'] = self.logserver.socket_name
++
++			#import pdb; pdb.set_trace()
++
++	def _build_stop(self,phase):
++		if  "depcheck" in self.settings["FEATURES"] or \
++			"depcheckstrict" in self.settings["FEATURES"]:
++			# Delete LOG_SOCKET from environment
++			env=self.settings.configdict["pkg"]
++			if 'LOG_SOCKET' in env:
++				del env['LOG_SOCKET']
++						
++			events=self.logserver.stop()
++			self.logserver=None
++			analyser=EventsAnalyser(self.pkg.cpv, events, self.settings)
++			analyser.display() # show the analysis
++
++			#import pdb; pdb.set_trace()
++	  
++
++
+ 	def _fetch_failed(self):
+ 		# We only call the pkg_nofetch phase if either RESTRICT=fetch
+ 		# is set or the package has explicitly overridden the default
+diff -urN /usr/lib/portage/pym/_emerge/EbuildPhase.py ./pym/_emerge/EbuildPhase.py
+--- /usr/lib/portage/pym/_emerge/EbuildPhase.py	2012-05-28 16:20:40.738712559 +0600
++++ ./pym/_emerge/EbuildPhase.py	2012-06-01 21:38:11.935842036 +0600
+@@ -34,7 +34,8 @@
+ 
+ 	# FEATURES displayed prior to setup phase
+ 	_features_display = (
+-		"ccache", "compressdebug", "distcc", "distcc-pump", "fakeroot",
++		"ccache", "compressdebug", "depcheck", "depcheckstrict", 
++		"distcc", "distcc-pump", "fakeroot",
+ 		"installsources", "keeptemp", "keepwork", "nostrip",
+ 		"preserve-libs", "sandbox", "selinux", "sesandbox",
+ 		"splitdebug", "suidctl", "test", "userpriv",
+diff -urN /usr/lib/portage/pym/_emerge/EbuildPhase.py.rej ./pym/_emerge/EbuildPhase.py.rej
+--- /usr/lib/portage/pym/_emerge/EbuildPhase.py.rej	1970-01-01 05:00:00.000000000 +0500
++++ ./pym/_emerge/EbuildPhase.py.rej	2012-06-01 21:37:22.800844102 +0600
+@@ -0,0 +1,12 @@
++--- pym/_emerge/EbuildPhase.py
+++++ pym/_emerge/EbuildPhase.py
++@@ -33,7 +33,8 @@
++ 		("_ebuild_lock",)
++ 
++ 	# FEATURES displayed prior to setup phase
++-	_features_display = ("ccache", "distcc", "distcc-pump", "fakeroot",
+++	_features_display = ("ccache", "depcheck", "depcheckstrict", "distcc",
+++		"distcc-pump", "fakeroot",
++ 		"installsources", "keeptemp", "keepwork", "nostrip",
++ 		"preserve-libs", "sandbox", "selinux", "sesandbox",
++ 		"splitdebug", "suidctl", "test", "userpriv",
+diff -urN /usr/lib/portage/pym/_emerge/EventsAnalyser.py ./pym/_emerge/EventsAnalyser.py
+--- /usr/lib/portage/pym/_emerge/EventsAnalyser.py	1970-01-01 05:00:00.000000000 +0500
++++ ./pym/_emerge/EventsAnalyser.py	2012-06-01 21:37:22.802844102 +0600
+@@ -0,0 +1,511 @@
++# Distributed under the terms of the GNU General Public License v2
++
++import portage
++from portage.dbapi._expand_new_virt import expand_new_virt
++from portage import os
++
++import subprocess
++import re
++
++class PortageUtils:
++	""" class for accessing the portage api """
++	def __init__(self, settings):
++		""" test """
++		self.settings=settings
++		self.vartree=portage.vartree(settings=settings)
++		self.vardbapi=portage.vardbapi(settings=settings, vartree=self.vartree)
++		self.portdbapi=portage.portdbapi(mysettings=settings)
++		self.metadata_keys = [k for k in portage.auxdbkeys if not k.startswith("UNUSED_")]
++		self.use=self.settings["USE"]
++
++	def get_best_visible_pkg(self,pkg):
++		"""
++		Gets the best visible candidate for installation. Returns an empty string if none is found.
++
++		:param pkg: package name
++
++		"""
++		try:
++			return self.portdbapi.xmatch("bestmatch-visible", pkg)
++		except:
++			return ''
++
++	# non-recursive dependency getter
++	def get_dep(self,pkg,dep_type=["RDEPEND","DEPEND"]):
++	  """
++	  Gets current dependencies of a package. Looks in portage db
++	  
++	  :param pkg: name of package
++	  :param dep_type: type of dependencies to recurse. Can be ["DEPEND"] or 
++		["RDEPEND", "DEPEND"]
++	  :returns: **set** of package names
++	  """
++	  ret=set()
++	  
++	  pkg = self.get_best_visible_pkg(pkg)	
++	  if not pkg:
++		  return ret
++	  
++	  # we found the best visible match in common tree
++
++
++	  metadata = dict(zip(self.metadata_keys, 
++		  self.portdbapi.aux_get(pkg, self.metadata_keys)))
++	  dep_str = " ".join(metadata[k] for k in dep_type)
++
++	  # the IUSE defaults are very important for us
++	  iuse_defaults=[
++		u[1:] for u in metadata.get("IUSE",'').split() if u.startswith("+")]
++	  
++	  use=self.use.split()
++	  
++	  for u in iuse_defaults:
++		  if u not in use:
++			  use.append(u)
++
++	  success, atoms = portage.dep_check(dep_str, None, self.settings, 
++		  myuse=use, myroot=self.settings["ROOT"],
++		  trees={self.settings["ROOT"]:{"vartree":self.vartree, "porttree": self.vartree}})
++	  if not success:
++		  return ret
++
++	  for atom in atoms:
++		  atomname = self.vartree.dep_bestmatch(atom)
++
++		  if not atomname:
++			  continue
++		
++		  for unvirt_pkg in expand_new_virt(self.vardbapi,'='+atomname):
++			  for pkg in self.vartree.dep_match(unvirt_pkg):
++				  ret.add(pkg)
++
++	  return ret
++
++	# recursive dependency getter
++	def get_deps(self,pkg,dep_type=["RDEPEND","DEPEND"]):
++		""" 
++		Gets current dependencies of a package at any depth.
++		All dependencies **must** be installed.
++		
++		:param pkg: name of package
++		:param dep_type: type of dependencies to recurse. Can be ["DEPEND"] or 
++		  ["RDEPEND", "DEPEND"]
++		:returns: **set** of package names
++		"""
++		ret=set()
++
++
++		# get porttree dependencies on the first package
++
++		pkg = self.portdbapi.xmatch("bestmatch-visible", pkg)	
++		if not pkg:
++			return ret
++
++		known_packages=set()
++		unknown_packages=self.get_dep(pkg,dep_type)
++		ret=ret.union(unknown_packages)
++		
++		while unknown_packages:
++			p=unknown_packages.pop()
++			if p in known_packages:
++				continue
++			known_packages.add(p)
++
++			metadata = dict(zip(self.metadata_keys, self.vardbapi.aux_get(p, self.metadata_keys)))
++
++			dep_str = " ".join(metadata[k] for k in dep_type)
++			
++			# the IUSE defaults are very important for us
++			iuse_defaults=[
++			  u[1:] for u in metadata.get("IUSE",'').split() if u.startswith("+")]
++	  
++			use=self.use.split()
++	  
++			for u in iuse_defaults:
++				if u not in use:
++					use.append(u)
++			
++			success, atoms = portage.dep_check(dep_str, None, self.settings, 
++				myuse=use,	myroot=self.settings["ROOT"],
++				trees={self.settings["ROOT"]:{"vartree":self.vartree,"porttree": self.vartree}})
++
++			if not success:
++			  continue
++
++			for atom in atoms:
++				atomname = self.vartree.dep_bestmatch(atom)
++				if not atomname:
++					continue
++								
++				for unvirt_pkg in expand_new_virt(self.vardbapi,'='+atomname):
++					for pkg in self.vartree.dep_match(unvirt_pkg):
++						ret.add(pkg)
++						unknown_packages.add(pkg)
++		return ret
++
++	def get_deps_for_package_building(self, pkg):
++		""" 
++		  Returns buildtime dependencies of the current package and
++		  all runtime dependencies of those buildtime dependencies.
++		"""
++		buildtime_deps=self.get_dep(pkg, ["DEPEND"])
++		runtime_deps=set()
++		for dep in buildtime_deps:
++			runtime_deps=runtime_deps.union(self.get_deps(dep,["RDEPEND"]))
++
++		ret=buildtime_deps.union(runtime_deps)
++		return ret
++
++	def get_system_packages_list(self):
++		""" 
++		Returns all packages from the system set. They are always implicit dependencies.
++		
++		:returns: **list** of package names
++		"""
++		ret=[]
++		for atom in self.settings.packages:
++			for pre_pkg in self.vartree.dep_match(atom):
++				for unvirt_pkg in expand_new_virt(self.vardbapi,'='+pre_pkg):
++					for pkg in self.vartree.dep_match(unvirt_pkg):
++						ret.append(pkg)
++		return ret
++
++
++class GentoolkitUtils:
++	""" 
++	Interface to the qfile and qlist utilities. They are much faster than
++	portage's internal routines.
++	"""
++
++	def getpackagesbyfiles(files):
++		"""
++		:param files: list of filenames
++		:returns: **dictionary** file->package; if a file doesn't belong to any
++		  package, it is not returned as a key of this dictionary
++		"""
++		ret={}
++		listtocheck=[]
++		for f in files:
++			if os.path.isdir(f):
++				ret[f]="directory"
++			else:
++				listtocheck.append(f)
++			
++		try:
++			proc=subprocess.Popen(['qfile']+['--nocolor','--exact','','--from','-'],
++				stdin=subprocess.PIPE, stdout=subprocess.PIPE,stderr=subprocess.PIPE, 
++				bufsize=4096)
++						
++			out,err=proc.communicate("\n".join(listtocheck).encode("utf8"))
++			
++			lines=out.decode("utf8").split("\n")
++			#print lines
++			line_re=re.compile(r"^([^ ]+)\s+\(([^)]+)\)$")
++			for line in lines:
++				if len(line)==0:
++					continue
++				match=line_re.match(line)
++				if match:
++					ret[match.group(2)]=match.group(1)
++				else:
++					portage.util.writemsg("Util qfile returned unparsable string: %s\n" % line)
++
++		except OSError as e:
++			portage.util.writemsg("Error while launching qfile: %s\n" % e)
++		  
++		  
++		return ret
++	  
++	def getfilesbypackages(packagenames):
++		"""
++		
++		:param packagenames: list of package names
++		:returns: **list** of files belonging to the given packages
++		"""
++		ret=[]
++		try:
++			proc=subprocess.Popen(['qlist']+['--nocolor',"--obj"]+packagenames,
++				stdout=subprocess.PIPE,stderr=subprocess.PIPE, 
++				bufsize=4096)
++			
++			out,err=proc.communicate()
++			
++			ret=out.decode("utf8").split("\n")
++			if ret==['']:
++				ret=[]
++		except OSError as e:
++			portage.util.writemsg("Error while launching qfile: %s\n" % e)
++
++		return ret
++	  
++	def get_all_packages_files():   
++		"""
++		Memory-hungry operation
++		
++		:returns: **set** of all files that belong to any package
++		"""
++		ret=[]
++		try:
++			proc=subprocess.Popen(['qlist']+['--all',"--obj"],
++				stdout=subprocess.PIPE,stderr=subprocess.PIPE, 
++				bufsize=4096)
++			  
++			out,err=proc.communicate()
++			
++			ret=out.decode("utf8").split("\n")
++		except OSError as e:
++			portage.util.writemsg("Error while launching qfile: %s\n" % e)
++
++		return set(ret)
++   
++class FilterProcGenerator:
++	def __init__(self, pkgname, settings):
++		portageutils=PortageUtils(settings=settings)
++
++		deps_all=portageutils.get_deps_for_package_building(pkgname)
++		deps_portage=portageutils.get_dep('portage',["RDEPEND"])
++		
++		system_packages=portageutils.get_system_packages_list()
++
++		allfiles=GentoolkitUtils.get_all_packages_files()
++		portage.util.writemsg("All files list recieved, waiting for " \
++			"a list of allowed files\n")
++
++
++		allowedpkgs=system_packages+list(deps_portage)+list(deps_all)	
++		
++		allowedfiles=GentoolkitUtils.getfilesbypackages(allowedpkgs)
++		#for pkg in allowedpkgs:
++		#	allowedfiles+=GentoolkitUtils.getfilesbypackage(pkg)
++
++		#import pdb; pdb.set_trace()
++
++		# manually add all python interpreters to this list
++		allowedfiles+=GentoolkitUtils.getfilesbypackages(['python'])
++		allowedfiles=set(allowedfiles)
++		
++		deniedfiles=allfiles-allowedfiles
++
++		def filter_proc(eventname,filename,stage):
++			if filename in deniedfiles:
++				return False
++			return True
++			
++		self.filter_proc=filter_proc
++	def get_filter_proc(self):
++		return self.filter_proc
++
++class EventsAnalyser:
++	def __init__(self, pkgname, events, settings):
++		self.pkgname=pkgname
++		self.events=events
++		self.settings=settings
++		self.portageutils=PortageUtils(settings=settings)
++
++		self.deps_all=self.portageutils.get_deps_for_package_building(pkgname)
++		self.deps_direct=self.portageutils.get_dep(pkgname,["DEPEND"])
++		self.deps_portage=self.portageutils.get_dep('portage',["RDEPEND"])
++		
++		self.system_packages=self.portageutils.get_system_packages_list()
++		# All analyse work is here
++		
++		# get unique filenames
++		filenames=set()
++		for stage in events:
++			succ_events=set(events[stage][0])
++			fail_events=set(events[stage][1])
++			filenames=filenames.union(succ_events)
++			filenames=filenames.union(fail_events)
++		filenames=list(filenames)
++
++		file_to_package=GentoolkitUtils.getpackagesbyfiles(filenames)
++		# This part is hard to read. It converts one complex structure
++		# (returned by getfsevents) into another complex structure that is
++		# better suited for generating output.
++		#
++		# Old struct is also used during output
++
++		packagesinfo={}
++
++		for stage in sorted(events):
++			  succ_events=events[stage][0]
++			  fail_events=events[stage][1]
++			  
++			  for filename in succ_events:
++				  if filename in file_to_package:
++					  package=file_to_package[filename]
++				  else:
++					  package="unknown"
++					
++				  if not package in packagesinfo:
++					  packagesinfo[package]={}
++				  stageinfo=packagesinfo[package]
++				  if not stage in stageinfo:
++					  stageinfo[stage]={}
++					
++				  filesinfo=stageinfo[stage]
++				  if not filename in filesinfo:
++					  filesinfo[filename]={"found":[],"notfound":[]}
++				  filesinfo[filename]["found"]=succ_events[filename]
++				
++			  for filename in fail_events:
++				  if filename in file_to_package:
++					  package=file_to_package[filename]
++				  else:
++					  package="unknown"
++				  if not package in packagesinfo:
++					  packagesinfo[package]={}
++				  stageinfo=packagesinfo[package]
++				  if not stage in stageinfo:
++					  stageinfo[stage]={}
++					
++				  filesinfo=stageinfo[stage]
++				  if not filename in filesinfo:
++					  filesinfo[filename]={"found":[],"notfound":[]}
++				  filesinfo[filename]["notfound"]=fail_events[filename]
++		self.packagesinfo=packagesinfo
++		  
++	def display(self):
++		portage.util.writemsg(
++			portage.output.colorize(
++				"WARN", "\nFile access report for %s:\n" % self.pkgname))
++
++		stagesorder={"clean":1,"setup":2,"unpack":3,"prepare":4,"configure":5,"compile":6,"test":7,
++			 "install":8,"preinst":9,"postinst":10,"prerm":11,"postrm":12,"unknown":13}
++		packagesinfo=self.packagesinfo
++		# print information grouped by package	  
++		for package in sorted(packagesinfo):
++			# not showing special directory package
++			if package=="directory":
++				continue
++			
++			if package=="unknown":
++				continue
++
++
++			is_pkg_in_dep=package in self.deps_all
++			is_pkg_in_portage_dep=package in self.deps_portage
++			is_pkg_in_system=package in self.system_packages
++			is_pkg_python="dev-lang/python" in package
++
++			stages=[]
++			for stage in sorted(packagesinfo[package].keys(), key=stagesorder.get):
++				if stage!="unknown":
++					stages.append(stage)
++
++			if len(stages)==0:
++				continue
++	
++			filenames={}
++			for stage in stages:
++				for filename in packagesinfo[package][stage]:
++					if len(packagesinfo[package][stage][filename]["found"])!=0:
++						was_readed,was_writed=packagesinfo[package][stage][filename]["found"]
++						if not filename in filenames:
++							filenames[filename]=['ok',was_readed,was_writed]
++						else:
++							status, old_was_readed, old_was_writed=filenames[filename]
++							filenames[filename]=[
++							  'ok',old_was_readed | was_readed, old_was_writed | was_writed 
++							]
++					if len(packagesinfo[package][stage][filename]["notfound"])!=0:
++						was_notfound,was_blocked=packagesinfo[package][stage][filename]["notfound"]
++						if not filename in filenames:
++							filenames[filename]=['err',was_notfound,was_blocked]
++						else:
++							status, old_was_notfound, old_was_blocked=filenames[filename]
++							filenames[filename]=[
++							  'err',old_was_notfound | was_notfound, old_was_blocked | was_blocked 
++							]
++				  
++
++			if is_pkg_in_dep:
++				portage.util.writemsg("[OK]")
++			elif is_pkg_in_system:
++				portage.util.writemsg("[SYSTEM]")
++			elif is_pkg_in_portage_dep:
++				portage.util.writemsg("[PORTAGE DEP]")
++			elif is_pkg_python:
++				portage.util.writemsg("[INTERPRETER]")
++			elif not self.is_package_useful(package,stages,filenames.keys()):
++				portage.util.writemsg("[LIKELY OK]")
++			else:
++				portage.util.writemsg(portage.output.colorize("BAD", "[NOT IN DEPS]"))
++			# show information about accessed files
++
++			portage.util.writemsg(" %-40s: %s\n" % (package,stages))
++
++			# this is here for readability
++			action={
++				('ok',False,False):"accessed",
++				('ok',True,False):"read",
++				('ok',False,True):"written",
++				('ok',True,True):"read and written",
++				('err',False,False):"other error",
++				('err',True,False):"not found",
++				('err',False,True):"blocked",
++				('err',True,True):"not found and blocked"
++			}
++			
++			filescounter=0
++			
++			for filename in filenames:
++				event_info=tuple(filenames[filename])
++				portage.util.writemsg("  %-56s %-21s\n" % (filename,action[event_info]))
++				filescounter+=1
++				if filescounter>10:
++					portage.util.writemsg("  ... and %d more ...\n" % (len(filenames)-10))
++					break
++		# ... and one more check. Making sure that direct build time 
++		# dependencies were accessed
++		#import pdb; pdb.set_trace()
++		not_accessed_deps=set(self.deps_direct)-set(self.packagesinfo.keys())
++		if not_accessed_deps:
++			portage.util.writemsg(portage.output.colorize("WARN", "!!! "))
++			portage.util.writemsg("Warning! Some build time dependencies " + \
++				"of packages were not accessed: " + \
++				" ".join(not_accessed_deps) + "\n")
++					
++	def is_package_useful(self,pkg,stages,files):
++		""" some basic heuristics here to cut part of packages """
++
++		excluded_paths=set(
++			['/etc/sandbox.d/']
++		)
++
++		excluded_packages=set(
++			# autodep shows these two packages every time
++			['net-zope/zope-fixers', 'net-zope/zope-interface']
++		)
++
++
++		def is_pkg_excluded(p):
++			for pkg in excluded_packages:
++				if p.startswith(pkg): # if package is excluded
++					return True
++			return False
++
++
++		def is_file_excluded(f):
++			for path in excluded_paths:
++				if f.startswith(path): # if path is excluded
++					return True
++			return False
++
++
++		if is_pkg_excluded(pkg):
++			return False
++
++		for f in files:
++			if is_file_excluded(f):
++			  continue
++			
++			# test 1: package is not useful if all its files are *.desktop, *.xml, *.m4 or *.pc
++			if not (f.endswith(".desktop") or f.endswith(".xml") or f.endswith(".m4") or f.endswith(".pc")):
++			  break
++		else:
++			return False # we get here only if the loop ended without a break
++		  
++		return True
++		
++    
+\ No newline at end of file
+diff -urN /usr/lib/portage/pym/_emerge/EventsLogger.py ./pym/_emerge/EventsLogger.py
+--- /usr/lib/portage/pym/_emerge/EventsLogger.py	1970-01-01 05:00:00.000000000 +0500
++++ ./pym/_emerge/EventsLogger.py	2012-06-01 21:37:22.803844102 +0600
+@@ -0,0 +1,180 @@
++# Distributed under the terms of the GNU General Public License v2
++
++import io
++import sys
++import stat
++import socket
++import select
++import tempfile
++
++import threading
++
++from portage import os
++
++class EventsLogger(threading.Thread):
++	def default_filter(eventname, filename, stage):
++		return True
++		
++	def __init__(self, socket_dir="/tmp/", filter_proc=default_filter):
++		threading.Thread.__init__(self) # init the Thread
++		
++		self.alive=False
++		
++		self.main_thread=threading.currentThread()
++		
++		self.socket_dir=socket_dir
++		self.filter_proc=filter_proc
++		
++		self.socket_name=None
++		self.socket_logger=None
++
++		self.events={}
++
++		try:
++			socket_dir_name = tempfile.mkdtemp(dir=self.socket_dir,
++				prefix="log_socket_")
++			
++			socket_name = os.path.join(socket_dir_name, 'socket')
++
++		except OSError as e:
++			return
++		
++		self.socket_name=socket_name
++		
++		#print(self.socket_name)
++		
++		try:
++			socket_logger=socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
++			socket_logger.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
++
++			socket_logger.bind(self.socket_name)
++			socket_logger.listen(64)
++
++		except socket.error as e:
++			return
++
++		self.socket_logger=socket_logger
++
++		try:
++  			# Allow anyone to connect to the socket
++			os.chmod(socket_dir_name,
++				stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR|
++				stat.S_IROTH|stat.S_IWOTH|stat.S_IXOTH)
++			os.chmod(socket_name,
++				stat.S_IRUSR|stat.S_IWUSR|stat.S_IXUSR|
++				stat.S_IROTH|stat.S_IWOTH|stat.S_IXOTH)
++		except OSError as e:
++			return
++		  
++	def run(self):
++		""" Starts the log server """
++
++		self.alive=True
++		self.listen_thread=threading.currentThread()
++		clients={}
++		
++		epoll=select.epoll()
++		epoll.register(self.socket_logger.fileno(), select.EPOLLIN)
++
++		while self.alive:
++			try:
++				sock_events = epoll.poll(3)
++				
++				for fileno, sock_event in sock_events:
++					if fileno == self.socket_logger.fileno():
++						ret = self.socket_logger.accept()
++						if ret is None:
++							pass
++						else:
++							(client,addr)=ret
++							epoll.register(client.fileno(), select.EPOLLIN)
++							clients[client.fileno()]=client
++					elif sock_event & select.EPOLLIN:
++						s=clients[fileno]
++						record=s.recv(8192)
++					
++						if not record: # if connection was closed
++							epoll.unregister(fileno)
++							clients[fileno].close()
++							del clients[fileno]
++							continue
++					
++						#import pdb; pdb.set_trace()
++						try:
++							message=record.decode("utf8").split("\0")
++						except UnicodeDecodeError:
++  							print("Bad message %s" % record)
++  							continue
++
++						#  continue
++
++						#print(message)
++					
++						try:
++							if message[4]=="ASKING":
++								if self.filter_proc(message[1],message[2],message[3]):
++									s.sendall(b"ALLOW\0")
++								else:
++									# TODO: log through portage infrastructure
++									#print("Blocking an access to %s" % message[2])
++									s.sendall(b"DENY\0")
++							else:
++								eventname,filename,stage,result=message[1:5]
++
++								if not stage in self.events:
++									self.events[stage]=[{},{}]
++						
++								hashofsucesses=self.events[stage][0]
++								hashoffailures=self.events[stage][1]
++
++								if result=="DENIED":
++									print("Blocking an access to %s" % filename)
++
++								if result=="OK":
++									if not filename in hashofsucesses:
++										hashofsucesses[filename]=[False,False]
++						  
++									readed_or_writed=hashofsucesses[filename]
++						  
++									if eventname=="read":
++										readed_or_writed[0]=True
++									elif eventname=="write":
++										readed_or_writed[1]=True
++							
++								elif result[0:3]=="ERR" or result=="DENIED":
++									if not filename in hashoffailures:
++										hashoffailures[filename]=[False,False]
++									notfound_or_blocked=hashoffailures[filename]
++								  
++									if result=="ERR/2":
++										notfound_or_blocked[0]=True
++									elif result=="DENIED":
++										notfound_or_blocked[1]=True
++
++								else:
++									print("Error in logger module<->analyser protocol")
++						
++						except IndexError:
++								print("IndexError while parsing %s" % record)
++			except IOError as e:
++				if e.errno!=4: # handling "Interrupted system call" errors
++					raise
++				  
++			# if the main thread no longer exists, exit
++			if not self.main_thread.is_alive():
++				break
++		epoll.unregister(self.socket_logger.fileno())
++		epoll.close()
++		self.socket_logger.close()
++		
++	def stop(self):
++		""" Stops the log server. Returns all events """
++
++		self.alive=False
++		
++		# Block the main thread until the listener thread exits
++		self.listen_thread.join()
++		
++		# We assume portage clears the tmp folder, so the socket file is not
++		# deleted here; no new socket data should arrive after this moment
++		return self.events
+diff -urN /usr/lib/portage/pym/portage/const.py ./pym/portage/const.py
+--- /usr/lib/portage/pym/portage/const.py	2012-05-28 16:20:40.766712558 +0600
++++ ./pym/portage/const.py	2012-06-01 21:39:51.363837853 +0600
+@@ -67,6 +67,8 @@
+ BASH_BINARY              = "/bin/bash"
+ MOVE_BINARY              = "/bin/mv"
+ PRELINK_BINARY           = "/usr/sbin/prelink"
++AUTODEP_LIBRARY          = "/usr/lib/file_hook.so"
++
+ 
+ INVALID_ENV_FILE         = "/etc/spork/is/not/valid/profile.env"
+ REPO_NAME_FILE           = "repo_name"
+@@ -89,7 +91,7 @@
+                            "assume-digests", "binpkg-logs", "buildpkg", "buildsyspkg", "candy",
+                            "ccache", "chflags", "clean-logs",
+                            "collision-protect", "compress-build-logs", "compressdebug",
+-                           "config-protect-if-modified",
++                           "config-protect-if-modified", "depcheck", "depcheckstrict",
+                            "digest", "distcc", "distcc-pump", "distlocks", "ebuild-locks", "fakeroot",
+                            "fail-clean", "force-mirror", "force-prefix", "getbinpkg",
+                            "installsources", "keeptemp", "keepwork", "fixlafiles", "lmirror",
+diff -urN /usr/lib/portage/pym/portage/const.py.rej ./pym/portage/const.py.rej
+--- /usr/lib/portage/pym/portage/const.py.rej	1970-01-01 05:00:00.000000000 +0500
++++ ./pym/portage/const.py.rej	2012-06-01 21:37:22.803844102 +0600
+@@ -0,0 +1,12 @@
++--- pym/portage/const.py
+++++ pym/portage/const.py
++@@ -90,7 +92,8 @@
++ SUPPORTED_FEATURES       = frozenset([
++                            "allow-missing-manifests",
++                            "assume-digests", "binpkg-logs", "buildpkg", "buildsyspkg", "candy",
++-                           "ccache", "chflags", "collision-protect", "compress-build-logs",
+++                           "ccache", "chflags", "collision-protect", "compress-build-logs", 
+++                           "depcheck", "depcheckstrict",
++                            "digest", "distcc", "distcc-pump", "distlocks", "ebuild-locks", "fakeroot",
++                            "fail-clean", "fixpackages", "force-mirror", "getbinpkg",
++                            "installsources", "keeptemp", "keepwork", "fixlafiles", "lmirror",
+diff -urN /usr/lib/portage/pym/portage/package/ebuild/_config/special_env_vars.py ./pym/portage/package/ebuild/_config/special_env_vars.py
+--- /usr/lib/portage/pym/portage/package/ebuild/_config/special_env_vars.py	2012-05-28 16:20:40.870712551 +0600
++++ ./pym/portage/package/ebuild/_config/special_env_vars.py	2012-06-01 21:37:22.804844102 +0600
+@@ -101,8 +101,8 @@
+ # other variables inherited from the calling environment
+ environ_whitelist += [
+ 	"CVS_RSH", "ECHANGELOG_USER",
+-	"GPG_AGENT_INFO",
+-	"SSH_AGENT_PID", "SSH_AUTH_SOCK",
++	"GPG_AGENT_INFO", "LOG_SOCKET",
++	"SSH_AGENT_PID", "SSH_AUTH_SOCK"
+ 	"STY", "WINDOW", "XAUTHORITY",
+ ]
+ 
+diff -urN /usr/lib/portage/pym/portage/package/ebuild/doebuild.py ./pym/portage/package/ebuild/doebuild.py
+--- /usr/lib/portage/pym/portage/package/ebuild/doebuild.py	2012-05-28 16:20:40.860712554 +0600
++++ ./pym/portage/package/ebuild/doebuild.py	2012-06-01 21:37:22.805844102 +0600
+@@ -1222,6 +1222,9 @@
+ 		nosandbox = ("sandbox" not in features and \
+ 			"usersandbox" not in features)
+ 
++	if "depcheck" in features or "depcheckstrict" in features:
++		nosandbox = True
++
+ 	if not portage.process.sandbox_capable:
+ 		nosandbox = True
+ 
+@@ -1401,7 +1404,10 @@
+ 		keywords["opt_name"] = "[%s/%s]" % \
+ 			(mysettings.get("CATEGORY",""), mysettings.get("PF",""))
+ 
+-	if free or "SANDBOX_ACTIVE" in os.environ:
++	if "depcheck" in features or "depcheckstrict" in features:
++		keywords["opt_name"] += " bash"
++		spawn_func = portage.process.spawn_autodep
++	elif free or "SANDBOX_ACTIVE" in os.environ:
+ 		keywords["opt_name"] += " bash"
+ 		spawn_func = portage.process.spawn_bash
+ 	elif fakeroot:
+diff -urN /usr/lib/portage/pym/portage/process.py ./pym/portage/process.py
+--- /usr/lib/portage/pym/portage/process.py	2012-05-28 16:20:40.768712558 +0600
++++ ./pym/portage/process.py	2012-06-01 21:37:22.806844102 +0600
+@@ -18,7 +18,7 @@
+ 	'portage.util:dump_traceback',
+ )
+ 
+-from portage.const import BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY
++from portage.const import BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY, AUTODEP_LIBRARY
+ from portage.exception import CommandNotFound
+ 
+ try:
+@@ -53,6 +53,9 @@
+ sandbox_capable = (os.path.isfile(SANDBOX_BINARY) and
+                    os.access(SANDBOX_BINARY, os.X_OK))
+ 
++autodep_capable = (os.path.isfile(AUTODEP_LIBRARY) and
++                   os.access(AUTODEP_LIBRARY, os.X_OK))
++
+ fakeroot_capable = (os.path.isfile(FAKEROOT_BINARY) and
+                     os.access(FAKEROOT_BINARY, os.X_OK))
+ 
+@@ -80,6 +83,16 @@
+ 	args.append(mycommand)
+ 	return spawn(args, opt_name=opt_name, **keywords)
+ 
++def spawn_autodep(mycommand, opt_name=None, **keywords):
++	if not autodep_capable:
++		return spawn_bash(mycommand, opt_name=opt_name, **keywords)
++	if "env" not in keywords or "LOG_SOCKET" not in keywords["env"]:
++		return spawn_bash(mycommand, opt_name=opt_name, **keywords)
++
++	# Core part: tell the loader to preload logging library
++	keywords["env"]["LD_PRELOAD"]=AUTODEP_LIBRARY
++	return spawn_bash(mycommand, opt_name=opt_name, **keywords)	
++
+ def spawn_sandbox(mycommand, opt_name=None, **keywords):
+ 	if not sandbox_capable:
+ 		return spawn_bash(mycommand, opt_name=opt_name, **keywords)

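The heart of the integration patch is spawn_autodep at the end: when FEATURES contains depcheck or depcheckstrict, the sandbox is disabled and the build is spawned with LD_PRELOAD pointing at file_hook.so, which intercepts file accesses and reports them over the unix socket named in LOG_SOCKET. A stripped-down sketch of that injection, with a hypothetical command standing in for the ebuild environment:

import os
import subprocess

AUTODEP_LIBRARY = "/usr/lib/file_hook.so"  # path used by the patch above

def spawn_with_hook(cmd, log_socket):
	env = dict(os.environ)
	env["LOG_SOCKET"] = log_socket       # where the hook reports events
	env["LD_PRELOAD"] = AUTODEP_LIBRARY  # loader preloads the hook library
	return subprocess.call(cmd, env=env)

# hypothetical usage:
# spawn_with_hook(["bash", "-c", "cat /etc/hostname"],
#                 "/tmp/log_socket_abc123/socket")
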
diff --git a/portage_with_autodep/pym/_emerge/AbstractDepPriority.py b/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
index 94a9379..94f26ef 100644
--- a/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
+++ b/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
@@ -1,8 +1,8 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import copy
-from _emerge.SlotObject import SlotObject
+from portage.util.SlotObject import SlotObject
 
 class AbstractDepPriority(SlotObject):
 	__slots__ = ("buildtime", "runtime", "runtime_post")

diff --git a/portage_with_autodep/pym/_emerge/AbstractDepPriority.pyo b/portage_with_autodep/pym/_emerge/AbstractDepPriority.pyo
new file mode 100644
index 0000000..b6a9871
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/AbstractDepPriority.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
index 4147ecb..c7b8f83 100644
--- a/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
+++ b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
@@ -19,8 +19,9 @@ from portage.util import apply_secpass_permissions
 class AbstractEbuildProcess(SpawnProcess):
 
 	__slots__ = ('phase', 'settings',) + \
-		('_build_dir', '_ipc_daemon', '_exit_command',)
+		('_build_dir', '_ipc_daemon', '_exit_command', '_exit_timeout_id')
 	_phases_without_builddir = ('clean', 'cleanrm', 'depend', 'help',)
+	_phases_interactive_whitelist = ('config',)
 
 	# Number of milliseconds to allow natural exit of the ebuild
 	# process after it has called the exit command via IPC. It
@@ -92,7 +93,20 @@ class AbstractEbuildProcess(SpawnProcess):
 			else:
 				self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
 
-		SpawnProcess._start(self)
+		if self.fd_pipes is None:
+			self.fd_pipes = {}
+		null_fd = None
+		if 0 not in self.fd_pipes and \
+			self.phase not in self._phases_interactive_whitelist and \
+			"interactive" not in self.settings.get("PROPERTIES", "").split():
+			null_fd = os.open('/dev/null', os.O_RDONLY)
+			self.fd_pipes[0] = null_fd
+
+		try:
+			SpawnProcess._start(self)
+		finally:
+			if null_fd is not None:
+				os.close(null_fd)
 
 	def _init_ipc_fifos(self):
 
@@ -143,13 +157,29 @@ class AbstractEbuildProcess(SpawnProcess):
 	def _exit_command_callback(self):
 		if self._registered:
 			# Let the process exit naturally, if possible.
-			self.scheduler.schedule(self._reg_id, timeout=self._exit_timeout)
-			if self._registered:
-				# If it doesn't exit naturally in a reasonable amount
-				# of time, kill it (solves bug #278895). We try to avoid
-				# this when possible since it makes sandbox complain about
-				# being killed by a signal.
-				self.cancel()
+			self._exit_timeout_id = \
+				self.scheduler.timeout_add(self._exit_timeout,
+				self._exit_command_timeout_cb)
+
+	def _exit_command_timeout_cb(self):
+		if self._registered:
+			# If it doesn't exit naturally in a reasonable amount
+			# of time, kill it (solves bug #278895). We try to avoid
+			# this when possible since it makes sandbox complain about
+			# being killed by a signal.
+			self.cancel()
+			self._exit_timeout_id = \
+				self.scheduler.timeout_add(self._cancel_timeout,
+					self._cancel_timeout_cb)
+		else:
+			self._exit_timeout_id = None
+
+		return False # only run once
+
+	def _cancel_timeout_cb(self):
+		self._exit_timeout_id = None
+		self.wait()
+		return False # only run once
 
 	def _orphan_process_warn(self):
 		phase = self.phase
@@ -239,6 +269,10 @@ class AbstractEbuildProcess(SpawnProcess):
 	def _set_returncode(self, wait_retval):
 		SpawnProcess._set_returncode(self, wait_retval)
 
+		if self._exit_timeout_id is not None:
+			self.scheduler.source_remove(self._exit_timeout_id)
+			self._exit_timeout_id = None
+
 		if self._ipc_daemon is not None:
 			self._ipc_daemon.cancel()
 			if self._exit_command.exitcode is not None:

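The first AbstractEbuildProcess hunk closes an interactivity hole: unless the phase is whitelisted (config) or the package declares PROPERTIES=interactive, stdin is replaced with /dev/null so a build can never block waiting for terminal input. The pattern in isolation, as a sketch:

import os
import subprocess

def run_non_interactive(cmd):
	"""Run cmd with stdin redirected from /dev/null."""
	null_fd = os.open('/dev/null', os.O_RDONLY)
	try:
		# the child reads EOF immediately instead of hanging on the tty
		return subprocess.call(cmd, stdin=null_fd)
	finally:
		os.close(null_fd)
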
diff --git a/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.pyo b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.pyo
new file mode 100644
index 0000000..b55f9c2
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/AbstractPollTask.py b/portage_with_autodep/pym/_emerge/AbstractPollTask.py
index f7f3a95..2c84709 100644
--- a/portage_with_autodep/pym/_emerge/AbstractPollTask.py
+++ b/portage_with_autodep/pym/_emerge/AbstractPollTask.py
@@ -1,44 +1,111 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import array
+import errno
 import logging
+import os
 
 from portage.util import writemsg_level
 from _emerge.AsynchronousTask import AsynchronousTask
-from _emerge.PollConstants import PollConstants
+
 class AbstractPollTask(AsynchronousTask):
 
 	__slots__ = ("scheduler",) + \
 		("_registered",)
 
 	_bufsize = 4096
-	_exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
-	_registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
-		_exceptional_events
+
+	@property
+	def _exceptional_events(self):
+		return self.scheduler.IO_ERR | self.scheduler.IO_NVAL
+
+	@property
+	def _registered_events(self):
+		return self.scheduler.IO_IN | self.scheduler.IO_HUP | \
+			self._exceptional_events
 
 	def isAlive(self):
 		return bool(self._registered)
 
-	def _read_buf(self, f, event):
+	def _read_array(self, f, event):
 		"""
+		NOTE: array.fromfile() is used here only for testing purposes,
+		because it has bugs in all known versions of Python (including
+		Python 2.7 and Python 3.2). See PipeReaderArrayTestCase.
+
 		| POLLIN | RETURN
 		| BIT    | VALUE
 		| ---------------------------------------------------
 		| 1      | Read self._bufsize into an instance of
-		|        | array.array('B') and return it, ignoring
+		|        | array.array('B') and return it, handling
 		|        | EOFError and IOError. An empty array
 		|        | indicates EOF.
 		| ---------------------------------------------------
 		| 0      | None
 		"""
 		buf = None
-		if event & PollConstants.POLLIN:
+		if event & self.scheduler.IO_IN:
 			buf = array.array('B')
 			try:
 				buf.fromfile(f, self._bufsize)
-			except (EOFError, IOError):
+			except EOFError:
 				pass
+			except TypeError:
+				# Python 3.2:
+				# TypeError: read() didn't return bytes
+				pass
+			except IOError as e:
+				# EIO happens with pty on Linux after the
+				# slave end of the pty has been closed.
+				if e.errno == errno.EIO:
+					# EOF: return empty string of bytes
+					pass
+				elif e.errno == errno.EAGAIN:
+					# EAGAIN: return None
+					buf = None
+				else:
+					raise
+
+		if buf is not None:
+			try:
+				# Python >=3.2
+				buf = buf.tobytes()
+			except AttributeError:
+				buf = buf.tostring()
+
+		return buf
+
+	def _read_buf(self, fd, event):
+		"""
+		| POLLIN | RETURN
+		| BIT    | VALUE
+		| ---------------------------------------------------
+		| 1      | Read self._bufsize into a string of bytes,
+		|        | handling EAGAIN and EIO. An empty string
+		|        | of bytes indicates EOF.
+		| ---------------------------------------------------
+		| 0      | None
+		"""
+		# NOTE: array.fromfile() is no longer used here because it has
+		# bugs in all known versions of Python (including Python 2.7
+		# and Python 3.2).
+		buf = None
+		if event & self.scheduler.IO_IN:
+			try:
+				buf = os.read(fd, self._bufsize)
+			except OSError as e:
+				# EIO happens with pty on Linux after the
+				# slave end of the pty has been closed.
+				if e.errno == errno.EIO:
+					# EOF: return empty string of bytes
+					buf = b''
+				elif e.errno == errno.EAGAIN:
+					# EAGAIN: return None
+					buf = None
+				else:
+					raise
+
 		return buf
 
 	def _unregister(self):
@@ -56,7 +123,32 @@ class AbstractPollTask(AsynchronousTask):
 				self._log_poll_exception(event)
 				self._unregister()
 				self.cancel()
-			elif event & PollConstants.POLLHUP:
+				self.wait()
+			elif event & self.scheduler.IO_HUP:
 				self._unregister()
 				self.wait()
 
+	def _wait(self):
+		if self.returncode is not None:
+			return self.returncode
+		self._wait_loop()
+		return self.returncode
+
+	def _wait_loop(self, timeout=None):
+
+		if timeout is None:
+			while self._registered:
+				self.scheduler.iteration()
+			return
+
+		def timeout_cb():
+			timeout_cb.timed_out = True
+			return False
+		timeout_cb.timed_out = False
+		timeout_cb.timeout_id = self.scheduler.timeout_add(timeout, timeout_cb)
+
+		try:
+			while self._registered and not timeout_cb.timed_out:
+				self.scheduler.iteration()
+		finally:
+			self.scheduler.unregister(timeout_cb.timeout_id)

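The new _read_buf contract above distinguishes three states on a non-blocking fd: data, EOF (empty bytes), and "no data yet" (None on EAGAIN); EIO from a closed pty slave is also folded into EOF. A short demonstration of those states on a non-blocking pipe:

import errno
import fcntl
import os

def read_buf(fd, bufsize=4096):
	try:
		return os.read(fd, bufsize)  # b'' means EOF
	except OSError as e:
		if e.errno == errno.EAGAIN:
			return None              # no data yet; poll again later
		raise

r, w = os.pipe()
fcntl.fcntl(r, fcntl.F_SETFL, fcntl.fcntl(r, fcntl.F_GETFL) | os.O_NONBLOCK)

print(read_buf(r))  # None: writer open, nothing written yet
os.write(w, b'x')
print(read_buf(r))  # b'x'
os.close(w)
print(read_buf(r))  # b'': EOF
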
diff --git a/portage_with_autodep/pym/_emerge/AbstractPollTask.pyo b/portage_with_autodep/pym/_emerge/AbstractPollTask.pyo
new file mode 100644
index 0000000..06ef6b9
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/AbstractPollTask.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/AsynchronousLock.py b/portage_with_autodep/pym/_emerge/AsynchronousLock.py
index 637ba73..587aa46 100644
--- a/portage_with_autodep/pym/_emerge/AsynchronousLock.py
+++ b/portage_with_autodep/pym/_emerge/AsynchronousLock.py
@@ -1,15 +1,16 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import dummy_threading
 import fcntl
+import errno
 import logging
 import sys
 
 try:
 	import threading
 except ImportError:
-	import dummy_threading as threading
+	threading = dummy_threading
 
 import portage
 from portage import os
@@ -19,7 +20,6 @@ from portage.locks import lockfile, unlockfile
 from portage.util import writemsg_level
 from _emerge.AbstractPollTask import AbstractPollTask
 from _emerge.AsynchronousTask import AsynchronousTask
-from _emerge.PollConstants import PollConstants
 from _emerge.SpawnProcess import SpawnProcess
 
 class AsynchronousLock(AsynchronousTask):
@@ -35,7 +35,7 @@ class AsynchronousLock(AsynchronousTask):
 
 	__slots__ = ('path', 'scheduler',) + \
 		('_imp', '_force_async', '_force_dummy', '_force_process', \
-		'_force_thread', '_waiting')
+		'_force_thread')
 
 	_use_process_by_default = True
 
@@ -66,8 +66,7 @@ class AsynchronousLock(AsynchronousTask):
 
 	def _imp_exit(self, imp):
 		# call exit listeners
-		if not self._waiting:
-			self.wait()
+		self.wait()
 
 	def _cancel(self):
 		if isinstance(self._imp, AsynchronousTask):
@@ -81,9 +80,7 @@ class AsynchronousLock(AsynchronousTask):
 	def _wait(self):
 		if self.returncode is not None:
 			return self.returncode
-		self._waiting = True
 		self.returncode = self._imp.wait()
-		self._waiting = False
 		return self.returncode
 
 	def unlock(self):
@@ -114,13 +111,13 @@ class _LockThread(AbstractPollTask):
 	def _start(self):
 		pr, pw = os.pipe()
 		self._files = {}
-		self._files['pipe_read'] = os.fdopen(pr, 'rb', 0)
-		self._files['pipe_write'] = os.fdopen(pw, 'wb', 0)
-		for k, f in self._files.items():
-			fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
-				fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
-		self._reg_id = self.scheduler.register(self._files['pipe_read'].fileno(),
-			PollConstants.POLLIN, self._output_handler)
+		self._files['pipe_read'] = pr
+		self._files['pipe_write'] = pw
+		for f in self._files.values():
+			fcntl.fcntl(f, fcntl.F_SETFL,
+				fcntl.fcntl(f, fcntl.F_GETFL) | os.O_NONBLOCK)
+		self._reg_id = self.scheduler.register(self._files['pipe_read'],
+			self.scheduler.IO_IN, self._output_handler)
 		self._registered = True
 		threading_mod = threading
 		if self._force_dummy:
@@ -130,26 +127,27 @@ class _LockThread(AbstractPollTask):
 
 	def _run_lock(self):
 		self._lock_obj = lockfile(self.path, wantnewlockfile=True)
-		self._files['pipe_write'].write(b'\0')
+		os.write(self._files['pipe_write'], b'\0')
 
 	def _output_handler(self, f, event):
-		buf = self._read_buf(self._files['pipe_read'], event)
+		buf = None
+		if event & self.scheduler.IO_IN:
+			try:
+				buf = os.read(self._files['pipe_read'], self._bufsize)
+			except OSError as e:
+				if e.errno not in (errno.EAGAIN,):
+					raise
 		if buf:
 			self._unregister()
 			self.returncode = os.EX_OK
 			self.wait()
 
+		return True
+
 	def _cancel(self):
 		# There's currently no way to force thread termination.
 		pass
 
-	def _wait(self):
-		if self.returncode is not None:
-			return self.returncode
-		if self._registered:
-			self.scheduler.schedule(self._reg_id)
-		return self.returncode
-
 	def unlock(self):
 		if self._lock_obj is None:
 			raise AssertionError('not locked')
@@ -171,7 +169,7 @@ class _LockThread(AbstractPollTask):
 
 		if self._files is not None:
 			for f in self._files.values():
-				f.close()
+				os.close(f)
 			self._files = None
 
 class _LockProcess(AbstractPollTask):
@@ -190,12 +188,12 @@ class _LockProcess(AbstractPollTask):
 		in_pr, in_pw = os.pipe()
 		out_pr, out_pw = os.pipe()
 		self._files = {}
-		self._files['pipe_in'] = os.fdopen(in_pr, 'rb', 0)
-		self._files['pipe_out'] = os.fdopen(out_pw, 'wb', 0)
+		self._files['pipe_in'] = in_pr
+		self._files['pipe_out'] = out_pw
 		fcntl.fcntl(in_pr, fcntl.F_SETFL,
 			fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)
 		self._reg_id = self.scheduler.register(in_pr,
-			PollConstants.POLLIN, self._output_handler)
+			self.scheduler.IO_IN, self._output_handler)
 		self._registered = True
 		self._proc = SpawnProcess(
 			args=[portage._python_interpreter,
@@ -209,9 +207,22 @@ class _LockProcess(AbstractPollTask):
 		os.close(in_pw)
 
 	def _proc_exit(self, proc):
+
+		if self._files is not None:
+			# Close pipe_out if it's still open, since it's useless
+			# after the process has exited. This helps to avoid
+			# "ResourceWarning: unclosed file" since Python 3.2.
+			try:
+				pipe_out = self._files.pop('pipe_out')
+			except KeyError:
+				pass
+			else:
+				os.close(pipe_out)
+
 		if proc.returncode != os.EX_OK:
 			# Typically, this will happen due to the
 			# process being killed by a signal.
+
 			if not self._acquired:
 				# If the lock hasn't been acquired yet, the
 				# caller can check the returncode and handle
@@ -242,21 +253,22 @@ class _LockProcess(AbstractPollTask):
 			self._proc.poll()
 		return self.returncode
 
-	def _wait(self):
-		if self.returncode is not None:
-			return self.returncode
-		if self._registered:
-			self.scheduler.schedule(self._reg_id)
-		return self.returncode
-
 	def _output_handler(self, f, event):
-		buf = self._read_buf(self._files['pipe_in'], event)
+		buf = None
+		if event & self.scheduler.IO_IN:
+			try:
+				buf = os.read(self._files['pipe_in'], self._bufsize)
+			except OSError as e:
+				if e.errno not in (errno.EAGAIN,):
+					raise
 		if buf:
 			self._acquired = True
 			self._unregister()
 			self.returncode = os.EX_OK
 			self.wait()
 
+		return True
+
 	def _unregister(self):
 		self._registered = False
 
@@ -270,7 +282,7 @@ class _LockProcess(AbstractPollTask):
 			except KeyError:
 				pass
 			else:
-				pipe_in.close()
+				os.close(pipe_in)
 
 	def unlock(self):
 		if self._proc is None:
@@ -281,8 +293,8 @@ class _LockProcess(AbstractPollTask):
 			raise AssertionError("lock process failed with returncode %s" \
 				% (self.returncode,))
 		self._unlocked = True
-		self._files['pipe_out'].write(b'\0')
-		self._files['pipe_out'].close()
+		os.write(self._files['pipe_out'], b'\0')
+		os.close(self._files['pipe_out'])
 		self._files = None
 		self._proc.wait()
 		self._proc = None

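The _LockThread rewrite above swaps buffered file objects for raw pipe fds: the worker thread takes the lock, then wakes the scheduler by writing one byte to the pipe the event loop is polling. That wake-up idiom, reduced to a sketch:

import os
import threading

r, w = os.pipe()

def worker():
	# ... acquire the lock here ...
	os.write(w, b'\0')  # a single byte wakes the polling loop

threading.Thread(target=worker).start()
os.read(r, 1)  # a real event loop would register r with its poller
print("lock acquired")
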
diff --git a/portage_with_autodep/pym/_emerge/AsynchronousLock.pyo b/portage_with_autodep/pym/_emerge/AsynchronousLock.pyo
new file mode 100644
index 0000000..5f3cfbb
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/AsynchronousLock.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/AsynchronousTask.py b/portage_with_autodep/pym/_emerge/AsynchronousTask.py
index 36522ca..7a193ce 100644
--- a/portage_with_autodep/pym/_emerge/AsynchronousTask.py
+++ b/portage_with_autodep/pym/_emerge/AsynchronousTask.py
@@ -1,8 +1,11 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
+import signal
+
 from portage import os
-from _emerge.SlotObject import SlotObject
+from portage.util.SlotObject import SlotObject
+
 class AsynchronousTask(SlotObject):
 	"""
 	Subclasses override _wait() and _poll() so that calls
@@ -14,7 +17,10 @@ class AsynchronousTask(SlotObject):
 	"""
 
 	__slots__ = ("background", "cancelled", "returncode") + \
-		("_exit_listeners", "_exit_listener_stack", "_start_listeners")
+		("_exit_listeners", "_exit_listener_stack", "_start_listeners",
+		"_waiting")
+
+	_cancelled_returncode = - signal.SIGINT
 
 	def start(self):
 		"""
@@ -42,7 +48,12 @@ class AsynchronousTask(SlotObject):
 
 	def wait(self):
 		if self.returncode is None:
-			self._wait()
+			if not self._waiting:
+				self._waiting = True
+				try:
+					self._wait()
+				finally:
+					self._waiting = False
 		self._wait_hook()
 		return self.returncode
 
@@ -50,10 +61,17 @@ class AsynchronousTask(SlotObject):
 		return self.returncode
 
 	def cancel(self):
+		"""
+		Cancel the task, but do not wait for exit status. If asynchronous exit
+		notification is desired, then use addExitListener to add a listener
+		before calling this method.
+		NOTE: Synchronous waiting for status is not supported, since it would
+		be vulnerable to hitting the recursion limit when a large number of
+		tasks need to be terminated simultaneously, like in bug #402335.
+		"""
 		if not self.cancelled:
 			self.cancelled = True
 			self._cancel()
-			self.wait()
 
 	def _cancel(self):
 		"""
@@ -62,6 +80,17 @@ class AsynchronousTask(SlotObject):
 		"""
 		pass
 
+	def _was_cancelled(self):
+		"""
+		If cancelled, set returncode if necessary and return True.
+		Otherwise, return False.
+		"""
+		if self.cancelled:
+			if self.returncode is None:
+				self.returncode = self._cancelled_returncode
+			return True
+		return False
+
 	def addStartListener(self, f):
 		"""
 		The function will be called with one argument, a reference to self.
@@ -123,7 +152,11 @@ class AsynchronousTask(SlotObject):
 			self._exit_listener_stack = self._exit_listeners
 			self._exit_listeners = None
 
-			self._exit_listener_stack.reverse()
+			# Execute exit listeners in reverse order, so that
+			# the last added listener is executed first. This
+			# allows SequentialTaskQueue to decrement its running
+			# task count as soon as one of its tasks exits, so that
+			# the value is accurate when other listeners execute.
 			while self._exit_listener_stack:
 				self._exit_listener_stack.pop()(self)
 

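Two behavioral changes are bundled in this hunk: wait() gains a _waiting guard against re-entrant calls, and cancel() no longer waits synchronously, so that terminating a large batch of tasks cannot hit the recursion limit (bug #402335). A stripped-down sketch of the guard, not the real portage class:

    class Task(object):
        def __init__(self):
            self.returncode = None
            self._waiting = False

        def wait(self):
            # A re-entrant call made while _wait() is still running
            # falls through and simply returns the current returncode.
            if self.returncode is None and not self._waiting:
                self._waiting = True
                try:
                    self._wait()
                finally:
                    self._waiting = False
            return self.returncode

        def _wait(self):
            # Subclasses would pump an event loop here; calling
            # self.wait() from inside this method no longer recurses.
            self.returncode = 0

    print(Task().wait())  # 0
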
diff --git a/portage_with_autodep/pym/_emerge/AsynchronousTask.pyo b/portage_with_autodep/pym/_emerge/AsynchronousTask.pyo
new file mode 100644
index 0000000..b8d67ea
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/AsynchronousTask.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/AtomArg.pyo b/portage_with_autodep/pym/_emerge/AtomArg.pyo
new file mode 100644
index 0000000..b8f59cf
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/AtomArg.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/Binpkg.py b/portage_with_autodep/pym/_emerge/Binpkg.py
index bc6511e..ea8a1ad 100644
--- a/portage_with_autodep/pym/_emerge/Binpkg.py
+++ b/portage_with_autodep/pym/_emerge/Binpkg.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from _emerge.EbuildPhase import EbuildPhase
@@ -9,15 +9,18 @@ from _emerge.CompositeTask import CompositeTask
 from _emerge.BinpkgVerifier import BinpkgVerifier
 from _emerge.EbuildMerge import EbuildMerge
 from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.SpawnProcess import SpawnProcess
 from portage.eapi import eapi_exports_replace_vars
-from portage.util import writemsg
+from portage.util import ensure_dirs, writemsg
 import portage
 from portage import os
+from portage import shutil
 from portage import _encodings
 from portage import _unicode_decode
 from portage import _unicode_encode
 import io
 import logging
+import textwrap
 from portage.output import colorize
 
 class Binpkg(CompositeTask):
@@ -25,7 +28,8 @@ class Binpkg(CompositeTask):
 	__slots__ = ("find_blockers",
 		"ldpath_mtimes", "logger", "opts",
 		"pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
-		("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
+		("_bintree", "_build_dir", "_build_prefix",
+		"_ebuild_path", "_fetched_pkg",
 		"_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
 
 	def _writemsg_level(self, msg, level=0, noiselevel=0):
@@ -83,13 +87,12 @@ class Binpkg(CompositeTask):
 
 			waiting_msg = ("Fetching '%s' " + \
 				"in the background. " + \
-				"To view fetch progress, run `tail -f " + \
+				"To view fetch progress, run `tail -f %s" + \
 				"/var/log/emerge-fetch.log` in another " + \
-				"terminal.") % prefetcher.pkg_path
+				"terminal.") % (prefetcher.pkg_path, settings["EPREFIX"])
 			msg_prefix = colorize("GOOD", " * ")
-			from textwrap import wrap
 			waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
-				for line in wrap(waiting_msg, 65))
+				for line in textwrap.wrap(waiting_msg, 65))
 			if not self.background:
 				writemsg(waiting_msg, noiselevel=-1)
 
@@ -101,6 +104,10 @@ class Binpkg(CompositeTask):
 
 	def _prefetch_exit(self, prefetcher):
 
+		if self._was_cancelled():
+			self.wait()
+			return
+
 		pkg = self.pkg
 		pkg_count = self.pkg_count
 		if not (self.opts.pretend or self.opts.fetchonly):
@@ -299,10 +306,68 @@ class Binpkg(CompositeTask):
 		self._start_task(extractor, self._extractor_exit)
 
 	def _extractor_exit(self, extractor):
-		if self._final_exit(extractor) != os.EX_OK:
+		if self._default_exit(extractor) != os.EX_OK:
 			self._unlock_builddir()
 			self._writemsg_level("!!! Error Extracting '%s'\n" % \
 				self._pkg_path, noiselevel=-1, level=logging.ERROR)
+			self.wait()
+			return
+
+		try:
+			with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
+				encoding=_encodings['fs'], errors='strict'), mode='r',
+				encoding=_encodings['repo.content'], errors='replace') as f:
+				self._build_prefix = f.read().rstrip('\n')
+		except IOError:
+			self._build_prefix = ""
+
+		if self._build_prefix == self.settings["EPREFIX"]:
+			ensure_dirs(self.settings["ED"])
+			self._current_task = None
+			self.returncode = os.EX_OK
+			self.wait()
+			return
+
+		chpathtool = SpawnProcess(
+			args=[portage._python_interpreter,
+			os.path.join(self.settings["PORTAGE_BIN_PATH"], "chpathtool.py"),
+			self.settings["D"], self._build_prefix, self.settings["EPREFIX"]],
+			background=self.background, env=self.settings.environ(), 
+			scheduler=self.scheduler,
+			logfile=self.settings.get('PORTAGE_LOG_FILE'))
+		self._writemsg_level(">>> Adjusting Prefix to %s\n" % self.settings["EPREFIX"])
+		self._start_task(chpathtool, self._chpathtool_exit)
+
+	def _chpathtool_exit(self, chpathtool):
+		if self._final_exit(chpathtool) != os.EX_OK:
+			self._unlock_builddir()
+			self._writemsg_level("!!! Error Adjusting Prefix to %s" %
+				(self.settings["EPREFIX"],),
+				noiselevel=-1, level=logging.ERROR)
+			self.wait()
+			return
+
+		# We want to install in "our" prefix, not the binary one
+		with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
+			encoding=_encodings['fs'], errors='strict'), mode='w',
+			encoding=_encodings['repo.content'], errors='strict') as f:
+			f.write(self.settings["EPREFIX"] + "\n")
+
+		# Move the files to the correct location for merge.
+		image_tmp_dir = os.path.join(
+			self.settings["PORTAGE_BUILDDIR"], "image_tmp")
+		build_d = os.path.join(self.settings["D"],
+			self._build_prefix.lstrip(os.sep))
+		if not os.path.isdir(build_d):
+			# Assume this is a virtual package or something.
+			shutil.rmtree(self._image_dir)
+			ensure_dirs(self.settings["ED"])
+		else:
+			os.rename(build_d, image_tmp_dir)
+			shutil.rmtree(self._image_dir)
+			ensure_dirs(os.path.dirname(self.settings["ED"].rstrip(os.sep)))
+			os.rename(image_tmp_dir, self.settings["ED"])
+
 		self.wait()
 
 	def _unlock_builddir(self):
@@ -312,13 +377,13 @@ class Binpkg(CompositeTask):
 		self._build_dir.unlock()
 
 	def create_install_task(self):
-		task = EbuildMerge(find_blockers=self.find_blockers,
+		task = EbuildMerge(exit_hook=self._install_exit,
+			find_blockers=self.find_blockers,
 			ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
 			pkg=self.pkg, pkg_count=self.pkg_count,
 			pkg_path=self._pkg_path, scheduler=self.scheduler,
 			settings=self.settings, tree=self._tree,
 			world_atom=self.world_atom)
-		task.addExitListener(self._install_exit)
 		return task
 
 	def _install_exit(self, task):

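The new _chpathtool_exit logic re-roots the extracted image when the package was built under a different EPREFIX: rescue the payload out of the old prefix subtree, delete the rest of the image, and rename the payload to the new ED. A hedged sketch with hypothetical paths (the real code takes them from PORTAGE_BUILDDIR, D and ED in the ebuild environment):

    import os
    import shutil

    def reroot_image(image_dir, build_prefix, new_ed):
        # Payload location under the prefix the package was built with.
        build_d = os.path.join(image_dir, build_prefix.lstrip(os.sep))
        tmp_dir = image_dir + '.tmp'
        if not os.path.isdir(build_d):
            # Nothing under the old prefix; assume a virtual package.
            shutil.rmtree(image_dir)
            os.makedirs(new_ed)
        else:
            os.rename(build_d, tmp_dir)   # rescue the payload
            shutil.rmtree(image_dir)      # drop the old prefix tree
            parent = os.path.dirname(new_ed.rstrip(os.sep))
            if not os.path.isdir(parent):
                os.makedirs(parent)       # like ensure_dirs()
            os.rename(tmp_dir, new_ed)    # re-root at the new prefix
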
diff --git a/portage_with_autodep/pym/_emerge/Binpkg.pyo b/portage_with_autodep/pym/_emerge/Binpkg.pyo
new file mode 100644
index 0000000..4499b9d
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/Binpkg.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py
index f68971b..5ba1495 100644
--- a/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py
+++ b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import errno
@@ -38,7 +38,7 @@ class BinpkgEnvExtractor(CompositeTask):
 			background=self.background,
 			env=self.settings.environ(), 
 			scheduler=self.scheduler,
-			logfile=self.settings.get('PORTAGE_LOGFILE'))
+			logfile=self.settings.get('PORTAGE_LOG_FILE'))
 
 		self._start_task(extractor_proc, self._extractor_exit)
 
@@ -59,7 +59,7 @@ class BinpkgEnvExtractor(CompositeTask):
 		# This is a signal to ebuild.sh, so that it knows to filter
 		# out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
 		# would be preserved between normal phases.
-		open(_unicode_encode(self._get_dest_env_path() + '.raw'), 'w')
+		open(_unicode_encode(self._get_dest_env_path() + '.raw'), 'wb').close()
 
 		self._current_task = None
 		self.returncode = os.EX_OK

diff --git a/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.pyo b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.pyo
new file mode 100644
index 0000000..21c2e13
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py
index d1630f2..f25cbf9 100644
--- a/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py
+++ b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py
@@ -1,9 +1,8 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from _emerge.SpawnProcess import SpawnProcess
 import portage
-import os
 import signal
 
 class BinpkgExtractorAsync(SpawnProcess):

diff --git a/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.pyo b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.pyo
new file mode 100644
index 0000000..f8498f7
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/BinpkgFetcher.py b/portage_with_autodep/pym/_emerge/BinpkgFetcher.py
index baea4d6..f415e2e 100644
--- a/portage_with_autodep/pym/_emerge/BinpkgFetcher.py
+++ b/portage_with_autodep/pym/_emerge/BinpkgFetcher.py
@@ -28,9 +28,6 @@ class BinpkgFetcher(SpawnProcess):
 
 	def _start(self):
 
-		if self.cancelled:
-			return
-
 		pkg = self.pkg
 		pretend = self.pretend
 		bintree = pkg.root_config.trees["bintree"]

diff --git a/portage_with_autodep/pym/_emerge/BinpkgFetcher.pyo b/portage_with_autodep/pym/_emerge/BinpkgFetcher.pyo
new file mode 100644
index 0000000..482e55e
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/BinpkgFetcher.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.pyo b/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.pyo
new file mode 100644
index 0000000..c890cac
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/BinpkgVerifier.pyo b/portage_with_autodep/pym/_emerge/BinpkgVerifier.pyo
new file mode 100644
index 0000000..21f770e
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/BinpkgVerifier.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/Blocker.pyo b/portage_with_autodep/pym/_emerge/Blocker.pyo
new file mode 100644
index 0000000..b9e56bc
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/Blocker.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/BlockerCache.py b/portage_with_autodep/pym/_emerge/BlockerCache.py
index 5c4f43e..fce81f8 100644
--- a/portage_with_autodep/pym/_emerge/BlockerCache.py
+++ b/portage_with_autodep/pym/_emerge/BlockerCache.py
@@ -1,6 +1,7 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
+import errno
 import sys
 from portage.util import writemsg
 from portage.data import secpass
@@ -15,6 +16,9 @@ except ImportError:
 if sys.hexversion >= 0x3000000:
 	basestring = str
 	long = int
+	_unicode = str
+else:
+	_unicode = unicode
 
 class BlockerCache(portage.cache.mappings.MutableMapping):
 	"""This caches blockers of installed packages so that dep_check does not
@@ -58,8 +62,11 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
 			self._cache_data = mypickle.load()
 			f.close()
 			del f
-		except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
-			if isinstance(e, pickle.UnpicklingError):
+		except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError) as e:
+			if isinstance(e, EnvironmentError) and \
+				getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
+				pass
+			else:
 				writemsg("!!! Error loading '%s': %s\n" % \
 					(self._cache_filename, str(e)), noiselevel=-1)
 			del e
@@ -141,7 +148,7 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
 				f.close()
 				portage.util.apply_secpass_permissions(
 					self._cache_filename, gid=portage.portage_gid, mode=0o644)
-			except (IOError, OSError) as e:
+			except (IOError, OSError):
 				pass
 			self._modified.clear()
 
@@ -155,8 +162,8 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
 		@param blocker_data: An object with counter and atoms attributes.
 		@type blocker_data: BlockerData
 		"""
-		self._cache_data["blockers"][cpv] = \
-			(blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
+		self._cache_data["blockers"][_unicode(cpv)] = (blocker_data.counter,
+			tuple(_unicode(x) for x in blocker_data.atoms))
 		self._modified.add(cpv)
 
 	def __iter__(self):
@@ -176,7 +183,7 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
 	def __getitem__(self, cpv):
 		"""
 		@rtype: BlockerData
-		@returns: An object with counter and atoms attributes.
+		@return: An object with counter and atoms attributes.
 		"""
 		return self.BlockerData(*self._cache_data["blockers"][cpv])
 

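The widened except clause treats a missing or unreadable cache file as a normal cold start and only warns about genuinely corrupt data. The same pattern in isolation, with a hypothetical path and plain pickle instead of portage's wrapper:

    import errno
    import pickle

    def load_cache(path):
        try:
            with open(path, 'rb') as f:
                return pickle.load(f)
        except (AttributeError, EOFError, EnvironmentError,
                ValueError, pickle.UnpicklingError) as e:
            if isinstance(e, EnvironmentError) and \
                getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
                pass  # no cache yet, or unreadable: rebuild silently
            else:
                print("!!! Error loading '%s': %s" % (path, e))
            return {}  # start with an empty cache either way
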
diff --git a/portage_with_autodep/pym/_emerge/BlockerCache.pyo b/portage_with_autodep/pym/_emerge/BlockerCache.pyo
new file mode 100644
index 0000000..41554e1
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/BlockerCache.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/BlockerDB.py b/portage_with_autodep/pym/_emerge/BlockerDB.py
index 4819749..459affd 100644
--- a/portage_with_autodep/pym/_emerge/BlockerDB.py
+++ b/portage_with_autodep/pym/_emerge/BlockerDB.py
@@ -25,7 +25,7 @@ class BlockerDB(object):
 		self._dep_check_trees = None
 		self._fake_vartree = fake_vartree
 		self._dep_check_trees = {
-			self._vartree.root : {
+			self._vartree.settings["EROOT"] : {
 				"porttree"    :  fake_vartree,
 				"vartree"     :  fake_vartree,
 		}}
@@ -36,7 +36,8 @@ class BlockerDB(object):
 		new_pkg is planned to be installed. This ignores build-time
 		blockers, since new_pkg is assumed to be built already.
 		"""
-		blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
+		blocker_cache = BlockerCache(None,
+			self._vartree.dbapi)
 		dep_keys = ["RDEPEND", "PDEPEND"]
 		settings = self._vartree.settings
 		stale_cache = set(blocker_cache)

diff --git a/portage_with_autodep/pym/_emerge/BlockerDB.pyo b/portage_with_autodep/pym/_emerge/BlockerDB.pyo
new file mode 100644
index 0000000..dfab0aa
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/BlockerDB.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/BlockerDepPriority.pyo b/portage_with_autodep/pym/_emerge/BlockerDepPriority.pyo
new file mode 100644
index 0000000..c3b554c
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/BlockerDepPriority.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/CompositeTask.py b/portage_with_autodep/pym/_emerge/CompositeTask.py
index 644a69b..3e43478 100644
--- a/portage_with_autodep/pym/_emerge/CompositeTask.py
+++ b/portage_with_autodep/pym/_emerge/CompositeTask.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from _emerge.AsynchronousTask import AsynchronousTask
@@ -60,7 +60,8 @@ class CompositeTask(AsynchronousTask):
 					self._current_task = None
 					break
 				else:
-					self.scheduler.schedule(condition=self._task_queued_wait)
+					while not self._task_queued_wait():
+						self.scheduler.iteration()
 					if self.returncode is not None:
 						break
 					elif self.cancelled:
@@ -103,7 +104,7 @@ class CompositeTask(AsynchronousTask):
 		Subclasses can use this as a generic task exit callback.
 
 		@rtype: int
-		@returns: The task.returncode attribute.
+		@return: The task.returncode attribute.
 		"""
 		self._assert_current(task)
 		if task.returncode != os.EX_OK:

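Instead of handing a condition to scheduler.schedule(), the queued-task wait now drives the loop itself, one iteration at a time, until the condition holds. Sketched with a toy scheduler whose iteration() runs one queued callback (the real one runs a full event-loop pass):

    class ToyScheduler(object):
        def __init__(self):
            self._pending = []

        def add(self, callback):
            self._pending.append(callback)

        def iteration(self):
            if self._pending:
                self._pending.pop(0)()

    started = []
    sched = ToyScheduler()
    sched.add(lambda: started.append(True))
    while not started:        # cf. "while not self._task_queued_wait()"
        sched.iteration()
    print(started)  # [True]
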
diff --git a/portage_with_autodep/pym/_emerge/CompositeTask.pyo b/portage_with_autodep/pym/_emerge/CompositeTask.pyo
new file mode 100644
index 0000000..adc8cae
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/CompositeTask.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/DepPriority.pyo b/portage_with_autodep/pym/_emerge/DepPriority.pyo
new file mode 100644
index 0000000..4028a36
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/DepPriority.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.pyo b/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.pyo
new file mode 100644
index 0000000..5e0f710
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.pyo b/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.pyo
new file mode 100644
index 0000000..5309bcd
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/Dependency.py b/portage_with_autodep/pym/_emerge/Dependency.py
index 0f746b6..c2d36b2 100644
--- a/portage_with_autodep/pym/_emerge/Dependency.py
+++ b/portage_with_autodep/pym/_emerge/Dependency.py
@@ -1,8 +1,9 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
+from portage.util.SlotObject import SlotObject
 from _emerge.DepPriority import DepPriority
-from _emerge.SlotObject import SlotObject
+
 class Dependency(SlotObject):
 	__slots__ = ("atom", "blocker", "child", "depth",
 		"parent", "onlydeps", "priority", "root",

diff --git a/portage_with_autodep/pym/_emerge/Dependency.pyo b/portage_with_autodep/pym/_emerge/Dependency.pyo
new file mode 100644
index 0000000..f53e0ed
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/Dependency.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/DependencyArg.pyo b/portage_with_autodep/pym/_emerge/DependencyArg.pyo
new file mode 100644
index 0000000..916a762
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/DependencyArg.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/EbuildBinpkg.py b/portage_with_autodep/pym/_emerge/EbuildBinpkg.py
index b7d43ba..34a6aef 100644
--- a/portage_with_autodep/pym/_emerge/EbuildBinpkg.py
+++ b/portage_with_autodep/pym/_emerge/EbuildBinpkg.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from _emerge.CompositeTask import CompositeTask
@@ -34,6 +34,10 @@ class EbuildBinpkg(CompositeTask):
 
 		self.settings.pop("PORTAGE_BINPKG_TMPFILE", None)
 		if self._default_exit(package_phase) != os.EX_OK:
+			try:
+				os.unlink(self._binpkg_tmpfile)
+			except OSError:
+				pass
 			self.wait()
 			return
 

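The added try/except makes failure cleanup best-effort: the temporary binpkg file is unlinked when the package phase fails, and a file that was never created is not an error. In isolation, with a hypothetical path:

    import os

    def remove_tmpfile(path):
        try:
            os.unlink(path)
        except OSError:
            pass  # never created, or already removed
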
diff --git a/portage_with_autodep/pym/_emerge/EbuildBinpkg.pyo b/portage_with_autodep/pym/_emerge/EbuildBinpkg.pyo
new file mode 100644
index 0000000..2acfc87
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/EbuildBinpkg.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/EbuildBuild.py b/portage_with_autodep/pym/_emerge/EbuildBuild.py
index 1c423a3..5a48f8e 100644
--- a/portage_with_autodep/pym/_emerge/EbuildBuild.py
+++ b/portage_with_autodep/pym/_emerge/EbuildBuild.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from _emerge.EbuildExecuter import EbuildExecuter
@@ -32,12 +32,13 @@ class EbuildBuild(CompositeTask):
 		pkg = self.pkg
 		settings = self.settings
 
-		rval = _check_temp_dir(settings)
-		if rval != os.EX_OK:
-			self.returncode = rval
-			self._current_task = None
-			self.wait()
-			return
+		if not self.opts.fetchonly:
+			rval = _check_temp_dir(settings)
+			if rval != os.EX_OK:
+				self.returncode = rval
+				self._current_task = None
+				self.wait()
+				return
 
 		root_config = pkg.root_config
 		tree = "porttree"
@@ -108,6 +109,10 @@ class EbuildBuild(CompositeTask):
 
 	def _prefetch_exit(self, prefetcher):
 
+		if self._was_cancelled():
+			self.wait()
+			return
+
 		opts = self.opts
 		pkg = self.pkg
 		settings = self.settings
@@ -225,9 +230,11 @@ class EbuildBuild(CompositeTask):
 		#buildsyspkg: Check if we need to _force_ binary package creation
 		self._issyspkg = "buildsyspkg" in features and \
 				system_set.findAtomForPackage(pkg) and \
-				not opts.buildpkg
+				"buildpkg" not in features and \
+				opts.buildpkg != 'n'
 
-		if opts.buildpkg or self._issyspkg:
+		if ("buildpkg" in features or self._issyspkg) \
+			and not self.opts.buildpkg_exclude.findAtomForPackage(pkg):
 
 			self._buildpkg = True
 
@@ -406,7 +413,8 @@ class EbuildBuild(CompositeTask):
 		ebuild_path = self._ebuild_path
 		tree = self._tree
 
-		task = EbuildMerge(find_blockers=self.find_blockers,
+		task = EbuildMerge(exit_hook=self._install_exit,
+			find_blockers=self.find_blockers,
 			ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
 			pkg_count=pkg_count, pkg_path=ebuild_path,
 			scheduler=self.scheduler,
@@ -419,7 +427,6 @@ class EbuildBuild(CompositeTask):
 			(pkg_count.curval, pkg_count.maxval, pkg.cpv)
 		logger.log(msg, short_msg=short_msg)
 
-		task.addExitListener(self._install_exit)
 		return task
 
 	def _install_exit(self, task):

diff --git a/portage_with_autodep/pym/_emerge/EbuildBuild.pyo b/portage_with_autodep/pym/_emerge/EbuildBuild.pyo
new file mode 100644
index 0000000..19d913c
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/EbuildBuild.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/EbuildBuildDir.py b/portage_with_autodep/pym/_emerge/EbuildBuildDir.py
index ddc5fe0..9773bd7 100644
--- a/portage_with_autodep/pym/_emerge/EbuildBuildDir.py
+++ b/portage_with_autodep/pym/_emerge/EbuildBuildDir.py
@@ -1,11 +1,12 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from _emerge.AsynchronousLock import AsynchronousLock
-from _emerge.SlotObject import SlotObject
+
 import portage
 from portage import os
 from portage.exception import PortageException
+from portage.util.SlotObject import SlotObject
 import errno
 
 class EbuildBuildDir(SlotObject):

diff --git a/portage_with_autodep/pym/_emerge/EbuildBuildDir.pyo b/portage_with_autodep/pym/_emerge/EbuildBuildDir.pyo
new file mode 100644
index 0000000..2846579
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/EbuildBuildDir.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/EbuildExecuter.py b/portage_with_autodep/pym/_emerge/EbuildExecuter.py
index f8febd4..fd663a4 100644
--- a/portage_with_autodep/pym/_emerge/EbuildExecuter.py
+++ b/portage_with_autodep/pym/_emerge/EbuildExecuter.py
@@ -12,7 +12,7 @@ from portage.package.ebuild.doebuild import _prepare_fake_distdir
 
 class EbuildExecuter(CompositeTask):
 
-	__slots__ = ("pkg", "scheduler", "settings")
+	__slots__ = ("pkg", "settings")
 
 	_phases = ("prepare", "configure", "compile", "test", "install")
 
@@ -34,8 +34,6 @@ class EbuildExecuter(CompositeTask):
 		cleanup = 0
 		portage.prepare_build_dirs(pkg.root, settings, cleanup)
 
-		portdb = pkg.root_config.trees['porttree'].dbapi
-		ebuild_path = settings['EBUILD']
 		alist = settings.configdict["pkg"].get("A", "").split()
 		_prepare_fake_distdir(settings, alist)
 

diff --git a/portage_with_autodep/pym/_emerge/EbuildExecuter.pyo b/portage_with_autodep/pym/_emerge/EbuildExecuter.pyo
new file mode 100644
index 0000000..592a0c9
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/EbuildExecuter.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/EbuildFetcher.py b/portage_with_autodep/pym/_emerge/EbuildFetcher.py
index feb68d0..c0a7fdd 100644
--- a/portage_with_autodep/pym/_emerge/EbuildFetcher.py
+++ b/portage_with_autodep/pym/_emerge/EbuildFetcher.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import traceback
@@ -21,7 +21,7 @@ class EbuildFetcher(SpawnProcess):
 
 	__slots__ = ("config_pool", "ebuild_path", "fetchonly", "fetchall",
 		"pkg", "prefetch") + \
-		("_digests", "_settings", "_uri_map")
+		("_digests", "_manifest", "_settings", "_uri_map")
 
 	def already_fetched(self, settings):
 		"""
@@ -40,7 +40,7 @@ class EbuildFetcher(SpawnProcess):
 
 		digests = self._get_digests()
 		distdir = settings["DISTDIR"]
-		allow_missing = "allow-missing-manifests" in settings.features
+		allow_missing = self._get_manifest().allow_missing
 
 		for filename in uri_map:
 			# Use stat rather than lstat since fetch() creates
@@ -163,10 +163,15 @@ class EbuildFetcher(SpawnProcess):
 
 		pid = os.fork()
 		if pid != 0:
+			if not isinstance(pid, int):
+				raise AssertionError(
+					"fork returned non-integer: %s" % (repr(pid),))
 			portage.process.spawned_pids.append(pid)
 			return [pid]
 
-		portage.process._setup_pipes(fd_pipes)
+		portage.locks._close_fds()
+		# Disable close_fds since we don't exec (see _setup_pipes docstring).
+		portage.process._setup_pipes(fd_pipes, close_fds=False)
 
 		# Use default signal handlers in order to avoid problems
 		# killing subprocesses as reported in bug #353239.
@@ -179,7 +184,7 @@ class EbuildFetcher(SpawnProcess):
 			not in ('yes', 'true')
 
 		rval = 1
-		allow_missing = 'allow-missing-manifests' in self._settings.features
+		allow_missing = self._get_manifest().allow_missing
 		try:
 			if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
 				digests=copy.deepcopy(self._get_digests()),
@@ -203,11 +208,16 @@ class EbuildFetcher(SpawnProcess):
 			raise AssertionError("ebuild not found for '%s'" % self.pkg.cpv)
 		return self.ebuild_path
 
+	def _get_manifest(self):
+		if self._manifest is None:
+			pkgdir = os.path.dirname(self._get_ebuild_path())
+			self._manifest = self.pkg.root_config.settings.repositories.get_repo_for_location(
+				os.path.dirname(os.path.dirname(pkgdir))).load_manifest(pkgdir, None)
+		return self._manifest
+
 	def _get_digests(self):
-		if self._digests is not None:
-			return self._digests
-		self._digests = portage.Manifest(os.path.dirname(
-			self._get_ebuild_path()), None).getTypeDigests("DIST")
+		if self._digests is None:
+			self._digests = self._get_manifest().getTypeDigests("DIST")
 		return self._digests
 
 	def _get_uri_map(self):

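_get_manifest() and _get_digests() memoize lazily: the manifest is located and loaded once on first use, then shared by both the allow_missing check and the DIST digest lookup. A reduced sketch where load() and DummyManifest stand in for the repository's load_manifest() and its result:

    class Fetcher(object):
        def __init__(self, load):
            self._load = load
            self._manifest = None
            self._digests = None

        def _get_manifest(self):
            if self._manifest is None:
                self._manifest = self._load()  # expensive, done once
            return self._manifest

        def _get_digests(self):
            if self._digests is None:
                self._digests = self._get_manifest().getTypeDigests("DIST")
            return self._digests

    class DummyManifest(object):
        allow_missing = False
        def getTypeDigests(self, ftype):
            return {}

    f = Fetcher(lambda: DummyManifest())
    print(f._get_digests())  # {}
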
diff --git a/portage_with_autodep/pym/_emerge/EbuildFetcher.pyo b/portage_with_autodep/pym/_emerge/EbuildFetcher.pyo
new file mode 100644
index 0000000..ddc92d1
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/EbuildFetcher.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/EbuildFetchonly.py b/portage_with_autodep/pym/_emerge/EbuildFetchonly.py
index b898971..f88ea96 100644
--- a/portage_with_autodep/pym/_emerge/EbuildFetchonly.py
+++ b/portage_with_autodep/pym/_emerge/EbuildFetchonly.py
@@ -1,10 +1,10 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-from _emerge.SlotObject import SlotObject
 import portage
 from portage import os
 from portage.elog.messages import eerror
+from portage.util.SlotObject import SlotObject
 
 class EbuildFetchonly(SlotObject):
 
@@ -21,7 +21,7 @@ class EbuildFetchonly(SlotObject):
 		debug = settings.get("PORTAGE_DEBUG") == "1"
 
 		rval = portage.doebuild(ebuild_path, "fetch",
-			settings["ROOT"], settings, debug=debug,
+			settings=settings, debug=debug,
 			listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
 			mydbapi=portdb, tree="porttree")
 

diff --git a/portage_with_autodep/pym/_emerge/EbuildFetchonly.pyo b/portage_with_autodep/pym/_emerge/EbuildFetchonly.pyo
new file mode 100644
index 0000000..c54a1db
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/EbuildFetchonly.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py
index 5dabe34..8414d20 100644
--- a/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py
+++ b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py
@@ -1,14 +1,15 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import errno
 import logging
 import pickle
 from portage import os
+from portage.exception import TryAgain
 from portage.localization import _
+from portage.locks import lockfile, unlockfile
 from portage.util import writemsg_level
 from _emerge.FifoIpcDaemon import FifoIpcDaemon
-from _emerge.PollConstants import PollConstants
 
 class EbuildIpcDaemon(FifoIpcDaemon):
 	"""
@@ -34,7 +35,7 @@ class EbuildIpcDaemon(FifoIpcDaemon):
 	def _input_handler(self, fd, event):
 		# Read the whole pickle in a single atomic read() call.
 		data = None
-		if event & PollConstants.POLLIN:
+		if event & self.scheduler.IO_IN:
 			# For maximum portability, use os.read() here since
 			# array.fromfile() and file.read() are both known to
 			# erroneously return an empty string from this
@@ -84,6 +85,30 @@ class EbuildIpcDaemon(FifoIpcDaemon):
 				if reply_hook is not None:
 					reply_hook()
 
+		elif event & self.scheduler.IO_HUP:
+			# This can be triggered due to a race condition which happens when
+			# the previous _reopen_input() call occurs before the writer has
+			# closed the pipe (see bug #401919). It's not safe to re-open
+			# without a lock here, since it's possible that another writer will
+			# write something to the pipe just before we close it, and in that
+			# case the write will be lost. Therefore, try for a non-blocking
+			# lock, and only re-open the pipe if the lock is acquired.
+			lock_filename = os.path.join(
+				os.path.dirname(self.input_fifo), '.ipc_lock')
+			try:
+				lock_obj = lockfile(lock_filename, unlinkfile=True,
+					flags=os.O_NONBLOCK)
+			except TryAgain:
+				# We'll try again when another IO_HUP event arrives.
+				pass
+			else:
+				try:
+					self._reopen_input()
+				finally:
+					unlockfile(lock_obj)
+
+		return True
+
 	def _send_reply(self, reply):
 		# File streams are in unbuffered mode since we do atomic
 		# read and write of whole pickles. Use non-blocking mode so

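The IO_HUP branch closes a race from bug #401919: the FIFO may only be re-opened while holding a lock, otherwise a writer could slip a message in just before the close and lose it. The same idea with plain fcntl locking instead of portage.locks, and a hypothetical lock file name:

    import fcntl

    def reopen_under_lock(lock_path, reopen):
        lock_file = open(lock_path, 'w')
        try:
            fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except (IOError, OSError):
            lock_file.close()
            return False  # lock busy; wait for the next IO_HUP event
        try:
            reopen()      # safe: no writer can race us now
        finally:
            fcntl.flock(lock_file, fcntl.LOCK_UN)
            lock_file.close()
        return True
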
diff --git a/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.pyo b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.pyo
new file mode 100644
index 0000000..7a9588f
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/EbuildMerge.py b/portage_with_autodep/pym/_emerge/EbuildMerge.py
index 9c35988..df0778c 100644
--- a/portage_with_autodep/pym/_emerge/EbuildMerge.py
+++ b/portage_with_autodep/pym/_emerge/EbuildMerge.py
@@ -7,7 +7,7 @@ from portage.dbapi._MergeProcess import MergeProcess
 
 class EbuildMerge(CompositeTask):
 
-	__slots__ = ("find_blockers", "logger", "ldpath_mtimes",
+	__slots__ = ("exit_hook", "find_blockers", "logger", "ldpath_mtimes",
 		"pkg", "pkg_count", "pkg_path", "pretend",
 		"settings", "tree", "world_atom")
 
@@ -35,6 +35,7 @@ class EbuildMerge(CompositeTask):
 
 	def _merge_exit(self, merge_task):
 		if self._final_exit(merge_task) != os.EX_OK:
+			self.exit_hook(self)
 			self.wait()
 			return
 
@@ -53,4 +54,5 @@ class EbuildMerge(CompositeTask):
 		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
 			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
 
+		self.exit_hook(self)
 		self.wait()

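EbuildMerge trades addExitListener for an explicit exit_hook slot invoked at both exit paths, which makes the callback timing deterministic for the caller. Schematically:

    class Merge(object):
        def __init__(self, exit_hook):
            self.exit_hook = exit_hook
            self.returncode = None

        def _merge_exit(self, ok):
            self.returncode = 0 if ok else 1
            # The hook runs on success and failure alike, before wait().
            self.exit_hook(self)

    m = Merge(lambda task: print("hook saw returncode %s" % task.returncode))
    m._merge_exit(True)   # hook saw returncode 0
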
diff --git a/portage_with_autodep/pym/_emerge/EbuildMerge.pyo b/portage_with_autodep/pym/_emerge/EbuildMerge.pyo
new file mode 100644
index 0000000..662c681
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/EbuildMerge.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py
index e53298b..c2d3747 100644
--- a/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py
+++ b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py
@@ -1,15 +1,19 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from _emerge.SubProcess import SubProcess
-from _emerge.PollConstants import PollConstants
 import sys
 from portage.cache.mappings import slot_dict_class
 import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.package.ebuild._eapi_invalid:eapi_invalid',
+)
 from portage import os
 from portage import _encodings
 from portage import _unicode_decode
 from portage import _unicode_encode
+
+import errno
 import fcntl
 import io
 
@@ -20,37 +24,44 @@ class EbuildMetadataPhase(SubProcess):
 	used to extract metadata from the ebuild.
 	"""
 
-	__slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
-		"ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
-		("_raw_metadata",)
+	__slots__ = ("cpv", "eapi_supported", "ebuild_hash", "fd_pipes",
+		"metadata", "portdb", "repo_path", "settings") + \
+		("_eapi", "_eapi_lineno", "_raw_metadata",)
 
 	_file_names = ("ebuild",)
 	_files_dict = slot_dict_class(_file_names, prefix="")
 	_metadata_fd = 9
 
 	def _start(self):
+		ebuild_path = self.ebuild_hash.location
+
+		with io.open(_unicode_encode(ebuild_path,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['repo.content'],
+			errors='replace') as f:
+			self._eapi, self._eapi_lineno = portage._parse_eapi_ebuild_head(f)
+
+		parsed_eapi = self._eapi
+		if parsed_eapi is None:
+			parsed_eapi = "0"
+
+		if not parsed_eapi:
+			# An empty EAPI setting is invalid.
+			self._eapi_invalid(None)
+			self._set_returncode((self.pid, 1 << 8))
+			self.wait()
+			return
+
+		self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
+		if not self.eapi_supported:
+			self.metadata = {"EAPI": parsed_eapi}
+			self._set_returncode((self.pid, os.EX_OK << 8))
+			self.wait()
+			return
+
 		settings = self.settings
 		settings.setcpv(self.cpv)
-		ebuild_path = self.ebuild_path
-
-		eapi = None
-		if eapi is None and \
-			'parse-eapi-ebuild-head' in settings.features:
-			eapi = portage._parse_eapi_ebuild_head(
-				io.open(_unicode_encode(ebuild_path,
-				encoding=_encodings['fs'], errors='strict'),
-				mode='r', encoding=_encodings['repo.content'],
-				errors='replace'))
-
-		if eapi is not None:
-			if not portage.eapi_is_supported(eapi):
-				self.metadata_callback(self.cpv, self.ebuild_path,
-					self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
-				self._set_returncode((self.pid, os.EX_OK << 8))
-				self.wait()
-				return
-
-			settings.configdict['pkg']['EAPI'] = eapi
+		settings.configdict['pkg']['EAPI'] = parsed_eapi
 
 		debug = settings.get("PORTAGE_DEBUG") == "1"
 		master_fd = None
@@ -61,7 +72,8 @@ class EbuildMetadataPhase(SubProcess):
 		else:
 			fd_pipes = {}
 
-		fd_pipes.setdefault(0, sys.stdin.fileno())
+		null_input = open('/dev/null', 'rb')
+		fd_pipes.setdefault(0, null_input.fileno())
 		fd_pipes.setdefault(1, sys.stdout.fileno())
 		fd_pipes.setdefault(2, sys.stderr.fileno())
 
@@ -72,7 +84,6 @@ class EbuildMetadataPhase(SubProcess):
 			if fd == sys.stderr.fileno():
 				sys.stderr.flush()
 
-		fd_pipes_orig = fd_pipes.copy()
 		self._files = self._files_dict()
 		files = self._files
 
@@ -83,17 +94,18 @@ class EbuildMetadataPhase(SubProcess):
 		fd_pipes[self._metadata_fd] = slave_fd
 
 		self._raw_metadata = []
-		files.ebuild = os.fdopen(master_fd, 'rb', 0)
-		self._reg_id = self.scheduler.register(files.ebuild.fileno(),
+		files.ebuild = master_fd
+		self._reg_id = self.scheduler.register(files.ebuild,
 			self._registered_events, self._output_handler)
 		self._registered = True
 
 		retval = portage.doebuild(ebuild_path, "depend",
-			settings["ROOT"], settings, debug,
+			settings=settings, debug=debug,
 			mydbapi=self.portdb, tree="porttree",
 			fd_pipes=fd_pipes, returnpid=True)
 
 		os.close(slave_fd)
+		null_input.close()
 
 		if isinstance(retval, int):
 			# doebuild failed before spawning
@@ -107,27 +119,81 @@ class EbuildMetadataPhase(SubProcess):
 
 	def _output_handler(self, fd, event):
 
-		if event & PollConstants.POLLIN:
-			self._raw_metadata.append(self._files.ebuild.read())
-			if not self._raw_metadata[-1]:
-				self._unregister()
-				self.wait()
+		if event & self.scheduler.IO_IN:
+			while True:
+				try:
+					self._raw_metadata.append(
+						os.read(self._files.ebuild, self._bufsize))
+				except OSError as e:
+					if e.errno not in (errno.EAGAIN,):
+						raise
+					break
+				else:
+					if not self._raw_metadata[-1]:
+						self._unregister()
+						self.wait()
+						break
 
 		self._unregister_if_appropriate(event)
 
+		return True
+
 	def _set_returncode(self, wait_retval):
 		SubProcess._set_returncode(self, wait_retval)
-		if self.returncode == os.EX_OK:
-			metadata_lines = ''.join(_unicode_decode(chunk,
-				encoding=_encodings['repo.content'], errors='replace')
-				for chunk in self._raw_metadata).splitlines()
+		# self._raw_metadata is None when _start returns
+		# early due to an unsupported EAPI detected with
+		# FEATURES=parse-eapi-ebuild-head
+		if self.returncode == os.EX_OK and \
+			self._raw_metadata is not None:
+			metadata_lines = _unicode_decode(b''.join(self._raw_metadata),
+				encoding=_encodings['repo.content'],
+				errors='replace').splitlines()
+			metadata_valid = True
 			if len(portage.auxdbkeys) != len(metadata_lines):
 				# Don't trust bash's returncode if the
 				# number of lines is incorrect.
-				self.returncode = 1
+				metadata_valid = False
 			else:
-				metadata = zip(portage.auxdbkeys, metadata_lines)
-				self.metadata = self.metadata_callback(self.cpv,
-					self.ebuild_path, self.repo_path, metadata,
-					self.ebuild_mtime)
+				metadata = dict(zip(portage.auxdbkeys, metadata_lines))
+				parsed_eapi = self._eapi
+				if parsed_eapi is None:
+					parsed_eapi = "0"
+				self.eapi_supported = \
+					portage.eapi_is_supported(metadata["EAPI"])
+				if (not metadata["EAPI"] or self.eapi_supported) and \
+					metadata["EAPI"] != parsed_eapi:
+					self._eapi_invalid(metadata)
+					if 'parse-eapi-ebuild-head' in self.settings.features:
+						metadata_valid = False
+
+			if metadata_valid:
+				# Since we're supposed to be able to efficiently obtain the
+				# EAPI from _parse_eapi_ebuild_head, we don't write cache
+				# entries for unsupported EAPIs.
+				if self.eapi_supported:
+
+					if metadata.get("INHERITED", False):
+						metadata["_eclasses_"] = \
+							self.portdb.repositories.get_repo_for_location(
+							self.repo_path).eclass_db.get_eclass_data(
+							metadata["INHERITED"].split())
+					else:
+						metadata["_eclasses_"] = {}
+					metadata.pop("INHERITED", None)
+
+					self.portdb._write_cache(self.cpv,
+						self.repo_path, metadata, self.ebuild_hash)
+				else:
+					metadata = {"EAPI": metadata["EAPI"]}
+				self.metadata = metadata
+			else:
+				self.returncode = 1
 
+	def _eapi_invalid(self, metadata):
+		repo_name = self.portdb.getRepositoryName(self.repo_path)
+		if metadata is not None:
+			eapi_var = metadata["EAPI"]
+		else:
+			eapi_var = None
+		eapi_invalid(self, self.cpv, repo_name, self.settings,
+			eapi_var, self._eapi, self._eapi_lineno)

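The phase now pre-parses EAPI from the head of the ebuild before spawning bash, so unsupported EAPIs are rejected cheaply and invalid ones reported with a line number. A rough, simplified stand-in for portage._parse_eapi_ebuild_head (the real parser is stricter about what counts as the head):

    import re

    _eapi_re = re.compile(r'^[ \t]*EAPI=([\'"]?)([A-Za-z0-9+_.-]*)\1')

    def parse_eapi_head(lines):
        # Scan past blank lines and comments; the first code line must
        # be the EAPI assignment for it to count.
        for lineno, line in enumerate(lines, 1):
            stripped = line.strip()
            if not stripped or stripped.startswith('#'):
                continue
            m = _eapi_re.match(line)
            if m:
                return m.group(2), lineno
            break
        return None, None

    print(parse_eapi_head(['# Copyright\n', '\n', 'EAPI="4"\n']))
    # ('4', 3)
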
diff --git a/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.pyo b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.pyo
new file mode 100644
index 0000000..fcc0874
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/EbuildPhase.py b/portage_with_autodep/pym/_emerge/EbuildPhase.py
index 82c165d..36ca8b0 100644
--- a/portage_with_autodep/pym/_emerge/EbuildPhase.py
+++ b/portage_with_autodep/pym/_emerge/EbuildPhase.py
@@ -33,12 +33,14 @@ class EbuildPhase(CompositeTask):
 		("_ebuild_lock",)
 
 	# FEATURES displayed prior to setup phase
-	_features_display = ("ccache", "depcheck", "depcheckstrict" "distcc", 
-		"distcc-pump", "fakeroot",
+	_features_display = (
+		"ccache", "compressdebug", "depcheck", "depcheckstrict", 
+		"distcc", "distcc-pump", "fakeroot",
 		"installsources", "keeptemp", "keepwork", "nostrip",
 		"preserve-libs", "sandbox", "selinux", "sesandbox",
 		"splitdebug", "suidctl", "test", "userpriv",
-		"usersandbox")
+		"usersandbox"
+	)
 
 	# Locked phases
 	_locked_phases = ("setup", "preinst", "postinst", "prerm", "postrm")
@@ -274,13 +276,15 @@ class EbuildPhase(CompositeTask):
 		temp_file = open(_unicode_encode(temp_log,
 			encoding=_encodings['fs'], errors='strict'), 'rb')
 
-		log_file = self._open_log(log_path)
+		log_file, log_file_real = self._open_log(log_path)
 
 		for line in temp_file:
 			log_file.write(line)
 
 		temp_file.close()
 		log_file.close()
+		if log_file_real is not log_file:
+			log_file_real.close()
 		os.unlink(temp_log)
 
 	def _open_log(self, log_path):
@@ -288,11 +292,12 @@ class EbuildPhase(CompositeTask):
 		f = open(_unicode_encode(log_path,
 			encoding=_encodings['fs'], errors='strict'),
 			mode='ab')
+		f_real = f
 
 		if log_path.endswith('.gz'):
 			f =  gzip.GzipFile(filename='', mode='ab', fileobj=f)
 
-		return f
+		return (f, f_real)
 
 	def _die_hooks(self):
 		self.returncode = None

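The _open_log change fixes a descriptor leak: gzip.GzipFile does not close the fileobj it wraps, so the function now returns both handles and the caller closes each. Reduced to its essentials, with a hypothetical log path:

    import gzip

    def open_log(log_path):
        f_real = open(log_path, 'ab')
        f = f_real
        if log_path.endswith('.gz'):
            # GzipFile.close() leaves f_real open, hence the pair.
            f = gzip.GzipFile(filename='', mode='ab', fileobj=f_real)
        return f, f_real

    f, f_real = open_log('/tmp/build.log.gz')
    f.write(b'log line\n')
    f.close()
    if f_real is not f:
        f_real.close()
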
diff --git a/portage_with_autodep/pym/_emerge/EbuildPhase.py.rej b/portage_with_autodep/pym/_emerge/EbuildPhase.py.rej
new file mode 100644
index 0000000..0f061da
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildPhase.py.rej
@@ -0,0 +1,12 @@
+--- pym/_emerge/EbuildPhase.py
++++ pym/_emerge/EbuildPhase.py
+@@ -33,7 +33,8 @@
+ 		("_ebuild_lock",)
+ 
+ 	# FEATURES displayed prior to setup phase
+-	_features_display = ("ccache", "distcc", "distcc-pump", "fakeroot",
++	_features_display = ("ccache", "depcheck", "depcheckstrict" "distcc", 
++		"distcc-pump", "fakeroot",
+ 		"installsources", "keeptemp", "keepwork", "nostrip",
+ 		"preserve-libs", "sandbox", "selinux", "sesandbox",
+ 		"splitdebug", "suidctl", "test", "userpriv",

diff --git a/portage_with_autodep/pym/_emerge/EbuildPhase.pyo b/portage_with_autodep/pym/_emerge/EbuildPhase.pyo
new file mode 100644
index 0000000..4c73313
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/EbuildPhase.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/EbuildProcess.pyo b/portage_with_autodep/pym/_emerge/EbuildProcess.pyo
new file mode 100644
index 0000000..52f6cdf
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/EbuildProcess.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.pyo b/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.pyo
new file mode 100644
index 0000000..1f3e925
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/EventsAnalyser.py b/portage_with_autodep/pym/_emerge/EventsAnalyser.py
index 7e416e7..65ece7b 100644
--- a/portage_with_autodep/pym/_emerge/EventsAnalyser.py
+++ b/portage_with_autodep/pym/_emerge/EventsAnalyser.py
@@ -18,51 +18,39 @@ class PortageUtils:
 		self.metadata_keys = [k for k in portage.auxdbkeys if not k.startswith("UNUSED_")]
 		self.use=self.settings["USE"]
 
-	def get_best_visible_pkg(self,pkg,db="portdb"):
+	def get_best_visible_pkg(self,pkg):
 		"""
 		Gets the best candidate for installing. Returns an empty string if none is found.
 
 		:param pkg: package name
-		:param db: name of db to look. Can be "vardb" or "portdb" 
 
 		"""
 		try:
-		  if db=="portdb":
-			  return self.portdbapi.xmatch("bestmatch-visible", pkg)
-		  elif db=="vardb":
-			  return self.vardbapi.match(pkg)[0]
-		  else:
-			  return ''
+			return self.portdbapi.xmatch("bestmatch-visible", pkg)
 		except:
 			return ''
 
 	# non-recursive dependency getter
-	def get_dep(self,pkg,dep_type=["RDEPEND","DEPEND"],db="portdb"):
+	def get_dep(self,pkg,dep_type=["RDEPEND","DEPEND"]):
 	  """
 	  Gets current dependencies of a package. Looks in portage db
 	  
 	  :param pkg: name of package
 	  :param dep_type: type of dependencies to recurse. Can be ["DEPEND"] or 
 		["RDEPEND", "DEPEND"]
-	  :param db: name of db to look. Can be "vardb" or "portdb"
 	  :returns: **set** of packages names
 	  """
 	  ret=set()
 	  
-	  pkg = self.get_best_visible_pkg(pkg,db)	
+	  pkg = self.get_best_visible_pkg(pkg)	
 	  if not pkg:
 		  return ret
 	  
 	  # we found the best visible match in common tree
 
-	  if db=="portdb":
-		  aux_get=self.portdbapi.aux_get
-	  elif db=="vardb":
-		  aux_get=self.vardbapi.aux_get
-	  else:
-		  return ret
 
-	  metadata = dict(zip(self.metadata_keys, aux_get(pkg, self.metadata_keys)))
+	  metadata = dict(zip(self.metadata_keys, 
+		  self.portdbapi.aux_get(pkg, self.metadata_keys)))
 	  dep_str = " ".join(metadata[k] for k in dep_type)
 
 	  # the IUSE defaults are very important for us
@@ -94,7 +82,7 @@ class PortageUtils:
 	  return ret
 
 	# recursive dependency getter
-	def get_deps(self,pkg,dep_type=["RDEPEND","DEPEND"],db="portdb"):
+	def get_deps(self,pkg,dep_type=["RDEPEND","DEPEND"]):
 		""" 
 		Gets current dependencies of a package on any depth 
 		All dependencies **must** be installed
@@ -102,20 +90,19 @@ class PortageUtils:
 		:param pkg: name of package
 		:param dep_type: type of dependencies to recurse. Can be ["DEPEND"] or 
 		  ["RDEPEND", "DEPEND"]
-		:param db: name of db to look. Can be "vardb" or "portdb" 
 		:returns: **set** of packages names
 		"""
 		ret=set()
 
-		#import pdb; pdb.set_trace()
+
 		# get porttree dependencies on the first package
 
-		pkg = self.get_best_visible_pkg(pkg,db)	
+		pkg = self.portdbapi.xmatch("bestmatch-visible", pkg)	
 		if not pkg:
 			return ret
 
 		known_packages=set()
-		unknown_packages=self.get_dep(pkg,dep_type,db)
+		unknown_packages=self.get_dep(pkg,dep_type)
 		ret=ret.union(unknown_packages)
 		
 		while unknown_packages:
@@ -124,40 +111,36 @@ class PortageUtils:
 				continue
 			known_packages.add(p)
 
-			current_deps=self.get_dep(p,dep_type,'vardb')
-			unknown_packages=unknown_packages.union(current_deps)
-			ret=ret.union(current_deps)
-			
-			#metadata = dict(zip(self.metadata_keys, self.vardbapi.aux_get(p, self.metadata_keys)))
+			metadata = dict(zip(self.metadata_keys, self.vardbapi.aux_get(p, self.metadata_keys)))
 
-			#dep_str = " ".join(metadata[k] for k in dep_type)
+			dep_str = " ".join(metadata[k] for k in dep_type)
 			
 			# the IUSE defaults are very important for us
-			#iuse_defaults=[
-			#  u[1:] for u in metadata.get("IUSE",'').split() if u.startswith("+")]
+			iuse_defaults=[
+			  u[1:] for u in metadata.get("IUSE",'').split() if u.startswith("+")]
 	  
-			#use=self.use.split()
+			use=self.use.split()
 	  
-			#for u in iuse_defaults:
-			#	if u not in use:
-			#		use.append(u)
+			for u in iuse_defaults:
+				if u not in use:
+					use.append(u)
 			
-			#success, atoms = portage.dep_check(dep_str, None, self.settings, 
-			#	myuse=use,	myroot=self.settings["ROOT"],
-			#	trees={self.settings["ROOT"]:{"vartree":self.vartree,"porttree": self.vartree}})
-
-			#if not success:
-			#  continue
-
-			#for atom in atoms:
-			#	atomname = self.vartree.dep_bestmatch(atom)
-			#	if not atomname:
-			#		continue
-			#					
-			#	for unvirt_pkg in expand_new_virt(self.vardbapi,'='+atomname):
-			#		for pkg in self.vartree.dep_match(unvirt_pkg):
-			#			ret.add(pkg)
-			#			unknown_packages.add(pkg)
+			success, atoms = portage.dep_check(dep_str, None, self.settings, 
+				myuse=use,	myroot=self.settings["ROOT"],
+				trees={self.settings["ROOT"]:{"vartree":self.vartree,"porttree": self.vartree}})
+
+			if not success:
+			  continue
+
+			for atom in atoms:
+				atomname = self.vartree.dep_bestmatch(atom)
+				if not atomname:
+					continue
+								
+				for unvirt_pkg in expand_new_virt(self.vardbapi,'='+atomname):
+					for pkg in self.vartree.dep_match(unvirt_pkg):
+						ret.add(pkg)
+						unknown_packages.add(pkg)
 		return ret
 
 	def get_deps_for_package_building(self, pkg):
@@ -165,13 +148,12 @@ class PortageUtils:
 		  returns buildtime dependencies of current package and 
 		  all runtime dependencies of that buildtime dependencies
 		"""
-		buildtime_deps=self.get_dep(pkg, ["DEPEND"],"portdb")
+		buildtime_deps=self.get_dep(pkg, ["DEPEND"])
 		runtime_deps=set()
 		for dep in buildtime_deps:
-			runtime_deps|=self.get_deps(dep,["RDEPEND","PDEPEND"],"vardb")
+			runtime_deps=runtime_deps.union(self.get_deps(dep,["RDEPEND"]))
 
-		ret = buildtime_deps | runtime_deps
-		
+		ret=buildtime_deps.union(runtime_deps)
 		return ret
 
 	def get_system_packages_list(self):
@@ -187,19 +169,7 @@ class PortageUtils:
 					for pkg in self.vartree.dep_match(unvirt_pkg):
 						ret.append(pkg)
 		return ret
-	
-	def get_system_packages_rdeps(self):
-		"""
-		returns runtime dependencies of packages from system set
-		
-		:returns: **list** of package names
-		"""
-		ret=set()
-		
-		for pkg in self.get_system_packages_list():
-			ret=ret.union(self.get_deps(pkg,["RDEPEND"]))
-		return list(ret)
-		
+
 
 class GentoolkitUtils:
 	""" 
@@ -207,7 +177,7 @@ class GentoolkitUtils:
 	internals.
 	"""
 
-	def getpackagesbyfiles(self,files):
+	def getpackagesbyfiles(files):
 		"""
 		:param files: list of filenames
 		:returns: **dictionary** file->package, if file doesn't belong to any
@@ -226,30 +196,17 @@ class GentoolkitUtils:
 				stdin=subprocess.PIPE, stdout=subprocess.PIPE,stderr=subprocess.PIPE, 
 				bufsize=4096)
 						
-			out,err=proc.communicate(b"\n".join(listtocheck))
+			out,err=proc.communicate("\n".join(listtocheck).encode("utf8"))
 			
-			lines=out.split(b"\n")
+			lines=out.decode("utf8").split("\n")
 			#print lines
 			line_re=re.compile(r"^([^ ]+)\s+\(([^)]+)\)$")
 			for line in lines:
-				try:
-				  line=line.decode("utf-8")
-				except UnicodeDecodeError:
-				  portage.util.writemsg("Util qfile returned non-utf8 string: %s\n" % line)
-				
-				#import pdb; pdb.set_trace()
-				
 				if len(line)==0:
 					continue
 				match=line_re.match(line)
 				if match:
-					try:
-						ret[match.group(2).encode("utf-8")]=match.group(1)
-					except UnicodeEncodeError:
-					  portage.util.writemsg(
-						"Util qfile failed to encode string %s to unicode\n" % 
-						match.group(2))
-						
+					ret[match.group(2)]=match.group(1)
 				else:
 					portage.util.writemsg("Util qfile returned unparsable string: %s\n" % line)
 
@@ -259,7 +216,7 @@ class GentoolkitUtils:
 		  
 		return ret
 	  
-	def getfilesbypackages(self,packagenames):
+	def getfilesbypackages(packagenames):
 		"""
 		
 		:param packagename: name of package
@@ -273,7 +230,7 @@ class GentoolkitUtils:
 			
 			out,err=proc.communicate()
 			
-			ret=out.split(b"\n")
+			ret=out.decode("utf8").split("\n")
 			if ret==['']:
 				ret=[]
 		except OSError as e:
@@ -281,7 +238,7 @@ class GentoolkitUtils:
 
 		return ret
 	  
-	def get_all_packages_files(self):   
+	def get_all_packages_files():   
 		"""
 		Memory-hungry operation
 		
@@ -295,7 +252,7 @@ class GentoolkitUtils:
 			  
 			out,err=proc.communicate()
 			
-			ret=out.split(b"\n")
+			ret=out.decode("utf8").split("\n")
 		except OSError as e:
 			portage.util.writemsg("Error while launching qfile: %s\n" % e)
 
@@ -306,28 +263,25 @@ class FilterProcGenerator:
 		portageutils=PortageUtils(settings=settings)
 
 		deps_all=portageutils.get_deps_for_package_building(pkgname)
-		deps_portage=portageutils.get_dep('sys-apps/portage',["RDEPEND"])
+		deps_portage=portageutils.get_dep('portage',["RDEPEND"])
 		
 		system_packages=portageutils.get_system_packages_list()
-		system_deps=portageutils.get_system_packages_rdeps()
 
-		allfiles=GentoolkitUtils().get_all_packages_files()
+		allfiles=GentoolkitUtils.get_all_packages_files()
 		portage.util.writemsg("All files list recieved, waiting for " \
 			"a list of allowed files\n")
 
 
-		allowedpkgs=system_packages+system_deps
-		allowedpkgs+=list(deps_portage)+list(deps_all)	
-		allowedpkgs+=["app-portage/autodep"]
+		allowedpkgs=system_packages+list(deps_portage)+list(deps_all)	
 		
-		allowedfiles=GentoolkitUtils().getfilesbypackages(allowedpkgs)
+		allowedfiles=GentoolkitUtils.getfilesbypackages(allowedpkgs)
 		#for pkg in allowedpkgs:
 		#	allowedfiles+=GentoolkitUtils.getfilesbypackage(pkg)
 
 		#import pdb; pdb.set_trace()
 
 		# manually add all python interpreters to this list
-		allowedfiles+=GentoolkitUtils().getfilesbypackages(['python'])
+		allowedfiles+=GentoolkitUtils.getfilesbypackages(['python'])
 		allowedfiles=set(allowedfiles)
 		
 		deniedfiles=allfiles-allowedfiles
@@ -350,10 +304,9 @@ class EventsAnalyser:
 
 		self.deps_all=self.portageutils.get_deps_for_package_building(pkgname)
 		self.deps_direct=self.portageutils.get_dep(pkgname,["DEPEND"])
-		self.deps_portage=self.portageutils.get_dep('sys-apps/portage',["RDEPEND"])
-
+		self.deps_portage=self.portageutils.get_dep('portage',["RDEPEND"])
+		
 		self.system_packages=self.portageutils.get_system_packages_list()
-		self.system_deps=self.portageutils.get_system_packages_rdeps()
 		# All analyse work is here
 		
 		# get unique filenames
@@ -365,7 +318,7 @@ class EventsAnalyser:
 			filenames=filenames.union(fail_events)
 		filenames=list(filenames)
 
-		file_to_package=GentoolkitUtils().getpackagesbyfiles(filenames)
+		file_to_package=GentoolkitUtils.getpackagesbyfiles(filenames)
 		# This part is hard to read: it converts one complex struct
 		# (returned by getfsevents) into another complex struct that is
 		# better suited for generating output.
@@ -420,8 +373,7 @@ class EventsAnalyser:
 		stagesorder={"clean":1,"setup":2,"unpack":3,"prepare":4,"configure":5,"compile":6,"test":7,
 			 "install":8,"preinst":9,"postinst":10,"prerm":11,"postrm":12,"unknown":13}
 		packagesinfo=self.packagesinfo
-		# print information grouped by package
-		#print(packagesinfo.keys())
+		# print information grouped by package	  
 		for package in sorted(packagesinfo):
 			# not showing special directory package
 			if package=="directory":
@@ -429,14 +381,12 @@ class EventsAnalyser:
 			
 			if package=="unknown":
 				continue
-			
+
 
 			is_pkg_in_dep=package in self.deps_all
 			is_pkg_in_portage_dep=package in self.deps_portage
 			is_pkg_in_system=package in self.system_packages
-			is_pkg_in_system_dep=package in self.system_deps
 			is_pkg_python="dev-lang/python" in package
-			is_pkg_self="app-portage/autodep" in package
 
 			stages=[]
 			for stage in sorted(packagesinfo[package].keys(), key=stagesorder.get):
@@ -475,10 +425,6 @@ class EventsAnalyser:
 				portage.util.writemsg("[SYSTEM]")
 			elif is_pkg_in_portage_dep:
 				portage.util.writemsg("[PORTAGE DEP]")
-			elif is_pkg_in_system_dep:
-				portage.util.writemsg("[SYSTEM DEP]")
-			elif is_pkg_self:
-				portage.util.writemsg("[AUTODEP]")
 			elif is_pkg_python:
 				portage.util.writemsg("[INTERPRETER]")
 			elif not self.is_package_useful(package,stages,filenames.keys()):
@@ -505,12 +451,7 @@ class EventsAnalyser:
 			
 			for filename in filenames:
 				event_info=tuple(filenames[filename])
-				try:
-					portage.util.writemsg(
-						"  %-56s %-21s\n" % (filename.decode('utf-8'),action[event_info]))
-				except UnicodeDecodeError:
-					portage.util.writemsg(
-						"  %-56s %-21s\n" % ('NON-UTF8-FILENAME',action[event_info]))
+				portage.util.writemsg("  %-56s %-21s\n" % (filename,action[event_info]))
 				filescounter+=1
 				if filescounter>10:
 					portage.util.writemsg("  ... and %d more ...\n" % (len(filenames)-10))
@@ -529,7 +470,7 @@ class EventsAnalyser:
 		""" some basic heuristics here to cut part of packages """
 
 		excluded_paths=set(
-			[b'/etc/sandbox.d/']
+			['/etc/sandbox.d/']
 		)
 
 		excluded_packages=set(
@@ -560,9 +501,8 @@ class EventsAnalyser:
 			  continue
 			
 			# test 1: package is not useful if all files are *.desktop or *.xml or *.m4		
-			if not (f.endswith(b".desktop") or f.endswith(b".xml") or 
-				f.endswith(b".m4") or f.endswith(b".pc")):
-				break
+			if not (f.endswith(".desktop") or f.endswith(".xml") or f.endswith(".m4") or f.endswith(".pc")):
+				break
 		else:
 			return False # we get here if cycle ends not with break
 		  

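The is_package_useful() hunk above leans on Python's for/else idiom: the else clause runs only when the loop finishes without hitting break. A minimal, self-contained sketch of the same test (the helper name is illustrative, not part of the patch):

    # for/else: the else branch runs only if the loop never hit `break`.
    def only_doc_files(filenames):
        for name in filenames:
            if not name.endswith((".desktop", ".xml", ".m4", ".pc")):
                break  # a "useful" file was found
        else:
            return True  # every file matched one of the suffixes
        return False

    print(only_doc_files(["a.xml", "b.m4"]))  # True
    print(only_doc_files(["a.xml", "b.so"]))  # False
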
diff --git a/portage_with_autodep/pym/_emerge/EventsLogger.py b/portage_with_autodep/pym/_emerge/EventsLogger.py
index 1ade9fd..68b3c67 100644
--- a/portage_with_autodep/pym/_emerge/EventsLogger.py
+++ b/portage_with_autodep/pym/_emerge/EventsLogger.py
@@ -100,69 +100,62 @@ class EventsLogger(threading.Thread):
 							continue
 					
 						#import pdb; pdb.set_trace()
-						#try:
-						message=record.split(b"\0")
-						#except UnicodeDecodeError:
-  						#	print("Bad message %s" % record)
-  						#	continue
+						try:
+							message=record.decode("utf8").split("\0")
+						except UnicodeDecodeError:
+							print("Bad message %s" % record)
+							continue
 
 						#  continue
 
 						#print(message)
 					
 						try:
-							eventname,filename,stage,result=message[1:5]
-							eventname=eventname.decode("utf-8")
-							stage=stage.decode("utf-8")
-							result=result.decode("utf-8")
-						except IndexError:
-							print("IndexError while parsing %s" % record)
-						except ValueError:
-							print("ValueError while parsing %s" % record)
-						except UnicodeDecodeError:
-							print("UnicodeDecodeError while parsing %s" % record)  
-							
-						if result=="ASKING":
-							if self.filter_proc(eventname,filename,stage):
-								s.sendall(b"ALLOW\0")
+							if message[4]=="ASKING":
+								if self.filter_proc(message[1],message[2],message[3]):
+									s.sendall(b"ALLOW\0")
+								else:
+									# TODO: log through portage infrastructure
+									#print("Blocking an access to %s" % message[2])
+									s.sendall(b"DENY\0")
 							else:
-								# TODO: log through portage infrastructure
-								#print("Blocking an access to %s" % message[2])
-								s.sendall(b"DENY\0")
-						else:
-							if not stage in self.events:
-								self.events[stage]=[{},{}]
-					
-							hashofsucesses=self.events[stage][0]
-							hashoffailures=self.events[stage][1]
-
-							if result=="DENIED":
-								print("Blocking an access to %s" % filename)
-
-							if result=="OK":
-								if not filename in hashofsucesses:
-									hashofsucesses[filename]=[False,False]
-					  
-								readed_or_writed=hashofsucesses[filename]
-					  
-								if eventname=="read":
-									readed_or_writed[0]=True
-								elif eventname=="write":
-									readed_or_writed[1]=True
-						
-							elif result[0:3]=="ERR" or result=="DENIED":
-								if not filename in hashoffailures:
-									hashoffailures[filename]=[False,False]
-								notfound_or_blocked=hashoffailures[filename]
-							  
-								if result=="ERR/2":
-									notfound_or_blocked[0]=True
-								elif result=="DENIED":
-									notfound_or_blocked[1]=True
+								eventname,filename,stage,result=message[1:5]
 
-							else:
-								print("Error in logger module<->analyser protocol")
+								if not stage in self.events:
+									self.events[stage]=[{},{}]
 						
+								hashofsucesses=self.events[stage][0]
+								hashoffailures=self.events[stage][1]
+
+								if result=="DENIED":
+									print("Blocking an access to %s" % filename)
+
+								if result=="OK":
+									if not filename in hashofsucesses:
+										hashofsucesses[filename]=[False,False]
+						  
+									readed_or_writed=hashofsucesses[filename]
+						  
+									if eventname=="read":
+										readed_or_writed[0]=True
+									elif eventname=="write":
+										readed_or_writed[1]=True
+							
+								elif result[0:3]=="ERR" or result=="DENIED":
+									if not filename in hashoffailures:
+										hashoffailures[filename]=[False,False]
+									notfound_or_blocked=hashoffailures[filename]
+								  
+									if result=="ERR/2":
+										notfound_or_blocked[0]=True
+									elif result=="DENIED":
+										notfound_or_blocked[1]=True
+
+								else:
+									print("Error in logger module<->analyser protocol")
+						
+						except IndexError:
+								print("IndexError while parsing %s" % record)
 			except IOError as e:
 				if e.errno!=4: # handling "Interrupted system call" errors
 					raise
@@ -184,5 +177,4 @@ class EventsLogger(threading.Thread):
 		
 		# We assume portage clears tmp folder, so no deleting a socket file
 		# We assume that no new socket data will arrive after this moment
-		#print(self.events)
 		return self.events

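The records handled above are NUL-separated fields, now decoded as UTF-8 up front instead of field by field. A rough sketch of the decode-then-unpack step, assuming the field layout seen in the hunk (a leading sequence field, then event, filename, stage, result):

    def parse_record(record):
        """Sketch only: decode a raw record and pull out its fields."""
        try:
            fields = record.decode("utf8").split("\0")
            eventname, filename, stage, result = fields[1:5]
        except (UnicodeDecodeError, ValueError):
            return None  # malformed record; the caller skips it
        return eventname, filename, stage, result

    print(parse_record(b"7\0read\0/etc/hosts\0compile\0OK\0"))
    # ('read', '/etc/hosts', 'compile', 'OK')
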
diff --git a/portage_with_autodep/pym/_emerge/FakeVartree.py b/portage_with_autodep/pym/_emerge/FakeVartree.py
index a11966f..d4dbe97 100644
--- a/portage_with_autodep/pym/_emerge/FakeVartree.py
+++ b/portage_with_autodep/pym/_emerge/FakeVartree.py
@@ -2,6 +2,7 @@
 # Distributed under the terms of the GNU General Public License v2
 
 import sys
+import warnings
 
 import portage
 from portage import os
@@ -37,8 +38,10 @@ class FakeVartree(vartree):
 	global updates are necessary (updates are performed when necessary if there
 	is not a matching ebuild in the tree). Instances of this class are not
 	populated until the sync() method is called."""
-	def __init__(self, root_config, pkg_cache=None, pkg_root_config=None):
+	def __init__(self, root_config, pkg_cache=None, pkg_root_config=None,
+		dynamic_deps=True):
 		self._root_config = root_config
+		self._dynamic_deps = dynamic_deps
 		if pkg_root_config is None:
 			pkg_root_config = self._root_config
 		self._pkg_root_config = pkg_root_config
@@ -47,7 +50,6 @@ class FakeVartree(vartree):
 		real_vartree = root_config.trees["vartree"]
 		self._real_vardb = real_vartree.dbapi
 		portdb = root_config.trees["porttree"].dbapi
-		self.root = real_vartree.root
 		self.settings = real_vartree.settings
 		mykeys = list(real_vartree.dbapi._aux_cache_keys)
 		if "_mtime_" not in mykeys:
@@ -55,19 +57,30 @@ class FakeVartree(vartree):
 		self._db_keys = mykeys
 		self._pkg_cache = pkg_cache
 		self.dbapi = FakeVardbapi(real_vartree.settings)
+		self.dbapi._aux_cache_keys = set(self._db_keys)
 
 		# Initialize variables needed for lazy cache pulls of the live ebuild
 		# metadata.  This ensures that the vardb lock is released ASAP, without
 		# being delayed in case cache generation is triggered.
 		self._aux_get = self.dbapi.aux_get
-		self.dbapi.aux_get = self._aux_get_wrapper
 		self._match = self.dbapi.match
-		self.dbapi.match = self._match_wrapper
+		if dynamic_deps:
+			self.dbapi.aux_get = self._aux_get_wrapper
+			self.dbapi.match = self._match_wrapper
 		self._aux_get_history = set()
 		self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
 		self._portdb = portdb
 		self._global_updates = None
 
+	@property
+	def root(self):
+		warnings.warn("The root attribute of "
+			"_emerge.FakeVartree.FakeVartree"
+			" is deprecated. Use "
+			"settings['ROOT'] instead.",
+			DeprecationWarning, stacklevel=3)
+		return self.settings['ROOT']
+
 	def _match_wrapper(self, cpv, use_cache=1):
 		"""
 		Make sure the metadata in Package instances gets updated for any
@@ -147,15 +160,14 @@ class FakeVartree(vartree):
 			self.dbapi.aux_get = self._aux_get
 			self.settings._populate_treeVirtuals_if_needed(self)
 		finally:
-			self.dbapi.aux_get = self._aux_get_wrapper
+			if self._dynamic_deps:
+				self.dbapi.aux_get = self._aux_get_wrapper
 
 	def _sync(self):
 
 		real_vardb = self._root_config.trees["vartree"].dbapi
 		current_cpv_set = frozenset(real_vardb.cpv_all())
 		pkg_vardb = self.dbapi
-		pkg_cache = self._pkg_cache
-		aux_get_history = self._aux_get_history
 
 		# Remove any packages that have been uninstalled.
 		for pkg in list(pkg_vardb):

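The new root property in FakeVartree follows the standard deprecation pattern: keep the old attribute working, but emit a DeprecationWarning that points at the replacement. A stdlib-only sketch of that pattern:

    import warnings

    class Example(object):
        def __init__(self, settings):
            self.settings = settings

        @property
        def root(self):
            warnings.warn("root is deprecated; use settings['ROOT'] instead",
                DeprecationWarning, stacklevel=2)
            return self.settings['ROOT']

    e = Example({'ROOT': '/'})
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        print(e.root)                        # '/'
        print(caught[0].category.__name__)   # DeprecationWarning
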
diff --git a/portage_with_autodep/pym/_emerge/FakeVartree.pyo b/portage_with_autodep/pym/_emerge/FakeVartree.pyo
new file mode 100644
index 0000000..8707391
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/FakeVartree.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py
index a716dac..fcc4ab4 100644
--- a/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py
+++ b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from portage import os
@@ -15,14 +15,14 @@ class FifoIpcDaemon(AbstractPollTask):
 
 	def _start(self):
 		self._files = self._files_dict()
-		input_fd = os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
 
 		# File streams are in unbuffered mode since we do atomic
 		# read and write of whole pickles.
-		self._files.pipe_in = os.fdopen(input_fd, 'rb', 0)
+		self._files.pipe_in = \
+			os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
 
 		self._reg_id = self.scheduler.register(
-			self._files.pipe_in.fileno(),
+			self._files.pipe_in,
 			self._registered_events, self._input_handler)
 
 		self._registered = True
@@ -32,12 +32,12 @@ class FifoIpcDaemon(AbstractPollTask):
 		Re-open the input stream, in order to suppress
 		POLLHUP events (bug #339976).
 		"""
-		self._files.pipe_in.close()
-		input_fd = os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
-		self._files.pipe_in = os.fdopen(input_fd, 'rb', 0)
 		self.scheduler.unregister(self._reg_id)
+		os.close(self._files.pipe_in)
+		self._files.pipe_in = \
+			os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
 		self._reg_id = self.scheduler.register(
-			self._files.pipe_in.fileno(),
+			self._files.pipe_in,
 			self._registered_events, self._input_handler)
 
 	def isAlive(self):
@@ -51,14 +51,9 @@ class FifoIpcDaemon(AbstractPollTask):
 	def _wait(self):
 		if self.returncode is not None:
 			return self.returncode
-
-		if self._registered:
-			self.scheduler.schedule(self._reg_id)
-			self._unregister()
-
+		self._wait_loop()
 		if self.returncode is None:
 			self.returncode = os.EX_OK
-
 		return self.returncode
 
 	def _input_handler(self, fd, event):
@@ -77,5 +72,5 @@ class FifoIpcDaemon(AbstractPollTask):
 
 		if self._files is not None:
 			for f in self._files.values():
-				f.close()
+				os.close(f)
 			self._files = None

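The FifoIpcDaemon change above drops buffered file objects in favor of raw descriptors from os.open(), which are then handed straight to the scheduler and closed with os.close(). A stdlib sketch of the same idea (Linux-only; the path is illustrative):

    import os, select, tempfile

    path = os.path.join(tempfile.mkdtemp(), "ipc_fifo")
    os.mkfifo(path)
    # O_NONBLOCK lets a reader open the FIFO before any writer exists.
    fd = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
    poller = select.poll()
    poller.register(fd, select.POLLIN)
    for ready_fd, event in poller.poll(0):  # 0 ms timeout: do not block
        if event & select.POLLIN:
            print(os.read(ready_fd, 4096))
    poller.unregister(fd)
    os.close(fd)
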
diff --git a/portage_with_autodep/pym/_emerge/FifoIpcDaemon.pyo b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.pyo
new file mode 100644
index 0000000..6d7c4f9
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/JobStatusDisplay.py b/portage_with_autodep/pym/_emerge/JobStatusDisplay.py
index 1949232..5b9b221 100644
--- a/portage_with_autodep/pym/_emerge/JobStatusDisplay.py
+++ b/portage_with_autodep/pym/_emerge/JobStatusDisplay.py
@@ -97,7 +97,7 @@ class JobStatusDisplay(object):
 		"""
 		Initialize term control codes.
 		@rtype: bool
-		@returns: True if term codes were successfully initialized,
+		@return: True if term codes were successfully initialized,
 			False otherwise.
 		"""
 
@@ -209,24 +209,26 @@ class JobStatusDisplay(object):
 	def display(self):
 		"""
 		Display status on stdout, but only if something has
-		changed since the last call.
+		changed since the last call. This always returns True,
+		for continuous scheduling via timeout_add.
 		"""
 
 		if self.quiet:
-			return
+			return True
 
 		current_time = time.time()
 		time_delta = current_time - self._last_display_time
 		if self._displayed and \
 			not self._changed:
 			if not self._isatty:
-				return
+				return True
 			if time_delta < self._min_display_latency:
-				return
+				return True
 
 		self._last_display_time = current_time
 		self._changed = False
 		self._display_status()
+		return True
 
 	def _display_status(self):
 		# Don't use len(self._completed_tasks) here since that also
@@ -289,4 +291,11 @@ class JobStatusDisplay(object):
 			self._update(color_output.getvalue())
 
 		if self.xterm_titles:
-			xtermTitle(" ".join(plain_output.split()))
+			# If the HOSTNAME variable is exported, include it
+			# in the xterm title, just like emergelog() does.
+			# See bug #390699.
+			title_str = " ".join(plain_output.split())
+			hostname = os.environ.get("HOSTNAME")
+			if hostname is not None:
+				title_str = "%s: %s" % (hostname, title_str)
+			xtermTitle(title_str)

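The HOSTNAME handling above composes the title string before handing it to xtermTitle(). What such a helper typically boils down to, as a sketch (the escape sequence is the standard xterm OSC 0 one; the helper here is illustrative, not portage's implementation):

    import os, sys

    def set_xterm_title(title):
        if os.environ.get("TERM", "").startswith("xterm"):
            sys.stderr.write("\x1b]0;%s\x07" % title)  # OSC 0: icon + title
            sys.stderr.flush()

    title_str = "merging app-misc/foo"
    hostname = os.environ.get("HOSTNAME")
    if hostname is not None:
        title_str = "%s: %s" % (hostname, title_str)
    set_xterm_title(title_str)
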
diff --git a/portage_with_autodep/pym/_emerge/JobStatusDisplay.pyo b/portage_with_autodep/pym/_emerge/JobStatusDisplay.pyo
new file mode 100644
index 0000000..f79b2c2
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/JobStatusDisplay.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/MergeListItem.py b/portage_with_autodep/pym/_emerge/MergeListItem.py
index 2176bf6..8086c68 100644
--- a/portage_with_autodep/pym/_emerge/MergeListItem.py
+++ b/portage_with_autodep/pym/_emerge/MergeListItem.py
@@ -68,7 +68,7 @@ class MergeListItem(CompositeTask):
 					pkg_repo_name = "unknown repo"
 				msg += " from %s" % pkg_repo_name
 
-		if pkg.root != "/":
+		if pkg.root_config.settings["ROOT"] != "/":
 			msg += " %s %s" % (preposition, pkg.root)
 
 		if not build_opts.pretend:

diff --git a/portage_with_autodep/pym/_emerge/MergeListItem.pyo b/portage_with_autodep/pym/_emerge/MergeListItem.pyo
new file mode 100644
index 0000000..168a227
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/MergeListItem.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/MetadataRegen.py b/portage_with_autodep/pym/_emerge/MetadataRegen.py
index 8103175..e82015f 100644
--- a/portage_with_autodep/pym/_emerge/MetadataRegen.py
+++ b/portage_with_autodep/pym/_emerge/MetadataRegen.py
@@ -1,8 +1,9 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import portage
 from portage import os
+from portage.dep import _repo_separator
 from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
 from _emerge.PollScheduler import PollScheduler
 
@@ -10,7 +11,7 @@ class MetadataRegen(PollScheduler):
 
 	def __init__(self, portdb, cp_iter=None, consumer=None,
 		max_jobs=None, max_load=None):
-		PollScheduler.__init__(self)
+		PollScheduler.__init__(self, main=True)
 		self._portdb = portdb
 		self._global_cleanse = False
 		if cp_iter is None:
@@ -33,10 +34,11 @@ class MetadataRegen(PollScheduler):
 		self.returncode = os.EX_OK
 		self._error_count = 0
 		self._running_tasks = set()
+		self._remaining_tasks = True
 
 	def _terminate_tasks(self):
-		while self._running_tasks:
-			self._running_tasks.pop().cancel()
+		for task in list(self._running_tasks):
+			task.cancel()
 
 	def _iter_every_cp(self):
 		portage.writemsg_stdout("Listing available packages...\n")
@@ -60,27 +62,32 @@ class MetadataRegen(PollScheduler):
 				break
 			cp_set.add(cp)
 			portage.writemsg_stdout("Processing %s\n" % cp)
-			cpv_list = portdb.cp_list(cp)
-			for cpv in cpv_list:
-				if self._terminated_tasks:
-					break
-				valid_pkgs.add(cpv)
-				ebuild_path, repo_path = portdb.findname2(cpv)
-				if ebuild_path is None:
-					raise AssertionError("ebuild not found for '%s'" % cpv)
-				metadata, st, emtime = portdb._pull_valid_cache(
-					cpv, ebuild_path, repo_path)
-				if metadata is not None:
-					if consumer is not None:
-						consumer(cpv, ebuild_path,
-							repo_path, metadata)
-					continue
-
-				yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
-					ebuild_mtime=emtime,
-					metadata_callback=portdb._metadata_callback,
-					portdb=portdb, repo_path=repo_path,
-					settings=portdb.doebuild_settings)
+			# We iterate over portdb.porttrees, since it's common to
+			# tweak this attribute in order to adjust repo selection.
+			for mytree in portdb.porttrees:
+				repo = portdb.repositories.get_repo_for_location(mytree)
+				cpv_list = portdb.cp_list(cp, mytree=[repo.location])
+				for cpv in cpv_list:
+					if self._terminated_tasks:
+						break
+					valid_pkgs.add(cpv)
+					ebuild_path, repo_path = portdb.findname2(cpv, myrepo=repo.name)
+					if ebuild_path is None:
+						raise AssertionError("ebuild not found for '%s%s%s'" % (cpv, _repo_separator, repo.name))
+					metadata, ebuild_hash = portdb._pull_valid_cache(
+						cpv, ebuild_path, repo_path)
+					if metadata is not None:
+						if consumer is not None:
+							consumer(cpv, repo_path, metadata, ebuild_hash, True)
+						continue
+
+					yield EbuildMetadataPhase(cpv=cpv,
+						ebuild_hash=ebuild_hash,
+						portdb=portdb, repo_path=repo_path,
+						settings=portdb.doebuild_settings)
+
+	def _keep_scheduling(self):
+		return self._remaining_tasks and not self._terminated_tasks
 
 	def run(self):
 
@@ -88,11 +95,7 @@ class MetadataRegen(PollScheduler):
 		from portage.cache.cache_errors import CacheError
 		dead_nodes = {}
 
-		while self._schedule():
-			self._poll_loop()
-
-		while self._jobs:
-			self._poll_loop()
+		self._main_loop()
 
 		if self._terminated_tasks:
 			self.returncode = 1
@@ -140,26 +143,21 @@ class MetadataRegen(PollScheduler):
 						pass
 
 	def _schedule_tasks(self):
-		"""
-		@rtype: bool
-		@returns: True if there may be remaining tasks to schedule,
-			False otherwise.
-		"""
 		if self._terminated_tasks:
-			return False
+			return
 
 		while self._can_add_job():
 			try:
 				metadata_process = next(self._process_iter)
 			except StopIteration:
-				return False
+				self._remaining_tasks = False
+				return
 
 			self._jobs += 1
 			self._running_tasks.add(metadata_process)
 			metadata_process.scheduler = self.sched_iface
 			metadata_process.addExitListener(self._metadata_exit)
 			metadata_process.start()
-		return True
 
 	def _metadata_exit(self, metadata_process):
 		self._jobs -= 1
@@ -176,9 +174,10 @@ class MetadataRegen(PollScheduler):
 			# On failure, still notify the consumer (in this case the metadata
 			# argument is None).
 			self._consumer(metadata_process.cpv,
-				metadata_process.ebuild_path,
 				metadata_process.repo_path,
-				metadata_process.metadata)
+				metadata_process.metadata,
+				metadata_process.ebuild_hash,
+				metadata_process.eapi_supported)
 
 		self._schedule()
 

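MetadataRegen now tracks a _remaining_tasks flag and pulls work from a generator until it is exhausted, instead of returning True/False from _schedule_tasks(). The control flow, reduced to a toy (names mirror the patch, but this is a sketch, not the real scheduler):

    def iter_tasks(cps):
        for cp in cps:
            yield "regen:%s" % cp

    process_iter = iter_tasks(["app-misc/foo", "dev-libs/bar"])
    remaining_tasks = True
    while remaining_tasks:
        try:
            task = next(process_iter)
        except StopIteration:
            remaining_tasks = False  # lets _keep_scheduling() go False
            continue
        print("scheduling", task)
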
diff --git a/portage_with_autodep/pym/_emerge/MetadataRegen.pyo b/portage_with_autodep/pym/_emerge/MetadataRegen.pyo
new file mode 100644
index 0000000..6c8788f
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/MetadataRegen.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py
index ce0ab14..afa44fb 100644
--- a/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py
+++ b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py
@@ -29,5 +29,11 @@ class MiscFunctionsProcess(AbstractEbuildProcess):
 		AbstractEbuildProcess._start(self)
 
 	def _spawn(self, args, **kwargs):
-		self.settings.pop("EBUILD_PHASE", None)
-		return spawn(" ".join(args), self.settings, **kwargs)
+		# Temporarily unset EBUILD_PHASE so that bashrc code doesn't
+		# think this is a real phase.
+		phase_backup = self.settings.pop("EBUILD_PHASE", None)
+		try:
+			return spawn(" ".join(args), self.settings, **kwargs)
+		finally:
+			if phase_backup is not None:
+				self.settings["EBUILD_PHASE"] = phase_backup

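The MiscFunctionsProcess fix is a classic pop/try/finally save-and-restore: the variable is absent only for the duration of the spawn, and it comes back even if the call raises. Generic sketch of the pattern:

    def call_without_key(mapping, key, func, *args, **kwargs):
        backup = mapping.pop(key, None)
        try:
            return func(*args, **kwargs)
        finally:
            if backup is not None:
                mapping[key] = backup  # restore even if func() raised

    settings = {"EBUILD_PHASE": "install"}
    call_without_key(settings, "EBUILD_PHASE", print, "spawning ...")
    print(settings["EBUILD_PHASE"])  # 'install' is back
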
diff --git a/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.pyo b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.pyo
new file mode 100644
index 0000000..e3f5344
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/Package.py b/portage_with_autodep/pym/_emerge/Package.py
index 20c72b4..c04fa1f 100644
--- a/portage_with_autodep/pym/_emerge/Package.py
+++ b/portage_with_autodep/pym/_emerge/Package.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import sys
@@ -9,9 +9,9 @@ from portage.cache.mappings import slot_dict_class
 from portage.const import EBUILD_PHASES
 from portage.dep import Atom, check_required_use, use_reduce, \
 	paren_enclose, _slot_re, _slot_separator, _repo_separator
+from portage.versions import _pkg_str, _unknown_repo
 from portage.eapi import eapi_has_iuse_defaults, eapi_has_required_use
 from portage.exception import InvalidDependString
-from portage.repository.config import _gen_valid_repo
 from _emerge.Task import Task
 
 if sys.hexversion >= 0x3000000:
@@ -26,7 +26,7 @@ class Package(Task):
 		"root_config", "type_name",
 		"category", "counter", "cp", "cpv_split",
 		"inherited", "invalid", "iuse", "masks", "mtime",
-		"pf", "pv_split", "root", "slot", "slot_atom", "visible",) + \
+		"pf", "root", "slot", "slot_atom", "version", "visible",) + \
 	("_raw_metadata", "_use",)
 
 	metadata_keys = [
@@ -38,7 +38,7 @@ class Package(Task):
 
 	_dep_keys = ('DEPEND', 'PDEPEND', 'RDEPEND',)
 	_use_conditional_misc_keys = ('LICENSE', 'PROPERTIES', 'RESTRICT')
-	UNKNOWN_REPO = "__unknown__"
+	UNKNOWN_REPO = _unknown_repo
 
 	def __init__(self, **kwargs):
 		Task.__init__(self, **kwargs)
@@ -49,7 +49,6 @@ class Package(Task):
 		self.metadata = _PackageMetadataWrapper(self, self._raw_metadata)
 		if not self.built:
 			self.metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
-		self.cp = portage.cpv_getkey(self.cpv)
 		slot = self.slot
 		if _slot_re.match(slot) is None:
 			self._invalid_metadata('SLOT.invalid',
@@ -57,6 +56,11 @@ class Package(Task):
 			# Avoid an InvalidAtom exception when creating slot_atom.
 			# This package instance will be masked due to empty SLOT.
 			slot = '0'
+		self.cpv = _pkg_str(self.cpv, slot=slot,
+			repo=self.metadata.get('repository', ''))
+		self.cp = self.cpv.cp
+		# sync metadata with validated repo (may be UNKNOWN_REPO)
+		self.metadata['repository'] = self.cpv.repo
 		if (self.iuse.enabled or self.iuse.disabled) and \
 			not eapi_has_iuse_defaults(self.metadata["EAPI"]):
 			if not self.installed:
@@ -64,14 +68,10 @@ class Package(Task):
 					"IUSE contains defaults, but EAPI doesn't allow them")
 		self.slot_atom = portage.dep.Atom("%s%s%s" % (self.cp, _slot_separator, slot))
 		self.category, self.pf = portage.catsplit(self.cpv)
-		self.cpv_split = portage.catpkgsplit(self.cpv)
-		self.pv_split = self.cpv_split[1:]
+		self.cpv_split = self.cpv.cpv_split
+		self.version = self.cpv.version
 		if self.inherited is None:
 			self.inherited = frozenset()
-		repo = _gen_valid_repo(self.metadata.get('repository', ''))
-		if not repo:
-			repo = self.UNKNOWN_REPO
-		self.metadata['repository'] = repo
 
 		self._validate_deps()
 		self.masks = self._masks()
@@ -84,7 +84,7 @@ class Package(Task):
 
 		self._hash_key = Package._gen_hash_key(cpv=self.cpv,
 			installed=self.installed, onlydeps=self.onlydeps,
-			operation=self.operation, repo_name=repo,
+			operation=self.operation, repo_name=self.cpv.repo,
 			root_config=self.root_config,
 			type_name=self.type_name)
 		self._hash_value = hash(self._hash_key)
@@ -239,11 +239,6 @@ class Package(Task):
 		if mask_atom is not None:
 			masks['package.mask'] = mask_atom
 
-		system_mask = settings._getProfileMaskAtom(
-			self.cpv, self.metadata)
-		if system_mask is not None:
-			masks['profile.system'] = system_mask
-
 		try:
 			missing_licenses = settings._getMissingLicenses(
 				self.cpv, self.metadata)
@@ -276,7 +271,6 @@ class Package(Task):
 				return False
 
 			if 'package.mask' in masks or \
-				'profile.system' in masks or \
 				'LICENSE' in masks:
 				return False
 
@@ -367,15 +361,15 @@ class Package(Task):
 			% (portage.output.colorize(cpv_color, self.cpv + _repo_separator + self.repo) , self.type_name)
 
 		if self.type_name == "installed":
-			if self.root != "/":
-				s += " in '%s'" % self.root
+			if self.root_config.settings['ROOT'] != "/":
+				s += " in '%s'" % self.root_config.settings['ROOT']
 			if self.operation == "uninstall":
 				s += " scheduled for uninstall"
 		else:
 			if self.operation == "merge":
 				s += " scheduled for merge"
-				if self.root != "/":
-					s += " to '%s'" % self.root
+				if self.root_config.settings['ROOT'] != "/":
+					s += " to '%s'" % self.root_config.settings['ROOT']
 		s += ")"
 		return s
 
@@ -497,7 +491,7 @@ class Package(Task):
 
 		def is_valid_flag(self, flags):
 			"""
-			@returns: True if all flags are valid USE values which may
+			@return: True if all flags are valid USE values which may
 				be specified in USE dependencies, False otherwise.
 			"""
 			if isinstance(flags, basestring):
@@ -511,7 +505,7 @@ class Package(Task):
 
 		def get_missing_iuse(self, flags):
 			"""
-			@returns: A list of flags missing from IUSE.
+			@return: A list of flags missing from IUSE.
 			"""
 			if isinstance(flags, basestring):
 				flags = [flags]
@@ -535,28 +529,28 @@ class Package(Task):
 	def __lt__(self, other):
 		if other.cp != self.cp:
 			return False
-		if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
+		if portage.vercmp(self.version, other.version) < 0:
 			return True
 		return False
 
 	def __le__(self, other):
 		if other.cp != self.cp:
 			return False
-		if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
+		if portage.vercmp(self.version, other.version) <= 0:
 			return True
 		return False
 
 	def __gt__(self, other):
 		if other.cp != self.cp:
 			return False
-		if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
+		if portage.vercmp(self.version, other.version) > 0:
 			return True
 		return False
 
 	def __ge__(self, other):
 		if other.cp != self.cp:
 			return False
-		if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
+		if portage.vercmp(self.version, other.version) >= 0:
 			return True
 		return False
 

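Package's rich comparisons now defer to portage.vercmp() on version strings instead of pkgcmp() on pre-split tuples. A toy version of the idea (this vercmp stand-in only understands dotted integers, unlike the real one):

    def vercmp(a, b):
        pa = [int(x) for x in a.split(".")]
        pb = [int(x) for x in b.split(".")]
        return (pa > pb) - (pa < pb)  # -1, 0 or 1

    class Pkg(object):
        def __init__(self, cp, version):
            self.cp, self.version = cp, version
        def __lt__(self, other):
            if other.cp != self.cp:
                return False
            return vercmp(self.version, other.version) < 0

    # True: numeric compare, where a plain string compare would say False.
    print(Pkg("a/b", "2.7.9") < Pkg("a/b", "2.7.10"))
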
diff --git a/portage_with_autodep/pym/_emerge/Package.pyo b/portage_with_autodep/pym/_emerge/Package.pyo
new file mode 100644
index 0000000..3d37317
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/Package.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/PackageArg.pyo b/portage_with_autodep/pym/_emerge/PackageArg.pyo
new file mode 100644
index 0000000..c50e145
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/PackageArg.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/PackageMerge.py b/portage_with_autodep/pym/_emerge/PackageMerge.py
index f8fa04a..eed34e9 100644
--- a/portage_with_autodep/pym/_emerge/PackageMerge.py
+++ b/portage_with_autodep/pym/_emerge/PackageMerge.py
@@ -28,7 +28,7 @@ class PackageMerge(CompositeTask):
 			counter_str,
 			colorize("GOOD", pkg.cpv))
 
-		if pkg.root != "/":
+		if pkg.root_config.settings["ROOT"] != "/":
 			msg += " %s %s" % (preposition, pkg.root)
 
 		if not self.merge.build_opts.fetchonly and \

diff --git a/portage_with_autodep/pym/_emerge/PackageMerge.pyo b/portage_with_autodep/pym/_emerge/PackageMerge.pyo
new file mode 100644
index 0000000..6403d1b
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/PackageMerge.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/PackageUninstall.pyo b/portage_with_autodep/pym/_emerge/PackageUninstall.pyo
new file mode 100644
index 0000000..847c749
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/PackageUninstall.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py
index a692bb6..0f7be44 100644
--- a/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py
+++ b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py
@@ -1,8 +1,9 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import sys
 from portage.dbapi import dbapi
+from portage.dbapi.dep_expand import dep_expand
 
 class PackageVirtualDbapi(dbapi):
 	"""
@@ -76,20 +77,24 @@ class PackageVirtualDbapi(dbapi):
 			self._match_cache = {}
 
 	def match(self, origdep, use_cache=1):
-		result = self._match_cache.get(origdep)
+		atom = dep_expand(origdep, mydb=self, settings=self.settings)
+		cache_key = (atom, atom.unevaluated_atom)
+		result = self._match_cache.get(cache_key)
 		if result is not None:
 			return result[:]
-		result = dbapi.match(self, origdep, use_cache=use_cache)
-		self._match_cache[origdep] = result
+		result = list(self._iter_match(atom, self.cp_list(atom.cp)))
+		self._match_cache[cache_key] = result
 		return result[:]
 
 	def cpv_exists(self, cpv, myrepo=None):
 		return cpv in self._cpv_map
 
 	def cp_list(self, mycp, use_cache=1):
-		cachelist = self._match_cache.get(mycp)
-		# cp_list() doesn't expand old-style virtuals
-		if cachelist and cachelist[0].startswith(mycp):
+		# NOTE: Cache can be safely shared with the match cache, since the
+		# match cache uses the result from dep_expand for the cache_key.
+		cache_key = (mycp, mycp)
+		cachelist = self._match_cache.get(cache_key)
+		if cachelist is not None:
 			return cachelist[:]
 		cpv_list = self._cp_map.get(mycp)
 		if cpv_list is None:
@@ -97,8 +102,7 @@ class PackageVirtualDbapi(dbapi):
 		else:
 			cpv_list = [pkg.cpv for pkg in cpv_list]
 		self._cpv_sort_ascending(cpv_list)
-		if not (not cpv_list and mycp.startswith("virtual/")):
-			self._match_cache[mycp] = cpv_list
+		self._match_cache[cache_key] = cpv_list
 		return cpv_list[:]
 
 	def cp_all(self):

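The cache rework above keys every entry on a 2-tuple so that match() results (keyed on the expanded atom plus its unevaluated form) and cp_list() results (keyed on (cp, cp)) can share one dict without colliding. A reduced sketch of the cp_list() side:

    class TinyDb(object):
        def __init__(self, packages):
            self._packages = packages  # {"cat/pkg": ["cat/pkg-1.0", ...]}
            self._match_cache = {}

        def cp_list(self, cp):
            cache_key = (cp, cp)  # same shape as the match-cache keys
            cached = self._match_cache.get(cache_key)
            if cached is not None:
                return cached[:]  # copy: callers may mutate the result
            result = sorted(self._packages.get(cp, []))
            self._match_cache[cache_key] = result
            return result[:]

    db = TinyDb({"app-portage/autodep": ["app-portage/autodep-0.1"]})
    print(db.cp_list("app-portage/autodep"))
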
diff --git a/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.pyo b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.pyo
new file mode 100644
index 0000000..a1a850f
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/PipeReader.py b/portage_with_autodep/pym/_emerge/PipeReader.py
index 375c98f..90febdf 100644
--- a/portage_with_autodep/pym/_emerge/PipeReader.py
+++ b/portage_with_autodep/pym/_emerge/PipeReader.py
@@ -1,11 +1,9 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from portage import os
 from _emerge.AbstractPollTask import AbstractPollTask
-from _emerge.PollConstants import PollConstants
 import fcntl
-import array
 
 class PipeReader(AbstractPollTask):
 
@@ -17,16 +15,22 @@ class PipeReader(AbstractPollTask):
 	"""
 
 	__slots__ = ("input_files",) + \
-		("_read_data", "_reg_ids")
+		("_read_data", "_reg_ids", "_use_array")
 
 	def _start(self):
 		self._reg_ids = set()
 		self._read_data = []
-		for k, f in self.input_files.items():
+
+		if self._use_array:
+			output_handler = self._array_output_handler
+		else:
+			output_handler = self._output_handler
+
+		for f in self.input_files.values():
 			fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
 				fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
 			self._reg_ids.add(self.scheduler.register(f.fileno(),
-				self._registered_events, self._output_handler))
+				self._registered_events, output_handler))
 		self._registered = True
 
 	def isAlive(self):
@@ -39,11 +43,7 @@ class PipeReader(AbstractPollTask):
 	def _wait(self):
 		if self.returncode is not None:
 			return self.returncode
-
-		if self._registered:
-			self.scheduler.schedule(self._reg_ids)
-			self._unregister()
-
+		self._wait_loop()
 		self.returncode = os.EX_OK
 		return self.returncode
 
@@ -57,26 +57,42 @@ class PipeReader(AbstractPollTask):
 
 	def _output_handler(self, fd, event):
 
-		if event & PollConstants.POLLIN:
+		while True:
+			data = self._read_buf(fd, event)
+			if data is None:
+				break
+			if data:
+				self._read_data.append(data)
+			else:
+				self._unregister()
+				self.wait()
+				break
+
+		self._unregister_if_appropriate(event)
+
+		return True
 
-			for f in self.input_files.values():
-				if fd == f.fileno():
-					break
+	def _array_output_handler(self, fd, event):
 
-			buf = array.array('B')
-			try:
-				buf.fromfile(f, self._bufsize)
-			except (EOFError, IOError):
-				pass
+		for f in self.input_files.values():
+			if f.fileno() == fd:
+				break
 
-			if buf:
-				self._read_data.append(buf.tostring())
+		while True:
+			data = self._read_array(f, event)
+			if data is None:
+				break
+			if data:
+				self._read_data.append(data)
 			else:
 				self._unregister()
 				self.wait()
+				break
 
 		self._unregister_if_appropriate(event)
 
+		return True
+
 	def _unregister(self):
 		"""
 		Unregister from the scheduler and close open files.

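PipeReader keeps its descriptors non-blocking via fcntl and drains each one in a loop until a read would block or EOF is reached. Stdlib sketch of that pattern (Python 3, where a would-block read raises BlockingIOError):

    import fcntl, os

    r, w = os.pipe()
    flags = fcntl.fcntl(r, fcntl.F_GETFL)
    fcntl.fcntl(r, fcntl.F_SETFL, flags | os.O_NONBLOCK)

    os.write(w, b"hello")
    os.close(w)

    chunks = []
    while True:
        try:
            data = os.read(r, 4096)
        except BlockingIOError:
            break  # nothing more to read right now
        if not data:
            break  # empty read: writer closed, EOF
        chunks.append(data)
    os.close(r)
    print(b"".join(chunks))  # b'hello'
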
diff --git a/portage_with_autodep/pym/_emerge/PipeReader.pyo b/portage_with_autodep/pym/_emerge/PipeReader.pyo
new file mode 100644
index 0000000..2f53e7d
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/PipeReader.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/PollScheduler.py b/portage_with_autodep/pym/_emerge/PollScheduler.py
index a2b5c24..965dc20 100644
--- a/portage_with_autodep/pym/_emerge/PollScheduler.py
+++ b/portage_with_autodep/pym/_emerge/PollScheduler.py
@@ -1,11 +1,8 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import gzip
 import errno
-import logging
-import select
-import time
 
 try:
 	import threading
@@ -15,36 +12,55 @@ except ImportError:
 from portage import _encodings
 from portage import _unicode_encode
 from portage.util import writemsg_level
+from portage.util.SlotObject import SlotObject
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
 
-from _emerge.SlotObject import SlotObject
 from _emerge.getloadavg import getloadavg
-from _emerge.PollConstants import PollConstants
-from _emerge.PollSelectAdapter import PollSelectAdapter
 
 class PollScheduler(object):
 
 	class _sched_iface_class(SlotObject):
-		__slots__ = ("output", "register", "schedule", "unregister")
+		__slots__ = ("IO_ERR", "IO_HUP", "IO_IN", "IO_NVAL", "IO_OUT",
+			"IO_PRI", "child_watch_add",
+			"idle_add", "io_add_watch", "iteration",
+			"output", "register", "run",
+			"source_remove", "timeout_add", "unregister")
 
-	def __init__(self):
+	def __init__(self, main=False):
+		"""
+		@param main: If True then use global_event_loop(), otherwise use
+			a local EventLoop instance (default is False, for safe use in
+			a non-main thread)
+		@type main: bool
+		"""
 		self._terminated = threading.Event()
 		self._terminated_tasks = False
 		self._max_jobs = 1
 		self._max_load = None
 		self._jobs = 0
-		self._poll_event_queue = []
-		self._poll_event_handlers = {}
-		self._poll_event_handler_ids = {}
-		# Increment id for each new handler.
-		self._event_handler_id = 0
-		self._poll_obj = create_poll_instance()
 		self._scheduling = False
 		self._background = False
+		if main:
+			self._event_loop = global_event_loop()
+		else:
+			self._event_loop = EventLoop(main=False)
 		self.sched_iface = self._sched_iface_class(
+			IO_ERR=self._event_loop.IO_ERR,
+			IO_HUP=self._event_loop.IO_HUP,
+			IO_IN=self._event_loop.IO_IN,
+			IO_NVAL=self._event_loop.IO_NVAL,
+			IO_OUT=self._event_loop.IO_OUT,
+			IO_PRI=self._event_loop.IO_PRI,
+			child_watch_add=self._event_loop.child_watch_add,
+			idle_add=self._event_loop.idle_add,
+			io_add_watch=self._event_loop.io_add_watch,
+			iteration=self._event_loop.iteration,
 			output=self._task_output,
-			register=self._register,
-			schedule=self._schedule_wait,
-			unregister=self._unregister)
+			register=self._event_loop.io_add_watch,
+			source_remove=self._event_loop.source_remove,
+			timeout_add=self._event_loop.timeout_add,
+			unregister=self._event_loop.source_remove)
 
 	def terminate(self):
 		"""
@@ -55,17 +71,47 @@ class PollScheduler(object):
 		"""
 		self._terminated.set()
 
+	def _termination_check(self):
+		"""
+		Calls _terminate_tasks() if appropriate. It's guaranteed not to
+		call it while _schedule_tasks() is being called. This check should
+		run on each iteration of the event loop, so that termination
+		signals are handled at the earliest opportunity. It always returns
+		True, for continuous scheduling via idle_add.
+		"""
+		if not self._scheduling and \
+			self._terminated.is_set() and \
+			not self._terminated_tasks:
+			self._scheduling = True
+			try:
+				self._terminated_tasks = True
+				self._terminate_tasks()
+			finally:
+				self._scheduling = False
+		return True
+
 	def _terminate_tasks(self):
 		"""
 		Send signals to terminate all tasks. This is called once
-		from self._schedule() in the event dispatching thread. This
-		prevents it from being called while the _schedule_tasks()
+		from _keep_scheduling() or _is_work_scheduled() in the event
+		dispatching thread. It will not be called while the _schedule_tasks()
 		implementation is running, in order to avoid potential
 		interference. All tasks should be cleaned up at the earliest
 		opportunity, but not necessarily before this method returns.
+		Typically, this method will send kill signals and return without
+		waiting for exit status. This allows basic cleanup to occur, such as
+		flushing of buffered output to logs.
 		"""
 		raise NotImplementedError()
 
+	def _keep_scheduling(self):
+		"""
+		@rtype: bool
+		@return: True if there may be remaining tasks to schedule,
+			False otherwise.
+		"""
+		return False
+
 	def _schedule_tasks(self):
 		"""
 		This is called from inside the _schedule() method, which
@@ -79,10 +125,10 @@ class PollScheduler(object):
 		Unless this method is used to perform user interface updates,
 		or something like that, the first thing it should do is check
 		the state of _terminated_tasks and if that is True then it
-		should return False immediately (since there's no need to
+		should return immediately (since there's no need to
 		schedule anything after _terminate_tasks() has been called).
 		"""
-		raise NotImplementedError()
+		pass
 
 	def _schedule(self):
 		"""
@@ -95,15 +141,32 @@ class PollScheduler(object):
 			return False
 		self._scheduling = True
 		try:
+			self._schedule_tasks()
+		finally:
+			self._scheduling = False
 
-			if self._terminated.is_set() and \
-				not self._terminated_tasks:
-				self._terminated_tasks = True
-				self._terminate_tasks()
+	def _main_loop(self):
+		term_check_id = self.sched_iface.idle_add(self._termination_check)
+		try:
+			# Populate initial event sources. We only need to do
+			# this once here, since it can be called during the
+			# loop from within event handlers.
+			self._schedule()
+
+			# Loop while there are jobs to be scheduled.
+			while self._keep_scheduling():
+				self.sched_iface.iteration()
 
-			return self._schedule_tasks()
+			# Clean shutdown of previously scheduled jobs. In the
+			# case of termination, this allows for basic cleanup
+			# such as flushing of buffered output to logs.
+			while self._is_work_scheduled():
+				self.sched_iface.iteration()
 		finally:
-			self._scheduling = False
+			self.sched_iface.source_remove(term_check_id)
+
+	def _is_work_scheduled(self):
+		return bool(self._running_job_count())
 
 	def _running_job_count(self):
 		return self._jobs
@@ -132,183 +195,6 @@ class PollScheduler(object):
 
 		return True
 
-	def _poll(self, timeout=None):
-		"""
-		All poll() calls pass through here. The poll events
-		are added directly to self._poll_event_queue.
-		In order to avoid endless blocking, this raises
-		StopIteration if timeout is None and there are
-		no file descriptors to poll.
-		"""
-		if not self._poll_event_handlers:
-			self._schedule()
-			if timeout is None and \
-				not self._poll_event_handlers:
-				raise StopIteration(
-					"timeout is None and there are no poll() event handlers")
-
-		# The following error is known to occur with Linux kernel versions
-		# less than 2.6.24:
-		#
-		#   select.error: (4, 'Interrupted system call')
-		#
-		# This error has been observed after a SIGSTOP, followed by SIGCONT.
-		# Treat it similar to EAGAIN if timeout is None, otherwise just return
-		# without any events.
-		while True:
-			try:
-				self._poll_event_queue.extend(self._poll_obj.poll(timeout))
-				break
-			except select.error as e:
-				writemsg_level("\n!!! select error: %s\n" % (e,),
-					level=logging.ERROR, noiselevel=-1)
-				del e
-				if timeout is not None:
-					break
-
-	def _next_poll_event(self, timeout=None):
-		"""
-		Since the _schedule_wait() loop is called by event
-		handlers from _poll_loop(), maintain a central event
-		queue for both of them to share events from a single
-		poll() call. In order to avoid endless blocking, this
-		raises StopIteration if timeout is None and there are
-		no file descriptors to poll.
-		"""
-		if not self._poll_event_queue:
-			self._poll(timeout)
-			if not self._poll_event_queue:
-				raise StopIteration()
-		return self._poll_event_queue.pop()
-
-	def _poll_loop(self):
-
-		event_handlers = self._poll_event_handlers
-		event_handled = False
-
-		try:
-			while event_handlers:
-				f, event = self._next_poll_event()
-				handler, reg_id = event_handlers[f]
-				handler(f, event)
-				event_handled = True
-		except StopIteration:
-			event_handled = True
-
-		if not event_handled:
-			raise AssertionError("tight loop")
-
-	def _schedule_yield(self):
-		"""
-		Schedule for a short period of time chosen by the scheduler based
-		on internal state. Synchronous tasks should call this periodically
-		in order to allow the scheduler to service pending poll events. The
-		scheduler will call poll() exactly once, without blocking, and any
-		resulting poll events will be serviced.
-		"""
-		event_handlers = self._poll_event_handlers
-		events_handled = 0
-
-		if not event_handlers:
-			return bool(events_handled)
-
-		if not self._poll_event_queue:
-			self._poll(0)
-
-		try:
-			while event_handlers and self._poll_event_queue:
-				f, event = self._next_poll_event()
-				handler, reg_id = event_handlers[f]
-				handler(f, event)
-				events_handled += 1
-		except StopIteration:
-			events_handled += 1
-
-		return bool(events_handled)
-
-	def _register(self, f, eventmask, handler):
-		"""
-		@rtype: Integer
-		@return: A unique registration id, for use in schedule() or
-			unregister() calls.
-		"""
-		if f in self._poll_event_handlers:
-			raise AssertionError("fd %d is already registered" % f)
-		self._event_handler_id += 1
-		reg_id = self._event_handler_id
-		self._poll_event_handler_ids[reg_id] = f
-		self._poll_event_handlers[f] = (handler, reg_id)
-		self._poll_obj.register(f, eventmask)
-		return reg_id
-
-	def _unregister(self, reg_id):
-		f = self._poll_event_handler_ids[reg_id]
-		self._poll_obj.unregister(f)
-		if self._poll_event_queue:
-			# Discard any unhandled events that belong to this file,
-			# in order to prevent these events from being erroneously
-			# delivered to a future handler that is using a reallocated
-			# file descriptor of the same numeric value (causing
-			# extremely confusing bugs).
-			remaining_events = []
-			discarded_events = False
-			for event in self._poll_event_queue:
-				if event[0] == f:
-					discarded_events = True
-				else:
-					remaining_events.append(event)
-
-			if discarded_events:
-				self._poll_event_queue[:] = remaining_events
-
-		del self._poll_event_handlers[f]
-		del self._poll_event_handler_ids[reg_id]
-
-	def _schedule_wait(self, wait_ids=None, timeout=None, condition=None):
-		"""
-		Schedule until wait_id is not longer registered
-		for poll() events.
-		@type wait_id: int
-		@param wait_id: a task id to wait for
-		"""
-		event_handlers = self._poll_event_handlers
-		handler_ids = self._poll_event_handler_ids
-		event_handled = False
-
-		if isinstance(wait_ids, int):
-			wait_ids = frozenset([wait_ids])
-
-		start_time = None
-		remaining_timeout = timeout
-		timed_out = False
-		if timeout is not None:
-			start_time = time.time()
-		try:
-			while (wait_ids is None and event_handlers) or \
-				(wait_ids is not None and wait_ids.intersection(handler_ids)):
-				f, event = self._next_poll_event(timeout=remaining_timeout)
-				handler, reg_id = event_handlers[f]
-				handler(f, event)
-				event_handled = True
-				if condition is not None and condition():
-					break
-				if timeout is not None:
-					elapsed_time = time.time() - start_time
-					if elapsed_time < 0:
-						# The system clock has changed such that start_time
-						# is now in the future, so just assume that the
-						# timeout has already elapsed.
-						timed_out = True
-						break
-					remaining_timeout = timeout - 1000 * elapsed_time
-					if remaining_timeout <= 0:
-						timed_out = True
-						break
-		except StopIteration:
-			event_handled = True
-
-		return event_handled
-
 	def _task_output(self, msg, log_path=None, background=None,
 		level=0, noiselevel=-1):
 		"""
@@ -333,6 +219,7 @@ class PollScheduler(object):
 				f = open(_unicode_encode(log_path,
 					encoding=_encodings['fs'], errors='strict'),
 					mode='ab')
+				f_real = f
 			except IOError as e:
 				if e.errno not in (errno.ENOENT, errno.ESTALE):
 					raise
@@ -349,50 +236,5 @@ class PollScheduler(object):
 
 				f.write(_unicode_encode(msg))
 				f.close()
-
-_can_poll_device = None
-
-def can_poll_device():
-	"""
-	Test if it's possible to use poll() on a device such as a pty. This
-	is known to fail on Darwin.
-	@rtype: bool
-	@returns: True if poll() on a device succeeds, False otherwise.
-	"""
-
-	global _can_poll_device
-	if _can_poll_device is not None:
-		return _can_poll_device
-
-	if not hasattr(select, "poll"):
-		_can_poll_device = False
-		return _can_poll_device
-
-	try:
-		dev_null = open('/dev/null', 'rb')
-	except IOError:
-		_can_poll_device = False
-		return _can_poll_device
-
-	p = select.poll()
-	p.register(dev_null.fileno(), PollConstants.POLLIN)
-
-	invalid_request = False
-	for f, event in p.poll():
-		if event & PollConstants.POLLNVAL:
-			invalid_request = True
-			break
-	dev_null.close()
-
-	_can_poll_device = not invalid_request
-	return _can_poll_device
-
-def create_poll_instance():
-	"""
-	Create an instance of select.poll, or an instance of
-	PollSelectAdapter there is no poll() implementation or
-	it is broken somehow.
-	"""
-	if can_poll_device():
-		return select.poll()
-	return PollSelectAdapter()
+				if f_real is not f:
+					f_real.close()

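The PollScheduler rewrite replaces the hand-rolled poll() plumbing with a glib-style event loop: idle_add() registers a callback that runs on every iteration until it returns False, which is exactly how _termination_check stays installed for the life of _main_loop(). A toy model of that contract (not portage's EventLoop, just its calling convention):

    class ToyLoop(object):
        def __init__(self):
            self._sources = {}
            self._next_id = 0

        def idle_add(self, callback):
            self._next_id += 1
            self._sources[self._next_id] = callback
            return self._next_id

        def source_remove(self, source_id):
            return self._sources.pop(source_id, None) is not None

        def iteration(self):
            for sid, cb in list(self._sources.items()):
                if not cb():  # returning False unregisters the source
                    self._sources.pop(sid, None)

    loop = ToyLoop()
    calls = []
    tid = loop.idle_add(lambda: calls.append("check") or True)
    loop.iteration(); loop.iteration()
    loop.source_remove(tid)
    print(len(calls))  # 2
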
diff --git a/portage_with_autodep/pym/_emerge/PollScheduler.pyo b/portage_with_autodep/pym/_emerge/PollScheduler.pyo
new file mode 100644
index 0000000..b7e52be
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/PollScheduler.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/ProgressHandler.pyo b/portage_with_autodep/pym/_emerge/ProgressHandler.pyo
new file mode 100644
index 0000000..83e2f7f
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/ProgressHandler.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/QueueScheduler.py b/portage_with_autodep/pym/_emerge/QueueScheduler.py
index a4ab328..206087c 100644
--- a/portage_with_autodep/pym/_emerge/QueueScheduler.py
+++ b/portage_with_autodep/pym/_emerge/QueueScheduler.py
@@ -1,8 +1,6 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-import time
-
 from _emerge.PollScheduler import PollScheduler
 
 class QueueScheduler(PollScheduler):
@@ -12,8 +10,8 @@ class QueueScheduler(PollScheduler):
 	run() method returns when no tasks remain.
 	"""
 
-	def __init__(self, max_jobs=None, max_load=None):
-		PollScheduler.__init__(self)
+	def __init__(self, main=True, max_jobs=None, max_load=None):
+		PollScheduler.__init__(self, main=main)
 
 		if max_jobs is None:
 			max_jobs = 1
@@ -36,51 +34,44 @@ class QueueScheduler(PollScheduler):
 
 	def run(self, timeout=None):
 
-		start_time = None
-		timed_out = False
-		remaining_timeout = timeout
+		timeout_callback = None
 		if timeout is not None:
-			start_time = time.time()
-
-		while self._schedule():
-			self._schedule_wait(timeout=remaining_timeout)
-			if timeout is not None:
-				elapsed_time = time.time() - start_time
-				if elapsed_time < 0:
-					# The system clock has changed such that start_time
-					# is now in the future, so just assume that the
-					# timeout has already elapsed.
-					timed_out = True
-					break
-				remaining_timeout = timeout - 1000 * elapsed_time
-				if remaining_timeout <= 0:
-					timed_out = True
+			def timeout_callback():
+				timeout_callback.timed_out = True
+				return False
+			timeout_callback.timed_out = False
+			timeout_callback.timeout_id = self.sched_iface.timeout_add(
+				timeout, timeout_callback)
+
+		term_check_id = self.sched_iface.idle_add(self._termination_check)
+		try:
+			while not (timeout_callback is not None and
+				timeout_callback.timed_out):
+				# We don't have any callbacks to trigger _schedule(),
+				# so we have to call it explicitly here.
+				self._schedule()
+				if self._keep_scheduling():
+					self.sched_iface.iteration()
+				else:
 					break
 
-		if timeout is None or not timed_out:
-			while self._running_job_count():
-				self._schedule_wait(timeout=remaining_timeout)
-				if timeout is not None:
-					elapsed_time = time.time() - start_time
-					if elapsed_time < 0:
-						# The system clock has changed such that start_time
-						# is now in the future, so just assume that the
-						# timeout has already elapsed.
-						timed_out = True
-						break
-					remaining_timeout = timeout - 1000 * elapsed_time
-					if remaining_timeout <= 0:
-						timed_out = True
-						break
+			while self._is_work_scheduled() and \
+				not (timeout_callback is not None and
+				timeout_callback.timed_out):
+				self.sched_iface.iteration()
+		finally:
+			self.sched_iface.source_remove(term_check_id)
+			if timeout_callback is not None:
+				self.sched_iface.unregister(timeout_callback.timeout_id)
 
 	def _schedule_tasks(self):
 		"""
 		@rtype: bool
-		@returns: True if there may be remaining tasks to schedule,
+		@return: True if there may be remaining tasks to schedule,
 			False otherwise.
 		"""
 		if self._terminated_tasks:
-			return False
+			return
 
 		while self._can_add_job():
 			n = self._max_jobs - self._running_job_count()
@@ -88,12 +79,10 @@ class QueueScheduler(PollScheduler):
 				break
 
 			if not self._start_next_job(n):
-				return False
+				return
 
-		for q in self._queues:
-			if q:
-				return True
-		return False
+	def _keep_scheduling(self):
+		return not self._terminated_tasks and any(self._queues)
 
 	def _running_job_count(self):
 		job_count = 0

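QueueScheduler.run() now models its timeout as a one-shot timeout_add() source, and the nested function carries its own timed_out flag as a function attribute, avoiding a class or nonlocal state. That trick in isolation:

    def make_timeout_callback():
        def timeout_callback():
            timeout_callback.timed_out = True
            return False  # one-shot: False unregisters the timeout
        timeout_callback.timed_out = False
        return timeout_callback

    cb = make_timeout_callback()
    print(cb.timed_out)  # False
    cb()                 # the event loop would call this on expiry
    print(cb.timed_out)  # True
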
diff --git a/portage_with_autodep/pym/_emerge/QueueScheduler.pyo b/portage_with_autodep/pym/_emerge/QueueScheduler.pyo
new file mode 100644
index 0000000..88de3ea
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/QueueScheduler.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/RootConfig.py b/portage_with_autodep/pym/_emerge/RootConfig.py
index d84f108..bb0d768 100644
--- a/portage_with_autodep/pym/_emerge/RootConfig.py
+++ b/portage_with_autodep/pym/_emerge/RootConfig.py
@@ -19,7 +19,7 @@ class RootConfig(object):
 	def __init__(self, settings, trees, setconfig):
 		self.trees = trees
 		self.settings = settings
-		self.root = self.settings["ROOT"]
+		self.root = self.settings['EROOT']
 		self.setconfig = setconfig
 		if setconfig is None:
 			self.sets = {}

diff --git a/portage_with_autodep/pym/_emerge/RootConfig.pyo b/portage_with_autodep/pym/_emerge/RootConfig.pyo
new file mode 100644
index 0000000..fad3022
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/RootConfig.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/Scheduler.py b/portage_with_autodep/pym/_emerge/Scheduler.py
index 6412d82..30a7e10 100644
--- a/portage_with_autodep/pym/_emerge/Scheduler.py
+++ b/portage_with_autodep/pym/_emerge/Scheduler.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
@@ -7,10 +7,8 @@ from collections import deque
 import gc
 import gzip
 import logging
-import shutil
 import signal
 import sys
-import tempfile
 import textwrap
 import time
 import warnings
@@ -28,9 +26,12 @@ from portage.output import colorize, create_color_func, red
 bad = create_color_func("BAD")
 from portage._sets import SETPREFIX
 from portage._sets.base import InternalPackageSet
-from portage.util import writemsg, writemsg_level
+from portage.util import ensure_dirs, writemsg, writemsg_level
+from portage.util.SlotObject import SlotObject
 from portage.package.ebuild.digestcheck import digestcheck
 from portage.package.ebuild.digestgen import digestgen
+from portage.package.ebuild.doebuild import (_check_temp_dir,
+	_prepare_self_update)
 from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
 
 import _emerge
@@ -44,6 +45,7 @@ from _emerge.create_depgraph_params import create_depgraph_params
 from _emerge.create_world_atom import create_world_atom
 from _emerge.DepPriority import DepPriority
 from _emerge.depgraph import depgraph, resume_depgraph
+from _emerge.EbuildBuildDir import EbuildBuildDir
 from _emerge.EbuildFetcher import EbuildFetcher
 from _emerge.EbuildPhase import EbuildPhase
 from _emerge.emergelog import emergelog
@@ -52,12 +54,9 @@ from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_dep
 from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
 from _emerge.JobStatusDisplay import JobStatusDisplay
 from _emerge.MergeListItem import MergeListItem
-from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
 from _emerge.Package import Package
 from _emerge.PackageMerge import PackageMerge
 from _emerge.PollScheduler import PollScheduler
-from _emerge.RootConfig import RootConfig
-from _emerge.SlotObject import SlotObject
 from _emerge.SequentialTaskQueue import SequentialTaskQueue
 
 if sys.hexversion >= 0x3000000:
@@ -77,17 +76,12 @@ class Scheduler(PollScheduler):
 		frozenset(["--pretend",
 		"--fetchonly", "--fetch-all-uri"])
 
-	_opts_no_restart = frozenset(["--buildpkgonly",
+	_opts_no_self_update = frozenset(["--buildpkgonly",
 		"--fetchonly", "--fetch-all-uri", "--pretend"])
 
-	_bad_resume_opts = set(["--ask", "--changelog",
-		"--resume", "--skipfirst"])
-
-	class _iface_class(SlotObject):
+	class _iface_class(PollScheduler._sched_iface_class):
 		__slots__ = ("fetch",
-			"output", "register", "schedule",
-			"scheduleSetup", "scheduleUnpack", "scheduleYield",
-			"unregister")
+			"scheduleSetup", "scheduleUnpack")
 
 	class _fetch_iface_class(SlotObject):
 		__slots__ = ("log_file", "schedule")
@@ -96,7 +90,7 @@ class Scheduler(PollScheduler):
 		("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
 
 	class _build_opts_class(SlotObject):
-		__slots__ = ("buildpkg", "buildpkgonly",
+		__slots__ = ("buildpkg", "buildpkg_exclude", "buildpkgonly",
 			"fetch_all_uri", "fetchonly", "pretend")
 
 	class _binpkg_opts_class(SlotObject):
@@ -141,8 +135,9 @@ class Scheduler(PollScheduler):
 			portage.exception.PortageException.__init__(self, value)
 
 	def __init__(self, settings, trees, mtimedb, myopts,
-		spinner, mergelist=None, favorites=None, graph_config=None):
-		PollScheduler.__init__(self)
+		spinner, mergelist=None, favorites=None, graph_config=None,
+		uninstall_only=False):
+		PollScheduler.__init__(self, main=True)
 
 		if mergelist is not None:
 			warnings.warn("The mergelist parameter of the " + \
@@ -151,16 +146,22 @@ class Scheduler(PollScheduler):
 				DeprecationWarning, stacklevel=2)
 
 		self.settings = settings
-		self.target_root = settings["ROOT"]
+		self.target_root = settings["EROOT"]
 		self.trees = trees
 		self.myopts = myopts
 		self._spinner = spinner
 		self._mtimedb = mtimedb
 		self._favorites = favorites
+		self._uninstall_only = uninstall_only
 		self._args_set = InternalPackageSet(favorites, allow_repo=True)
 		self._build_opts = self._build_opts_class()
+
 		for k in self._build_opts.__slots__:
-			setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
+			setattr(self._build_opts, k, myopts.get("--" + k.replace("_", "-")))
+		self._build_opts.buildpkg_exclude = InternalPackageSet( \
+			initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
+			allow_wildcard=True, allow_repo=True)
+
 		self._binpkg_opts = self._binpkg_opts_class()
 		for k in self._binpkg_opts.__slots__:
 			setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
@@ -202,10 +203,7 @@ class Scheduler(PollScheduler):
 		if max_jobs is None:
 			max_jobs = 1
 		self._set_max_jobs(max_jobs)
-
-		# The root where the currently running
-		# portage instance is installed.
-		self._running_root = trees["/"]["root_config"]
+		self._running_root = trees[trees._running_eroot]["root_config"]
 		self.edebug = 0
 		if settings.get("PORTAGE_DEBUG", "") == "1":
 			self.edebug = 1
@@ -219,13 +217,11 @@ class Scheduler(PollScheduler):
 		fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
 			schedule=self._schedule_fetch)
 		self._sched_iface = self._iface_class(
-			fetch=fetch_iface, output=self._task_output,
-			register=self._register,
-			schedule=self._schedule_wait,
+			fetch=fetch_iface,
 			scheduleSetup=self._schedule_setup,
 			scheduleUnpack=self._schedule_unpack,
-			scheduleYield=self._schedule_yield,
-			unregister=self._unregister)
+			**dict((k, getattr(self.sched_iface, k))
+			for k in self.sched_iface.__slots__))
 
 		self._prefetchers = weakref.WeakValueDictionary()
 		self._pkg_queue = []
@@ -277,7 +273,7 @@ class Scheduler(PollScheduler):
 		if self._parallel_fetch:
 				# clear out existing fetch log if it exists
 				try:
-					open(self._fetch_log, 'w')
+					open(self._fetch_log, 'w').close()
 				except EnvironmentError:
 					pass
 
@@ -289,10 +285,37 @@ class Scheduler(PollScheduler):
 			self._running_portage = self._pkg(cpv, "installed",
 				self._running_root, installed=True)
 
+	def _handle_self_update(self):
+
+		if self._opts_no_self_update.intersection(self.myopts):
+			return os.EX_OK
+
+		for x in self._mergelist:
+			if not isinstance(x, Package):
+				continue
+			if x.operation != "merge":
+				continue
+			if x.root != self._running_root.root:
+				continue
+			if not portage.dep.match_from_list(
+				portage.const.PORTAGE_PACKAGE_ATOM, [x]):
+				continue
+			if self._running_portage is None or \
+				self._running_portage.cpv != x.cpv or \
+				'9999' in x.cpv or \
+				'git' in x.inherited or \
+				'git-2' in x.inherited:
+				rval = _check_temp_dir(self.settings)
+				if rval != os.EX_OK:
+					return rval
+				_prepare_self_update(self.settings)
+			break
+
+		return os.EX_OK
+
 	def _terminate_tasks(self):
 		self._status_display.quiet = True
-		while self._running_tasks:
-			task_id, task = self._running_tasks.popitem()
+		for task in list(self._running_tasks.values()):
 			task.cancel()
 		for q in self._task_queues.values():
 			q.clear()
@@ -304,10 +327,13 @@ class Scheduler(PollScheduler):
 		"""
 		self._set_graph_config(graph_config)
 		self._blocker_db = {}
+		dynamic_deps = self.myopts.get("--dynamic-deps", "y") != "n"
 		for root in self.trees:
+			if self._uninstall_only:
+				continue
 			if graph_config is None:
 				fake_vartree = FakeVartree(self.trees[root]["root_config"],
-					pkg_cache=self._pkg_cache)
+					pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps)
 				fake_vartree.sync()
 			else:
 				fake_vartree = graph_config.trees[root]['vartree']
@@ -324,52 +350,6 @@ class Scheduler(PollScheduler):
 		self._set_graph_config(None)
 		gc.collect()
 
-	def _poll(self, timeout=None):
-
-		self._schedule()
-
-		if timeout is None:
-			while True:
-				if not self._poll_event_handlers:
-					self._schedule()
-					if not self._poll_event_handlers:
-						raise StopIteration(
-							"timeout is None and there are no poll() event handlers")
-				previous_count = len(self._poll_event_queue)
-				PollScheduler._poll(self, timeout=self._max_display_latency)
-				self._status_display.display()
-				if previous_count != len(self._poll_event_queue):
-					break
-
-		elif timeout <= self._max_display_latency:
-			PollScheduler._poll(self, timeout=timeout)
-			if timeout == 0:
-				# The display is updated by _schedule() above, so it would be
-				# redundant to update it here when timeout is 0.
-				pass
-			else:
-				self._status_display.display()
-
-		else:
-			remaining_timeout = timeout
-			start_time = time.time()
-			while True:
-				previous_count = len(self._poll_event_queue)
-				PollScheduler._poll(self,
-					timeout=min(self._max_display_latency, remaining_timeout))
-				self._status_display.display()
-				if previous_count != len(self._poll_event_queue):
-					break
-				elapsed_time = time.time() - start_time
-				if elapsed_time < 0:
-					# The system clock has changed such that start_time
-					# is now in the future, so just assume that the
-					# timeout has already elapsed.
-					break
-				remaining_timeout = timeout - 1000 * elapsed_time
-				if remaining_timeout <= 0:
-					break
-
 	def _set_max_jobs(self, max_jobs):
 		self._max_jobs = max_jobs
 		self._task_queues.jobs.max_jobs = max_jobs
@@ -381,11 +361,11 @@ class Scheduler(PollScheduler):
 		Check if background mode is enabled and adjust states as necessary.
 
 		@rtype: bool
-		@returns: True if background mode is enabled, False otherwise.
+		@return: True if background mode is enabled, False otherwise.
 		"""
 		background = (self._max_jobs is True or \
 			self._max_jobs > 1 or "--quiet" in self.myopts \
-			or "--quiet-build" in self.myopts) and \
+			or self.myopts.get("--quiet-build") == "y") and \
 			not bool(self._opts_no_background.intersection(self.myopts))
 
 		if background:
@@ -398,7 +378,7 @@ class Scheduler(PollScheduler):
 				msg = [""]
 				for pkg in interactive_tasks:
 					pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
-					if pkg.root != "/":
+					if pkg.root_config.settings["ROOT"] != "/":
 						pkg_str += " for " + pkg.root
 					msg.append(pkg_str)
 				msg.append("")
@@ -741,7 +721,6 @@ class Scheduler(PollScheduler):
 			self._status_msg("Starting parallel fetch")
 
 			prefetchers = self._prefetchers
-			getbinpkg = "--getbinpkg" in self.myopts
 
 			for pkg in self._mergelist:
 				# mergelist can contain solved Blocker instances
@@ -749,15 +728,13 @@ class Scheduler(PollScheduler):
 					continue
 				prefetcher = self._create_prefetcher(pkg)
 				if prefetcher is not None:
-					self._task_queues.fetch.add(prefetcher)
+					# This will start the first prefetcher immediately, so that
+					# self._task() won't discard it. This avoids a case where
+					# the first prefetcher is discarded, causing the second
+					# prefetcher to occupy the fetch queue before the first
+					# fetcher has an opportunity to execute.
 					prefetchers[pkg] = prefetcher
-
-			# Start the first prefetcher immediately so that self._task()
-			# won't discard it. This avoids a case where the first
-			# prefetcher is discarded, causing the second prefetcher to
-			# occupy the fetch queue before the first fetcher has an
-			# opportunity to execute.
-			self._task_queues.fetch.schedule()
+					self._task_queues.fetch.add(prefetcher)
 
 	def _create_prefetcher(self, pkg):
 		"""
@@ -785,100 +762,6 @@ class Scheduler(PollScheduler):
 
 		return prefetcher
 
-	def _is_restart_scheduled(self):
-		"""
-		Check if the merge list contains a replacement
-		for the current running instance, that will result
-		in restart after merge.
-		@rtype: bool
-		@returns: True if a restart is scheduled, False otherwise.
-		"""
-		if self._opts_no_restart.intersection(self.myopts):
-			return False
-
-		mergelist = self._mergelist
-
-		for i, pkg in enumerate(mergelist):
-			if self._is_restart_necessary(pkg) and \
-				i != len(mergelist) - 1:
-				return True
-
-		return False
-
-	def _is_restart_necessary(self, pkg):
-		"""
-		@return: True if merging the given package
-			requires restart, False otherwise.
-		"""
-
-		# Figure out if we need a restart.
-		if pkg.root == self._running_root.root and \
-			portage.match_from_list(
-			portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
-			if self._running_portage is None:
-				return True
-			elif pkg.cpv != self._running_portage.cpv or \
-				'9999' in pkg.cpv or \
-				'git' in pkg.inherited or \
-				'git-2' in pkg.inherited:
-				return True
-		return False
-
-	def _restart_if_necessary(self, pkg):
-		"""
-		Use execv() to restart emerge. This happens
-		if portage upgrades itself and there are
-		remaining packages in the list.
-		"""
-
-		if self._opts_no_restart.intersection(self.myopts):
-			return
-
-		if not self._is_restart_necessary(pkg):
-			return
-
-		if pkg == self._mergelist[-1]:
-			return
-
-		self._main_loop_cleanup()
-
-		logger = self._logger
-		pkg_count = self._pkg_count
-		mtimedb = self._mtimedb
-		bad_resume_opts = self._bad_resume_opts
-
-		logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
-			(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
-
-		logger.log(" *** RESTARTING " + \
-			"emerge via exec() after change of " + \
-			"portage version.")
-
-		mtimedb["resume"]["mergelist"].remove(list(pkg))
-		mtimedb.commit()
-		portage.run_exitfuncs()
-		# Don't trust sys.argv[0] here because eselect-python may modify it.
-		emerge_binary = os.path.join(portage.const.PORTAGE_BIN_PATH, 'emerge')
-		mynewargv = [emerge_binary, "--resume"]
-		resume_opts = self.myopts.copy()
-		# For automatic resume, we need to prevent
-		# any of bad_resume_opts from leaking in
-		# via EMERGE_DEFAULT_OPTS.
-		resume_opts["--ignore-default-opts"] = True
-		for myopt, myarg in resume_opts.items():
-			if myopt not in bad_resume_opts:
-				if myarg is True:
-					mynewargv.append(myopt)
-				elif isinstance(myarg, list):
-					# arguments like --exclude that use 'append' action
-					for x in myarg:
-						mynewargv.append("%s=%s" % (myopt, x))
-				else:
-					mynewargv.append("%s=%s" % (myopt, myarg))
-		# priority only needs to be adjusted on the first run
-		os.environ["PORTAGE_NICENESS"] = "0"
-		os.execv(mynewargv[0], mynewargv)
-
 	def _run_pkg_pretend(self):
 		"""
 		Since pkg_pretend output may be important, this method sends all
@@ -912,11 +795,48 @@ class Scheduler(PollScheduler):
 			root_config = x.root_config
 			settings = self.pkgsettings[root_config.root]
 			settings.setcpv(x)
-			tmpdir = tempfile.mkdtemp()
-			tmpdir_orig = settings["PORTAGE_TMPDIR"]
-			settings["PORTAGE_TMPDIR"] = tmpdir
+
+			# setcpv/package.env allows for per-package PORTAGE_TMPDIR so we
+			# have to validate it for each package
+			rval = _check_temp_dir(settings)
+			if rval != os.EX_OK:
+				return rval
+
+			build_dir_path = os.path.join(
+				os.path.realpath(settings["PORTAGE_TMPDIR"]),
+				"portage", x.category, x.pf)
+			existing_buildir = os.path.isdir(build_dir_path)
+			settings["PORTAGE_BUILDDIR"] = build_dir_path
+			build_dir = EbuildBuildDir(scheduler=sched_iface,
+				settings=settings)
+			build_dir.lock()
+			current_task = None
 
 			try:
+
+				# Clean up the existing build dir, in case pkg_pretend
+				# checks for available space (bug #390711).
+				if existing_buildir:
+					if x.built:
+						tree = "bintree"
+						infloc = os.path.join(build_dir_path, "build-info")
+						ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+					else:
+						tree = "porttree"
+						portdb = root_config.trees["porttree"].dbapi
+						ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+						if ebuild_path is None:
+							raise AssertionError(
+								"ebuild not found for '%s'" % x.cpv)
+					portage.package.ebuild.doebuild.doebuild_environment(
+						ebuild_path, "clean", settings=settings,
+						db=self.trees[settings['EROOT']][tree].dbapi)
+					clean_phase = EbuildPhase(background=False,
+						phase='clean', scheduler=sched_iface, settings=settings)
+					current_task = clean_phase
+					clean_phase.start()
+					clean_phase.wait()
+
 				if x.built:
 					tree = "bintree"
 					bintree = root_config.trees["bintree"].dbapi.bintree
@@ -935,6 +855,7 @@ class Scheduler(PollScheduler):
 
 					verifier = BinpkgVerifier(pkg=x,
 						scheduler=sched_iface)
+					current_task = verifier
 					verifier.start()
 					if verifier.wait() != os.EX_OK:
 						failures += 1
@@ -943,8 +864,8 @@ class Scheduler(PollScheduler):
 					if fetched:
 						bintree.inject(x.cpv, filename=fetched)
 					tbz2_file = bintree.getname(x.cpv)
-					infloc = os.path.join(tmpdir, x.category, x.pf, "build-info")
-					os.makedirs(infloc)
+					infloc = os.path.join(build_dir_path, "build-info")
+					ensure_dirs(infloc)
 					portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
 					ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
 					settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
@@ -964,7 +885,8 @@ class Scheduler(PollScheduler):
 
 				portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
 					"pretend", settings=settings,
-					db=self.trees[settings["ROOT"]][tree].dbapi)
+					db=self.trees[settings['EROOT']][tree].dbapi)
+
 				prepare_build_dirs(root_config.root, settings, cleanup=0)
 
 				vardb = root_config.trees['vartree'].dbapi
@@ -976,14 +898,21 @@ class Scheduler(PollScheduler):
 					phase="pretend", scheduler=sched_iface,
 					settings=settings)
 
+				current_task = pretend_phase
 				pretend_phase.start()
 				ret = pretend_phase.wait()
 				if ret != os.EX_OK:
 					failures += 1
 				portage.elog.elog_process(x.cpv, settings)
 			finally:
-				shutil.rmtree(tmpdir)
-				settings["PORTAGE_TMPDIR"] = tmpdir_orig
+				if current_task is not None and current_task.isAlive():
+					current_task.cancel()
+					current_task.wait()
+				clean_phase = EbuildPhase(background=False,
+					phase='clean', scheduler=sched_iface, settings=settings)
+				clean_phase.start()
+				clean_phase.wait()
+				build_dir.unlock()
 
 		if failures:
 			return 1
@@ -1003,6 +932,10 @@ class Scheduler(PollScheduler):
 		except self._unknown_internal_error:
 			return 1
 
+		rval = self._handle_self_update()
+		if rval != os.EX_OK:
+			return rval
+
 		for root in self.trees:
 			root_config = self.trees[root]["root_config"]
 
@@ -1131,10 +1064,8 @@ class Scheduler(PollScheduler):
 			# If only one package failed then just show its
 			# whole log for easy viewing.
 			failed_pkg = self._failed_pkgs_all[-1]
-			build_dir = failed_pkg.build_dir
 			log_file = None
-
-			log_paths = [failed_pkg.build_log]
+			log_file_real = None
 
 			log_path = self._locate_failure_log(failed_pkg)
 			if log_path is not None:
@@ -1145,6 +1076,7 @@ class Scheduler(PollScheduler):
 					pass
 				else:
 					if log_path.endswith('.gz'):
+						log_file_real = log_file
 						log_file =  gzip.GzipFile(filename='',
 							mode='rb', fileobj=log_file)
 
@@ -1157,6 +1089,8 @@ class Scheduler(PollScheduler):
 						noiselevel=-1)
 				finally:
 					log_file.close()
+					if log_file_real is not None:
+						log_file_real.close()
 				failure_log_shown = True
 
 		# Dump mod_echo output now since it tends to flood the terminal.
@@ -1228,9 +1162,6 @@ class Scheduler(PollScheduler):
 
 	def _locate_failure_log(self, failed_pkg):
 
-		build_dir = failed_pkg.build_dir
-		log_file = None
-
 		log_paths = [failed_pkg.build_log]
 
 		for log_path in log_paths:
@@ -1272,7 +1203,7 @@ class Scheduler(PollScheduler):
 
 		# Skip this if $ROOT != / since it shouldn't matter if there
 		# are unsatisfied system runtime deps in this case.
-		if pkg.root != '/':
+		if pkg.root_config.settings["ROOT"] != "/":
 			return
 
 		completed_tasks = self._completed_tasks
@@ -1350,8 +1281,6 @@ class Scheduler(PollScheduler):
 		if pkg.installed:
 			return
 
-		self._restart_if_necessary(pkg)
-
 		# Call mtimedb.commit() after each merge so that
 		# --resume still works after being interrupted
 		# by reboot, sigkill or similar.
@@ -1411,12 +1340,16 @@ class Scheduler(PollScheduler):
 
 	def _merge(self):
 
+		if self._opts_no_background.intersection(self.myopts):
+			self._set_max_jobs(1)
+
 		self._add_prefetchers()
 		self._add_packages()
-		pkg_queue = self._pkg_queue
 		failed_pkgs = self._failed_pkgs
 		portage.locks._quiet = self._background
 		portage.elog.add_listener(self._elog_listener)
+		display_timeout_id = self.sched_iface.timeout_add(
+			self._max_display_latency, self._status_display.display)
 		rval = os.EX_OK
 
 		try:
@@ -1425,6 +1358,7 @@ class Scheduler(PollScheduler):
 			self._main_loop_cleanup()
 			portage.locks._quiet = False
 			portage.elog.remove_listener(self._elog_listener)
+			self.sched_iface.source_remove(display_timeout_id)
 			if failed_pkgs:
 				rval = failed_pkgs[-1].returncode
 
@@ -1505,7 +1439,7 @@ class Scheduler(PollScheduler):
 			merge order
 		@type later: set
 		@rtype: bool
-		@returns: True if the package is dependent, False otherwise.
+		@return: True if the package is dependent, False otherwise.
 		"""
 
 		graph = self._digraph
@@ -1553,24 +1487,7 @@ class Scheduler(PollScheduler):
 		return temp_settings
 
 	def _deallocate_config(self, settings):
-		self._config_pool[settings["ROOT"]].append(settings)
-
-	def _main_loop(self):
-
-		# Only allow 1 job max if a restart is scheduled
-		# due to portage update.
-		if self._is_restart_scheduled() or \
-			self._opts_no_background.intersection(self.myopts):
-			self._set_max_jobs(1)
-
-		while self._schedule():
-			self._poll_loop()
-
-		while True:
-			self._schedule()
-			if not self._is_work_scheduled():
-				break
-			self._poll_loop()
+		self._config_pool[settings['EROOT']].append(settings)
 
 	def _keep_scheduling(self):
 		return bool(not self._terminated_tasks and self._pkg_queue and \
@@ -1583,6 +1500,8 @@ class Scheduler(PollScheduler):
 
 		while True:
 
+			state_change = 0
+
 			# When the number of jobs and merges drops to zero,
 			# process a single merge from _merge_wait_queue if
 			# it's not empty. We only process one since these are
@@ -1593,37 +1512,34 @@ class Scheduler(PollScheduler):
 				not self._task_queues.merge):
 				task = self._merge_wait_queue.popleft()
 				task.addExitListener(self._merge_wait_exit_handler)
+				self._merge_wait_scheduled.append(task)
 				self._task_queues.merge.add(task)
 				self._status_display.merges = len(self._task_queues.merge)
-				self._merge_wait_scheduled.append(task)
+				state_change += 1
 
-			self._schedule_tasks_imp()
-			self._status_display.display()
+			if self._schedule_tasks_imp():
+				state_change += 1
 
-			state_change = 0
-			for q in self._task_queues.values():
-				if q.schedule():
-					state_change += 1
+			self._status_display.display()
 
 			# Cancel prefetchers if they're the only reason
 			# the main poll loop is still running.
 			if self._failed_pkgs and not self._build_opts.fetchonly and \
 				not self._is_work_scheduled() and \
 				self._task_queues.fetch:
+				# Since this happens asynchronously, it doesn't count in
+				# state_change (counting it triggers an infinite loop).
 				self._task_queues.fetch.clear()
-				state_change += 1
 
 			if not (state_change or \
 				(self._merge_wait_queue and not self._jobs and
 				not self._task_queues.merge)):
 				break
 
-		return self._keep_scheduling()
-
 	def _job_delay(self):
 		"""
 		@rtype: bool
-		@returns: True if job scheduling should be delayed, False otherwise.
+		@return: True if job scheduling should be delayed, False otherwise.
 		"""
 
 		if self._jobs and self._max_load is not None:
@@ -1641,7 +1557,7 @@ class Scheduler(PollScheduler):
 	def _schedule_tasks_imp(self):
 		"""
 		@rtype: bool
-		@returns: True if state changed, False otherwise.
+		@return: True if state changed, False otherwise.
 		"""
 
 		state_change = 0
@@ -1709,7 +1625,14 @@ class Scheduler(PollScheduler):
 					"installed", pkg.root_config, installed=True,
 					operation="uninstall")
 
-		prefetcher = self._prefetchers.pop(pkg, None)
+		try:
+			prefetcher = self._prefetchers.pop(pkg, None)
+		except KeyError:
+			# KeyError observed with PyPy 1.8, despite None given as default.
+			# Note that PyPy 1.8 has the same WeakValueDictionary code as
+			# CPython 2.7, so it may be possible for CPython to raise KeyError
+			# here as well.
+			prefetcher = None
 		if prefetcher is not None and not prefetcher.isAlive():
 			try:
 				self._task_queues.fetch._task_queue.remove(prefetcher)
@@ -1738,7 +1661,7 @@ class Scheduler(PollScheduler):
 		pkg = failed_pkg.pkg
 		msg = "%s to %s %s" % \
 			(bad("Failed"), action, colorize("INFORM", pkg.cpv))
-		if pkg.root != "/":
+		if pkg.root_config.settings["ROOT"] != "/":
 			msg += " %s %s" % (preposition, pkg.root)
 
 		log_path = self._locate_failure_log(failed_pkg)
@@ -1791,7 +1714,7 @@ class Scheduler(PollScheduler):
 		Use the current resume list to calculate a new one,
 		dropping any packages with unsatisfied deps.
 		@rtype: bool
-		@returns: True if successful, False otherwise.
+		@return: True if successful, False otherwise.
 		"""
 		print(colorize("GOOD", "*** Resuming merge..."))
 
@@ -1868,7 +1791,7 @@ class Scheduler(PollScheduler):
 			pkg = task
 			msg = "emerge --keep-going:" + \
 				" %s" % (pkg.cpv,)
-			if pkg.root != "/":
+			if pkg.root_config.settings["ROOT"] != "/":
 				msg += " for %s" % (pkg.root,)
 			msg += " dropped due to unsatisfied dependency."
 			for line in textwrap.wrap(msg, msg_width):
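
The try/except around the WeakValueDictionary pop() above guards against a real race: the dictionary holds weak references to its values, so a referent can be collected between pop()'s internal membership test and the value lookup, and pop(key, default) may then raise KeyError anyway (observed on PyPy 1.8; the comment notes CPython 2.7 shares the same WeakValueDictionary code). A minimal standalone sketch of the same guard, not part of the commit:

    import weakref

    class _Job(object):
        pass  # any ordinary instance is weak-referenceable

    prefetchers = weakref.WeakValueDictionary()
    job = _Job()
    prefetchers["pkg"] = job
    del job  # the value may now vanish at any garbage-collection point

    try:
        prefetcher = prefetchers.pop("pkg", None)
    except KeyError:
        # pop() can race against collection of the weak referent,
        # so treat a late KeyError exactly like the default.
        prefetcher = None

    assert prefetcher is None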

diff --git a/portage_with_autodep/pym/_emerge/Scheduler.pyo b/portage_with_autodep/pym/_emerge/Scheduler.pyo
new file mode 100644
index 0000000..5555703
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/Scheduler.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py
index c1c98c4..8090893 100644
--- a/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py
+++ b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py
@@ -1,13 +1,15 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-import sys
-from _emerge.SlotObject import SlotObject
 from collections import deque
+import sys
+
+from portage.util.SlotObject import SlotObject
+
 class SequentialTaskQueue(SlotObject):
 
 	__slots__ = ("max_jobs", "running_tasks") + \
-		("_dirty", "_scheduling", "_task_queue")
+		("_scheduling", "_task_queue")
 
 	def __init__(self, **kwargs):
 		SlotObject.__init__(self, **kwargs)
@@ -15,50 +17,34 @@ class SequentialTaskQueue(SlotObject):
 		self.running_tasks = set()
 		if self.max_jobs is None:
 			self.max_jobs = 1
-		self._dirty = True
 
 	def add(self, task):
 		self._task_queue.append(task)
-		self._dirty = True
+		self.schedule()
 
 	def addFront(self, task):
 		self._task_queue.appendleft(task)
-		self._dirty = True
+		self.schedule()
 
 	def schedule(self):
 
-		if not self._dirty:
-			return False
-
-		if not self:
-			return False
-
 		if self._scheduling:
 			# Ignore any recursive schedule() calls triggered via
 			# self._task_exit().
-			return False
+			return
 
 		self._scheduling = True
-
-		task_queue = self._task_queue
-		running_tasks = self.running_tasks
-		max_jobs = self.max_jobs
-		state_changed = False
-
-		while task_queue and \
-			(max_jobs is True or len(running_tasks) < max_jobs):
-			task = task_queue.popleft()
-			cancelled = getattr(task, "cancelled", None)
-			if not cancelled:
-				running_tasks.add(task)
-				task.addExitListener(self._task_exit)
-				task.start()
-			state_changed = True
-
-		self._dirty = False
-		self._scheduling = False
-
-		return state_changed
+		try:
+			while self._task_queue and (self.max_jobs is True or
+				len(self.running_tasks) < self.max_jobs):
+				task = self._task_queue.popleft()
+				cancelled = getattr(task, "cancelled", None)
+				if not cancelled:
+					self.running_tasks.add(task)
+					task.addExitListener(self._task_exit)
+					task.start()
+		finally:
+			self._scheduling = False
 
 	def _task_exit(self, task):
 		"""
@@ -68,16 +54,22 @@ class SequentialTaskQueue(SlotObject):
 		"""
 		self.running_tasks.remove(task)
 		if self._task_queue:
-			self._dirty = True
+			self.schedule()
 
 	def clear(self):
+		"""
+		Clear the task queue and asynchronously terminate any running tasks.
+		"""
 		self._task_queue.clear()
-		running_tasks = self.running_tasks
-		while running_tasks:
-			task = running_tasks.pop()
-			task.removeExitListener(self._task_exit)
+		for task in list(self.running_tasks):
 			task.cancel()
-		self._dirty = False
+
+	def wait(self):
+		"""
+		Synchronously wait for all running tasks to exit.
+		"""
+		while self.running_tasks:
+			next(iter(self.running_tasks)).wait()
 
 	def __bool__(self):
 		return bool(self._task_queue or self.running_tasks)
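
The rewrite above trades the lazy _dirty flag for an eager drain: add() and addFront() call schedule() immediately, and the _scheduling flag plus try/finally turn the recursive schedule() calls that _task_exit() can trigger into harmless no-ops. A stripped-down sketch of that reentrancy-guard pattern, with a hypothetical DemoTask standing in for real tasks:

    from collections import deque

    class TinyQueue(object):
        def __init__(self, max_jobs=1):
            self._queue = deque()
            self.running = set()
            self.max_jobs = max_jobs
            self._scheduling = False

        def add(self, task):
            self._queue.append(task)
            self.schedule()  # eager: no dirty flag to track

        def schedule(self):
            if self._scheduling:
                return  # ignore recursive calls from _task_exit()
            self._scheduling = True
            try:
                while self._queue and len(self.running) < self.max_jobs:
                    task = self._queue.popleft()
                    self.running.add(task)
                    task.start()
            finally:
                self._scheduling = False  # released even if start() raises

        def _task_exit(self, task):
            self.running.discard(task)
            if self._queue:
                self.schedule()  # safe: a no-op while the drain loop runs

    class DemoTask(object):
        def __init__(self, name, queue):
            self.name, self._queue = name, queue
        def start(self):  # completes synchronously, re-entering schedule()
            print("ran %s" % self.name)
            self._queue._task_exit(self)

    q = TinyQueue()
    for name in ("a", "b", "c"):
        q.add(DemoTask(name, q))  # prints: ran a / ran b / ran c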

diff --git a/portage_with_autodep/pym/_emerge/SequentialTaskQueue.pyo b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.pyo
new file mode 100644
index 0000000..3ab65c9
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/SetArg.pyo b/portage_with_autodep/pym/_emerge/SetArg.pyo
new file mode 100644
index 0000000..5a3d9d9
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/SetArg.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/SpawnProcess.py b/portage_with_autodep/pym/_emerge/SpawnProcess.py
index b72971c..9fbc964 100644
--- a/portage_with_autodep/pym/_emerge/SpawnProcess.py
+++ b/portage_with_autodep/pym/_emerge/SpawnProcess.py
@@ -26,29 +26,16 @@ class SpawnProcess(SubProcess):
 		"path_lookup", "pre_exec")
 
 	__slots__ = ("args",) + \
-		_spawn_kwarg_names + ("_selinux_type",)
+		_spawn_kwarg_names + ("_log_file_real", "_selinux_type",)
 
 	_file_names = ("log", "process", "stdout")
 	_files_dict = slot_dict_class(_file_names, prefix="")
 
 	def _start(self):
 
-		if self.cancelled:
-			return
-
 		if self.fd_pipes is None:
 			self.fd_pipes = {}
 		fd_pipes = self.fd_pipes
-		fd_pipes.setdefault(0, sys.stdin.fileno())
-		fd_pipes.setdefault(1, sys.stdout.fileno())
-		fd_pipes.setdefault(2, sys.stderr.fileno())
-
-		# flush any pending output
-		for fd in fd_pipes.values():
-			if fd == sys.stdout.fileno():
-				sys.stdout.flush()
-			if fd == sys.stderr.fileno():
-				sys.stderr.flush()
 
 		self._files = self._files_dict()
 		files = self._files
@@ -56,34 +43,46 @@ class SpawnProcess(SubProcess):
 		master_fd, slave_fd = self._pipe(fd_pipes)
 		fcntl.fcntl(master_fd, fcntl.F_SETFL,
 			fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+		files.process = master_fd
 
 		logfile = None
 		if self._can_log(slave_fd):
 			logfile = self.logfile
 
 		null_input = None
-		fd_pipes_orig = fd_pipes.copy()
-		if self.background:
+		if not self.background or 0 in fd_pipes:
+			# Subclasses such as AbstractEbuildProcess may have already passed
+			# in a null file descriptor in fd_pipes, so use that when given.
+			pass
+		else:
 			# TODO: Use job control functions like tcsetpgrp() to control
 			# access to stdin. Until then, use /dev/null so that any
 			# attempts to read from stdin will immediately return EOF
 			# instead of blocking indefinitely.
-			null_input = open('/dev/null', 'rb')
-			fd_pipes[0] = null_input.fileno()
-		else:
-			fd_pipes[0] = fd_pipes_orig[0]
+			null_input = os.open('/dev/null', os.O_RDWR)
+			fd_pipes[0] = null_input
+
+		fd_pipes.setdefault(0, sys.stdin.fileno())
+		fd_pipes.setdefault(1, sys.stdout.fileno())
+		fd_pipes.setdefault(2, sys.stderr.fileno())
+
+		# flush any pending output
+		for fd in fd_pipes.values():
+			if fd == sys.stdout.fileno():
+				sys.stdout.flush()
+			if fd == sys.stderr.fileno():
+				sys.stderr.flush()
 
-		# WARNING: It is very important to use unbuffered mode here,
-		# in order to avoid issue 5380 with python3.
-		files.process = os.fdopen(master_fd, 'rb', 0)
 		if logfile is not None:
 
+			fd_pipes_orig = fd_pipes.copy()
 			fd_pipes[1] = slave_fd
 			fd_pipes[2] = slave_fd
 
 			files.log = open(_unicode_encode(logfile,
 				encoding=_encodings['fs'], errors='strict'), mode='ab')
 			if logfile.endswith('.gz'):
+				self._log_file_real = files.log
 				files.log = gzip.GzipFile(filename='', mode='ab',
 					fileobj=files.log)
 
@@ -92,7 +91,7 @@ class SpawnProcess(SubProcess):
 				mode=0o660)
 
 			if not self.background:
-				files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
+				files.stdout = os.dup(fd_pipes_orig[1])
 
 			output_handler = self._output_handler
 
@@ -116,7 +115,7 @@ class SpawnProcess(SubProcess):
 		kwargs["returnpid"] = True
 		kwargs.pop("logfile", None)
 
-		self._reg_id = self.scheduler.register(files.process.fileno(),
+		self._reg_id = self.scheduler.register(files.process,
 			self._registered_events, output_handler)
 		self._registered = True
 
@@ -124,7 +123,7 @@ class SpawnProcess(SubProcess):
 
 		os.close(slave_fd)
 		if null_input is not None:
-			null_input.close()
+			os.close(null_input)
 
 		if isinstance(retval, int):
 			# spawn failed
@@ -161,22 +160,30 @@ class SpawnProcess(SubProcess):
 	def _output_handler(self, fd, event):
 
 		files = self._files
-		buf = self._read_buf(files.process, event)
+		while True:
+			buf = self._read_buf(fd, event)
+
+			if buf is None:
+				# not a POLLIN event, EAGAIN, etc...
+				break
 
-		if buf is not None:
+			if not buf:
+				# EOF
+				self._unregister()
+				self.wait()
+				break
 
-			if buf:
+			else:
 				if not self.background:
 					write_successful = False
 					failures = 0
 					while True:
 						try:
 							if not write_successful:
-								buf.tofile(files.stdout)
+								os.write(files.stdout, buf)
 								write_successful = True
-							files.stdout.flush()
 							break
-						except IOError as e:
+						except OSError as e:
 							if e.errno != errno.EAGAIN:
 								raise
 							del e
@@ -198,22 +205,17 @@ class SpawnProcess(SubProcess):
 							# inherit stdio file descriptors from portage
 							# (maybe it can't be avoided with
 							# PROPERTIES=interactive).
-							fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
-								fcntl.fcntl(files.stdout.fileno(),
+							fcntl.fcntl(files.stdout, fcntl.F_SETFL,
+								fcntl.fcntl(files.stdout,
 								fcntl.F_GETFL) ^ os.O_NONBLOCK)
 
-				try:
-					buf.tofile(files.log)
-				except TypeError:
-					# array.tofile() doesn't work with GzipFile
-					files.log.write(buf.tostring())
+				files.log.write(buf)
 				files.log.flush()
-			else:
-				self._unregister()
-				self.wait()
 
 		self._unregister_if_appropriate(event)
 
+		return True
+
 	def _dummy_handler(self, fd, event):
 		"""
 		This method is mainly interested in detecting EOF, since
 		the only purpose of the pipe is to allow the scheduler to
 		monitor the process from inside a poll() loop.
 		monitor the process from inside a poll() loop.
 		"""
 
-		buf = self._read_buf(self._files.process, event)
+		while True:
+			buf = self._read_buf(fd, event)
 
-		if buf is not None:
+			if buf is None:
+				# not a POLLIN event, EAGAIN, etc...
+				break
 
-			if buf:
-				pass
-			else:
+			if not buf:
+				# EOF
 				self._unregister()
 				self.wait()
+				break
 
 		self._unregister_if_appropriate(event)
 
+		return True
+
+	def _unregister(self):
+		super(SpawnProcess, self)._unregister()
+		if self._log_file_real is not None:
+			# Avoid "ResourceWarning: unclosed file" since python 3.2.
+			self._log_file_real.close()
+			self._log_file_real = None
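
The new _log_file_real slot exists because gzip.GzipFile(fileobj=...) never closes the file object it wraps, so closing only the GzipFile leaks the real file and provokes "ResourceWarning: unclosed file" on Python 3.2+. A minimal sketch of the wrap-and-close-both pattern, using a temporary path rather than a real build log:

    import gzip
    import os
    import tempfile

    fd, log_path = tempfile.mkstemp(suffix=".log.gz")
    os.close(fd)

    log_file_real = None
    log_file = open(log_path, "ab")
    if log_path.endswith(".gz"):
        log_file_real = log_file  # remember the real file underneath
        log_file = gzip.GzipFile(filename="", mode="ab", fileobj=log_file)

    log_file.write(b"hello\n")
    log_file.close()  # closes the gzip wrapper only
    if log_file_real is not None:
        log_file_real.close()  # the wrapped file must be closed separately

    with gzip.open(log_path, "rb") as f:
        assert f.read() == b"hello\n"
    os.unlink(log_path)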

diff --git a/portage_with_autodep/pym/_emerge/SpawnProcess.pyo b/portage_with_autodep/pym/_emerge/SpawnProcess.pyo
new file mode 100644
index 0000000..7a6142e
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/SpawnProcess.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/SubProcess.py b/portage_with_autodep/pym/_emerge/SubProcess.py
index b99cf0b..76b313f 100644
--- a/portage_with_autodep/pym/_emerge/SubProcess.py
+++ b/portage_with_autodep/pym/_emerge/SubProcess.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from portage import os
@@ -16,6 +16,10 @@ class SubProcess(AbstractPollTask):
 	# serve this purpose alone.
 	_dummy_pipe_fd = 9
 
+	# This is how much time we allow for waitpid to succeed after
+	# we've sent a kill signal to our subprocess.
+	_cancel_timeout = 1000 # 1 second
+
 	def _poll(self):
 		if self.returncode is not None:
 			return self.returncode
@@ -60,8 +64,7 @@ class SubProcess(AbstractPollTask):
 
 		if self._registered:
 			if self.cancelled:
-				timeout = 1000
-				self.scheduler.schedule(self._reg_id, timeout=timeout)
+				self._wait_loop(timeout=self._cancel_timeout)
 				if self._registered:
 					try:
 						os.kill(self.pid, signal.SIGKILL)
@@ -69,41 +72,39 @@ class SubProcess(AbstractPollTask):
 						if e.errno != errno.ESRCH:
 							raise
 						del e
-					self.scheduler.schedule(self._reg_id, timeout=timeout)
+					self._wait_loop(timeout=self._cancel_timeout)
 					if self._registered:
 						self._orphan_process_warn()
 			else:
-				self.scheduler.schedule(self._reg_id)
-			self._unregister()
+				self._wait_loop()
+
 			if self.returncode is not None:
 				return self.returncode
 
-		try:
-			# With waitpid and WNOHANG, only check the
-			# first element of the tuple since the second
-			# element may vary (bug #337465).
-			wait_retval = os.waitpid(self.pid, os.WNOHANG)
-		except OSError as e:
-			if e.errno != errno.ECHILD:
-				raise
-			del e
-			self._set_returncode((self.pid, 1 << 8))
-		else:
-			if wait_retval[0] != 0:
-				self._set_returncode(wait_retval)
-			else:
-				try:
-					wait_retval = os.waitpid(self.pid, 0)
-				except OSError as e:
-					if e.errno != errno.ECHILD:
-						raise
-					del e
-					self._set_returncode((self.pid, 1 << 8))
-				else:
-					self._set_returncode(wait_retval)
+		if not isinstance(self.pid, int):
+			# Get debug info for bug #403697.
+			raise AssertionError(
+				"%s: pid is non-integer: %s" %
+				(self.__class__.__name__, repr(self.pid)))
+
+		self._waitpid_loop()
 
 		return self.returncode
 
+	def _waitpid_loop(self):
+		source_id = self.scheduler.child_watch_add(
+			self.pid, self._waitpid_cb)
+		try:
+			while self.returncode is None:
+				self.scheduler.iteration()
+		finally:
+			self.scheduler.source_remove(source_id)
+
+	def _waitpid_cb(self, pid, condition, user_data=None):
+		if pid != self.pid:
+			raise AssertionError("expected pid %s, got %s" % (self.pid, pid))
+		self._set_returncode((pid, condition))
+
 	def _orphan_process_warn(self):
 		pass
 
@@ -120,7 +121,10 @@ class SubProcess(AbstractPollTask):
 
 		if self._files is not None:
 			for f in self._files.values():
-				f.close()
+				if isinstance(f, int):
+					os.close(f)
+				else:
+					f.close()
 			self._files = None
 
 	def _set_returncode(self, wait_retval):
@@ -129,6 +133,7 @@ class SubProcess(AbstractPollTask):
 		subprocess.Popen.returncode: A negative value -N indicates
 		that the child was terminated by signal N (Unix only).
 		"""
+		self._unregister()
 
 		pid, status = wait_retval
 

diff --git a/portage_with_autodep/pym/_emerge/SubProcess.pyo b/portage_with_autodep/pym/_emerge/SubProcess.pyo
new file mode 100644
index 0000000..26e13e1
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/SubProcess.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/Task.py b/portage_with_autodep/pym/_emerge/Task.py
index efbe3a9..40f5066 100644
--- a/portage_with_autodep/pym/_emerge/Task.py
+++ b/portage_with_autodep/pym/_emerge/Task.py
@@ -1,7 +1,8 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-from _emerge.SlotObject import SlotObject
+from portage.util.SlotObject import SlotObject
+
 class Task(SlotObject):
 	__slots__ = ("_hash_key", "_hash_value")
 

diff --git a/portage_with_autodep/pym/_emerge/Task.pyo b/portage_with_autodep/pym/_emerge/Task.pyo
new file mode 100644
index 0000000..2958cb1
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/Task.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/TaskScheduler.py b/portage_with_autodep/pym/_emerge/TaskScheduler.py
index 83c0cbe..583bfe3 100644
--- a/portage_with_autodep/pym/_emerge/TaskScheduler.py
+++ b/portage_with_autodep/pym/_emerge/TaskScheduler.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from _emerge.QueueScheduler import QueueScheduler
@@ -11,13 +11,14 @@ class TaskScheduler(object):
 	add tasks and call run(). The run() method returns when no tasks remain.
 	"""
 
-	def __init__(self, max_jobs=None, max_load=None):
+	def __init__(self, main=True, max_jobs=None, max_load=None):
 		self._queue = SequentialTaskQueue(max_jobs=max_jobs)
-		self._scheduler = QueueScheduler(
+		self._scheduler = QueueScheduler(main=main,
 			max_jobs=max_jobs, max_load=max_load)
 		self.sched_iface = self._scheduler.sched_iface
 		self.run = self._scheduler.run
 		self.clear = self._scheduler.clear
+		self.wait = self._queue.wait
 		self._scheduler.add(self._queue)
 
 	def add(self, task):
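
TaskScheduler exposes run(), clear() and the new wait() by assigning bound methods straight onto the instance instead of writing wrapper defs. A tiny sketch of that delegation style, with hypothetical classes:

    class _Engine(object):
        def run(self):
            print("engine running")

    class Facade(object):
        def __init__(self):
            self._engine = _Engine()
            self.run = self._engine.run  # bound method, no wrapper needed

    Facade().run()  # prints: engine running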

diff --git a/portage_with_autodep/pym/_emerge/TaskScheduler.pyo b/portage_with_autodep/pym/_emerge/TaskScheduler.pyo
new file mode 100644
index 0000000..8b84de7
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/TaskScheduler.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/TaskSequence.pyo b/portage_with_autodep/pym/_emerge/TaskSequence.pyo
new file mode 100644
index 0000000..b98196e
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/TaskSequence.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/UninstallFailure.pyo b/portage_with_autodep/pym/_emerge/UninstallFailure.pyo
new file mode 100644
index 0000000..9f1c88b
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/UninstallFailure.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/UnmergeDepPriority.pyo b/portage_with_autodep/pym/_emerge/UnmergeDepPriority.pyo
new file mode 100644
index 0000000..b163ed7
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/UnmergeDepPriority.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/UseFlagDisplay.pyo b/portage_with_autodep/pym/_emerge/UseFlagDisplay.pyo
new file mode 100644
index 0000000..005b007
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/UseFlagDisplay.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/__init__.pyo b/portage_with_autodep/pym/_emerge/__init__.pyo
new file mode 100644
index 0000000..fba4ca5
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/__init__.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.pyo b/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.pyo
new file mode 100644
index 0000000..8ad61b2
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py
index eab4168..9ac65b8 100644
--- a/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py
+++ b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py
@@ -8,7 +8,7 @@ def _flush_elog_mod_echo():
 	Dump the mod_echo output now so that our other
 	notifications are shown last.
 	@rtype: bool
-	@returns: True if messages were shown, False otherwise.
+	@return: True if messages were shown, False otherwise.
 	"""
 	messages_shown = bool(mod_echo._items)
 	mod_echo.finalize()

diff --git a/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.pyo b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.pyo
new file mode 100644
index 0000000..f211d41
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/actions.py b/portage_with_autodep/pym/_emerge/actions.py
index 2166963..eaf5a15 100644
--- a/portage_with_autodep/pym/_emerge/actions.py
+++ b/portage_with_autodep/pym/_emerge/actions.py
@@ -1,18 +1,19 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
 
 import errno
 import logging
+import operator
 import platform
 import pwd
 import random
 import re
-import shutil
 import signal
 import socket
 import stat
+import subprocess
 import sys
 import tempfile
 import textwrap
@@ -20,20 +21,27 @@ import time
 from itertools import chain
 
 import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.news:count_unread_news,display_news_notifications',
+)
+
+from portage.localization import _
 from portage import os
-from portage import subprocess_getstatusoutput
-from portage import _unicode_decode
+from portage import shutil
+from portage import eapi_is_supported, _unicode_decode
 from portage.cache.cache_errors import CacheError
-from portage.const import GLOBAL_CONFIG_PATH, NEWS_LIB_PATH
+from portage.const import GLOBAL_CONFIG_PATH
 from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_SET_CONFIG
 from portage.dbapi.dep_expand import dep_expand
 from portage.dbapi._expand_new_virt import expand_new_virt
 from portage.dep import Atom, extended_cp_match
+from portage.eclass_cache import hashed_path
 from portage.exception import InvalidAtom
 from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
 	red, yellow
 good = create_color_func("GOOD")
 bad = create_color_func("BAD")
+warn = create_color_func("WARN")
 from portage.package.ebuild._ipc.QueryCommand import QueryCommand
 from portage.package.ebuild.doebuild import _check_temp_dir
 from portage._sets import load_default_config, SETPREFIX
@@ -180,8 +188,7 @@ def action_build(settings, trees, mtimedb,
 			" entire repository or category at once."
 		prefix = bad(" * ")
 		writemsg(prefix + "\n")
-		from textwrap import wrap
-		for line in wrap(msg, 72):
+		for line in textwrap.wrap(msg, 72):
 			writemsg("%s%s\n" % (prefix, line))
 		writemsg(prefix + "\n")
 
@@ -209,7 +216,6 @@ def action_build(settings, trees, mtimedb,
 			if isinstance(e, depgraph.UnsatisfiedResumeDep):
 				mydepgraph = e.depgraph
 
-			from textwrap import wrap
 			from portage.output import EOutput
 			out = EOutput()
 
@@ -248,7 +254,7 @@ def action_build(settings, trees, mtimedb,
 					"to skip the first package in the list and " + \
 					"any other packages that may be " + \
 					"masked or have missing dependencies."
-				for line in wrap(msg, 72):
+				for line in textwrap.wrap(msg, 72):
 					out.eerror(line)
 			elif isinstance(e, portage.exception.PackageNotFound):
 				out.eerror("An expected package is " + \
@@ -258,7 +264,7 @@ def action_build(settings, trees, mtimedb,
 					"packages that are no longer " + \
 					"available. Please restart/continue " + \
 					"the operation manually."
-				for line in wrap(msg, 72):
+				for line in textwrap.wrap(msg, 72):
 					out.eerror(line)
 
 		if success:
@@ -291,7 +297,7 @@ def action_build(settings, trees, mtimedb,
 			success, mydepgraph, favorites = backtrack_depgraph(
 				settings, trees, myopts, myparams, myaction, myfiles, spinner)
 		except portage.exception.PackageSetNotFound as e:
-			root_config = trees[settings["ROOT"]]["root_config"]
+			root_config = trees[settings['EROOT']]['root_config']
 			display_missing_pkg_set(root_config, e.value)
 			return 1
 
@@ -329,7 +335,7 @@ def action_build(settings, trees, mtimedb,
 					mergecount += 1
 
 			if mergecount==0:
-				sets = trees[settings["ROOT"]]["root_config"].sets
+				sets = trees[settings['EROOT']]['root_config'].sets
 				world_candidates = None
 				if "selective" in myparams and \
 					not oneshot and favorites:
@@ -362,7 +368,7 @@ def action_build(settings, trees, mtimedb,
 			print()
 			print("Quitting.")
 			print()
-			return os.EX_OK
+			return 128 + signal.SIGINT
 		# Don't ask again (e.g. when auto-cleaning packages after merge)
 		myopts.pop("--ask", None)
 
@@ -439,7 +445,7 @@ def action_build(settings, trees, mtimedb,
 		if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
 			if "yes" == settings.get("AUTOCLEAN"):
 				portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
-				unmerge(trees[settings["ROOT"]]["root_config"],
+				unmerge(trees[settings['EROOT']]['root_config'],
 					myopts, "clean", [],
 					ldpath_mtimes, autoclean=1)
 			else:
@@ -454,7 +460,7 @@ def action_config(settings, trees, myopts, myfiles):
 	if len(myfiles) != 1:
 		print(red("!!! config can only take a single package atom at this time\n"))
 		sys.exit(1)
-	if not is_valid_package_atom(myfiles[0]):
+	if not is_valid_package_atom(myfiles[0], allow_repo=True):
 		portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
 			noiselevel=-1)
 		portage.writemsg("!!! Please check ebuild(5) for full details.\n")
@@ -462,7 +468,7 @@ def action_config(settings, trees, myopts, myfiles):
 		sys.exit(1)
 	print()
 	try:
-		pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
+		pkgs = trees[settings['EROOT']]['vartree'].dbapi.match(myfiles[0])
 	except portage.exception.AmbiguousPackageName as e:
 		# Multiple matches thrown from cpv_expand
 		pkgs = e.args[0]
@@ -482,7 +488,7 @@ def action_config(settings, trees, myopts, myfiles):
 			options.append("X")
 			idx = userquery("Selection?", enter_invalid, responses=options)
 			if idx == "X":
-				sys.exit(0)
+				sys.exit(128 + signal.SIGINT)
 			pkg = pkgs[int(idx)-1]
 		else:
 			print("The following packages available:")
@@ -496,21 +502,20 @@ def action_config(settings, trees, myopts, myfiles):
 	print()
 	if "--ask" in myopts:
 		if userquery("Ready to configure %s?" % pkg, enter_invalid) == "No":
-			sys.exit(0)
+			sys.exit(128 + signal.SIGINT)
 	else:
 		print("Configuring pkg...")
 	print()
-	ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
+	ebuildpath = trees[settings['EROOT']]['vartree'].dbapi.findname(pkg)
 	mysettings = portage.config(clone=settings)
-	vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
+	vardb = trees[mysettings['EROOT']]['vartree'].dbapi
 	debug = mysettings.get("PORTAGE_DEBUG") == "1"
-	retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
-		mysettings,
+	retval = portage.doebuild(ebuildpath, "config", settings=mysettings,
 		debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
-		mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
+		mydbapi = trees[settings['EROOT']]['vartree'].dbapi, tree="vartree")
 	if retval == os.EX_OK:
-		portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
-			mysettings, debug=debug, mydbapi=vardb, tree="vartree")
+		portage.doebuild(ebuildpath, "clean", settings=mysettings,
+			debug=debug, mydbapi=vardb, tree="vartree")
 	print()
 
 def action_depclean(settings, trees, ldpath_mtimes,
@@ -550,7 +555,7 @@ def action_depclean(settings, trees, ldpath_mtimes,
 		for x in msg:
 			portage.writemsg_stdout(colorize("WARN", " * ") + x)
 
-	root_config = trees[settings['ROOT']]['root_config']
+	root_config = trees[settings['EROOT']]['root_config']
 	vardb = root_config.trees['vartree'].dbapi
 
 	args_set = InternalPackageSet(allow_repo=True)
@@ -582,15 +587,15 @@ def action_depclean(settings, trees, ldpath_mtimes,
 		return rval
 
 	if cleanlist:
-		unmerge(root_config, myopts, "unmerge",
+		rval = unmerge(root_config, myopts, "unmerge",
 			cleanlist, ldpath_mtimes, ordered=ordered,
 			scheduler=scheduler)
 
 	if action == "prune":
-		return
+		return rval
 
 	if not cleanlist and "--quiet" in myopts:
-		return
+		return rval
 
 	print("Packages installed:   " + str(len(vardb.cpv_all())))
 	print("Packages in world:    " + \
@@ -603,14 +608,17 @@ def action_depclean(settings, trees, ldpath_mtimes,
 	else:
 		print("Number removed:       "+str(len(cleanlist)))
 
+	return rval
+
 def calc_depclean(settings, trees, ldpath_mtimes,
 	myopts, action, args_set, spinner):
 	allow_missing_deps = bool(args_set)
 
 	debug = '--debug' in myopts
 	xterm_titles = "notitles" not in settings.features
-	myroot = settings["ROOT"]
-	root_config = trees[myroot]["root_config"]
+	root_len = len(settings["ROOT"])
+	eroot = settings['EROOT']
+	root_config = trees[eroot]["root_config"]
 	psets = root_config.setconfig.psets
 	deselect = myopts.get('--deselect') != 'n'
 	required_sets = {}
@@ -649,8 +657,8 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 	resolver_params = create_depgraph_params(myopts, "remove")
 	resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
 	resolver._load_vdb()
-	vardb = resolver._frozen_config.trees[myroot]["vartree"].dbapi
-	real_vardb = trees[myroot]["vartree"].dbapi
+	vardb = resolver._frozen_config.trees[eroot]["vartree"].dbapi
+	real_vardb = trees[eroot]["vartree"].dbapi
 
 	if action == "depclean":
 
@@ -705,7 +713,8 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 		# that are also matched by argument atoms, but do not remove
 		# them if they match the highest installed version.
 		for pkg in vardb:
-			spinner.update()
+			if spinner is not None:
+				spinner.update()
 			pkgs_for_cp = vardb.match_pkgs(pkg.cp)
 			if not pkgs_for_cp or pkg not in pkgs_for_cp:
 				raise AssertionError("package expected in matches: " + \
@@ -751,7 +760,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 				del e
 				required_sets['__excluded__'].add("=" + pkg.cpv)
 
-	success = resolver._complete_graph(required_sets={myroot:required_sets})
+	success = resolver._complete_graph(required_sets={eroot:required_sets})
 	writemsg_level("\b\b... done!\n")
 
 	resolver.display_problems()
@@ -937,7 +946,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 			consumers = {}
 
 			for lib in pkg_dblink.getcontents():
-				lib = lib[len(myroot):]
+				lib = lib[root_len:]
 				lib_key = linkmap._obj_key(lib)
 				lib_consumers = consumer_cache.get(lib_key)
 				if lib_consumers is None:
@@ -1053,9 +1062,8 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 				"the packages that pulled them in."
 
 			prefix = bad(" * ")
-			from textwrap import wrap
 			writemsg_level("".join(prefix + "%s\n" % line for \
-				line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
+				line in textwrap.wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
 
 			msg = []
 			for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
@@ -1095,7 +1103,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 
 			writemsg_level("\nCalculating dependencies  ")
 			success = resolver._complete_graph(
-				required_sets={myroot:required_sets})
+				required_sets={eroot:required_sets})
 			writemsg_level("\b\b... done!\n")
 			resolver.display_problems()
 			if not success:
@@ -1137,7 +1145,6 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 
 		for node in clean_set:
 			graph.add(node, None)
-			mydeps = []
 			for dep_type in dep_keys:
 				depstr = node.metadata[dep_type]
 				if not depstr:
@@ -1153,7 +1160,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 						% (priority,), noiselevel=-1, level=logging.DEBUG)
 
 				try:
-					atoms = resolver._select_atoms(myroot, depstr,
+					atoms = resolver._select_atoms(eroot, depstr,
 						myuse=node.use.enabled, parent=node,
 						priority=priority)[node]
 				except portage.exception.InvalidDependString:
@@ -1226,7 +1233,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 
 def action_deselect(settings, trees, opts, atoms):
 	enter_invalid = '--ask-enter-invalid' in opts
-	root_config = trees[settings['ROOT']]['root_config']
+	root_config = trees[settings['EROOT']]['root_config']
 	world_set = root_config.sets['selected']
 	if not hasattr(world_set, 'update'):
 		writemsg_level("World @selected set does not appear to be mutable.\n",
@@ -1291,7 +1298,7 @@ def action_deselect(settings, trees, opts, atoms):
 				prompt = "Would you like to remove these " + \
 					"packages from your world favorites?"
 				if userquery(prompt, enter_invalid) == 'No':
-					return os.EX_OK
+					return 128 + signal.SIGINT
 
 			remaining = set(world_set)
 			remaining.difference_update(discard_atoms)
@@ -1325,11 +1332,12 @@ def action_info(settings, trees, myopts, myfiles):
 
 	output_buffer = []
 	append = output_buffer.append
-	root_config = trees[settings['ROOT']]['root_config']
+	root_config = trees[settings['EROOT']]['root_config']
+	running_eroot = trees._running_eroot
 
-	append(getportageversion(settings["PORTDIR"], settings["ROOT"],
+	append(getportageversion(settings["PORTDIR"], None,
 		settings.profile_path, settings["CHOST"],
-		trees[settings["ROOT"]]["vartree"].dbapi))
+		trees[settings['EROOT']]["vartree"].dbapi))
 
 	header_width = 65
 	header_title = "System Settings"
@@ -1347,7 +1355,14 @@ def action_info(settings, trees, myopts, myfiles):
 		lastSync = "Unknown"
 	append("Timestamp of tree: %s" % (lastSync,))
 
-	output=subprocess_getstatusoutput("distcc --version")
+	try:
+		proc = subprocess.Popen(["distcc", "--version"],
+			stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+	except OSError:
+		output = (1, None)
+	else:
+		output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+		output = (proc.wait(), output)
 	if output[0] == os.EX_OK:
 		distcc_str = output[1].split("\n", 1)[0]
 		if "distcc" in settings.features:
@@ -1356,7 +1371,14 @@ def action_info(settings, trees, myopts, myfiles):
 			distcc_str += " [disabled]"
 		append(distcc_str)
 
-	output=subprocess_getstatusoutput("ccache -V")
+	try:
+		proc = subprocess.Popen(["ccache", "-V"],
+			stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+	except OSError:
+		output = (1, None)
+	else:
+		output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+		output = (proc.wait(), output)
 	if output[0] == os.EX_OK:
 		ccache_str = output[1].split("\n", 1)[0]
 		if "ccache" in settings.features:
@@ -1369,7 +1391,7 @@ def action_info(settings, trees, myopts, myfiles):
 	           "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
 	myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
 	atoms = []
-	vardb = trees["/"]["vartree"].dbapi
+	vardb = trees[running_eroot]['vartree'].dbapi
 	for x in myvars:
 		try:
 			x = Atom(x)
@@ -1382,7 +1404,7 @@ def action_info(settings, trees, myopts, myfiles):
 
 	myvars = sorted(set(atoms))
 
-	portdb = trees["/"]["porttree"].dbapi
+	portdb = trees[running_eroot]['porttree'].dbapi
 	main_repo = portdb.getRepositoryName(portdb.porttree_root)
 	cp_map = {}
 	cp_max_len = 0
@@ -1425,8 +1447,6 @@ def action_info(settings, trees, myopts, myfiles):
 		append("%s %s" % \
 			((cp + ":").ljust(cp_max_len + 1), versions))
 
-	libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
-
 	repos = portdb.settings.repositories
 	if "--verbose" in myopts:
 		append("Repositories:\n")
@@ -1463,9 +1483,6 @@ def action_info(settings, trees, myopts, myfiles):
 	myvars = portage.util.unique_array(myvars)
 	use_expand = settings.get('USE_EXPAND', '').split()
 	use_expand.sort()
-	use_expand_hidden = set(
-		settings.get('USE_EXPAND_HIDDEN', '').upper().split())
-	alphabetical_use = '--alphabetical' in myopts
 	unset_vars = []
 	myvars.sort()
 	for k in myvars:
@@ -1504,9 +1521,10 @@ def action_info(settings, trees, myopts, myfiles):
 	# See if we can find any packages installed matching the strings
 	# passed on the command line
 	mypkgs = []
-	vardb = trees[settings["ROOT"]]["vartree"].dbapi
-	portdb = trees[settings["ROOT"]]["porttree"].dbapi
-	bindb = trees[settings["ROOT"]]["bintree"].dbapi
+	eroot = settings['EROOT']
+	vardb = trees[eroot]["vartree"].dbapi
+	portdb = trees[eroot]['porttree'].dbapi
+	bindb = trees[eroot]["bintree"].dbapi
 	for x in myfiles:
 		match_found = False
 		installed_match = vardb.match(x)
@@ -1541,7 +1559,6 @@ def action_info(settings, trees, myopts, myfiles):
 		mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
 		auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
 		auxkeys.append('DEFINED_PHASES')
-		global_vals = {}
 		pkgsettings = portage.config(clone=settings)
 
 		# Loop through each package
@@ -1611,19 +1628,19 @@ def action_info(settings, trees, myopts, myfiles):
 				continue
 
 			if pkg_type == "installed":
-				portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
-					pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
-					mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
+				portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+					debug=(settings.get("PORTAGE_DEBUG", "") == 1),
+					mydbapi=trees[settings['EROOT']]["vartree"].dbapi,
 					tree="vartree")
 			elif pkg_type == "ebuild":
-				portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
-					pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
-					mydbapi=trees[settings["ROOT"]]["porttree"].dbapi,
+				portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+					debug=(settings.get("PORTAGE_DEBUG", "") == 1),
+					mydbapi=trees[settings['EROOT']]['porttree'].dbapi,
 					tree="porttree")
 			elif pkg_type == "binary":
-				portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
-					pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
-					mydbapi=trees[settings["ROOT"]]["bintree"].dbapi,
+				portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+					debug=(settings.get("PORTAGE_DEBUG", "") == 1),
+					mydbapi=trees[settings['EROOT']]["bintree"].dbapi,
 					tree="bintree")
 				shutil.rmtree(tmpdir)
 
@@ -1643,8 +1660,7 @@ def action_metadata(settings, portdb, myopts, porttrees=None):
 	if not os.path.exists(cachedir):
 		os.makedirs(cachedir)
 
-	auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
-	auxdbkeys = tuple(auxdbkeys)
+	auxdbkeys = portdb._known_keys
 
 	class TreeData(object):
 		__slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
@@ -1658,18 +1674,14 @@ def action_metadata(settings, portdb, myopts, porttrees=None):
 	porttrees_data = []
 	for path in porttrees:
 		src_db = portdb._pregen_auxdb.get(path)
-		if src_db is None and \
-			os.path.isdir(os.path.join(path, 'metadata', 'cache')):
-			src_db = portdb.metadbmodule(
-				path, 'metadata/cache', auxdbkeys, readonly=True)
-			try:
-				src_db.ec = portdb._repo_info[path].eclass_db
-			except AttributeError:
-				pass
+		if src_db is None:
+			# portdbapi does not populate _pregen_auxdb
+			# when FEATURES=metadata-transfer is enabled
+			src_db = portdb._create_pregen_cache(path)
 
 		if src_db is not None:
 			porttrees_data.append(TreeData(portdb.auxdb[path],
-				portdb._repo_info[path].eclass_db, path, src_db))
+				portdb.repositories.get_repo_for_location(path).eclass_db, path, src_db))
 
 	porttrees = [tree_data.path for tree_data in porttrees_data]
 
@@ -1704,42 +1716,45 @@ def action_metadata(settings, portdb, myopts, porttrees=None):
 	if onProgress is not None:
 		onProgress(maxval, curval)
 
-	from portage.cache.util import quiet_mirroring
-	from portage import eapi_is_supported, \
-		_validate_cache_for_unsupported_eapis
-
 	# TODO: Display error messages, but do not interfere with the progress bar.
 	# Here's how:
 	#  1) erase the progress bar
 	#  2) show the error message
 	#  3) redraw the progress bar on a new line
-	noise = quiet_mirroring()
 
 	for cp in cp_all:
 		for tree_data in porttrees_data:
+
+			src_chf = tree_data.src_db.validation_chf
+			dest_chf = tree_data.dest_db.validation_chf
+			dest_chf_key = '_%s_' % dest_chf
+			dest_chf_getter = operator.attrgetter(dest_chf)
+
 			for cpv in portdb.cp_list(cp, mytree=tree_data.path):
 				tree_data.valid_nodes.add(cpv)
 				try:
 					src = tree_data.src_db[cpv]
-				except KeyError as e:
-					noise.missing_entry(cpv)
-					del e
+				except (CacheError, KeyError):
 					continue
-				except CacheError as ce:
-					noise.exception(cpv, ce)
-					del ce
+
+				ebuild_location = portdb.findname(cpv, mytree=tree_data.path)
+				if ebuild_location is None:
+					continue
+				ebuild_hash = hashed_path(ebuild_location)
+
+				try:
+					if not tree_data.src_db.validate_entry(src,
+						ebuild_hash, tree_data.eclass_db):
+						continue
+				except CacheError:
 					continue
 
 				eapi = src.get('EAPI')
 				if not eapi:
 					eapi = '0'
-				eapi = eapi.lstrip('-')
 				eapi_supported = eapi_is_supported(eapi)
 				if not eapi_supported:
-					if not _validate_cache_for_unsupported_eapis:
-						noise.misc(cpv, "unable to validate " + \
-							"cache for EAPI='%s'" % eapi)
-						continue
+					continue
 
 				dest = None
 				try:
@@ -1751,18 +1766,30 @@ def action_metadata(settings, portdb, myopts, porttrees=None):
 					if d is not None and d.get('EAPI') in ('', '0'):
 						del d['EAPI']
 
+				if src_chf != 'mtime':
+					# src may contain an irrelevant _mtime_ which corresponds
+					# to the time that the cache entry was written
+					src.pop('_mtime_', None)
+
+				if src_chf != dest_chf:
+					# populate src entry with dest_chf_key
+					# (the validity of the dest_chf that we generate from the
+					# ebuild here relies on the fact that we already used
+					# validate_entry to validate the ebuild with src_chf)
+					src[dest_chf_key] = dest_chf_getter(ebuild_hash)
+
 				if dest is not None:
-					if not (dest['_mtime_'] == src['_mtime_'] and \
-						tree_data.eclass_db.is_eclass_data_valid(
-							dest['_eclasses_']) and \
+					if not (dest[dest_chf_key] == src[dest_chf_key] and \
+						tree_data.eclass_db.validate_and_rewrite_cache(
+							dest['_eclasses_'], tree_data.dest_db.validation_chf,
+							tree_data.dest_db.store_eclass_paths) is not None and \
 						set(dest['_eclasses_']) == set(src['_eclasses_'])):
 						dest = None
 					else:
 						# We don't want to skip the write unless we're really
 						# sure that the existing cache is identical, so don't
 						# trust _mtime_ and _eclasses_ alone.
-						for k in set(chain(src, dest)).difference(
-							('_mtime_', '_eclasses_')):
+						for k in auxdbkeys:
 							if dest.get(k, '') != src.get(k, ''):
 								dest = None
 								break
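
The rewritten transfer loop keys cache validation on each backend's checksum function ("chf"): mtime for the old flat metadata/cache layout, md5 for newer formats such as md5-dict. A source entry is first validated against the ebuild's hashed_path, and when source and destination formats disagree, the destination's chf value is copied from the ebuild hash onto the entry before the stale-or-identical comparison runs. A condensed sketch of that flow, assuming cache objects expose validation_chf and validate_entry as the hunks use them:

import operator

def transfer_entry(src_db, dest_db, cpv, src, ebuild_hash, eclass_db):
    # Condensed from the loop above; src is a mutable cache entry.
    src_chf = src_db.validation_chf            # e.g. 'mtime' or 'md5'
    dest_chf = dest_db.validation_chf
    dest_chf_key = '_%s_' % dest_chf           # e.g. '_mtime_', '_md5_'

    if not src_db.validate_entry(src, ebuild_hash, eclass_db):
        return False                           # stale entry, skip it

    if src_chf != 'mtime':
        # _mtime_ would only record when the entry was written
        src.pop('_mtime_', None)
    if src_chf != dest_chf:
        # safe: validate_entry just vouched for the ebuild via src_chf
        src[dest_chf_key] = operator.attrgetter(dest_chf)(ebuild_hash)

    dest_db[cpv] = src
    return True
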
@@ -1773,56 +1800,10 @@ def action_metadata(settings, portdb, myopts, porttrees=None):
 					continue
 
 				try:
-					inherited = src.get('INHERITED', '')
-					eclasses = src.get('_eclasses_')
-				except CacheError as ce:
-					noise.exception(cpv, ce)
-					del ce
-					continue
-
-				if eclasses is not None:
-					if not tree_data.eclass_db.is_eclass_data_valid(
-						src['_eclasses_']):
-						noise.eclass_stale(cpv)
-						continue
-					inherited = eclasses
-				else:
-					inherited = inherited.split()
-
-				if tree_data.src_db.complete_eclass_entries and \
-					eclasses is None:
-					noise.corruption(cpv, "missing _eclasses_ field")
-					continue
-
-				if inherited:
-					# Even if _eclasses_ already exists, replace it with data from
-					# eclass_cache, in order to insert local eclass paths.
-					try:
-						eclasses = tree_data.eclass_db.get_eclass_data(inherited)
-					except KeyError:
-						# INHERITED contains a non-existent eclass.
-						noise.eclass_stale(cpv)
-						continue
-
-					if eclasses is None:
-						noise.eclass_stale(cpv)
-						continue
-					src['_eclasses_'] = eclasses
-				else:
-					src['_eclasses_'] = {}
-
-				if not eapi_supported:
-					src = {
-						'EAPI'       : '-' + eapi,
-						'_mtime_'    : src['_mtime_'],
-						'_eclasses_' : src['_eclasses_'],
-					}
-
-				try:
 					tree_data.dest_db[cpv] = src
-				except CacheError as ce:
-					noise.exception(cpv, ce)
-					del ce
+				except CacheError:
+					# ignore it; can't do anything about it.
+					pass
 
 		curval += 1
 		if onProgress is not None:
@@ -1860,12 +1841,6 @@ def action_regen(settings, portdb, max_jobs, max_load):
 	xterm_titles = "notitles" not in settings.features
 	emergelog(xterm_titles, " === regen")
 	#regenerate cache entries
-	try:
-		os.close(sys.stdin.fileno())
-	except SystemExit as e:
-		raise # Needed else can't exit
-	except:
-		pass
 	sys.stdout.flush()
 
 	regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
@@ -1921,7 +1896,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 	enter_invalid = '--ask-enter-invalid' in myopts
 	xterm_titles = "notitles" not in settings.features
 	emergelog(xterm_titles, " === sync")
-	portdb = trees[settings["ROOT"]]["porttree"].dbapi
+	portdb = trees[settings['EROOT']]['porttree'].dbapi
 	myportdir = portdb.porttree_root
 	if not myportdir:
 		myportdir = settings.get('PORTDIR', '')
@@ -1993,6 +1968,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 	os.umask(0o022)
 	dosyncuri = syncuri
 	updatecache_flg = False
+	git = False
 	if myaction == "metadata":
 		print("skipping sync")
 		updatecache_flg = True
@@ -2021,9 +1997,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 		msg = ">>> Git pull in %s successful" % myportdir
 		emergelog(xterm_titles, msg)
 		writemsg_level(msg + "\n")
-		exitcode = git_sync_timestamps(settings, myportdir)
-		if exitcode == os.EX_OK:
-			updatecache_flg = True
+		git = True
 	elif syncuri[:8]=="rsync://" or syncuri[:6]=="ssh://":
 		for vcs_dir in vcs_dirs:
 			writemsg_level(("!!! %s appears to be under revision " + \
@@ -2050,6 +2024,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 				"--whole-file",   # Don't do block transfers, only entire files
 				"--delete",       # Delete files that aren't in the master tree
 				"--stats",        # Show final statistics about what was transfered
+				"--human-readable",
 				"--timeout="+str(mytimeout), # IO timeout if not done in X seconds
 				"--exclude=/distfiles",   # Exclude distfiles from consideration
 				"--exclude=/local",       # Exclude local     from consideration
@@ -2237,7 +2212,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 						print()
 						print("Quitting.")
 						print()
-						sys.exit(0)
+						sys.exit(128 + signal.SIGINT)
 				emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
 				if "--quiet" not in myopts:
 					print(">>> Starting rsync with "+dosyncuri+"...")
@@ -2465,17 +2440,25 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 			noiselevel=-1, level=logging.ERROR)
 		return 1
 
+	# Reload the whole config from scratch.
+	settings, trees, mtimedb = load_emerge_config(trees=trees)
+	adjust_configs(myopts, trees)
+	root_config = trees[settings['EROOT']]['root_config']
+	portdb = trees[settings['EROOT']]['porttree'].dbapi
+
+	if git:
+		# NOTE: Do this after reloading the config, in case
+		# it did not exist prior to sync, so that the config
+		# and portdb properly account for its existence.
+		exitcode = git_sync_timestamps(portdb, myportdir)
+		if exitcode == os.EX_OK:
+			updatecache_flg = True
+
 	if updatecache_flg and  \
 		myaction != "metadata" and \
 		"metadata-transfer" not in settings.features:
 		updatecache_flg = False
 
-	# Reload the whole config from scratch.
-	settings, trees, mtimedb = load_emerge_config(trees=trees)
-	adjust_configs(myopts, trees)
-	root_config = trees[settings["ROOT"]]["root_config"]
-	portdb = trees[settings["ROOT"]]["porttree"].dbapi
-
 	if updatecache_flg and \
 		os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
 
@@ -2489,13 +2472,13 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 		# Reload the whole config from scratch.
 		settings, trees, mtimedb = load_emerge_config(trees=trees)
 		adjust_configs(myopts, trees)
-		portdb = trees[settings["ROOT"]]["porttree"].dbapi
-		root_config = trees[settings["ROOT"]]["root_config"]
+		portdb = trees[settings['EROOT']]['porttree'].dbapi
+		root_config = trees[settings['EROOT']]['root_config']
 
 	mybestpv = portdb.xmatch("bestmatch-visible",
 		portage.const.PORTAGE_PACKAGE_ATOM)
 	mypvs = portage.best(
-		trees[settings["ROOT"]]["vartree"].dbapi.match(
+		trees[settings['EROOT']]['vartree'].dbapi.match(
 		portage.const.PORTAGE_PACKAGE_ATOM))
 
 	chk_updated_cfg_files(settings["EROOT"],
@@ -2514,10 +2497,10 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 
 	if(mybestpv != mypvs) and not "--quiet" in myopts:
 		print()
-		print(red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
-		print(red(" * ")+"that you update portage now, before any other packages are updated.")
+		print(warn(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
+		print(warn(" * ")+"that you update portage now, before any other packages are updated.")
 		print()
-		print(red(" * ")+"To update portage, run 'emerge portage' now.")
+		print(warn(" * ")+"To update portage, run 'emerge portage' now.")
 		print()
 
 	display_news_notification(root_config, myopts)
@@ -2528,7 +2511,8 @@ def action_uninstall(settings, trees, ldpath_mtimes,
 	# For backward compat, some actions do not require leading '='.
 	ignore_missing_eq = action in ('clean', 'unmerge')
 	root = settings['ROOT']
-	vardb = trees[root]['vartree'].dbapi
+	eroot = settings['EROOT']
+	vardb = trees[settings['EROOT']]['vartree'].dbapi
 	valid_atoms = []
 	lookup_owners = []
 
@@ -2566,9 +2550,9 @@ def action_uninstall(settings, trees, ldpath_mtimes,
 				valid_atoms.append(atom)
 
 		elif x.startswith(os.sep):
-			if not x.startswith(root):
+			if not x.startswith(eroot):
 				writemsg_level(("!!! '%s' does not start with" + \
-					" $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
+					" $EROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
 				return 1
 			# Queue these up since it's most efficient to handle
 			# multiple files in a single iter_owners() call.
@@ -2661,7 +2645,7 @@ def action_uninstall(settings, trees, ldpath_mtimes,
 	# redirection of ebuild phase output to logs as required for
 	# options such as --quiet.
 	sched = Scheduler(settings, trees, None, opts,
-		spinner)
+		spinner, uninstall_only=True)
 	sched._background = sched._background_mode()
 	sched._status_display.quiet = True
 
@@ -2670,16 +2654,15 @@ def action_uninstall(settings, trees, ldpath_mtimes,
 		sched.settings["PORTAGE_BACKGROUND"] = "1"
 		sched.settings.backup_changes("PORTAGE_BACKGROUND")
 		sched.settings.lock()
-		sched.pkgsettings[root] = portage.config(clone=sched.settings)
+		sched.pkgsettings[eroot] = portage.config(clone=sched.settings)
 
 	if action in ('clean', 'unmerge') or \
 		(action == 'prune' and "--nodeps" in opts):
 		# When given a list of atoms, unmerge them in the order given.
 		ordered = action == 'unmerge'
-		unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
+		rval = unmerge(trees[settings['EROOT']]['root_config'], opts, action,
 			valid_atoms, ldpath_mtimes, ordered=ordered,
 			scheduler=sched._sched_iface)
-		rval = os.EX_OK
 	else:
 		rval = action_depclean(settings, trees, ldpath_mtimes,
 			opts, action, valid_atoms, spinner, scheduler=sched._sched_iface)
@@ -2730,7 +2713,13 @@ def adjust_config(myopts, settings):
 	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
 	settings.backup_changes("EMERGE_WARNING_DELAY")
 
-	if "--quiet" in myopts or "--quiet-build" in myopts:
+	buildpkg = myopts.get("--buildpkg")
+	if buildpkg is True:
+		settings.features.add("buildpkg")
+	elif buildpkg == 'n':
+		settings.features.discard("buildpkg")
+
+	if "--quiet" in myopts:
 		settings["PORTAGE_QUIET"]="1"
 		settings.backup_changes("PORTAGE_QUIET")
 
@@ -2766,8 +2755,8 @@ def adjust_config(myopts, settings):
 	if settings.get("NOCOLOR") not in ("yes","true"):
 		portage.output.havecolor = 1
 
-	"""The explicit --color < y | n > option overrides the NOCOLOR environment
-	variable and stdout auto-detection."""
+	# The explicit --color < y | n > option overrides the NOCOLOR environment
+	# variable and stdout auto-detection.
 	if "--color" in myopts:
 		if "y" == myopts["--color"]:
 			portage.output.havecolor = 1
@@ -2806,7 +2795,7 @@ def relative_profile_path(portdir, abs_profile):
 		profilever = None
 	return profilever
 
-def getportageversion(portdir, target_root, profile, chost, vardb):
+def getportageversion(portdir, _unused, profile, chost, vardb):
 	profilever = None
 	if profile:
 		profilever = relative_profile_path(portdir, profile)
@@ -2839,7 +2828,7 @@ def getportageversion(portdir, target_root, profile, chost, vardb):
 		for cpv in sorted(libclist):
 			libc_split = portage.catpkgsplit(cpv)[1:]
 			if libc_split[-1] == "r0":
-				libc_split[:-1]
+				libc_split = libc_split[:-1]
 			libcver.append("-".join(libc_split))
 	else:
 		libcver = ["unavailable"]
@@ -2850,27 +2839,35 @@ def getportageversion(portdir, target_root, profile, chost, vardb):
 	return "Portage %s (%s, %s, %s, %s)" % \
 		(portage.VERSION, profilever, gccver, ",".join(libcver), unameout)
 
-def git_sync_timestamps(settings, portdir):
+def git_sync_timestamps(portdb, portdir):
 	"""
 	Since git doesn't preserve timestamps, synchronize timestamps between
 	entries and ebuilds/eclasses. Assume the cache has the correct timestamp
 	for a given file as long as the file in the working tree is not modified
 	(relative to HEAD).
 	"""
-	cache_dir = os.path.join(portdir, "metadata", "cache")
-	if not os.path.isdir(cache_dir):
-		return os.EX_OK
-	writemsg_level(">>> Synchronizing timestamps...\n")
 
-	from portage.cache.cache_errors import CacheError
+	cache_db = portdb._pregen_auxdb.get(portdir)
+
 	try:
-		cache_db = settings.load_best_module("portdbapi.metadbmodule")(
-			portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
+		if cache_db is None:
+			# portdbapi does not populate _pregen_auxdb
+			# when FEATURES=metadata-transfer is enabled
+			cache_db = portdb._create_pregen_cache(portdir)
 	except CacheError as e:
 		writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
 			level=logging.ERROR, noiselevel=-1)
 		return 1
 
+	if cache_db is None:
+		return os.EX_OK
+
+	if cache_db.validation_chf != 'mtime':
+		# newer formats like md5-dict do not require mtime sync
+		return os.EX_OK
+
+	writemsg_level(">>> Synchronizing timestamps...\n")
+
 	ec_dir = os.path.join(portdir, "eclass")
 	try:
 		ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
@@ -2883,10 +2880,10 @@ def git_sync_timestamps(settings, portdir):
 	args = [portage.const.BASH_BINARY, "-c",
 		"cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
 		portage._shell_quote(portdir)]
-	import subprocess
 	proc = subprocess.Popen(args, stdout=subprocess.PIPE)
 	modified_files = set(_unicode_decode(l).rstrip("\n") for l in proc.stdout)
 	rval = proc.wait()
+	proc.stdout.close()
 	if rval != os.EX_OK:
 		return rval
 
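The query "git diff-index --name-only --diff-filter=M HEAD" lists working-tree files modified relative to HEAD, exactly the files whose cached mtimes cannot be trusted after a git sync. A standalone version of the query; the patched code shells out through bash -c "cd ... && git ..." while this sketch passes cwd= instead:

import subprocess

def modified_vs_head(repo_dir):
    # Files in the working tree that differ from HEAD (status 'M' only).
    proc = subprocess.Popen(
        ["git", "diff-index", "--name-only", "--diff-filter=M", "HEAD"],
        cwd=repo_dir, stdout=subprocess.PIPE)
    modified = set(line.decode("utf-8", "replace").rstrip("\n")
        for line in proc.stdout)
    proc.stdout.close()
    if proc.wait() != 0:
        raise RuntimeError("git diff-index failed")
    return modified
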
@@ -2990,22 +2987,15 @@ def load_emerge_config(trees=None):
 			kwargs[k] = v
 	trees = portage.create_trees(trees=trees, **kwargs)
 
-	for root, root_trees in trees.items():
+	for root_trees in trees.values():
 		settings = root_trees["vartree"].settings
 		settings._init_dirs()
 		setconfig = load_default_config(settings, root_trees)
 		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
 
-	settings = trees["/"]["vartree"].settings
-
-	for myroot in trees:
-		if myroot != "/":
-			settings = trees[myroot]["vartree"].settings
-			break
-
+	settings = trees[trees._target_eroot]['vartree'].settings
 	mtimedbfile = os.path.join(settings['EROOT'], portage.CACHE_PATH, "mtimedb")
 	mtimedb = portage.MtimeDB(mtimedbfile)
-	portage.output._init(config_root=settings['PORTAGE_CONFIGROOT'])
 	QueryCommand._db = trees
 	return settings, trees, mtimedb
 
@@ -3015,55 +3005,35 @@ def chk_updated_cfg_files(eroot, config_protect):
 		portage.util.find_updated_config_files(target_root, config_protect))
 
 	for x in result:
-		writemsg_level("\n %s " % (colorize("WARN", "* IMPORTANT:"),),
+		writemsg_level("\n %s " % (colorize("WARN", "* " + _("IMPORTANT:"))),
 			level=logging.INFO, noiselevel=-1)
 		if not x[1]: # it's a protected file
-			writemsg_level("config file '%s' needs updating.\n" % x[0],
+			writemsg_level( _("config file '%s' needs updating.\n") % x[0],
 				level=logging.INFO, noiselevel=-1)
 		else: # it's a protected dir
 			if len(x[1]) == 1:
 				head, tail = os.path.split(x[1][0])
 				tail = tail[len("._cfg0000_"):]
 				fpath = os.path.join(head, tail)
-				writemsg_level("config file '%s' needs updating.\n" % fpath,
+				writemsg_level(_("config file '%s' needs updating.\n") % fpath,
 					level=logging.INFO, noiselevel=-1)
 			else:
-				writemsg_level("%d config files in '%s' need updating.\n" % \
+				writemsg_level( _("%d config files in '%s' need updating.\n") % \
 					(len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
 
 	if result:
-		print(" "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")\
-				+ " section of the " + bold("emerge"))
-		print(" "+yellow("*")+" man page to learn how to update config files.")
+		print(" "+yellow("*")+ " See the "+colorize("INFORM", _("CONFIGURATION FILES"))\
+				+ " " + _("section of the") + " " + bold("emerge"))
+		print(" "+yellow("*")+ " " + _("man page to learn how to update config files."))
+
 
 def display_news_notification(root_config, myopts):
-	target_root = root_config.settings['EROOT']
-	trees = root_config.trees
-	settings = trees["vartree"].settings
-	portdb = trees["porttree"].dbapi
-	vardb = trees["vartree"].dbapi
-	NEWS_PATH = os.path.join("metadata", "news")
-	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
-	newsReaderDisplay = False
-	update = "--pretend" not in myopts
-	if "news" not in settings.features:
+	if "news" not in root_config.settings.features:
 		return
-
-	for repo in portdb.getRepositories():
-		unreadItems = checkUpdatedNewsItems(
-			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
-		if unreadItems:
-			if not newsReaderDisplay:
-				newsReaderDisplay = True
-				print()
-			print(colorize("WARN", " * IMPORTANT:"), end=' ')
-			print("%s news items need reading for repository '%s'." % (unreadItems, repo))
-
-
-	if newsReaderDisplay:
-		print(colorize("WARN", " *"), end=' ')
-		print("Use " + colorize("GOOD", "eselect news") + " to read news items.")
-		print()
+	portdb = root_config.trees["porttree"].dbapi
+	vardb = root_config.trees["vartree"].dbapi
+	news_counts = count_unread_news(portdb, vardb)
+	display_news_notifications(news_counts)
 
 def getgccversion(chost):
 	"""
@@ -3071,7 +3041,7 @@ def getgccversion(chost):
 	return:  the current in-use gcc version
 	"""
 
-	gcc_ver_command = 'gcc -dumpversion'
+	gcc_ver_command = ['gcc', '-dumpversion']
 	gcc_ver_prefix = 'gcc-'
 
 	gcc_not_found_error = red(
@@ -3080,44 +3050,42 @@ def getgccversion(chost):
 	"!!! other terminals also.\n"
 	)
 
-	mystatus, myoutput = subprocess_getstatusoutput("gcc-config -c")
+	try:
+		proc = subprocess.Popen(["gcc-config", "-c"],
+			stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+	except OSError:
+		myoutput = None
+		mystatus = 1
+	else:
+		myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+		mystatus = proc.wait()
 	if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
 		return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
 
-	mystatus, myoutput = subprocess_getstatusoutput(
-		chost + "-" + gcc_ver_command)
+	try:
+		proc = subprocess.Popen(
+			[chost + "-" + gcc_ver_command[0]] + gcc_ver_command[1:],
+			stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+	except OSError:
+		myoutput = None
+		mystatus = 1
+	else:
+		myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+		mystatus = proc.wait()
 	if mystatus == os.EX_OK:
 		return gcc_ver_prefix + myoutput
 
-	mystatus, myoutput = subprocess_getstatusoutput(gcc_ver_command)
+	try:
+		proc = subprocess.Popen(gcc_ver_command,
+			stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+	except OSError:
+		myoutput = None
+		mystatus = 1
+	else:
+		myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+		mystatus = proc.wait()
 	if mystatus == os.EX_OK:
 		return gcc_ver_prefix + myoutput
 
 	portage.writemsg(gcc_not_found_error, noiselevel=-1)
 	return "[unavailable]"
-
-def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
-	update=False):
-	"""
-	Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
-	Returns the number of unread (yet relevent) items.
-
-	@param portdb: a portage tree database
-	@type portdb: pordbapi
-	@param vardb: an installed package database
-	@type vardb: vardbapi
-	@param NEWS_PATH:
-	@type NEWS_PATH:
-	@param UNREAD_PATH:
-	@type UNREAD_PATH:
-	@param repo_id:
-	@type repo_id:
-	@rtype: Integer
-	@returns:
-	1.  The number of unread but relevant news items.
-
-	"""
-	from portage.news import NewsManager
-	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
-	return manager.getUnreadItems( repo_id, update=update )
-

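With the checkUpdatedNewsItems() helper deleted above, display_news_notification shrinks to a thin wrapper: count the unread items per repository and hand the counts to a shared display routine. A sketch of the resulting call shape; the portage.news import location is an assumption based on where these helpers live in portage releases of this vintage:

from portage.news import count_unread_news, display_news_notifications

def notify(root_config):
    # Mirrors the rewritten display_news_notification() above.
    if "news" not in root_config.settings.features:
        return
    portdb = root_config.trees["porttree"].dbapi
    vardb = root_config.trees["vartree"].dbapi
    display_news_notifications(count_unread_news(portdb, vardb))
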
diff --git a/portage_with_autodep/pym/_emerge/actions.pyo b/portage_with_autodep/pym/_emerge/actions.pyo
new file mode 100644
index 0000000..4fbda01
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/actions.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/chk_updated_cfg_files.py b/portage_with_autodep/pym/_emerge/chk_updated_cfg_files.py
new file mode 100644
index 0000000..9f2ab6f
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/chk_updated_cfg_files.py
@@ -0,0 +1,42 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+
+import portage
+from portage import os
+from portage.localization import _
+from portage.output import bold, colorize, yellow
+from portage.util import writemsg_level
+
+def chk_updated_cfg_files(eroot, config_protect):
+	target_root = eroot
+	result = list(
+		portage.util.find_updated_config_files(target_root, config_protect))
+
+	for x in result:
+		writemsg_level("\n %s " % (colorize("WARN", "* " + _("IMPORTANT:"))),
+			level=logging.INFO, noiselevel=-1)
+		if not x[1]: # it's a protected file
+			writemsg_level( _("config file '%s' needs updating.\n") % x[0],
+				level=logging.INFO, noiselevel=-1)
+		else: # it's a protected dir
+			if len(x[1]) == 1:
+				head, tail = os.path.split(x[1][0])
+				tail = tail[len("._cfg0000_"):]
+				fpath = os.path.join(head, tail)
+				writemsg_level(_("config file '%s' needs updating.\n") % fpath,
+					level=logging.INFO, noiselevel=-1)
+			else:
+				writemsg_level(
+					_("%d config files in '%s' need updating.\n") % \
+					(len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
+
+	if result:
+		print(" " + yellow("*") + " See the " +
+			colorize("INFORM", _("CONFIGURATION FILES")) +
+			" " + _("section of the") + " " + bold("emerge"))
+		print(" " + yellow("*") + " " +
+			_("man page to learn how to update config files."))

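Moving chk_updated_cfg_files() into its own module lets other entry points emit the same CONFIG_PROTECT warning without importing all of actions.py. A hypothetical call site (the EROOT and protected paths are illustrative only):

from _emerge.chk_updated_cfg_files import chk_updated_cfg_files

# Warn about pending ._cfg0000_* updates under EROOT for each
# CONFIG_PROTECT entry (values here are made up for illustration).
chk_updated_cfg_files("/", ["/etc", "/usr/share/config"])
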
diff --git a/portage_with_autodep/pym/_emerge/clear_caches.pyo b/portage_with_autodep/pym/_emerge/clear_caches.pyo
new file mode 100644
index 0000000..2e6f010
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/clear_caches.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/countdown.pyo b/portage_with_autodep/pym/_emerge/countdown.pyo
new file mode 100644
index 0000000..537dd27
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/countdown.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/create_depgraph_params.py b/portage_with_autodep/pym/_emerge/create_depgraph_params.py
index 44dceda..8f15c68 100644
--- a/portage_with_autodep/pym/_emerge/create_depgraph_params.py
+++ b/portage_with_autodep/pym/_emerge/create_depgraph_params.py
@@ -21,6 +21,10 @@ def create_depgraph_params(myopts, myaction):
 	if bdeps is not None:
 		myparams["bdeps"] = bdeps
 
+	dynamic_deps = myopts.get("--dynamic-deps")
+	if dynamic_deps is not None:
+		myparams["dynamic_deps"] = dynamic_deps
+
 	if myaction == "remove":
 		myparams["remove"] = True
 		myparams["complete"] = True
@@ -37,6 +41,12 @@ def create_depgraph_params(myopts, myaction):
 	deep = myopts.get("--deep")
 	if deep is not None and deep != 0:
 		myparams["deep"] = deep
+
+	complete_if_new_ver = \
+		myopts.get("--complete-graph-if-new-ver")
+	if complete_if_new_ver is not None:
+		myparams["complete_if_new_ver"] = complete_if_new_ver
+
 	if ("--complete-graph" in myopts or "--rebuild-if-new-rev" in myopts or
 		"--rebuild-if-new-ver" in myopts or "--rebuild-if-unbuilt" in myopts):
 		myparams["complete"] = True
@@ -58,6 +68,16 @@ def create_depgraph_params(myopts, myaction):
 		'--update' in myopts:
 		myparams['rebuilt_binaries'] = True
 
+	binpkg_respect_use = myopts.get('--binpkg-respect-use')
+	if binpkg_respect_use is not None:
+		myparams['binpkg_respect_use'] = binpkg_respect_use
+	elif '--usepkgonly' not in myopts:
+		# If --binpkg-respect-use is not explicitly specified, we enable
+		# the behavior automatically (as requested in bug #297549), as
+		# long as it doesn't strongly conflict with other options that
+		# have been specified.
+		myparams['binpkg_respect_use'] = 'auto'
+
 	if myopts.get("--selective") == "n":
 		# --selective=n can be used to remove selective
 		# behavior that may have been implied by some

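The new options funnel straight into the myparams dict; the interesting part is the --binpkg-respect-use default, which turns the feature on automatically ("auto") unless the user chose a value or passed --usepkgonly. The rule in isolation:

def binpkg_respect_use_param(myopts):
    # Sketch of the defaulting rule added above.
    explicit = myopts.get("--binpkg-respect-use")
    if explicit is not None:
        return explicit               # user said 'y' or 'n'
    if "--usepkgonly" in myopts:
        return None                   # leave the parameter unset
    return "auto"                     # enable automatically (bug #297549)

assert binpkg_respect_use_param({}) == "auto"
assert binpkg_respect_use_param({"--usepkgonly": True}) is None
assert binpkg_respect_use_param({"--binpkg-respect-use": "n"}) == "n"
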
diff --git a/portage_with_autodep/pym/_emerge/create_depgraph_params.pyo b/portage_with_autodep/pym/_emerge/create_depgraph_params.pyo
new file mode 100644
index 0000000..834580a
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/create_depgraph_params.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/create_world_atom.py b/portage_with_autodep/pym/_emerge/create_world_atom.py
index fa7cffc..35fb7c4 100644
--- a/portage_with_autodep/pym/_emerge/create_world_atom.py
+++ b/portage_with_autodep/pym/_emerge/create_world_atom.py
@@ -21,8 +21,25 @@ def create_world_atom(pkg, args_set, root_config):
 	sets = root_config.sets
 	portdb = root_config.trees["porttree"].dbapi
 	vardb = root_config.trees["vartree"].dbapi
-	available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
-		for cpv in portdb.match(cp))
+
+	if arg_atom.repo is not None:
+		repos = [arg_atom.repo]
+	else:
+		# Iterate over portdbapi.porttrees, since it's common to
+		# tweak this attribute in order to adjust match behavior.
+		repos = []
+		for tree in portdb.porttrees:
+			repos.append(portdb.repositories.get_name_for_location(tree))
+
+	available_slots = set()
+	for cpv in portdb.match(cp):
+		for repo in repos:
+			try:
+				available_slots.add(portdb.aux_get(cpv, ["SLOT"],
+					myrepo=repo)[0])
+			except KeyError:
+				pass
+
 	slotted = len(available_slots) > 1 or \
 		(len(available_slots) == 1 and "0" not in available_slots)
 	if not slotted:
@@ -64,8 +81,18 @@ def create_world_atom(pkg, args_set, root_config):
 			# enough to identify a specific slot.
 			matches = mydb.match(arg_atom)
 			matched_slots = set()
-			for cpv in matches:
-				matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
+			if mydb is vardb:
+				for cpv in matches:
+					matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
+			else:
+				for cpv in matches:
+					for repo in repos:
+						try:
+							matched_slots.add(portdb.aux_get(cpv, ["SLOT"],
+								myrepo=repo)[0])
+						except KeyError:
+							pass
+
 			if len(matched_slots) == 1:
 				new_world_atom = slot_atom
 				if arg_atom.repo:

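Because the same cpv can exist in several repositories with different SLOTs, aux_get is now queried once per candidate repository, and a KeyError (cpv absent from that particular repo) is simply skipped. The collection loop, condensed, using portdbapi methods exactly as the hunk does:

def collect_slots(portdb, cp, repos):
    # Gather every SLOT that any matching cpv provides in any of the
    # candidate repositories.
    available_slots = set()
    for cpv in portdb.match(cp):
        for repo in repos:
            try:
                available_slots.add(
                    portdb.aux_get(cpv, ["SLOT"], myrepo=repo)[0])
            except KeyError:
                # cpv does not exist in this particular repo
                pass
    return available_slots
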
diff --git a/portage_with_autodep/pym/_emerge/create_world_atom.pyo b/portage_with_autodep/pym/_emerge/create_world_atom.pyo
new file mode 100644
index 0000000..ac3fb5d
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/create_world_atom.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/depgraph.py b/portage_with_autodep/pym/_emerge/depgraph.py
index 5b48aca..572cea7 100644
--- a/portage_with_autodep/pym/_emerge/depgraph.py
+++ b/portage_with_autodep/pym/_emerge/depgraph.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
@@ -18,7 +18,10 @@ from portage import os, OrderedDict
 from portage import _unicode_decode, _unicode_encode, _encodings
 from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
 from portage.dbapi import dbapi
-from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use, _repo_separator
+from portage.dbapi.dep_expand import dep_expand
+from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
+	check_required_use, human_readable_required_use, match_from_list, \
+	_repo_separator
 from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
 from portage.exception import InvalidAtom, InvalidDependString, PortageException
 from portage.output import colorize, create_color_func, \
@@ -92,15 +95,13 @@ class _frozen_depgraph_config(object):
 
 	def __init__(self, settings, trees, myopts, spinner):
 		self.settings = settings
-		self.target_root = settings["ROOT"]
+		self.target_root = settings["EROOT"]
 		self.myopts = myopts
 		self.edebug = 0
 		if settings.get("PORTAGE_DEBUG", "") == "1":
 			self.edebug = 1
 		self.spinner = spinner
-		self._running_root = trees["/"]["root_config"]
-		self._opts_no_restart = frozenset(["--buildpkgonly",
-			"--fetchonly", "--fetch-all-uri", "--pretend"])
+		self._running_root = trees[trees._running_eroot]["root_config"]
 		self.pkgsettings = {}
 		self.trees = {}
 		self._trees_orig = trees
@@ -108,6 +109,7 @@ class _frozen_depgraph_config(object):
 		# All Package instances
 		self._pkg_cache = {}
 		self._highest_license_masked = {}
+		dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
 		for myroot in trees:
 			self.trees[myroot] = {}
 			# Create a RootConfig instance that references
@@ -121,7 +123,8 @@ class _frozen_depgraph_config(object):
 			self.trees[myroot]["vartree"] = \
 				FakeVartree(trees[myroot]["root_config"],
 					pkg_cache=self._pkg_cache,
-					pkg_root_config=self.roots[myroot])
+					pkg_root_config=self.roots[myroot],
+					dynamic_deps=dynamic_deps)
 			self.pkgsettings[myroot] = portage.config(
 				clone=self.trees[myroot]["vartree"].settings)
 
@@ -174,7 +177,7 @@ class _rebuild_config(object):
 		rebuild_exclude = self._frozen_config.rebuild_exclude
 		rebuild_ignore = self._frozen_config.rebuild_ignore
 		if (self.rebuild and isinstance(parent, Package) and
-			parent.built and (priority.buildtime or priority.runtime) and
+			parent.built and priority.buildtime and
 			isinstance(dep_pkg, Package) and
 			not rebuild_exclude.findAtomForPackage(parent) and
 			not rebuild_ignore.findAtomForPackage(dep_pkg)):
@@ -209,66 +212,63 @@ class _rebuild_config(object):
 
 		return True
 
-	def _trigger_rebuild(self, parent, build_deps, runtime_deps):
+	def _trigger_rebuild(self, parent, build_deps):
 		root_slot = (parent.root, parent.slot_atom)
 		if root_slot in self.rebuild_list:
 			return False
 		trees = self._frozen_config.trees
-		children = set(build_deps).intersection(runtime_deps)
 		reinstall = False
-		for slot_atom in children:
-			kids = set([build_deps[slot_atom], runtime_deps[slot_atom]])
-			for dep_pkg in kids:
-				dep_root_slot = (dep_pkg.root, slot_atom)
-				if self._needs_rebuild(dep_pkg):
+		for slot_atom, dep_pkg in build_deps.items():
+			dep_root_slot = (dep_pkg.root, slot_atom)
+			if self._needs_rebuild(dep_pkg):
+				self.rebuild_list.add(root_slot)
+				return True
+			elif ("--usepkg" in self._frozen_config.myopts and
+				(dep_root_slot in self.reinstall_list or
+				dep_root_slot in self.rebuild_list or
+				not dep_pkg.installed)):
+
+				# A direct rebuild dependency is being installed. We
+				# should update the parent as well to the latest binary,
+				# if that binary is valid.
+				#
+				# To validate the binary, we check whether all of the
+				# rebuild dependencies are present on the same binhost.
+				#
+				# 1) If parent is present on the binhost, but one of its
+				#    rebuild dependencies is not, then the parent should
+				#    be rebuilt from source.
+				# 2) Otherwise, the parent binary is assumed to be valid,
+				#    because all of its rebuild dependencies are
+				#    consistent.
+				bintree = trees[parent.root]["bintree"]
+				uri = bintree.get_pkgindex_uri(parent.cpv)
+				dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
+				bindb = bintree.dbapi
+				if self.rebuild_if_new_ver and uri and uri != dep_uri:
+					cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
+					for cpv in bindb.match(dep_pkg.slot_atom):
+						if cpv_norev == catpkgsplit(cpv)[:-1]:
+							dep_uri = bintree.get_pkgindex_uri(cpv)
+							if uri == dep_uri:
+								break
+				if uri and uri != dep_uri:
+					# 1) Remote binary package is invalid because it was
+					#    built without dep_pkg. Force rebuild.
 					self.rebuild_list.add(root_slot)
 					return True
-				elif ("--usepkg" in self._frozen_config.myopts and
-					(dep_root_slot in self.reinstall_list or
-					dep_root_slot in self.rebuild_list or
-					not dep_pkg.installed)):
-
-					# A direct rebuild dependency is being installed. We
-					# should update the parent as well to the latest binary,
-					# if that binary is valid.
-					#
-					# To validate the binary, we check whether all of the
-					# rebuild dependencies are present on the same binhost.
-					#
-					# 1) If parent is present on the binhost, but one of its
-					#    rebuild dependencies is not, then the parent should
-					#    be rebuilt from source.
-					# 2) Otherwise, the parent binary is assumed to be valid,
-					#    because all of its rebuild dependencies are
-					#    consistent.
-					bintree = trees[parent.root]["bintree"]
-					uri = bintree.get_pkgindex_uri(parent.cpv)
-					dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
-					bindb = bintree.dbapi
-					if self.rebuild_if_new_ver and uri and uri != dep_uri:
-						cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
-						for cpv in bindb.match(dep_pkg.slot_atom):
-							if cpv_norev == catpkgsplit(cpv)[:-1]:
-								dep_uri = bintree.get_pkgindex_uri(cpv)
-								if uri == dep_uri:
-									break
-					if uri and uri != dep_uri:
-						# 1) Remote binary package is invalid because it was
-						#    built without dep_pkg. Force rebuild.
-						self.rebuild_list.add(root_slot)
-						return True
-					elif (parent.installed and
-						root_slot not in self.reinstall_list):
-						inst_build_time = parent.metadata.get("BUILD_TIME")
-						try:
-							bin_build_time, = bindb.aux_get(parent.cpv,
-								["BUILD_TIME"])
-						except KeyError:
-							continue
-						if bin_build_time != inst_build_time:
-							# 2) Remote binary package is valid, and local package
-							#    is not up to date. Force reinstall.
-							reinstall = True
+				elif (parent.installed and
+					root_slot not in self.reinstall_list):
+					inst_build_time = parent.metadata.get("BUILD_TIME")
+					try:
+						bin_build_time, = bindb.aux_get(parent.cpv,
+							["BUILD_TIME"])
+					except KeyError:
+						continue
+					if bin_build_time != inst_build_time:
+						# 2) Remote binary package is valid, and local package
+						#    is not up to date. Force reinstall.
+						reinstall = True
 		if reinstall:
 			self.reinstall_list.add(root_slot)
 		return reinstall
@@ -282,31 +282,15 @@ class _rebuild_config(object):
 		need_restart = False
 		graph = self._graph
 		build_deps = {}
-		runtime_deps = {}
-		leaf_nodes = deque(graph.leaf_nodes())
-
-		def ignore_non_runtime(priority):
-			return not priority.runtime
 
-		def ignore_non_buildtime(priority):
-			return not priority.buildtime
+		leaf_nodes = deque(graph.leaf_nodes())
 
 		# Trigger rebuilds bottom-up (starting with the leaves) so that parents
 		# will always know which children are being rebuilt.
 		while graph:
 			if not leaf_nodes:
-				# We're interested in intersection of buildtime and runtime,
-				# so ignore edges that do not contain both.
-				leaf_nodes.extend(graph.leaf_nodes(
-					ignore_priority=ignore_non_runtime))
-				if not leaf_nodes:
-					leaf_nodes.extend(graph.leaf_nodes(
-						ignore_priority=ignore_non_buildtime))
-					if not leaf_nodes:
-						# We'll have to drop an edge that is both
-						# buildtime and runtime. This should be
-						# quite rare.
-						leaf_nodes.append(graph.order[-1])
+				# We'll have to drop an edge. This should be quite rare.
+				leaf_nodes.append(graph.order[-1])
 
 			node = leaf_nodes.popleft()
 			if node not in graph:
@@ -315,32 +299,23 @@ class _rebuild_config(object):
 			slot_atom = node.slot_atom
 
 			# Remove our leaf node from the graph, keeping track of deps.
-			parents = graph.nodes[node][1].items()
+			parents = graph.parent_nodes(node)
 			graph.remove(node)
 			node_build_deps = build_deps.get(node, {})
-			node_runtime_deps = runtime_deps.get(node, {})
-			for parent, priorities in parents:
+			for parent in parents:
 				if parent == node:
 					# Ignore a direct cycle.
 					continue
 				parent_bdeps = build_deps.setdefault(parent, {})
-				parent_rdeps = runtime_deps.setdefault(parent, {})
-				for priority in priorities:
-					if priority.buildtime:
-						parent_bdeps[slot_atom] = node
-					if priority.runtime:
-						parent_rdeps[slot_atom] = node
-				if slot_atom in parent_bdeps and slot_atom in parent_rdeps:
-					parent_rdeps.update(node_runtime_deps)
+				parent_bdeps[slot_atom] = node
 				if not graph.child_nodes(parent):
 					leaf_nodes.append(parent)
 
 			# Trigger rebuilds for our leaf node. Because all of our children
-			# have been processed, build_deps and runtime_deps will be
-			# completely filled in, and self.rebuild_list / self.reinstall_list
-			# will tell us whether any of our children need to be rebuilt or
-			# reinstalled.
-			if self._trigger_rebuild(node, node_build_deps, node_runtime_deps):
+			# have been processed, the build_deps will be completely filled in,
+			# and self.rebuild_list / self.reinstall_list will tell us whether
+			# any of our children need to be rebuilt or reinstalled.
+			if self._trigger_rebuild(node, node_build_deps):
 				need_restart = True
 
 		return need_restart
@@ -416,6 +391,11 @@ class _dynamic_depgraph_config(object):
 		self._ignored_deps = []
 		self._highest_pkg_cache = {}
 
+		# Binary packages that have been rejected because their USE
+		# didn't match the user's config. It maps packages to a set
+		# of flags causing the rejection.
+		self.ignored_binaries = {}
+
 		self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
 		self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
 		self._needed_license_changes = backtrack_parameters.needed_license_changes
@@ -536,9 +516,15 @@ class depgraph(object):
 
 		for myroot in self._frozen_config.trees:
 
+			dynamic_deps = self._dynamic_config.myparams.get(
+				"dynamic_deps", "y") != "n"
 			preload_installed_pkgs = \
 				"--nodeps" not in self._frozen_config.myopts
 
+			if self._frozen_config.myopts.get("--root-deps") is not None and \
+				myroot != self._frozen_config.target_root:
+				continue
+
 			fake_vartree = self._frozen_config.trees[myroot]["vartree"]
 			if not fake_vartree.dbapi:
 				# This needs to be called for the first depgraph, but not for
@@ -557,8 +543,11 @@ class depgraph(object):
 
 				for pkg in vardb:
 					self._spinner_update()
-					# This triggers metadata updates via FakeVartree.
-					vardb.aux_get(pkg.cpv, [])
+					if dynamic_deps:
+						# This causes FakeVartree to update the
+						# Package instance dependencies via
+						# PackageVirtualDbapi.aux_update()
+						vardb.aux_get(pkg.cpv, [])
 					fakedb.cpv_inject(pkg)
 
 		self._dynamic_config._vdb_loaded = True
@@ -567,6 +556,67 @@ class depgraph(object):
 		if self._frozen_config.spinner:
 			self._frozen_config.spinner.update()
 
+	def _show_ignored_binaries(self):
+		"""
+		Show binaries that have been ignored because their USE didn't
+		match the user's config.
+		"""
+		if not self._dynamic_config.ignored_binaries \
+			or '--quiet' in self._frozen_config.myopts \
+			or self._dynamic_config.myparams.get(
+			"binpkg_respect_use") in ("y", "n"):
+			return
+
+		for pkg in list(self._dynamic_config.ignored_binaries):
+
+			selected_pkg = self._dynamic_config.mydbapi[pkg.root
+				].match_pkgs(pkg.slot_atom)
+
+			if not selected_pkg:
+				continue
+
+			selected_pkg = selected_pkg[-1]
+			if selected_pkg > pkg:
+				self._dynamic_config.ignored_binaries.pop(pkg)
+				continue
+
+			if selected_pkg.installed and \
+				selected_pkg.cpv == pkg.cpv and \
+				selected_pkg.metadata.get('BUILD_TIME') == \
+				pkg.metadata.get('BUILD_TIME'):
+				# We don't care about ignored binaries when an
+				# identical installed instance is selected to
+				# fill the slot.
+				self._dynamic_config.ignored_binaries.pop(pkg)
+				continue
+
+		if not self._dynamic_config.ignored_binaries:
+			return
+
+		self._show_merge_list()
+
+		writemsg("\n!!! The following binary packages have been ignored " + \
+				"due to non matching USE:\n\n", noiselevel=-1)
+
+		for pkg, flags in self._dynamic_config.ignored_binaries.items():
+			writemsg("    =%s" % pkg.cpv, noiselevel=-1)
+			if pkg.root_config.settings["ROOT"] != "/":
+				writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+			writemsg("\n        use flag(s): %s\n" % ", ".join(sorted(flags)),
+				noiselevel=-1)
+
+		msg = [
+			"",
+			"NOTE: The --binpkg-respect-use=n option will prevent emerge",
+			"      from ignoring these binary packages if possible.",
+			"      Using --binpkg-respect-use=y will silence this warning."
+		]
+
+		for line in msg:
+			if line:
+				line = colorize("INFORM", line)
+			writemsg(line + "\n", noiselevel=-1)
+
 	def _show_missed_update(self):
 
 		# In order to minimize noise, show only the highest
@@ -578,6 +628,10 @@ class depgraph(object):
 				# Exclude installed here since we only
 				# want to show available updates.
 				continue
+			chosen_pkg = self._dynamic_config.mydbapi[pkg.root
+				].match_pkgs(pkg.slot_atom)
+			if not chosen_pkg or chosen_pkg[-1] >= pkg:
+				continue
 			k = (pkg.root, pkg.slot_atom)
 			if k in missed_updates:
 				other_pkg, mask_type, parent_atoms = missed_updates[k]
@@ -613,6 +667,7 @@ class depgraph(object):
 		if not missed_updates:
 			return
 
+		self._show_merge_list()
 		backtrack_masked = []
 
 		for pkg, parent_atoms in missed_updates:
@@ -630,7 +685,7 @@ class depgraph(object):
 				"due to unsatisfied dependencies:\n\n", noiselevel=-1)
 
 			writemsg(str(pkg.slot_atom), noiselevel=-1)
-			if pkg.root != '/':
+			if pkg.root_config.settings["ROOT"] != "/":
 				writemsg(" for %s" % (pkg.root,), noiselevel=-1)
 			writemsg("\n", noiselevel=-1)
 
@@ -646,7 +701,7 @@ class depgraph(object):
 				"!!! triggered by backtracking:\n\n", noiselevel=-1)
 			for pkg, parent_atoms in backtrack_masked:
 				writemsg(str(pkg.slot_atom), noiselevel=-1)
-				if pkg.root != '/':
+				if pkg.root_config.settings["ROOT"] != "/":
 					writemsg(" for %s" % (pkg.root,), noiselevel=-1)
 				writemsg("\n", noiselevel=-1)
 
@@ -655,6 +710,7 @@ class depgraph(object):
 		if not missed_updates:
 			return
 
+		self._show_merge_list()
 		msg = []
 		msg.append("\nWARNING: One or more updates have been " + \
 			"skipped due to a dependency conflict:\n\n")
@@ -662,7 +718,7 @@ class depgraph(object):
 		indent = "  "
 		for pkg, parent_atoms in missed_updates:
 			msg.append(str(pkg.slot_atom))
-			if pkg.root != '/':
+			if pkg.root_config.settings["ROOT"] != "/":
 				msg.append(" for %s" % (pkg.root,))
 			msg.append("\n\n")
 
@@ -777,19 +833,28 @@ class depgraph(object):
 					else:
 						self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
 
-	def _reinstall_for_flags(self, forced_flags,
+	def _reinstall_for_flags(self, pkg, forced_flags,
 		orig_use, orig_iuse, cur_use, cur_iuse):
 		"""Return a set of flags that trigger reinstallation, or None if there
 		are no such flags."""
-		if "--newuse" in self._frozen_config.myopts or \
-			"--binpkg-respect-use" in self._frozen_config.myopts:
+
+		# binpkg_respect_use: Behave like newuse by default. If newuse is
+		# False and changed_use is True, then behave like changed_use.
+		binpkg_respect_use = (pkg.built and
+			self._dynamic_config.myparams.get("binpkg_respect_use")
+			in ("y", "auto"))
+		newuse = "--newuse" in self._frozen_config.myopts
+		changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
+
+		if newuse or (binpkg_respect_use and not changed_use):
 			flags = set(orig_iuse.symmetric_difference(
 				cur_iuse).difference(forced_flags))
 			flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
 				cur_iuse.intersection(cur_use)))
 			if flags:
 				return flags
-		elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
+
+		elif changed_use or binpkg_respect_use:
 			flags = orig_iuse.intersection(orig_use).symmetric_difference(
 				cur_iuse.intersection(cur_use))
 			if flags:
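
The reworked _reinstall_for_flags() is mostly set algebra over USE/IUSE: flags whose IUSE membership changed (minus forced flags), plus flags present in both IUSE sets whose enabled state flipped. The --newuse branch in isolation:

def newuse_flags(forced, orig_use, orig_iuse, cur_use, cur_iuse):
    # Sketch of the set algebra used above for --newuse and
    # binpkg_respect_use.
    flags = orig_iuse.symmetric_difference(cur_iuse).difference(forced)
    flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
        cur_iuse.intersection(cur_use)))
    return flags or None

# "b" left IUSE, so a rebuild is triggered even though "a" is unchanged.
assert newuse_flags(set(), {"a"}, {"a", "b"}, {"a"}, {"a"}) == {"b"}
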
@@ -827,7 +892,7 @@ class depgraph(object):
 			relationships from nested sets
 		@type add_to_digraph: Boolean
 		@rtype: Iterable
-		@returns: All args given in the input together with additional
+		@return: All args given in the input together with additional
 			SetArg instances that are generated from nested sets
 		"""
 
@@ -876,8 +941,6 @@ class depgraph(object):
 		debug = "--debug" in self._frozen_config.myopts
 		buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
 		nodeps = "--nodeps" in self._frozen_config.myopts
-		deep = self._dynamic_config.myparams.get("deep", 0)
-		recurse = deep is True or dep.depth <= deep
 		if dep.blocker:
 			if not buildpkgonly and \
 				not nodeps and \
@@ -922,7 +985,7 @@ class depgraph(object):
 			# infinite backtracking loop.
 			if self._dynamic_config._allow_backtracking:
 				if dep.parent in self._dynamic_config._runtime_pkg_mask:
-					if "--debug" in self._frozen_config.myopts:
+					if debug:
 						writemsg(
 							"!!! backtracking loop detected: %s %s\n" % \
 							(dep.parent,
@@ -937,7 +1000,7 @@ class depgraph(object):
 					if dep_pkg is None:
 						self._dynamic_config._backtrack_infos["missing dependency"] = dep
 						self._dynamic_config._need_restart = True
-						if "--debug" in self._frozen_config.myopts:
+						if debug:
 							msg = []
 							msg.append("")
 							msg.append("")
@@ -1009,17 +1072,18 @@ class depgraph(object):
 			else:
 				# Display the specific atom from SetArg or
 				# Package types.
+				uneval = ""
+				if dep.atom is not dep.atom.unevaluated_atom:
+					uneval = " (%s)" % (dep.atom.unevaluated_atom,)
 				writemsg_level(
-					"%s%s required by %s\n" %
-					("Parent Dep:".ljust(15), dep.atom, myparent),
+					"%s%s%s required by %s\n" %
+					("Parent Dep:".ljust(15), dep.atom, uneval, myparent),
 					level=logging.DEBUG, noiselevel=-1)
 
 		# Ensure that the dependencies of the same package
 		# are never processed more than once.
 		previously_added = pkg in self._dynamic_config.digraph
 
-		# select the correct /var database that we'll be checking against
-		vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
 		pkgsettings = self._frozen_config.pkgsettings[pkg.root]
 
 		arg_atoms = None
@@ -1036,7 +1100,7 @@ class depgraph(object):
 		# package selection, since we want to prompt the user
 		# for USE adjustment rather than have REQUIRED_USE
 		# affect package selection and || dep choices.
-		if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
+		if not pkg.built and pkg.metadata.get("REQUIRED_USE") and \
 			eapi_has_required_use(pkg.metadata["EAPI"]):
 			required_use_is_sat = check_required_use(
 				pkg.metadata["REQUIRED_USE"],
@@ -1055,7 +1119,8 @@ class depgraph(object):
 				if atom is None:
 					atom = Atom("=" + pkg.cpv)
 				self._dynamic_config._unsatisfied_deps_for_display.append(
-					((pkg.root, atom), {"myparent":dep.parent}))
+					((pkg.root, atom),
+					{"myparent" : dep.parent, "show_req_use" : pkg}))
 				self._dynamic_config._skip_restart = True
 				return 0
 
@@ -1146,11 +1211,6 @@ class depgraph(object):
 									all_match = False
 									break
 
-							if to_be_selected >= to_be_masked:
-								# We only care about the parent atoms
-								# when they trigger a downgrade.
-								parent_atoms = set()
-
 							fallback_data.append((to_be_masked, parent_atoms))
 
 							if all_match:
@@ -1244,7 +1304,7 @@ class depgraph(object):
 					settings.unlock()
 					settings.setinst(pkg.cpv, pkg.metadata)
 					settings.lock()
-				except portage.exception.InvalidDependString as e:
+				except portage.exception.InvalidDependString:
 					if not pkg.installed:
 						# should have been masked before it was selected
 						raise
@@ -1265,12 +1325,11 @@ class depgraph(object):
 				self._dynamic_config.digraph.add(pkg, parent, priority=priority)
 				self._add_parent_atom(pkg, parent_atom)
 
-		""" This section determines whether we go deeper into dependencies or not.
-			We want to go deeper on a few occasions:
-			Installing package A, we need to make sure package A's deps are met.
-			emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
-			If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
-		"""
+		# This section determines whether we go deeper into dependencies or not.
+		# We want to go deeper on a few occasions:
+		# Installing package A, we need to make sure package A's deps are met.
+		# emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
+		# If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
 		if arg_atoms:
 			depth = 0
 		pkg.depth = depth
@@ -1318,13 +1377,8 @@ class depgraph(object):
 
 	def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
 
-		mytype = pkg.type_name
 		myroot = pkg.root
-		mykey = pkg.cpv
 		metadata = pkg.metadata
-		myuse = self._pkg_use_enabled(pkg)
-		jbigkey = pkg
-		depth = pkg.depth + 1
 		removal_action = "remove" in self._dynamic_config.myparams
 
 		edepend={}
@@ -1361,7 +1415,7 @@ class depgraph(object):
 		if removal_action:
 			depend_root = myroot
 		else:
-			depend_root = "/"
+			depend_root = self._frozen_config._running_root.root
 			root_deps = self._frozen_config.myopts.get("--root-deps")
 			if root_deps is not None:
 				if root_deps is True:
@@ -1388,7 +1442,6 @@ class depgraph(object):
 		)
 
 		debug = "--debug" in self._frozen_config.myopts
-		strict = mytype != "installed"
 
 		for dep_root, dep_string, dep_priority in deps:
 				if not dep_string:
@@ -1481,7 +1534,7 @@ class depgraph(object):
 			selected_atoms = self._select_atoms(dep_root,
 				dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
 				strict=strict, priority=dep_priority)
-		except portage.exception.InvalidDependString as e:
+		except portage.exception.InvalidDependString:
 			if pkg.installed:
 				self._dynamic_config._masked_installed.add(pkg)
 				return 1
@@ -1731,7 +1784,7 @@ class depgraph(object):
 			pkg_atom_map.setdefault(pkg, set()).add(atom)
 			cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
 
-		for cp, pkgs in cp_pkg_map.items():
+		for pkgs in cp_pkg_map.values():
 			if len(pkgs) < 2:
 				for pkg in pkgs:
 					for atom in pkg_atom_map[pkg]:
@@ -1807,7 +1860,7 @@ class depgraph(object):
 				i += 1
 			else:
 				try:
-					x = portage.dep.Atom(x)
+					x = portage.dep.Atom(x, eapi=pkg.metadata["EAPI"])
 				except portage.exception.InvalidAtom:
 					if not pkg.installed:
 						raise portage.exception.InvalidDependString(
@@ -1855,7 +1908,7 @@ class depgraph(object):
 		@param atom_without_category: an atom without a category component
 		@type atom_without_category: String
 		@rtype: list
-		@returns: a list of atoms containing categories (possibly empty)
+		@return: a list of atoms containing categories (possibly empty)
 		"""
 		null_cp = portage.dep_getkey(insert_category_into_atom(
 			atom_without_category, "null"))
@@ -1886,7 +1939,6 @@ class depgraph(object):
 	def _iter_atoms_for_pkg(self, pkg):
 		depgraph_sets = self._dynamic_config.sets[pkg.root]
 		atom_arg_map = depgraph_sets.atom_arg_map
-		root_config = self._frozen_config.roots[pkg.root]
 		for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
 			if atom.cp != pkg.cp and \
 				self._have_new_virt(pkg.root, atom.cp):
@@ -1923,13 +1975,13 @@ class depgraph(object):
 		sets = root_config.sets
 		depgraph_sets = self._dynamic_config.sets[root_config.root]
 		myfavorites=[]
-		myroot = self._frozen_config.target_root
-		dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
-		vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
-		real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
-		portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
-		bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
-		pkgsettings = self._frozen_config.pkgsettings[myroot]
+		eroot = root_config.root
+		root = root_config.settings['ROOT']
+		vardb = self._frozen_config.trees[eroot]["vartree"].dbapi
+		real_vardb = self._frozen_config._trees_orig[eroot]["vartree"].dbapi
+		portdb = self._frozen_config.trees[eroot]["porttree"].dbapi
+		bindb = self._frozen_config.trees[eroot]["bintree"].dbapi
+		pkgsettings = self._frozen_config.pkgsettings[eroot]
 		args = []
 		onlydeps = "--onlydeps" in self._frozen_config.myopts
 		lookup_owners = []
@@ -1950,7 +2002,7 @@ class depgraph(object):
 				mytbz2=portage.xpak.tbz2(x)
 				mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
 				if os.path.realpath(x) != \
-					os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
+					os.path.realpath(bindb.bintree.getname(mykey)):
 					writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
 					self._dynamic_config._skip_restart = True
 					return 0, myfavorites
@@ -1996,9 +2048,9 @@ class depgraph(object):
 				args.append(PackageArg(arg=x, package=pkg,
 					root_config=root_config))
 			elif x.startswith(os.path.sep):
-				if not x.startswith(myroot):
+				if not x.startswith(eroot):
 					portage.writemsg(("\n\n!!! '%s' does not start with" + \
-						" $ROOT.\n") % x, noiselevel=-1)
+						" $EROOT.\n") % x, noiselevel=-1)
 					self._dynamic_config._skip_restart = True
 					return 0, []
 				# Queue these up since it's most efficient to handle
@@ -2007,9 +2059,9 @@ class depgraph(object):
 			elif x.startswith("." + os.sep) or \
 				x.startswith(".." + os.sep):
 				f = os.path.abspath(x)
-				if not f.startswith(myroot):
+				if not f.startswith(eroot):
 					portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
-						" $ROOT.\n") % (f, x), noiselevel=-1)
+						" $EROOT.\n") % (f, x), noiselevel=-1)
 					self._dynamic_config._skip_restart = True
 					return 0, []
 				lookup_owners.append(f)
@@ -2126,7 +2178,7 @@ class depgraph(object):
 			for x in lookup_owners:
 				if not search_for_multiple and os.path.isdir(x):
 					search_for_multiple = True
-				relative_paths.append(x[len(myroot)-1:])
+				relative_paths.append(x[len(root)-1:])
 
 			owners = set()
 			for pkg, relative_path in \
@@ -2526,24 +2578,36 @@ class depgraph(object):
 			# account for masking and USE settings.
 			_autounmask_backup = self._dynamic_config._autounmask
 			self._dynamic_config._autounmask = False
-			mytrees["pkg_use_enabled"] = self._pkg_use_enabled
+			# backup state for restoration, in case of recursive
+			# calls to this method
+			backup_state = mytrees.copy()
 			try:
+				# Clear state from the previous call, in case this
+				# call is recursive (we have a backup that we
+				# will use to restore it later).
+				mytrees.pop("pkg_use_enabled", None)
+				mytrees.pop("parent", None)
+				mytrees.pop("atom_graph", None)
+				mytrees.pop("priority", None)
+
+				mytrees["pkg_use_enabled"] = self._pkg_use_enabled
 				if parent is not None:
-					trees[root]["parent"] = parent
-					trees[root]["atom_graph"] = atom_graph
+					mytrees["parent"] = parent
+					mytrees["atom_graph"] = atom_graph
 				if priority is not None:
-					trees[root]["priority"] = priority
+					mytrees["priority"] = priority
+
 				mycheck = portage.dep_check(depstring, None,
 					pkgsettings, myuse=myuse,
 					myroot=root, trees=trees)
 			finally:
+				# restore state
 				self._dynamic_config._autounmask = _autounmask_backup
-				del mytrees["pkg_use_enabled"]
-				if parent is not None:
-					trees[root].pop("parent")
-					trees[root].pop("atom_graph")
-				if priority is not None:
-					trees[root].pop("priority")
+				mytrees.pop("pkg_use_enabled", None)
+				mytrees.pop("parent", None)
+				mytrees.pop("atom_graph", None)
+				mytrees.pop("priority", None)
+				mytrees.update(backup_state)
 			if not mycheck[0]:
 				raise portage.exception.InvalidDependString(mycheck[1])
 		if parent is None:
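
For illustration, a minimal sketch of the save/restore pattern used in the hunk above: install per-call keys into a shared, mutable dict and guarantee the caller's view is restored even on error or recursion. The names (with_temp_keys, shared, temp) are illustrative, not portage API.

def with_temp_keys(shared, temp, func):
    # Back up the caller's view of the dict before touching it.
    backup = shared.copy()
    try:
        # Drop stale per-call keys possibly left by an outer
        # (recursive) invocation, then install our own.
        for key in temp:
            shared.pop(key, None)
        shared.update(temp)
        return func(shared)
    finally:
        # Put back exactly the state we found, even on error.
        for key in temp:
            shared.pop(key, None)
        shared.update(backup)
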
@@ -2637,6 +2701,38 @@ class depgraph(object):
 					continue
 				yield atom
 
+	def _virt_deps_visible(self, pkg, ignore_use=False):
+		"""
+		Assumes pkg is a virtual package. Traverses virtual deps recursively
+		and returns True if all deps are visible, False otherwise. This is
+		useful for checking if it will be necessary to expand virtual slots,
+		for cases like bug #382557.
+		"""
+		try:
+			rdepend = self._select_atoms(
+				pkg.root, pkg.metadata.get("RDEPEND", ""),
+				myuse=self._pkg_use_enabled(pkg),
+				parent=pkg, priority=self._priority(runtime=True))
+		except InvalidDependString as e:
+			if not pkg.installed:
+				raise
+			writemsg_level("!!! Invalid RDEPEND in " + \
+				"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+				(pkg.root, pkg.cpv, e),
+				noiselevel=-1, level=logging.ERROR)
+			return False
+
+		for atoms in rdepend.values():
+			for atom in atoms:
+				if ignore_use:
+					atom = atom.without_use
+				pkg, existing = self._select_package(
+					pkg.root, atom)
+				if pkg is None or not self._pkg_visibility_check(pkg):
+					return False
+
+		return True
+
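
Reduced to its core, _virt_deps_visible() resolves the virtual's RDEPEND and requires every atom to map to a visible package. A self-contained sketch, where resolve_atoms(), select_best() and is_visible() are hypothetical stand-ins for _select_atoms(), _select_package() and _pkg_visibility_check(), not the real API:

def virt_deps_visible(pkg, resolve_atoms, select_best, is_visible,
                      ignore_use=False):
    # resolve_atoms() returns a mapping whose values are atom lists,
    # mirroring the shape of depgraph._select_atoms() results.
    for atoms in resolve_atoms(pkg).values():
        for atom in atoms:
            if ignore_use:
                atom = atom.without_use  # attribute as on portage.dep.Atom
            candidate = select_best(atom)
            if candidate is None or not is_visible(candidate):
                return False
    return True
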
 	def _get_dep_chain(self, start_node, target_atom=None,
 		unsatisfied_dependency=False):
 		"""
@@ -2652,6 +2748,7 @@ class depgraph(object):
 		node = start_node
 		child = None
 		all_parents = self._dynamic_config._parent_atoms
+		graph = self._dynamic_config.digraph
 
 		if target_atom is not None and isinstance(node, Package):
 			affecting_use = set()
@@ -2676,11 +2773,46 @@ class depgraph(object):
 
 			dep_chain.append((pkg_name, node.type_name))
 
+
+		# To build a dep chain for the given package we take
+		# "random" parents from the digraph, except for the
+		# first package, because we want a parent that forced
+		# the corresponding change (i.e. '>=foo-2' instead of 'foo').
+
+		traversed_nodes.add(start_node)
+
+		start_node_parent_atoms = {}
+		for ppkg, patom in all_parents.get(node, []):
+			# Get a list of suitable atoms. For use deps
+			# Get a list of suitable atoms. For USE deps
+			# (i.e. when unsatisfied_dependency is set) we
+			# require that start_node does not match the atom.
+				not InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(start_node):
+				start_node_parent_atoms.setdefault(patom, []).append(ppkg)
+
+		if start_node_parent_atoms:
+			# If there are parents in all_parents then use one of them.
+			# If not, then this package got pulled in by an Arg and
+			# will be correctly handled by the code that handles later
+			# packages in the dep chain.
+			best_match = best_match_to_list(node.cpv, start_node_parent_atoms)
+
+			child = node
+			for ppkg in start_node_parent_atoms[best_match]:
+				node = ppkg
+				if ppkg in self._dynamic_config._initial_arg_list:
+					# Stop if we have reached the top level of the dep chain.
+					break
+
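
best_match_to_list() is what makes the first link of the chain informative: among several parent atoms pulling in the same package, the most specific one wins. Assuming a portage checkout on PYTHONPATH, a quick illustration:

from portage.dep import Atom, best_match_to_list

atoms = [Atom("dev-lang/python"), Atom(">=dev-lang/python-2.7")]
# The versioned atom is preferred, since it is the one that could
# have forced a change such as an upgrade.
print(best_match_to_list("dev-lang/python-2.7.1", atoms))
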
 		while node is not None:
 			traversed_nodes.add(node)
 
-			if isinstance(node, DependencyArg):
-				if self._dynamic_config.digraph.parent_nodes(node):
+			if node not in graph:
+				# The parent is not in the graph due to backtracking.
+				break
+
+			elif isinstance(node, DependencyArg):
+				if graph.parent_nodes(node):
 					node_type = "set"
 				else:
 					node_type = "argument"
@@ -2689,17 +2821,29 @@ class depgraph(object):
 			elif node is not start_node:
 				for ppkg, patom in all_parents[child]:
 					if ppkg == node:
+						if child is start_node and unsatisfied_dependency and \
+							InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(child):
+							# This atom is satisfied by child; there must be another atom.
+							continue
 						atom = patom.unevaluated_atom
 						break
 
 				dep_strings = set()
-				for priority in self._dynamic_config.digraph.nodes[node][0][child]:
-					if priority.buildtime:
-						dep_strings.add(node.metadata["DEPEND"])
-					if priority.runtime:
-						dep_strings.add(node.metadata["RDEPEND"])
-					if priority.runtime_post:
-						dep_strings.add(node.metadata["PDEPEND"])
+				priorities = graph.nodes[node][0].get(child)
+				if priorities is None:
+					# This edge comes from _parent_atoms and was not added to
+					# the graph, and _parent_atoms does not contain priorities.
+					dep_strings.add(node.metadata["DEPEND"])
+					dep_strings.add(node.metadata["RDEPEND"])
+					dep_strings.add(node.metadata["PDEPEND"])
+				else:
+					for priority in priorities:
+						if priority.buildtime:
+							dep_strings.add(node.metadata["DEPEND"])
+						if priority.runtime:
+							dep_strings.add(node.metadata["RDEPEND"])
+						if priority.runtime_post:
+							dep_strings.add(node.metadata["PDEPEND"])
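
In isolation, the priority-to-variable mapping used above looks like the sketch below, with the unknown-priority case collapsed into a use-all-three default (a simplification of the None check in the hunk):

def dep_vars_for(priority):
    # Map an edge priority onto the ebuild variables that could
    # have declared the dependency.
    dep_vars = set()
    if priority.buildtime:
        dep_vars.add("DEPEND")
    if priority.runtime:
        dep_vars.add("RDEPEND")
    if priority.runtime_post:
        dep_vars.add("PDEPEND")
    return dep_vars or {"DEPEND", "RDEPEND", "PDEPEND"}
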
 
 				affecting_use = set()
 				for dep_str in dep_strings:
@@ -2726,10 +2870,6 @@ class depgraph(object):
 
 				dep_chain.append((pkg_name, node.type_name))
 
-			if node not in self._dynamic_config.digraph:
-				# The parent is not in the graph due to backtracking.
-				break
-
 			# When traversing to parents, prefer arguments over packages
 			# since arguments are root nodes. Never traverse the same
 			# package twice, in order to prevent an infinite loop.
@@ -2791,7 +2931,7 @@ class depgraph(object):
 
 
 	def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
-		check_backtrack=False, check_autounmask_breakage=False):
+		check_backtrack=False, check_autounmask_breakage=False, show_req_use=None):
 		"""
 		When check_backtrack=True, no output is produced and
 		the method either returns or raises _backtrack_mask if
@@ -2810,14 +2950,13 @@ class depgraph(object):
 			xinfo = _unicode_decode('"%s"') % (myparent,)
 		# Discard null/ from failed cpv_expand category expansion.
 		xinfo = xinfo.replace("null/", "")
-		if root != "/":
+		if root != self._frozen_config._running_root.root:
 			xinfo = "%s for %s" % (xinfo, root)
 		masked_packages = []
 		missing_use = []
 		missing_use_adjustable = set()
 		required_use_unsatisfied = []
 		masked_pkg_instances = set()
-		missing_licenses = []
 		have_eapi_mask = False
 		pkgsettings = self._frozen_config.pkgsettings[root]
 		root_config = self._frozen_config.roots[root]
@@ -2828,7 +2967,6 @@ class depgraph(object):
 		for db, pkg_type, built, installed, db_keys in dbs:
 			if installed:
 				continue
-			match = db.match
 			if hasattr(db, "xmatch"):
 				cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
 			else:
@@ -2854,12 +2992,20 @@ class depgraph(object):
 							repo = metadata.get('repository')
 						pkg = self._pkg(cpv, pkg_type, root_config,
 							installed=installed, myrepo=repo)
-						if not atom_set.findAtomForPackage(pkg,
-							modified_use=self._pkg_use_enabled(pkg)):
-							continue
 						# pkg.metadata contains calculated USE for ebuilds,
 						# required later for getMissingLicenses.
 						metadata = pkg.metadata
+						if pkg.invalid:
+							# Avoid doing any operations with packages that
+							# have invalid metadata. It would be unsafe at
+							# least because it could trigger unhandled
+							# exceptions in places like check_required_use().
+							masked_packages.append(
+								(root_config, pkgsettings, cpv, repo, metadata, mreasons))
+							continue
+						if not atom_set.findAtomForPackage(pkg,
+							modified_use=self._pkg_use_enabled(pkg)):
+							continue
 						if pkg in self._dynamic_config._runtime_pkg_mask:
 							backtrack_reasons = \
 								self._dynamic_config._runtime_pkg_mask[pkg]
@@ -2887,7 +3033,7 @@ class depgraph(object):
 								raise
 						if not mreasons and \
 							not pkg.built and \
-							pkg.metadata["REQUIRED_USE"] and \
+							pkg.metadata.get("REQUIRED_USE") and \
 							eapi_has_required_use(pkg.metadata["EAPI"]):
 							if not check_required_use(
 								pkg.metadata["REQUIRED_USE"],
@@ -2942,7 +3088,7 @@ class depgraph(object):
 					continue
 
 				missing_use_adjustable.add(pkg)
-				required_use = pkg.metadata["REQUIRED_USE"]
+				required_use = pkg.metadata.get("REQUIRED_USE")
 				required_use_warning = ""
 				if required_use:
 					old_use = self._pkg_use_enabled(pkg)
@@ -2990,7 +3136,7 @@ class depgraph(object):
 					if untouchable_flags.intersection(involved_flags):
 						continue
 
-					required_use = myparent.metadata["REQUIRED_USE"]
+					required_use = myparent.metadata.get("REQUIRED_USE")
 					required_use_warning = ""
 					if required_use:
 						old_use = self._pkg_use_enabled(myparent)
@@ -3066,62 +3212,66 @@ class depgraph(object):
 
 		mask_docs = False
 
-		if required_use_unsatisfied:
+		if show_req_use is None and required_use_unsatisfied:
 			# We have an unmasked package that only requires USE adjustment
 			# in order to satisfy REQUIRED_USE, and nothing more. We assume
 			# that the user wants the latest version, so only the first
 			# instance is displayed.
-			pkg = required_use_unsatisfied[0]
+			show_req_use = required_use_unsatisfied[0]
+
+		if show_req_use is not None:
+
+			pkg = show_req_use
 			output_cpv = pkg.cpv + _repo_separator + pkg.repo
-			writemsg_stdout("\n!!! " + \
+			writemsg("\n!!! " + \
 				colorize("BAD", "The ebuild selected to satisfy ") + \
 				colorize("INFORM", xinfo) + \
 				colorize("BAD", " has unmet requirements.") + "\n",
 				noiselevel=-1)
 			use_display = pkg_use_display(pkg, self._frozen_config.myopts)
-			writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
+			writemsg("- %s %s\n" % (output_cpv, use_display),
 				noiselevel=-1)
-			writemsg_stdout("\n  The following REQUIRED_USE flag constraints " + \
+			writemsg("\n  The following REQUIRED_USE flag constraints " + \
 				"are unsatisfied:\n", noiselevel=-1)
 			reduced_noise = check_required_use(
 				pkg.metadata["REQUIRED_USE"],
 				self._pkg_use_enabled(pkg),
 				pkg.iuse.is_valid_flag).tounicode()
-			writemsg_stdout("    %s\n" % \
+			writemsg("    %s\n" % \
 				human_readable_required_use(reduced_noise),
 				noiselevel=-1)
 			normalized_required_use = \
 				" ".join(pkg.metadata["REQUIRED_USE"].split())
 			if reduced_noise != normalized_required_use:
-				writemsg_stdout("\n  The above constraints " + \
+				writemsg("\n  The above constraints " + \
 					"are a subset of the following complete expression:\n",
 					noiselevel=-1)
-				writemsg_stdout("    %s\n" % \
+				writemsg("    %s\n" % \
 					human_readable_required_use(normalized_required_use),
 					noiselevel=-1)
-			writemsg_stdout("\n", noiselevel=-1)
+			writemsg("\n", noiselevel=-1)
 
 		elif show_missing_use:
-			writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
-			writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
+			writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+			writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
 			for pkg, mreasons in show_missing_use:
-				writemsg_stdout("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
+				writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
 
 		elif masked_packages:
-			writemsg_stdout("\n!!! " + \
+			writemsg("\n!!! " + \
 				colorize("BAD", "All ebuilds that could satisfy ") + \
 				colorize("INFORM", xinfo) + \
 				colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
-			writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
+			writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
 			have_eapi_mask = show_masked_packages(masked_packages)
 			if have_eapi_mask:
-				writemsg_stdout("\n", noiselevel=-1)
+				writemsg("\n", noiselevel=-1)
 				msg = ("The current version of portage supports " + \
 					"EAPI '%s'. You must upgrade to a newer version" + \
 					" of portage before EAPI masked packages can" + \
 					" be installed.") % portage.const.EAPI
-				writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
-			writemsg_stdout("\n", noiselevel=-1)
+				writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
+			writemsg("\n", noiselevel=-1)
 			mask_docs = True
 		else:
 			cp_exists = False
@@ -3131,7 +3281,7 @@ class depgraph(object):
 					cp_exists = True
 					break
 
-			writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+			writemsg("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
 			if isinstance(myparent, AtomArg) and \
 				not cp_exists and \
 				self._frozen_config.myopts.get(
@@ -3141,12 +3291,13 @@ class depgraph(object):
 				if cat == "null":
 					cat = None
 
-				writemsg_stdout("\nemerge: searching for similar names..."
+				writemsg("\nemerge: searching for similar names..."
 					, noiselevel=-1)
 
 				all_cp = set()
 				all_cp.update(vardb.cp_all())
-				all_cp.update(portdb.cp_all())
+				if "--usepkgonly" not in self._frozen_config.myopts:
+					all_cp.update(portdb.cp_all())
 				if "--usepkg" in self._frozen_config.myopts:
 					all_cp.update(bindb.cp_all())
 				# discard dir containing no ebuilds
@@ -3164,9 +3315,18 @@ class depgraph(object):
 					for other_cp in list(all_cp):
 						other_pkg = portage.catsplit(other_cp)[1]
 						if other_pkg == pkg:
-							# discard dir containing no ebuilds
-							all_cp.discard(other_cp)
-							continue
+							# Check for non-identical package that
+							# differs only by upper/lower case.
+							identical = True
+							for cp_orig in orig_cp_map[other_cp]:
+								if portage.catsplit(cp_orig)[1] != \
+									portage.catsplit(atom.cp)[1]:
+									identical = False
+									break
+							if identical:
+								# discard dir containing no ebuilds
+								all_cp.discard(other_cp)
+								continue
 						pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
 					pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
 					matches = []
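
The similar-name suggestions come from difflib.get_close_matches() in the standard library, which ranks candidates by SequenceMatcher ratio:

import difflib

candidates = ["firefox", "filezilla", "gimp"]
# Returns up to n=3 candidates scoring above cutoff=0.6, best first.
print(difflib.get_close_matches("firefx", candidates))
# ['firefox']
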
@@ -3179,16 +3339,16 @@ class depgraph(object):
 				matches = matches_orig_case
 
 				if len(matches) == 1:
-					writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
+					writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
 						, noiselevel=-1)
 				elif len(matches) > 1:
-					writemsg_stdout(
+					writemsg(
 						"\nemerge: Maybe you meant any of these: %s?\n" % \
 						(", ".join(matches),), noiselevel=-1)
 				else:
 					# Generally, this would only happen if
 					# all dbapis are empty.
-					writemsg_stdout(" nothing similar found.\n"
+					writemsg(" nothing similar found.\n"
 						, noiselevel=-1)
 		msg = []
 		if not isinstance(myparent, AtomArg):
@@ -3201,12 +3361,12 @@ class depgraph(object):
 						(node)), node_type))
 
 		if msg:
-			writemsg_stdout("\n".join(msg), noiselevel=-1)
-			writemsg_stdout("\n", noiselevel=-1)
+			writemsg("\n".join(msg), noiselevel=-1)
+			writemsg("\n", noiselevel=-1)
 
 		if mask_docs:
 			show_mask_docs()
-			writemsg_stdout("\n", noiselevel=-1)
+			writemsg("\n", noiselevel=-1)
 
 	def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
 		for db, pkg_type, built, installed, db_keys in \
@@ -3224,51 +3384,12 @@ class depgraph(object):
 		"""
 
 		db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
-
-		if hasattr(db, "xmatch"):
-			# For portdbapi we match only against the cpv, in order
-			# to bypass unnecessary cache access for things like IUSE
-			# and SLOT. Later, we cache the metadata in a Package
-			# instance, and use that for further matching. This
-			# optimization is especially relevant since
-			# pordbapi.aux_get() does not cache calls that have
-			# myrepo or mytree arguments.
-			cpv_list = db.xmatch("match-all-cpv-only", atom)
-		else:
-			cpv_list = db.match(atom)
-
-		# USE=multislot can make an installed package appear as if
-		# it doesn't satisfy a slot dependency. Rebuilding the ebuild
-		# won't do any good as long as USE=multislot is enabled since
-		# the newly built package still won't have the expected slot.
-		# Therefore, assume that such SLOT dependencies are already
-		# satisfied rather than forcing a rebuild.
+		atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
+		cp_list = db.cp_list(atom_exp.cp)
+		matched_something = False
 		installed = pkg_type == 'installed'
-		if installed and not cpv_list and atom.slot:
-			for cpv in db.match(atom.cp):
-				slot_available = False
-				for other_db, other_type, other_built, \
-					other_installed, other_keys in \
-					self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
-					try:
-						if atom.slot == \
-							other_db.aux_get(cpv, ["SLOT"])[0]:
-							slot_available = True
-							break
-					except KeyError:
-						pass
-				if not slot_available:
-					continue
-				inst_pkg = self._pkg(cpv, "installed",
-					root_config, installed=installed, myrepo = atom.repo)
-				# Remove the slot from the atom and verify that
-				# the package matches the resulting atom.
-				if portage.match_from_list(
-					atom.without_slot, [inst_pkg]):
-					yield inst_pkg
-					return
-
-		if cpv_list:
+
+		if cp_list:
 			atom_set = InternalPackageSet(initial_atoms=(atom,),
 				allow_repo=True)
 			if atom.repo is None and hasattr(db, "getRepositories"):
@@ -3277,8 +3398,13 @@ class depgraph(object):
 				repo_list = [atom.repo]
 
 			# descending order
-			cpv_list.reverse()
-			for cpv in cpv_list:
+			cp_list.reverse()
+			for cpv in cp_list:
+				# Call match_from_list on one cpv at a time, in order
+				# to avoid unnecessary match_from_list comparisons on
+				# versions that are never yielded from this method.
+				if not match_from_list(atom_exp, [cpv]):
+					continue
 				for repo in repo_list:
 
 					try:
@@ -3295,16 +3421,63 @@ class depgraph(object):
 						# Make sure that cpv from the current repo satisfies the atom.
 						# This might not be the case if there are several repos with
 						# the same cpv, but different metadata keys, like SLOT.
-						# Also, for portdbapi, parts of the match that require
-						# metadata access are deferred until we have cached the
-						# metadata in a Package instance.
+						# Also, parts of the match that require metadata access
+						# are deferred until we have cached the metadata in a
+						# Package instance.
 						if not atom_set.findAtomForPackage(pkg,
 							modified_use=self._pkg_use_enabled(pkg)):
 							continue
+						matched_something = True
 						yield pkg
 
+		# USE=multislot can make an installed package appear as if
+		# it doesn't satisfy a slot dependency. Rebuilding the ebuild
+		# won't do any good as long as USE=multislot is enabled since
+		# the newly built package still won't have the expected slot.
+		# Therefore, assume that such SLOT dependencies are already
+		# satisfied rather than forcing a rebuild.
+		if not matched_something and installed and atom.slot is not None:
+
+			if "remove" in self._dynamic_config.myparams:
+				# We need to search the portdbapi, which is not in our
+				# normal dbs list, in order to find the real SLOT.
+				portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
+				db_keys = list(portdb._aux_cache_keys)
+				dbs = [(portdb, "ebuild", False, False, db_keys)]
+			else:
+				dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
+
+			cp_list = db.cp_list(atom_exp.cp)
+			if cp_list:
+				atom_set = InternalPackageSet(
+					initial_atoms=(atom.without_slot,), allow_repo=True)
+				atom_exp_without_slot = atom_exp.without_slot
+				cp_list.reverse()
+				for cpv in cp_list:
+					if not match_from_list(atom_exp_without_slot, [cpv]):
+						continue
+					slot_available = False
+					for other_db, other_type, other_built, \
+						other_installed, other_keys in dbs:
+						try:
+							if atom.slot == \
+								other_db.aux_get(cpv, ["SLOT"])[0]:
+								slot_available = True
+								break
+						except KeyError:
+							pass
+					if not slot_available:
+						continue
+					inst_pkg = self._pkg(cpv, "installed",
+						root_config, installed=installed, myrepo=atom.repo)
+					# Remove the slot from the atom and verify that
+					# the package matches the resulting atom.
+					if atom_set.findAtomForPackage(inst_pkg):
+						yield inst_pkg
+						return
+
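
The rewrite above defers match_from_list() to one cpv at a time, so a caller that stops after the first hit never pays for matching the rest. The shape of that strategy, reduced to a sketch (iter_matches and matches are illustrative names):

def iter_matches(cp_list, matches):
    # cp_list is ascending; walk it in descending version order
    # and test candidates lazily, one at a time.
    for cpv in reversed(cp_list):
        if matches(cpv):
            yield cpv

# A consumer that only wants the best match touches a single element:
best = next(iter_matches(["foo-1", "foo-2"], lambda cpv: True), None)
print(best)  # foo-2
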
 	def _select_pkg_highest_available(self, root, atom, onlydeps=False):
-		cache_key = (root, atom, onlydeps)
+		cache_key = (root, atom, atom.unevaluated_atom, onlydeps)
 		ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
 		if ret is not None:
 			pkg, existing = ret
@@ -3320,7 +3493,6 @@ class depgraph(object):
 		self._dynamic_config._highest_pkg_cache[cache_key] = ret
 		pkg, existing = ret
 		if pkg is not None:
-			settings = pkg.root_config.settings
 			if self._pkg_visibility_check(pkg) and \
 				not (pkg.installed and pkg.masks):
 				self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
@@ -3347,40 +3519,81 @@ class depgraph(object):
 				return False
 		return True
 
+	class _AutounmaskLevel(object):
+		__slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
+			"allow_missing_keywords", "allow_unmasks")
+
+		def __init__(self):
+			self.allow_use_changes = False
+			self.allow_license_changes = False
+			self.allow_unstable_keywords = False
+			self.allow_missing_keywords = False
+			self.allow_unmasks = False
+
+	def _autounmask_levels(self):
+		"""
+		Iterate over the progressively more invasive sets of changes
+		that autounmask is allowed to make.
+
+		1. USE
+		2. USE + ~arch + license
+		3. USE + ~arch + license + missing keywords
+		4. USE + ~arch + license + masks
+		5. USE + ~arch + license + missing keywords + masks
+
+		Some thoughts:
+			* Do the least invasive changes first.
+			* Try unmasking alone before unmasking + missing keywords,
+				to avoid -9999 versions if possible.
+		"""
+
+		if self._dynamic_config._autounmask is not True:
+			return
+
+		autounmask_keep_masks = self._frozen_config.myopts.get("--autounmask-keep-masks", "n") != "n"
+		autounmask_level = self._AutounmaskLevel()
+
+		autounmask_level.allow_use_changes = True
+
+		for only_use_changes in (True, False):
+
+			autounmask_level.allow_unstable_keywords = (not only_use_changes)
+			autounmask_level.allow_license_changes = (not only_use_changes)
+
+			for missing_keyword, unmask in ((False, False), (True, False), (False, True), (True, True)):
+
+				if (only_use_changes or autounmask_keep_masks) and (missing_keyword or unmask):
+					break
+
+				autounmask_level.allow_missing_keywords = missing_keyword
+				autounmask_level.allow_unmasks = unmask
+
+				yield autounmask_level
+
+
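
The five levels the generator walks through can be reproduced with plain dicts in place of _AutounmaskLevel. Note that portage's generator yields one object and mutates it in place, so consumers must use each level before advancing:

def autounmask_levels(keep_masks=False):
    for only_use in (True, False):
        for missing_kw, unmask in ((False, False), (True, False),
                                   (False, True), (True, True)):
            if (only_use or keep_masks) and (missing_kw or unmask):
                break
            yield {"use": True,
                   "unstable_keywords": not only_use,
                   "licenses": not only_use,
                   "missing_keywords": missing_kw,
                   "unmasks": unmask}

for level in autounmask_levels():
    print(level)  # five levels, least invasive first
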
 	def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
 		pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
 
 		default_selection = (pkg, existing)
 
-		if self._dynamic_config._autounmask is True:
+		def reset_pkg(pkg):
 			if pkg is not None and \
 				pkg.installed and \
 				not self._want_installed_pkg(pkg):
 				pkg = None
 
-			for only_use_changes in True, False:
+		if self._dynamic_config._autounmask is True:
+			reset_pkg(pkg)
+
+			for autounmask_level in self._autounmask_levels():
 				if pkg is not None:
 					break
 
-				for allow_unmasks in (False, True):
-					if only_use_changes and allow_unmasks:
-						continue
+				pkg, existing = \
+					self._wrapped_select_pkg_highest_available_imp(
+						root, atom, onlydeps=onlydeps,
+						autounmask_level=autounmask_level)
 
-					if pkg is not None:
-						break
-
-					pkg, existing = \
-						self._wrapped_select_pkg_highest_available_imp(
-							root, atom, onlydeps=onlydeps,
-							allow_use_changes=True,
-							allow_unstable_keywords=(not only_use_changes),
-							allow_license_changes=(not only_use_changes),
-							allow_unmasks=allow_unmasks)
-
-					if pkg is not None and \
-						pkg.installed and \
-						not self._want_installed_pkg(pkg):
-						pkg = None
+				reset_pkg(pkg)
 			
 			if self._dynamic_config._need_restart:
 				return None, None
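
Condensed, the retry logic reads: take the plain selection, discard it if it is an installed package we no longer want, then re-query at each autounmask level until something sticks. A sketch with hypothetical select()/acceptable() helpers:

def select_with_autounmask(select, acceptable, levels):
    pkg = select(None)  # plain selection, autounmask disabled
    if pkg is not None and not acceptable(pkg):
        pkg = None
    for level in levels:
        if pkg is not None:
            break
        pkg = select(level)
        if pkg is not None and not acceptable(pkg):
            pkg = None
    return pkg
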
@@ -3392,21 +3605,20 @@ class depgraph(object):
 
 		return pkg, existing
 
-	def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
+	def _pkg_visibility_check(self, pkg, autounmask_level=None, trust_graph=True):
 
 		if pkg.visible:
 			return True
 
-		if pkg in self._dynamic_config.digraph:
+		if trust_graph and pkg in self._dynamic_config.digraph:
 			# Sometimes we need to temporarily disable
 			# dynamic_config._autounmask, but for overall
-			# consistency in dependency resolution, in any
-			# case we want to respect autounmask visibity
-			# for packages that have already been added to
-			# the dependency graph.
+			# consistency in dependency resolution, in most
+			# cases we want to treat packages in the graph
+			# as though they are visible.
 			return True
 
-		if not self._dynamic_config._autounmask:
+		if not self._dynamic_config._autounmask or autounmask_level is None:
 			return False
 
 		pkgsettings = self._frozen_config.pkgsettings[pkg.root]
@@ -3455,11 +3667,10 @@ class depgraph(object):
 			#Package has already been unmasked.
 			return True
 
-		#We treat missing keywords in the same way as masks.
-		if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
-			(masked_by_missing_keywords and not allow_unmasks) or \
-			(masked_by_p_mask and not allow_unmasks) or \
-			(missing_licenses and not allow_license_changes):
+		if (masked_by_unstable_keywords and not autounmask_level.allow_unstable_keywords) or \
+			(masked_by_missing_keywords and not autounmask_level.allow_missing_keywords) or \
+			(masked_by_p_mask and not autounmask_level.allow_unmasks) or \
+			(missing_licenses and not autounmask_level.allow_license_changes):
 			#We are not allowed to do the needed changes.
 			return False
 
@@ -3556,7 +3767,7 @@ class depgraph(object):
 
 		if new_changes != old_changes:
 			#Don't do the change if it violates REQUIRED_USE.
-			required_use = pkg.metadata["REQUIRED_USE"]
+			required_use = pkg.metadata.get("REQUIRED_USE")
 			if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
 				not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
 				return old_use
@@ -3574,13 +3785,11 @@ class depgraph(object):
 				self._dynamic_config._need_restart = True
 		return new_use
 
-	def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
-		allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
+	def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
 		root_config = self._frozen_config.roots[root]
 		pkgsettings = self._frozen_config.pkgsettings[root]
 		dbs = self._dynamic_config._filtered_trees[root]["dbs"]
 		vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
-		portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
 		# List of acceptable packages, ordered by type preference.
 		matched_packages = []
 		matched_pkgs_ignore_use = []
@@ -3588,6 +3797,8 @@ class depgraph(object):
 		if not isinstance(atom, portage.dep.Atom):
 			atom = portage.dep.Atom(atom)
 		atom_cp = atom.cp
+		have_new_virt = atom_cp.startswith("virtual/") and \
+			self._have_new_virt(root, atom_cp)
 		atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
 		existing_node = None
 		myeb = None
@@ -3635,6 +3846,9 @@ class depgraph(object):
 				# USE configuration.
 				for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use, 
 					onlydeps=onlydeps):
+					if pkg.cp != atom_cp and have_new_virt:
+						# pull in a new-style virtual instead
+						continue
 					if pkg in self._dynamic_config._runtime_pkg_mask:
 						# The package has been masked by the backtracking logic
 						continue
@@ -3698,10 +3912,7 @@ class depgraph(object):
 						# _dep_check_composite_db, in order to prevent
 						# incorrect choices in || deps like bug #351828.
 
-						if not self._pkg_visibility_check(pkg, \
-							allow_unstable_keywords=allow_unstable_keywords,
-							allow_license_changes=allow_license_changes,
-							allow_unmasks=allow_unmasks):
+						if not self._pkg_visibility_check(pkg, autounmask_level):
 							continue
 
 						# Enable upgrade or downgrade to a version
@@ -3741,19 +3952,13 @@ class depgraph(object):
 										pkg_eb_visible = False
 										for pkg_eb in self._iter_match_pkgs(pkg.root_config,
 											"ebuild", Atom("=%s" % (pkg.cpv,))):
-											if self._pkg_visibility_check(pkg_eb, \
-												allow_unstable_keywords=allow_unstable_keywords,
-												allow_license_changes=allow_license_changes,
-												allow_unmasks=allow_unmasks):
+											if self._pkg_visibility_check(pkg_eb, autounmask_level):
 												pkg_eb_visible = True
 												break
 										if not pkg_eb_visible:
 											continue
 									else:
-										if not self._pkg_visibility_check(pkg_eb, \
-											allow_unstable_keywords=allow_unstable_keywords,
-											allow_license_changes=allow_license_changes,
-											allow_unmasks=allow_unmasks):
+										if not self._pkg_visibility_check(pkg_eb, autounmask_level):
 											continue
 
 					# Calculation of USE for unbuilt ebuilds is relatively
@@ -3783,7 +3988,7 @@ class depgraph(object):
 					if atom.use:
 
 						matched_pkgs_ignore_use.append(pkg)
-						if allow_use_changes and not pkg.built:
+						if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
 							target_use = {}
 							for flag in atom.use.enabled:
 								target_use[flag] = True
@@ -3852,6 +4057,7 @@ class depgraph(object):
 						e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
 						if not e_pkg:
 							break
+
 						# Use PackageSet.findAtomForPackage()
 						# for PROVIDE support.
 						if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
@@ -3872,7 +4078,8 @@ class depgraph(object):
 					if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
 						("--newuse" in self._frozen_config.myopts or \
 						"--reinstall" in self._frozen_config.myopts or \
-						"--binpkg-respect-use" in self._frozen_config.myopts):
+						(not installed and self._dynamic_config.myparams.get(
+						"binpkg_respect_use") in ("y", "auto"))):
 						iuses = pkg.iuse.all
 						old_use = self._pkg_use_enabled(pkg)
 						if myeb:
@@ -3886,9 +4093,11 @@ class depgraph(object):
 						cur_iuse = iuses
 						if myeb and not usepkgonly and not useoldpkg:
 							cur_iuse = myeb.iuse.all
-						if self._reinstall_for_flags(forced_flags,
-							old_use, iuses,
-							now_use, cur_iuse):
+						reinstall_for_flags = self._reinstall_for_flags(pkg,
+							forced_flags, old_use, iuses, now_use, cur_iuse)
+						if reinstall_for_flags:
+							if not pkg.installed:
+								self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
 							break
 					# Compare current config to installed package
 					# and do not reinstall if possible.
@@ -3905,7 +4114,7 @@ class depgraph(object):
 						cur_use = self._pkg_use_enabled(pkg)
 						cur_iuse = pkg.iuse.all
 						reinstall_for_flags = \
-							self._reinstall_for_flags(
+							self._reinstall_for_flags(pkg,
 							forced_flags, old_use, old_iuse,
 							cur_use, cur_iuse)
 						if reinstall_for_flags:
@@ -4002,21 +4211,16 @@ class depgraph(object):
 
 			if avoid_update:
 				for pkg in matched_packages:
-					if pkg.installed and self._pkg_visibility_check(pkg, \
-						allow_unstable_keywords=allow_unstable_keywords,
-						allow_license_changes=allow_license_changes,
-						allow_unmasks=allow_unmasks):
+					if pkg.installed and self._pkg_visibility_check(pkg, autounmask_level):
 						return pkg, existing_node
 
 			visible_matches = []
 			if matched_oldpkg:
 				visible_matches = [pkg.cpv for pkg in matched_oldpkg \
-					if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
-						allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
+					if self._pkg_visibility_check(pkg, autounmask_level)]
 			if not visible_matches:
 				visible_matches = [pkg.cpv for pkg in matched_packages \
-					if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
-						allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
+					if self._pkg_visibility_check(pkg, autounmask_level)]
 			if visible_matches:
 				bestmatch = portage.best(visible_matches)
 			else:
@@ -4046,11 +4250,12 @@ class depgraph(object):
 		"""
 		Select packages that are installed.
 		"""
-		vardb = self._dynamic_config._graph_trees[root]["vartree"].dbapi
-		matches = vardb.match_pkgs(atom)
+		matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
+			"installed", atom))
 		if not matches:
 			return None, None
 		if len(matches) > 1:
+			matches.reverse() # ascending order
 			unmasked = [pkg for pkg in matches if \
 				self._pkg_visibility_check(pkg)]
 			if unmasked:
@@ -4088,11 +4293,10 @@ class depgraph(object):
 			"recurse" not in self._dynamic_config.myparams:
 			return 1
 
-		if "complete" not in self._dynamic_config.myparams:
-			# Automatically enable complete mode if there are any
-			# downgrades, since they often break dependencies
-			# (like in bug #353613).
-			have_downgrade = False
+		if "complete" not in self._dynamic_config.myparams and \
+			self._dynamic_config.myparams.get("complete_if_new_ver", "y") == "y":
+			# Enable complete mode if an installed package version will change.
+			version_change = False
 			for node in self._dynamic_config.digraph:
 				if not isinstance(node, Package) or \
 					node.operation != "merge":
@@ -4100,16 +4304,15 @@ class depgraph(object):
 				vardb = self._frozen_config.roots[
 					node.root].trees["vartree"].dbapi
 				inst_pkg = vardb.match_pkgs(node.slot_atom)
-				if inst_pkg and inst_pkg[0] > node:
-					have_downgrade = True
+				if inst_pkg and (inst_pkg[0] > node or inst_pkg[0] < node):
+					version_change = True
 					break
 
-			if have_downgrade:
+			if version_change:
 				self._dynamic_config.myparams["complete"] = True
-			else:
-				# Skip complete graph mode, in order to avoid consuming
-				# enough time to disturb users.
-				return 1
+
+		if "complete" not in self._dynamic_config.myparams:
+			return 1
 
 		self._load_vdb()
 
@@ -4137,7 +4340,8 @@ class depgraph(object):
 		args = self._dynamic_config._initial_arg_list[:]
 		for root in self._frozen_config.roots:
 			if root != self._frozen_config.target_root and \
-				"remove" in self._dynamic_config.myparams:
+				("remove" in self._dynamic_config.myparams or
+				self._frozen_config.myopts.get("--root-deps") is not None):
 				# Only pull in deps for the relevant root.
 				continue
 			depgraph_sets = self._dynamic_config.sets[root]
@@ -4265,9 +4469,6 @@ class depgraph(object):
 			"--nodeps" in self._frozen_config.myopts:
 			return True
 
-		complete = "complete" in self._dynamic_config.myparams
-		deep = "deep" in self._dynamic_config.myparams
-
 		if True:
 			# Pull in blockers from all installed packages that haven't already
 			# been pulled into the depgraph, in order to ensure that they are
@@ -4281,11 +4482,14 @@ class depgraph(object):
 			# are already built.
 			dep_keys = ["RDEPEND", "PDEPEND"]
 			for myroot in self._frozen_config.trees:
+
+				if self._frozen_config.myopts.get("--root-deps") is not None and \
+					myroot != self._frozen_config.target_root:
+					continue
+
 				vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
-				portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
 				pkgsettings = self._frozen_config.pkgsettings[myroot]
 				root_config = self._frozen_config.roots[myroot]
-				dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
 				final_db = self._dynamic_config.mydbapi[myroot]
 
 				blocker_cache = BlockerCache(myroot, vardb)
@@ -4304,7 +4508,8 @@ class depgraph(object):
 					# packages masked by license, since the user likely wants
 					# to adjust ACCEPT_LICENSE.
 					if pkg in final_db:
-						if not self._pkg_visibility_check(pkg) and \
+						if not self._pkg_visibility_check(pkg,
+							trust_graph=False) and \
 							(pkg_in_graph or 'LICENSE' in pkg.masks):
 							self._dynamic_config._masked_installed.add(pkg)
 						else:
@@ -4381,7 +4586,7 @@ class depgraph(object):
 							# matches (this can happen if an atom lacks a
 							# category).
 							show_invalid_depstring_notice(
-								pkg, depstr, str(e))
+								pkg, depstr, _unicode_decode("%s") % (e,))
 							del e
 							raise
 						if not success:
@@ -4412,7 +4617,8 @@ class depgraph(object):
 						except portage.exception.InvalidAtom as e:
 							depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
 							show_invalid_depstring_notice(
-								pkg, depstr, "Invalid Atom: %s" % (e,))
+								pkg, depstr,
+								_unicode_decode("Invalid Atom: %s") % (e,))
 							return False
 				for cpv in stale_cache:
 					del blocker_cache[cpv]
@@ -4852,15 +5058,6 @@ class depgraph(object):
 		if replacement_portage == running_portage:
 			replacement_portage = None
 
-		if replacement_portage is not None and \
-			(running_portage is None or \
-			running_portage.cpv != replacement_portage.cpv or \
-			'9999' in replacement_portage.cpv or \
-			'git' in replacement_portage.inherited or \
-			'git-2' in replacement_portage.inherited):
-			# update from running_portage to replacement_portage asap
-			asap_nodes.append(replacement_portage)
-
 		if running_portage is not None:
 			try:
 				portage_rdepend = self._select_atoms_highest_available(
@@ -5668,6 +5865,8 @@ class depgraph(object):
 		"""
 
 		autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
+		autounmask_unrestricted_atoms = \
+			self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") == True
 		quiet = "--quiet" in self._frozen_config.myopts
 		pretend = "--pretend" in self._frozen_config.myopts
 		ask = "--ask" in self._frozen_config.myopts
@@ -5703,6 +5902,7 @@ class depgraph(object):
 		#Set of roots we have autounmask changes for.
 		roots = set()
 
+		masked_by_missing_keywords = False
 		unstable_keyword_msg = {}
 		for pkg in self._dynamic_config._needed_unstable_keywords:
 			self._show_merge_list()
@@ -5718,12 +5918,17 @@ class depgraph(object):
 					if reason.unmask_hint and \
 						reason.unmask_hint.key == 'unstable keyword':
 						keyword = reason.unmask_hint.value
+						if keyword == "**":
+							masked_by_missing_keywords = True
 
 						unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
-						if is_latest:
-							unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
-						elif is_latest_in_slot:
-							unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
+						if autounmask_unrestricted_atoms:
+							if is_latest:
+								unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
+							elif is_latest_in_slot:
+								unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
+							else:
+								unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
 						else:
 							unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
 
@@ -5757,10 +5962,13 @@ class depgraph(object):
 								comment.splitlines() if line]
 							for line in comment:
 								p_mask_change_msg[root].append("%s\n" % line)
-						if is_latest:
-							p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
-						elif is_latest_in_slot:
-							p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
+						if autounmask_unrestricted_atoms:
+							if is_latest:
+								p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
+							elif is_latest_in_slot:
+								p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
+							else:
+								p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
 						else:
 							p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
 
@@ -5893,33 +6101,41 @@ class depgraph(object):
 
 			write_to_file = not problems
 
+		def format_msg(lines):
+			lines = lines[:]
+			for i, line in enumerate(lines):
+				if line.startswith("#"):
+					continue
+				lines[i] = colorize("INFORM", line.rstrip()) + "\n"
+			return "".join(lines)
+
 		for root in roots:
 			settings = self._frozen_config.roots[root].settings
 			abs_user_config = os.path.join(
 				settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
 
 			if len(roots) > 1:
-				writemsg_stdout("\nFor %s:\n" % abs_user_config, noiselevel=-1)
+				writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
 
 			if root in unstable_keyword_msg:
-				writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
+				writemsg("\nThe following " + colorize("BAD", "keyword changes") + \
 					" are necessary to proceed:\n", noiselevel=-1)
-				writemsg_stdout("".join(unstable_keyword_msg[root]), noiselevel=-1)
+				writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
 
 			if root in p_mask_change_msg:
-				writemsg_stdout("\nThe following " + colorize("BAD", "mask changes") + \
+				writemsg("\nThe following " + colorize("BAD", "mask changes") + \
 					" are necessary to proceed:\n", noiselevel=-1)
-				writemsg_stdout("".join(p_mask_change_msg[root]), noiselevel=-1)
+				writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
 
 			if root in use_changes_msg:
-				writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
+				writemsg("\nThe following " + colorize("BAD", "USE changes") + \
 					" are necessary to proceed:\n", noiselevel=-1)
-				writemsg_stdout("".join(use_changes_msg[root]), noiselevel=-1)
+				writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
 
 			if root in license_msg:
-				writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
+				writemsg("\nThe following " + colorize("BAD", "license changes") + \
 					" are necessary to proceed:\n", noiselevel=-1)
-				writemsg_stdout("".join(license_msg[root]), noiselevel=-1)
+				writemsg(format_msg(license_msg[root]), noiselevel=-1)
 
 		protect_obj = {}
 		if write_to_file:
@@ -5948,7 +6164,7 @@ class depgraph(object):
 				if protect_obj[root].isprotected(file_to_write_to):
 					# We want to force new_protect_filename to ensure
 					# that the user will see all our changes via
-					# etc-update, even if file_to_write_to doesn't
+					# dispatch-conf, even if file_to_write_to doesn't
 					# exist yet, so we specify force=True.
 					file_to_write_to = new_protect_filename(file_to_write_to,
 						force=True)
@@ -5957,20 +6173,16 @@ class depgraph(object):
 				except PortageException:
 					problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
 
-		if not quiet and \
-			(unstable_keyword_msg or \
-			p_mask_change_msg or \
-			use_changes_msg or \
-			license_msg):
+		if not quiet and (p_mask_change_msg or masked_by_missing_keywords):
 			msg = [
 				"",
-				"NOTE: This --autounmask behavior can be disabled by setting",
-				"      EMERGE_DEFAULT_OPTS=\"--autounmask=n\" in make.conf."
+				"NOTE: The --autounmask-keep-masks option will prevent emerge",
+				"      from creating package.unmask or ** keyword changes."
 			]
 			for line in msg:
 				if line:
 					line = colorize("INFORM", line)
-				writemsg_stdout(line + "\n", noiselevel=-1)
+				writemsg(line + "\n", noiselevel=-1)
 
 		if ask and write_to_file and file_to_write_to:
 			prompt = "\nWould you like to add these " + \
@@ -6002,14 +6214,14 @@ class depgraph(object):
 						file_to_write_to.get((abs_user_config, "package.license")))
 
 		if problems:
-			writemsg_stdout("\nThe following problems occurred while writing autounmask changes:\n", \
+			writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
 				noiselevel=-1)
-			writemsg_stdout("".join(problems), noiselevel=-1)
+			writemsg("".join(problems), noiselevel=-1)
 		elif write_to_file and roots:
-			writemsg_stdout("\nAutounmask changes successfully written. Remember to run etc-update.\n", \
+			writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
 				noiselevel=-1)
 		elif not pretend and not autounmask_write and roots:
-			writemsg_stdout("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
+			writemsg("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
 				noiselevel=-1)
 
 
@@ -6020,49 +6232,25 @@ class depgraph(object):
 		the merge list where it is most likely to be seen, but if display()
 		is not going to be called then this method should be called explicitly
 		to ensure that the user is notified of problems with the graph.
-
-		All output goes to stderr, except for unsatisfied dependencies which
-		go to stdout for parsing by programs such as autounmask.
 		"""
 
-		# Note that show_masked_packages() sends its output to
-		# stdout, and some programs such as autounmask parse the
-		# output in cases when emerge bails out. However, when
-		# show_masked_packages() is called for installed packages
-		# here, the message is a warning that is more appropriate
-		# to send to stderr, so temporarily redirect stdout to
-		# stderr. TODO: Fix output code so there's a cleaner way
-		# to redirect everything to stderr.
-		sys.stdout.flush()
-		sys.stderr.flush()
-		stdout = sys.stdout
-		try:
-			sys.stdout = sys.stderr
-			self._display_problems()
-		finally:
-			sys.stdout = stdout
-			sys.stdout.flush()
-			sys.stderr.flush()
-
-		# This goes to stdout for parsing by programs like autounmask.
-		for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
-			self._show_unsatisfied_dep(*pargs, **kwargs)
-
-	def _display_problems(self):
 		if self._dynamic_config._circular_deps_for_display is not None:
 			self._show_circular_deps(
 				self._dynamic_config._circular_deps_for_display)
 
-		# The user is only notified of a slot conflict if
-		# there are no unresolvable blocker conflicts.
-		if self._dynamic_config._unsatisfied_blockers_for_display is not None:
+		# The slot conflict display has better noise reduction than
+		# the unsatisfied blockers display, so skip unsatisfied blockers
+		# display if there are slot conflicts (see bug #385391).
+		if self._dynamic_config._slot_collision_info:
+			self._show_slot_collision_notice()
+		elif self._dynamic_config._unsatisfied_blockers_for_display is not None:
 			self._show_unsatisfied_blockers(
 				self._dynamic_config._unsatisfied_blockers_for_display)
-		elif self._dynamic_config._slot_collision_info:
-			self._show_slot_collision_notice()
 		else:
 			self._show_missed_update()
 
+		self._show_ignored_binaries()
+
 		self._display_autounmask()
 
 		# TODO: Add generic support for "set problem" handlers so that
@@ -6164,6 +6352,9 @@ class depgraph(object):
 			show_mask_docs()
 			writemsg("\n", noiselevel=-1)
 
+		for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
+			self._show_unsatisfied_dep(*pargs, **kwargs)
+
 	def saveNomergeFavorites(self):
 		"""Find atoms in favorites that are not in the mergelist and add them
 		to the world file if necessary."""
@@ -6184,7 +6375,6 @@ class depgraph(object):
 
 		args_set = self._dynamic_config.sets[
 			self._frozen_config.target_root].sets['__non_set_args__']
-		portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
 		added_favorites = set()
 		for x in self._dynamic_config._set_nodes:
 			if x.operation != "nomerge":
@@ -6222,7 +6412,8 @@ class depgraph(object):
 		all_added.extend(added_favorites)
 		all_added.sort()
 		for a in all_added:
-			writemsg(">>> Recording %s in \"world\" favorites file...\n" % \
+			writemsg_stdout(
+				">>> Recording %s in \"world\" favorites file...\n" % \
 				colorize("INFORM", str(a)), noiselevel=-1)
 		if all_added:
 			world_set.update(all_added)
@@ -6247,15 +6438,12 @@ class depgraph(object):
 			mergelist = []
 
 		favorites = resume_data.get("favorites")
-		args_set = self._dynamic_config.sets[
-			self._frozen_config.target_root].sets['__non_set_args__']
 		if isinstance(favorites, list):
 			args = self._load_favorites(favorites)
 		else:
 			args = []
 
 		fakedb = self._dynamic_config.mydbapi
-		trees = self._frozen_config.trees
 		serialized_tasks = []
 		masked_tasks = []
 		for x in mergelist:
@@ -6552,38 +6740,43 @@ class _dep_check_composite_db(dbapi):
 		return ret
 
 	def match(self, atom):
-		ret = self._match_cache.get(atom)
+		cache_key = (atom, atom.unevaluated_atom)
+		ret = self._match_cache.get(cache_key)
 		if ret is not None:
 			return ret[:]
+
+		ret = []
 		pkg, existing = self._depgraph._select_package(self._root, atom)
-		if not pkg:
-			ret = []
-		else:
-			# Return the highest available from select_package() as well as
-			# any matching slots in the graph db.
+
+		if pkg is not None and self._visible(pkg):
+			self._cpv_pkg_map[pkg.cpv] = pkg
+			ret.append(pkg.cpv)
+
+		if pkg is not None and \
+			atom.slot is None and \
+			pkg.cp.startswith("virtual/") and \
+			(("remove" not in self._depgraph._dynamic_config.myparams and
+			"--update" not in self._depgraph._frozen_config.myopts) or
+			not ret or
+			not self._depgraph._virt_deps_visible(pkg, ignore_use=True)):
+			# For new-style virtual lookahead that occurs inside dep_check()
+			# for bug #141118, examine all slots. This is needed so that newer
+			# slots will not unnecessarily be pulled in when a satisfying lower
+			# slot is already installed. For example, if virtual/jdk-1.5 is
+			# satisfied via gcj-jdk then there's no need to pull in a newer
+			# slot to satisfy a virtual/jdk dependency, unless --update is
+			# enabled.
 			slots = set()
-			slots.add(pkg.metadata["SLOT"])
-			if pkg.cp.startswith("virtual/"):
-				# For new-style virtual lookahead that occurs inside
-				# dep_check(), examine all slots. This is needed
-				# so that newer slots will not unnecessarily be pulled in
-				# when a satisfying lower slot is already installed. For
-				# example, if virtual/jdk-1.4 is satisfied via kaffe then
-				# there's no need to pull in a newer slot to satisfy a
-				# virtual/jdk dependency.
-				for db, pkg_type, built, installed, db_keys in \
-					self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
-					for cpv in db.match(atom):
-						if portage.cpv_getkey(cpv) != pkg.cp:
-							continue
-						slots.add(db.aux_get(cpv, ["SLOT"])[0])
-			ret = []
-			if self._visible(pkg):
-				self._cpv_pkg_map[pkg.cpv] = pkg
-				ret.append(pkg.cpv)
-			slots.remove(pkg.metadata["SLOT"])
+			slots.add(pkg.slot)
+			for virt_pkg in self._depgraph._iter_match_pkgs_any(
+				self._depgraph._frozen_config.roots[self._root], atom):
+				if virt_pkg.cp != pkg.cp:
+					continue
+				slots.add(virt_pkg.slot)
+
+			slots.remove(pkg.slot)
 			while slots:
-				slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
+				slot_atom = atom.with_slot(slots.pop())
 				pkg, existing = self._depgraph._select_package(
 					self._root, slot_atom)
 				if not pkg:
@@ -6592,9 +6785,11 @@ class _dep_check_composite_db(dbapi):
 					continue
 				self._cpv_pkg_map[pkg.cpv] = pkg
 				ret.append(pkg.cpv)
-			if ret:
+
+			if len(ret) > 1:
 				self._cpv_sort_ascending(ret)
-		self._match_cache[atom] = ret
+
+		self._match_cache[cache_key] = ret
 		return ret[:]
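
The cache key now pairs the evaluated atom with its unevaluated form: two atoms can compare equal after USE-conditional evaluation while their unevaluated sources differ, so keying on both keeps hits exact. The pattern in isolation (names illustrative):

_match_cache = {}

def cached_match(atom, compute):
    # Key on both forms; equal evaluated atoms may come from
    # different unevaluated (USE-conditional) sources.
    cache_key = (atom, getattr(atom, "unevaluated_atom", atom))
    ret = _match_cache.get(cache_key)
    if ret is None:
        ret = compute(atom)
        _match_cache[cache_key] = ret
    return ret[:]  # defensive copy, as in match() above
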
 
 	def _visible(self, pkg):
@@ -6650,7 +6845,7 @@ class _dep_check_composite_db(dbapi):
 			# Note: highest_visible is not necessarily the real highest
 			# visible, especially when --update is not enabled, so use
 			# < operator instead of !=.
-			if pkg < highest_visible:
+			if highest_visible is not None and pkg < highest_visible:
 				return False
 		elif in_graph != pkg:
 			# Mask choices for packages that would trigger a slot
@@ -6832,7 +7027,7 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
 	PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
 	TODO: Return reasons for dropped_tasks, for display/logging.
 	@rtype: tuple
-	@returns: (success, depgraph, dropped_tasks)
+	@return: (success, depgraph, dropped_tasks)
 	"""
 	skip_masked = True
 	skip_unsatisfied = True
@@ -6869,12 +7064,12 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
 					if not isinstance(parent_node, Package) \
 						or parent_node.operation not in ("merge", "nomerge"):
 						continue
-					unsatisfied = \
-						graph.child_nodes(parent_node,
-						ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
-					if pkg in unsatisfied:
-						unsatisfied_parents[parent_node] = parent_node
-						unsatisfied_stack.append(parent_node)
+					# We need to traverse all priorities here, in order to
+					# ensure that a package with an unsatisfied dependency
+					# won't get pulled in, even indirectly via a soft
+					# dependency.
+					unsatisfied_parents[parent_node] = parent_node
+					unsatisfied_stack.append(parent_node)
 
 			unsatisfied_tuples = frozenset(tuple(parent_node)
 				for parent_node in unsatisfied_parents
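
The loop above amounts to an ancestor walk: every package that reaches an unsatisfied node, through any priority, is collected for dropping. The traversal on its own, over a hypothetical parent-adjacency dict:

def ancestors(parents, start):
    # parents maps node -> iterable of direct parent nodes.
    seen = set()
    stack = [start]
    while stack:
        node = stack.pop()
        for parent in parents.get(node, ()):
            if parent not in seen:
                seen.add(parent)
                stack.append(parent)
    return seen
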
@@ -6907,7 +7102,6 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
 
 def get_mask_info(root_config, cpv, pkgsettings,
 	db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
-	eapi_masked = False
 	try:
 		metadata = dict(zip(db_keys,
 			db.aux_get(cpv, db_keys, myrepo=myrepo)))
@@ -6918,8 +7112,6 @@ def get_mask_info(root_config, cpv, pkgsettings,
 		mreasons = ["corruption"]
 	else:
 		eapi = metadata['EAPI']
-		if eapi[:1] == '-':
-			eapi = eapi[1:]
 		if not portage.eapi_is_supported(eapi):
 			mreasons = ['EAPI %s' % eapi]
 		else:
@@ -6976,10 +7168,11 @@ def show_masked_packages(masked_packages):
 				# above via mreasons.
 				pass
 
-		writemsg_stdout("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)
+		writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
+			noiselevel=-1)
 
 		if comment and comment not in shown_comments:
-			writemsg_stdout(filename + ":\n" + comment + "\n",
+			writemsg(filename + ":\n" + comment + "\n",
 				noiselevel=-1)
 			shown_comments.add(comment)
 		portdb = root_config.trees["porttree"].dbapi
@@ -6989,13 +7182,14 @@ def show_masked_packages(masked_packages):
 				continue
 			msg = ("A copy of the '%s' license" + \
 			" is located at '%s'.\n\n") % (l, l_path)
-			writemsg_stdout(msg, noiselevel=-1)
+			writemsg(msg, noiselevel=-1)
 			shown_licenses.add(l)
 	return have_eapi_mask
 
 def show_mask_docs():
-	writemsg_stdout("For more information, see the MASKED PACKAGES section in the emerge\n", noiselevel=-1)
-	writemsg_stdout("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
+	writemsg("For more information, see the MASKED PACKAGES "
+		"section in the emerge\n", noiselevel=-1)
+	writemsg("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
 
 def show_blocker_docs_link():
 	writemsg("\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n", noiselevel=-1)
@@ -7017,7 +7211,7 @@ def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
 				pkg.metadata["CHOST"]))
 
 	if pkg.invalid:
-		for msg_type, msgs in pkg.invalid.items():
+		for msgs in pkg.invalid.values():
 			for msg in msgs:
 				mreasons.append(
 					_MaskReason("invalid", "invalid: %s" % (msg,)))

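The depgraph hunk above expands a match across every slot that can satisfy an atom, using the Atom.with_slot helper that the patched code calls, and it caches the sorted result while returning a copy so callers cannot mutate the cache. A minimal sketch of the slot-expansion idea, assuming a Portage install that provides Atom.with_slot (as this patched tree does); the slot values are hypothetical:

    import portage.dep

    atom = portage.dep.Atom("dev-lang/python")
    slots = {"2.7", "3.2"}  # hypothetical slots discovered for this cp
    # derive one slot atom per slot, so each match is confined to a slot
    slot_atoms = [atom.with_slot(s) for s in sorted(slots)]
    for slot_atom in slot_atoms:
        print(slot_atom)    # e.g. dev-lang/python:2.7, dev-lang/python:3.2
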
diff --git a/portage_with_autodep/pym/_emerge/depgraph.pyo b/portage_with_autodep/pym/_emerge/depgraph.pyo
new file mode 100644
index 0000000..ba00a11
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/depgraph.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/emergelog.py b/portage_with_autodep/pym/_emerge/emergelog.py
index d6ef1b4..b1b093f 100644
--- a/portage_with_autodep/pym/_emerge/emergelog.py
+++ b/portage_with_autodep/pym/_emerge/emergelog.py
@@ -49,15 +49,12 @@ def emergelog(xterm_titles, mystr, short_msg=None):
 			portage.util.apply_secpass_permissions(file_path,
 				uid=portage.portage_uid, gid=portage.portage_gid,
 				mode=0o660)
-		mylock = None
+		mylock = portage.locks.lockfile(file_path)
 		try:
-			mylock = portage.locks.lockfile(mylogfile)
 			mylogfile.write(_log_fmt % (time.time(), mystr))
-			mylogfile.flush()
-		finally:
-			if mylock:
-				portage.locks.unlockfile(mylock)
 			mylogfile.close()
+		finally:
+			portage.locks.unlockfile(mylock)
 	except (IOError,OSError,portage.exception.PortageException) as e:
 		if secpass >= 1:
 			print("emergelog():",e, file=sys.stderr)

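The emergelog.py rewrite above switches to the standard acquire-before-try idiom: the lock is taken before the try block, so the finally clause never has to guard against a lock that was never obtained, and the old mylock = None bookkeeping disappears. A self-contained sketch of the same pattern using a plain threading.Lock; the file name and helper are hypothetical:

    import threading

    _log_lock = threading.Lock()

    def append_log_line(path, line):
        _log_lock.acquire()      # acquire first; if this raises, no cleanup is owed
        try:
            with open(path, "a") as f:
                f.write(line + "\n")
        finally:
            _log_lock.release()  # runs whether or not the write raised
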
diff --git a/portage_with_autodep/pym/_emerge/emergelog.pyo b/portage_with_autodep/pym/_emerge/emergelog.pyo
new file mode 100644
index 0000000..7e67bd3
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/emergelog.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/getloadavg.pyo b/portage_with_autodep/pym/_emerge/getloadavg.pyo
new file mode 100644
index 0000000..56bda8c
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/getloadavg.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/help.py b/portage_with_autodep/pym/_emerge/help.py
index c978ce2..a1dbb37 100644
--- a/portage_with_autodep/pym/_emerge/help.py
+++ b/portage_with_autodep/pym/_emerge/help.py
@@ -3,10 +3,9 @@
 
 from __future__ import print_function
 
-from portage.const import _ENABLE_DYN_LINK_MAP
 from portage.output import bold, turquoise, green
 
-def shorthelp():
+def help():
 	print(bold("emerge:")+" the other white meat (command-line interface to the Portage system)")
 	print(bold("Usage:"))
 	print("   "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] [ "+turquoise("ebuild")+" | "+turquoise("tbz2")+" | "+turquoise("file")+" | "+turquoise("@set")+" | "+turquoise("atom")+" ] [ ... ]")
@@ -19,797 +18,8 @@ def shorthelp():
 	print("          [ "+green("--complete-graph")+"             ] [ "+green("--deep")+"       ]")
 	print("          [ "+green("--jobs") + " " + turquoise("JOBS")+" ] [ "+green("--keep-going")+" ] [ " + green("--load-average")+" " + turquoise("LOAD") + "            ]")
 	print("          [ "+green("--newuse")+"    ] [ "+green("--noconfmem")+"  ] [ "+green("--nospinner")+"  ]")
-	print("          [ "+green("--oneshot")+"   ] [ "+green("--onlydeps")+"   ]")
+	print("          [ "+green("--oneshot")+"   ] [ "+green("--onlydeps")+"   ] [ "+ green("--quiet-build")+" [ " + turquoise("y") + " | "+ turquoise("n")+" ]        ]")
 	print("          [ "+green("--reinstall ")+turquoise("changed-use")+"      ] [ " + green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" >         ]")
 	print(bold("Actions:")+"  [ "+green("--depclean")+" | "+green("--list-sets")+" | "+green("--search")+" | "+green("--sync")+" | "+green("--version")+"        ]")
-
-def help(myopts, havecolor=1):
-	# TODO: Implement a wrap() that accounts for console color escape codes.
-	from textwrap import wrap
-	desc_left_margin = 14
-	desc_indent = desc_left_margin * " "
-	desc_width = 80 - desc_left_margin - 5
-	if "--verbose" not in myopts:
-		shorthelp()
-		print()
-		print("   For more help try 'emerge --help --verbose' or consult the man page.")
-	else:
-		shorthelp()
-		print()
-		print(turquoise("Help (this screen):"))
-		print("       "+green("--help")+" ("+green("-h")+" short option)")
-		print("              Displays this help; an additional argument (see above) will tell")
-		print("              emerge to display detailed help.")
-		print()
-		print(turquoise("Actions:"))
-		print("       "+green("--clean"))
-		print("              Cleans the system by removing outdated packages which will not")
-		print("              remove functionalities or prevent your system from working.")
-		print("              The arguments can be in several different formats :")
-		print("              * world ")
-		print("              * system or")
-		print("              * 'dependency specification' (in single quotes is best.)")
-		print("              Here are a few examples of the dependency specification format:")
-		print("              "+bold("binutils")+" matches")
-		print("                  binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
-		print("              "+bold("sys-devel/binutils")+" matches")
-		print("                  binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
-		print("              "+bold(">sys-devel/binutils-2.11.90.0.7")+" matches")
-		print("                  binutils-2.11.92.0.12.3-r1")
-		print("              "+bold(">=sys-devel/binutils-2.11.90.0.7")+" matches")
-		print("                  binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
-		print("              "+bold("<=sys-devel/binutils-2.11.92.0.12.3-r1")+" matches")
-		print("                  binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
-		print()
-		print("       "+green("--config"))
-		print("              Runs package-specific operations that must be executed after an")
-		print("              emerge process has completed.  This usually entails configuration")
-		print("              file setup or other similar setups that the user may wish to run.")
-		print()
-		print("       "+green("--depclean")+" ("+green("-c")+" short option)")
-
-		paragraph = "Cleans the system by removing packages that are " + \
-		"not associated with explicitly merged packages. Depclean works " + \
-		"by creating the full dependency tree from the " + \
-		"@world set, then comparing it to installed packages. Packages " + \
-		"installed, but not part of the dependency tree, will be " + \
-		"uninstalled by depclean. See --with-bdeps for behavior with " + \
-		"respect to build time dependencies that are not strictly " + \
-		"required. Packages that are part of the world set will " + \
-		"always be kept. They can be manually added to this set with " + \
-		"emerge --noreplace <atom>. As a safety measure, depclean " + \
-		"will not remove any packages unless *all* required dependencies " + \
-		"have been resolved. As a consequence, it is often necessary to " + \
-		"run emerge --update --newuse --deep @world " + \
-		"prior to depclean."
-
-		for line in wrap(paragraph, desc_width):
-			print(desc_indent + line)
-		print()
-
-		paragraph =  "WARNING: Inexperienced users are advised to use " + \
-		"--pretend with this option in order to see a preview of which " + \
-		"packages will be uninstalled. Always study the list of packages " + \
-		"to be cleaned for any obvious mistakes. Note that packages " + \
-		"listed in package.provided (see portage(5)) may be removed by " + \
-		"depclean, even if they are part of the world set."
-
-		paragraph += " Also note that " + \
-			"depclean may break link level dependencies"
-
-		if _ENABLE_DYN_LINK_MAP:
-			paragraph += ", especially when the " + \
-				"--depclean-lib-check option is disabled"
-
-		paragraph += ". Thus, it is " + \
-			"recommended to use a tool such as revdep-rebuild(1) " + \
-			"in order to detect such breakage."
-
-		for line in wrap(paragraph, desc_width):
-			print(desc_indent + line)
-		print()
-
-		paragraph = "Depclean serves as a dependency aware version of " + \
-			"--unmerge. When given one or more atoms, it will unmerge " + \
-			"matched packages that have no reverse dependencies. Use " + \
-			"--depclean together with --verbose to show reverse dependencies."
-
-		for line in wrap(paragraph, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--deselect") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-
-		paragraph = \
-			"Remove atoms and/or sets from the world file. This action is implied " + \
-			"by uninstall actions, including --depclean, " + \
-			"--prune and --unmerge. Use --deselect=n " + \
-			"in order to prevent uninstall actions from removing " + \
-			"atoms from the world file."
-
-		for line in wrap(paragraph, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--ignore-default-opts"))
-
-		paragraph = \
-			"Causes EMERGE_DEFAULT_OPTS (see make.conf(5)) to be ignored."
-
-		for line in wrap(paragraph, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--info"))
-		print("              Displays important portage variables that will be exported to")
-		print("              ebuild.sh when performing merges. This information is useful")
-		print("              for bug reports and verification of settings. All settings in")
-		print("              make.{conf,globals,defaults} and the environment show up if")
-		print("              run with the '--verbose' flag.")
-		print()
-		print("       " + green("--list-sets"))
-		paragraph = "Displays a list of available package sets."
-
-		for line in wrap(paragraph, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--metadata"))
-		print("              Transfers metadata cache from ${PORTDIR}/metadata/cache/ to")
-		print("              /var/cache/edb/dep/ as is normally done on the tail end of an")
-		print("              rsync update using " + bold("emerge --sync") + ". This process populates the")
-		print("              cache database that portage uses for pre-parsed lookups of")
-		print("              package data.  It does not populate cache for the overlays")
-		print("              listed in PORTDIR_OVERLAY.  In order to generate cache for")
-		print("              overlays, use " + bold("--regen") + ".")
-		print()
-		print("       "+green("--prune")+" ("+green("-P")+" short option)")
-		print("              "+turquoise("WARNING: This action can remove important packages!"))
-		paragraph = "Removes all but the highest installed version of a " + \
-			"package from your system. Use --prune together with " + \
-			"--verbose to show reverse dependencies or with --nodeps " + \
-			"to ignore all dependencies. "
-
-		for line in wrap(paragraph, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--regen"))
-		print("              Causes portage to check and update the dependency cache of all")
-		print("              ebuilds in the portage tree. This is not recommended for rsync")
-		print("              users as rsync updates the cache using server-side caches.")
-		print("              Rsync users should simply 'emerge --sync' to regenerate.")
-		desc = "In order to specify parallel --regen behavior, use "+ \
-			"the ---jobs and --load-average options. If you would like to " + \
-			"generate and distribute cache for use by others, use egencache(1)."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--resume")+" ("+green("-r")+" short option)")
-		print("              Resumes the most recent merge list that has been aborted due to an")
-		print("              error. Please note that this operation will only return an error")
-		print("              on failure. If there is nothing for portage to do, then portage")
-		print("              will exit with a message and a success condition. A resume list")
-		print("              will persist until it has been completed in entirety or until")
-		print("              another aborted merge list replaces it. The resume history is")
-		print("              capable of storing two merge lists. After one resume list")
-		print("              completes, it is possible to invoke --resume once again in order")
-		print("              to resume an older list.")
-		print()
-		print("       "+green("--search")+" ("+green("-s")+" short option)")
-		print("              Searches for matches of the supplied string in the current local")
-		print("              portage tree. By default emerge uses a case-insensitive simple ")
-		print("              search, but you can enable a regular expression search by ")
-		print("              prefixing the search string with %.")
-		print("              Prepending the expression with a '@' will cause the category to")
-		print("              be included in the search.")
-		print("              A few examples:")
-		print("              "+bold("emerge --search libc"))
-		print("                  list all packages that contain libc in their name")
-		print("              "+bold("emerge --search '%^kde'"))
-		print("                  list all packages starting with kde")
-		print("              "+bold("emerge --search '%gcc$'"))
-		print("                  list all packages ending with gcc")
-		print("              "+bold("emerge --search '%@^dev-java.*jdk'"))
-		print("                  list all available Java JDKs")
-		print()
-		print("       "+green("--searchdesc")+" ("+green("-S")+" short option)")
-		print("              Matches the search string against the description field as well")
-		print("              the package's name. Take caution as the descriptions are also")
-		print("              matched as regular expressions.")
-		print("                emerge -S html")
-		print("                emerge -S applet")
-		print("                emerge -S 'perl.*module'")
-		print()
-		print("       "+green("--sync"))
-		desc = "This updates the portage tree that is located in the " + \
-			"directory that the PORTDIR variable refers to (default " + \
-			"location is /usr/portage). The SYNC variable specifies " + \
-			"the remote URI from which files will be synchronized. " + \
-			"The PORTAGE_SYNC_STALE variable configures " + \
-			"warnings that are shown when emerge --sync has not " + \
-			"been executed recently."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print(desc_indent + turquoise("WARNING:"))
-		desc = "The emerge --sync action will modify and/or delete " + \
-			"files located inside the directory that the PORTDIR " + \
-			"variable refers to (default location is /usr/portage). " + \
-			"For more information, see the PORTDIR documentation in " + \
-			"the make.conf(5) man page."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print(desc_indent + green("NOTE:"))
-		desc = "The emerge-webrsync program will download the entire " + \
-			"portage tree as a tarball, which is much faster than emerge " + \
-			"--sync for first time syncs."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--unmerge")+" ("+green("-C")+" short option)")
-		print("              "+turquoise("WARNING: This action can remove important packages!"))
-		print("              Removes all matching packages. This does no checking of")
-		print("              dependencies, so it may remove packages necessary for the proper")
-		print("              operation of your system. Its arguments can be atoms or")
-		print("              ebuilds. For a dependency aware version of --unmerge, use")
-		print("              --depclean or --prune.")
-		print()
-		print("       "+green("--version")+" ("+green("-V")+" short option)")
-		print("              Displays the currently installed version of portage along with")
-		print("              other information useful for quick reference on a system. See")
-		print("              "+bold("emerge info")+" for more advanced information.")
-		print()
-		print(turquoise("Options:"))
-		print("       "+green("--accept-properties=ACCEPT_PROPERTIES"))
-		desc = "This option temporarily overrides the ACCEPT_PROPERTIES " + \
-			"variable. The ACCEPT_PROPERTIES variable is incremental, " + \
-			"which means that the specified setting is appended to the " + \
-			"existing value from your configuration. The special -* " + \
-			"token can be used to discard the existing configuration " + \
-			"value and start fresh. See the MASKED PACKAGES section " + \
-			"and make.conf(5) for more information about " + \
-			"ACCEPT_PROPERTIES. A typical usage example for this option " + \
-			"would be to use --accept-properties=-interactive to " + \
-			"temporarily mask interactive packages. With default " + \
-			"configuration, this would result in an effective " + \
-			"ACCEPT_PROPERTIES value of \"* -interactive\"."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--alphabetical"))
-		print("              When displaying USE and other flag output, combines the enabled")
-		print("              and disabled flags into a single list and sorts it alphabetically.")
-		print("              With this option, output such as USE=\"dar -bar -foo\" will instead")
-		print("              be displayed as USE=\"-bar dar -foo\"")
-		print()
-		print("       " + green("--ask") + \
-			" [ %s | %s ] (%s short option)" % \
-			(turquoise("y"), turquoise("n"), green("-a")))
-		desc = "Before performing the action, display what will take place (server info for " + \
-			"--sync, --pretend output for merge, and so forth), then ask " + \
-			"whether to proceed with the action or abort.  Using --ask is more " + \
-			"efficient than using --pretend and then executing the same command " + \
-			"without --pretend, as dependencies will only need to be calculated once. " + \
-			"WARNING: If the \"Enter\" key is pressed at the prompt (with no other input), " + \
-			"it is interpreted as acceptance of the first choice.  Note that the input " + \
-			"buffer is not cleared prior to the prompt, so an accidental press of the " + \
-			"\"Enter\" key at any time prior to the prompt will be interpreted as a choice! " + \
-			"Use the --ask-enter-invalid option if you want a single \"Enter\" key " + \
-			"press to be interpreted as invalid input."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("        " + green("--ask-enter-invalid"))
-		desc = "When used together with the --ask option, " + \
-			"interpret a single \"Enter\" key press as " + \
-			"invalid input. This helps prevent accidental " + \
-			"acceptance of the first choice. This option is " + \
-			"intended to be set in the make.conf(5) " + \
-			"EMERGE_DEFAULT_OPTS variable."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print() 
-		print("       " + green("--autounmask") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "Automatically unmask packages and generate package.use " + \
-			"settings as necessary to satisfy dependencies. This " + \
-			"option is enabled by default. If any configuration " + \
-			"changes are required, then they will be displayed " + \
-			"after the merge list and emerge will immediately " + \
-			"abort. If the displayed configuration changes are " + \
-			"satisfactory, you should copy and paste them into " + \
-			"the specified configuration file(s), or enable the " + \
-			"--autounmask-write option. The " + \
-			"EMERGE_DEFAULT_OPTS variable may be used to " + \
-			"disable this option by default in make.conf(5)."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--autounmask-write") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "If --autounmask is enabled, changes are written " + \
-			"to config files, respecting CONFIG_PROTECT and --ask."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--backtrack") + " " + turquoise("COUNT"))
-		desc = "Specifies an integer number of times to backtrack if " + \
-			"dependency calculation fails due to a conflict or an " + \
-			"unsatisfied dependency (default: '10')."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--binpkg-respect-use") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "Tells emerge to ignore binary packages if their use flags" + \
-			" don't match the current configuration. (default: 'n')"
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--buildpkg") + \
-			" [ %s | %s ] (%s short option)" % \
-			(turquoise("y"), turquoise("n"), green("-b")))
-		desc = "Tells emerge to build binary packages for all ebuilds processed in" + \
-			" addition to actually merging the packages. Useful for maintainers" + \
-			" or if you administrate multiple Gentoo Linux systems (build once," + \
-			" emerge tbz2s everywhere) as well as disaster recovery. The package" + \
-			" will be created in the" + \
-			" ${PKGDIR}/All directory. An alternative for already-merged" + \
-			" packages is to use quickpkg(1) which creates a tbz2 from the" + \
-			" live filesystem."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--buildpkgonly")+" ("+green("-B")+" short option)")
-		print("              Creates a binary package, but does not merge it to the")
-		print("              system. This has the restriction that unsatisfied dependencies")
-		print("              must not exist for the desired package as they cannot be used if")
-		print("              they do not exist on the system.")
-		print()
-		print("       " + green("--changed-use"))
-		desc = "This is an alias for --reinstall=changed-use."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--changelog")+" ("+green("-l")+" short option)")
-		print("              When pretending, also display the ChangeLog entries for packages")
-		print("              that will be upgraded.")
-		print()
-		print("       "+green("--color") + " < " + turquoise("y") + " | "+ turquoise("n")+" >")
-		print("              Enable or disable color output. This option will override NOCOLOR")
-		print("              (see make.conf(5)) and may also be used to force color output when")
-		print("              stdout is not a tty (by default, color is disabled unless stdout")
-		print("              is a tty).")
-		print()
-		print("       "+green("--columns"))
-		print("              Display the pretend output in a tabular form. Versions are")
-		print("              aligned vertically.")
-		print()
-		print("       "+green("--complete-graph") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "This causes emerge to consider the deep dependencies of all" + \
-			" packages from the world set. With this option enabled," + \
-			" emerge will bail out if it determines that the given operation will" + \
-			" break any dependencies of the packages that have been added to the" + \
-			" graph. Like the --deep option, the --complete-graph" + \
-			" option will significantly increase the time taken for dependency" + \
-			" calculations. Note that, unlike the --deep option, the" + \
-			" --complete-graph option does not cause any more packages to" + \
-			" be updated than would have otherwise " + \
-			"been updated with the option disabled. " + \
-			"Using --with-bdeps=y together with --complete-graph makes " + \
-			"the graph as complete as possible."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--config-root=DIR"))
-		desc = "Set the PORTAGE_CONFIGROOT environment variable " + \
-			"which is documented in the emerge(1) man page."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--debug")+" ("+green("-d")+" short option)")
-		print("              Tell emerge to run the ebuild command in --debug mode. In this")
-		print("              mode, the bash build environment will run with the -x option,")
-		print("              causing it to output verbose debug information print to stdout.")
-		print("              --debug is great for finding bash syntax errors as providing")
-		print("              very verbose information about the dependency and build process.")
-		print()
-		print("       "+green("--deep") + " " + turquoise("[DEPTH]") + \
-			" (" + green("-D") + " short option)")
-		print("              This flag forces emerge to consider the entire dependency tree of")
-		print("              packages, instead of checking only the immediate dependencies of")
-		print("              the packages. As an example, this catches updates in libraries")
-		print("              that are not directly listed in the dependencies of a package.")
-		print("              Also see --with-bdeps for behavior with respect to build time")
-		print("              dependencies that are not strictly required.")
-		print()
-
-		if _ENABLE_DYN_LINK_MAP:
-			print("       " + green("--depclean-lib-check") + " [ %s | %s ]" % \
-				(turquoise("y"), turquoise("n")))
-			desc = "Account for library link-level dependencies during " + \
-				"--depclean and --prune actions. This " + \
-				"option is enabled by default. In some cases this can " + \
-				"be somewhat time-consuming. This option is ignored " + \
-				"when FEATURES=\"preserve-libs\" is enabled in " + \
-				"make.conf(5), since any libraries that have " + \
-				"consumers will simply be preserved."
-			for line in wrap(desc, desc_width):
-				print(desc_indent + line)
-			print()
-
-		print("       "+green("--emptytree")+" ("+green("-e")+" short option)")
-		desc = "Reinstalls target atoms and their entire deep " + \
-			"dependency tree, as though no packages are currently " + \
-			"installed. You should run this with --pretend " + \
-			"first to make sure the result is what you expect."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--exclude") + " " + turquoise("ATOMS"))
-		desc = "A space separated list of package names or slot atoms. " + \
-			"Emerge won't  install any ebuild or binary package that " + \
-			"matches any of the given package atoms."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--fail-clean") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "Clean up temporary files after a build failure. This is " + \
-			"particularly useful if you have PORTAGE_TMPDIR on " + \
-			"tmpfs. If this option is enabled, you probably also want " + \
-			"to enable PORT_LOGDIR (see make.conf(5)) in " + \
-			"order to save the build log."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--fetchonly")+" ("+green("-f")+" short option)")
-		print("              Instead of doing any package building, just perform fetches for")
-		print("              all packages (main package as well as all dependencies.) When")
-		print("              used in combination with --pretend all the SRC_URIs will be")
-		print("              displayed multiple mirrors per line, one line per file.")
-		print()
-		print("       "+green("--fetch-all-uri")+" ("+green("-F")+" short option)")
-		print("              Same as --fetchonly except that all package files, including those")
-		print("              not required to build the package, will be processed.")
-		print()
-		print("       " + green("--getbinpkg") + \
-			" [ %s | %s ] (%s short option)" % \
-			(turquoise("y"), turquoise("n"), green("-g")))
-		print("              Using the server and location defined in PORTAGE_BINHOST, portage")
-		print("              will download the information from each binary file there and it")
-		print("              will use that information to help build the dependency list. This")
-		print("              option implies '-k'. (Use -gK for binary-only merging.)")
-		print()
-		print("       " + green("--getbinpkgonly") + \
-			" [ %s | %s ] (%s short option)" % \
-			(turquoise("y"), turquoise("n"), green("-G")))
-		print("              This option is identical to -g, as above, except it will not use")
-		print("              ANY information from the local machine. All binaries will be")
-		print("              downloaded from the remote server without consulting packages")
-		print("              existing in the packages directory.")
-		print()
-		print("       " + green("--jobs") + " " + turquoise("[JOBS]") + " ("+green("-j")+" short option)")
-		desc = "Specifies the number of packages " + \
-			"to build simultaneously. If this option is " + \
-			"given without an argument, emerge will not " + \
-			"limit the number of jobs that " + \
-			"can run simultaneously. Also see " + \
-			"the related --load-average option. " + \
-			"Note that interactive packages currently force a setting " + \
-			"of --jobs=1. This issue can be temporarily avoided " + \
-			"by specifying --accept-properties=-interactive."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--keep-going") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "Continue as much as possible after " + \
-			"an error. When an error occurs, " + \
-			"dependencies are recalculated for " + \
-			"remaining packages and any with " + \
-			"unsatisfied dependencies are " + \
-			"automatically dropped. Also see " + \
-			"the related --skipfirst option."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--load-average") + " " + turquoise("LOAD"))
-		desc = "Specifies that no new builds should " + \
-			"be started if there are other builds " + \
-			"running and the load average is at " + \
-			"least LOAD (a floating-point number). " + \
-			"This option is recommended for use " + \
-			"in combination with --jobs in " + \
-			"order to avoid excess load. See " + \
-			"make(1) for information about " + \
-			"analogous options that should be " + \
-			"configured via MAKEOPTS in " + \
-			"make.conf(5)."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--misspell-suggestions") + " < %s | %s >" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "Enable or disable misspell suggestions. By default, " + \
-			"emerge will show a list of packages with similar names " + \
-			"when a package doesn't exist. The EMERGE_DEFAULT_OPTS " + \
-			"variable may be used to disable this option by default"
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--newuse")+" ("+green("-N")+" short option)")
-		desc = "Tells emerge to include installed packages where USE " + \
-			"flags have changed since compilation. This option " + \
-			"also implies the --selective option. If you would " + \
-			"like to skip rebuilds for which disabled flags have " + \
-			"been added to or removed from IUSE, see the related " + \
-			"--reinstall=changed-use option."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--noconfmem"))
-		print("              Portage keeps track of files that have been placed into")
-		print("              CONFIG_PROTECT directories, and normally it will not merge the")
-		print("              same file more than once, as that would become annoying. This")
-		print("              can lead to problems when the user wants the file in the case")
-		print("              of accidental deletion. With this option, files will always be")
-		print("              merged to the live fs instead of silently dropped.")
-		print()
-		print("       "+green("--nodeps")+" ("+green("-O")+" short option)")
-		print("              Merge specified packages, but don't merge any dependencies.")
-		print("              Note that the build may fail if deps aren't satisfied.")
-		print() 
-		print("       "+green("--noreplace")+" ("+green("-n")+" short option)")
-		print("              Skip the packages specified on the command-line that have")
-		print("              already been installed.  Without this option, any packages,")
-		print("              ebuilds, or deps you specify on the command-line *will* cause")
-		print("              Portage to remerge the package, even if it is already installed.")
-		print("              Note that Portage won't remerge dependencies by default.")
-		print() 
-		print("       "+green("--nospinner"))
-		print("              Disables the spinner regardless of terminal type.")
-		print()
-		print("       " + green("--usepkg-exclude") + " " + turquoise("ATOMS"))
-		desc = "A space separated list of package names or slot atoms." + \
-			" Emerge will ignore matching binary packages."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--rebuild-exclude") + " " + turquoise("ATOMS"))
-		desc = "A space separated list of package names or slot atoms." + \
-			" Emerge will not rebuild matching packages due to --rebuild."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--rebuild-ignore") + " " + turquoise("ATOMS"))
-		desc = "A space separated list of package names or slot atoms." + \
-			" Emerge will not rebuild packages that depend on matching " + \
-			" packages due to --rebuild."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--oneshot")+" ("+green("-1")+" short option)")
-		print("              Emerge as normal, but don't add packages to the world profile.")
-		print("              This package will only be updated if it is depended upon by")
-		print("              another package.")
-		print()
-		print("       "+green("--onlydeps")+" ("+green("-o")+" short option)")
-		print("              Only merge (or pretend to merge) the dependencies of the")
-		print("              specified packages, not the packages themselves.")
-		print()
-		print("       " + green("--package-moves") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "Perform package moves when necessary. This option " + \
-			"is enabled by default. WARNING: This option " + \
-			"should remain enabled under normal circumstances. " + \
-			"Do not disable it unless you know what you are " + \
-			"doing."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--pretend")+" ("+green("-p")+" short option)")
-		print("              Instead of actually performing the merge, simply display what")
-		print("              ebuilds and tbz2s *would* have been installed if --pretend")
-		print("              weren't used.  Using --pretend is strongly recommended before")
-		print("              installing an unfamiliar package.  In the printout, N = new,")
-		print("              U = updating, R = replacing, F = fetch  restricted, B = blocked")
-		print("              by an already installed package, D = possible downgrading,")
-		print("              S = slotted install. --verbose causes affecting use flags to be")
-		print("              printed out accompanied by a '+' for enabled and a '-' for")
-		print("              disabled USE flags.")
-		print()
-		print("       " + green("--quiet") + \
-			" [ %s | %s ] (%s short option)" % \
-			(turquoise("y"), turquoise("n"), green("-q")))
-		print("              Effects vary, but the general outcome is a reduced or condensed")
-		print("              output from portage's displays.")
-		print()
-		print("       " + green("--quiet-build") + \
-			" [ %s | %s ]" % (turquoise("y"), turquoise("n")))
-		desc = "Redirect all build output to logs alone, and do not " + \
-			"display it on stdout."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--quiet-unmerge-warn"))
-		desc = "Disable the warning message that's shown prior to " + \
-			"--unmerge actions. This option is intended " + \
-			"to be set in the make.conf(5) " + \
-			"EMERGE_DEFAULT_OPTS variable."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--rebuild-if-new-rev") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "Rebuild packages when dependencies that are " + \
-			"used at both build-time and run-time are built, " + \
-			"if the dependency is not already installed with the " + \
-			"same version and revision."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--rebuild-if-new-ver") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "Rebuild packages when dependencies that are " + \
-			"used at both build-time and run-time are built, " + \
-			"if the dependency is not already installed with the " + \
-			"same version. Revision numbers are ignored."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--rebuild-if-unbuilt") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "Rebuild packages when dependencies that are " + \
-			"used at both build-time and run-time are built."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--rebuilt-binaries") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "Replace installed packages with binary packages that have " + \
-			"been rebuilt. Rebuilds are detected by comparison of " + \
-			"BUILD_TIME package metadata. This option is enabled " + \
-			"automatically when using binary packages " + \
-			"(--usepkgonly or --getbinpkgonly) together with " + \
-			"--update and --deep."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--rebuilt-binaries-timestamp") + "=%s" % turquoise("TIMESTAMP"))
-		desc = "This option modifies emerge's behaviour only if " + \
-			"--rebuilt-binaries is given. Only binaries that " + \
-			"have a BUILD_TIME that is larger than the given TIMESTAMP " + \
-			"and that is larger than that of the installed package will " + \
-			"be considered by the rebuilt-binaries logic."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--reinstall ") + turquoise("changed-use"))
-		print("              Tells emerge to include installed packages where USE flags have")
-		print("              changed since installation.  Unlike --newuse, this option does")
-		print("              not trigger reinstallation when flags that the user has not")
-		print("              enabled are added or removed.")
-		print()
-		print("       " + green("--reinstall-atoms") + " " + turquoise("ATOMS"))
-		desc = "A space separated list of package names or slot atoms. " + \
-			"Emerge will treat matching packages as if they are not " + \
-			"installed, and reinstall them if necessary."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--root=DIR"))
-		desc = "Set the ROOT environment variable " + \
-			"which is documented in the emerge(1) man page."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--root-deps[=rdeps]"))
-		desc = "If no argument is given then build-time dependencies of packages for " + \
-			"ROOT are installed to " + \
-			"ROOT instead of /. If the rdeps argument is given then discard " + \
-			"all build-time dependencies of packages for ROOT. This option is " + \
-			"only meaningful when used together with ROOT and it should not " + \
-			"be enabled under normal circumstances. For currently supported " + \
-			"EAPI values, the build-time dependencies are specified in the " + \
-			"DEPEND variable. However, behavior may change for new " + \
-			"EAPIs when related extensions are added in the future."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--select") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "Add specified packages to the world set (inverse of " + \
-			"--oneshot). This is useful if you want to " + \
-			"use EMERGE_DEFAULT_OPTS to make " + \
-			"--oneshot behavior default."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--selective") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "This identical to the --noreplace option. " + \
-			"Some options, such as --update, imply --selective. " + \
-			"Use --selective=n if you want to forcefully disable " + \
-			"--selective, regardless of options like --update."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--skipfirst"))
-		desc = "This option is only valid when " + \
-			"used with --resume.  It removes the " + \
-			"first package in the resume list. " + \
-			"Dependencies are recalculated for " + \
-			"remaining packages and any that " + \
-			"have unsatisfied dependencies or are " + \
-			"masked will be automatically dropped. " + \
-			"Also see the related " + \
-			"--keep-going option."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--tree")+" ("+green("-t")+" short option)")
-		print("              Shows the dependency tree using indentation for dependencies.")
-		print("              The packages are also listed in reverse merge order so that")
-		print("              a package's dependencies follow the package. Only really useful")
-		print("              in combination with --emptytree, --update or --deep.")
-		print()
-		print("       " + green("--unordered-display"))
-		desc = "By default the displayed merge list is sorted using the " + \
-			"order in which the packages will be merged. When " + \
-			"--tree is used together with this option, this " + \
-			"constraint is removed, hopefully leading to a more " + \
-			"readable dependency tree."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       "+green("--update")+" ("+green("-u")+" short option)")
-		desc = "Updates packages to the best version available, which may " + \
-			"not always be the  highest version number due to masking " + \
-			"for testing and development. Package atoms specified on " + \
-			"the command line are greedy, meaning that unspecific " + \
-			"atoms may match multiple versions of slotted packages."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--use-ebuild-visibility") + " [ %s | %s ]" % \
-			(turquoise("y"), turquoise("n")))
-		desc = "Use unbuilt ebuild metadata for visibility " + \
-			"checks on built packages."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--useoldpkg-atoms") + " " + turquoise("ATOMS"))
-		desc = "A space separated list of package names or slot atoms." + \
-			" Emerge will prefer matching binary packages over newer" + \
-			" unbuilt packages."
-		for line in wrap(desc, desc_width):
-			print(desc_indent + line)
-		print()
-		print("       " + green("--usepkg") + \
-			" [ %s | %s ] (%s short option)" % \
-			(turquoise("y"), turquoise("n"), green("-k")))
-		print("              Tell emerge to use binary packages (from $PKGDIR) if they are")
-		print("              available, thus possibly avoiding some time-consuming compiles.")
-		print("              This option is useful for CD installs; you can export")
-		print("              PKGDIR=/mnt/cdrom/packages and then use this option to have")
-		print("              emerge \"pull\" binary packages from the CD in order to satisfy") 
-		print("              dependencies.")
-		print()
-		print("       " + green("--usepkgonly") + \
-			" [ %s | %s ] (%s short option)" % \
-			(turquoise("y"), turquoise("n"), green("-K")))
-		print("              Like --usepkg above, except this only allows the use of binary")
-		print("              packages, and it will abort the emerge if the package is not")
-		print("              available at the time of dependency calculation.")
-		print()
-		print("       "+green("--verbose")+" ("+green("-v")+" short option)")
-		print("              Effects vary, but the general outcome is an increased or expanded")
-		print("              display of content in portage's displays.")
-		print()
-		print("       "+green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" >")
-		print("              In dependency calculations, pull in build time dependencies that")
-		print("              are not strictly required. This defaults to 'n' for installation")
-		print("              actions and 'y' for the --depclean action. This setting can be")
-		print("              added to EMERGE_DEFAULT_OPTS (see make.conf(5)) and later")
-		print("              overridden via the command line.")
-		print()
+	print()
+	print("   For more help consult the man page.")

diff --git a/portage_with_autodep/pym/_emerge/help.pyo b/portage_with_autodep/pym/_emerge/help.pyo
new file mode 100644
index 0000000..f6fea4e
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/help.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/is_valid_package_atom.pyo b/portage_with_autodep/pym/_emerge/is_valid_package_atom.pyo
new file mode 100644
index 0000000..20edc85
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/is_valid_package_atom.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/main.py b/portage_with_autodep/pym/_emerge/main.py
index 2830214..c52a3ea 100644
--- a/portage_with_autodep/pym/_emerge/main.py
+++ b/portage_with_autodep/pym/_emerge/main.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
@@ -6,10 +6,14 @@ from __future__ import print_function
 import logging
 import signal
 import stat
+import subprocess
 import sys
 import textwrap
 import platform
 import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.news:count_unread_news,display_news_notifications',
+)
 from portage import os
 from portage import _encodings
 from portage import _unicode_decode
@@ -28,7 +32,8 @@ import portage.exception
 from portage.data import secpass
 from portage.dbapi.dep_expand import dep_expand
 from portage.util import normalize_path as normpath
-from portage.util import shlex_split, writemsg_level, writemsg_stdout
+from portage.util import (shlex_split, varexpand,
+	writemsg_level, writemsg_stdout)
 from portage._sets import SETPREFIX
 from portage._global_updates import _global_updates
 
@@ -62,6 +67,7 @@ options=[
 "--nodeps",       "--noreplace",
 "--nospinner",    "--oneshot",
 "--onlydeps",     "--pretend",
+"--quiet-repo-display",
 "--quiet-unmerge-warn",
 "--resume",
 "--searchdesc",
@@ -70,6 +76,7 @@ options=[
 "--unordered-display",
 "--update",
 "--verbose",
+"--verbose-main-repo-display",
 ]
 
 shortmapping={
@@ -92,6 +99,21 @@ shortmapping={
 "v":"--verbose",   "V":"--version"
 }
 
+COWSAY_MOO = """
+
+  Larry loves Gentoo (%s)
+
+ _______________________
+< Have you mooed today? >
+ -----------------------
+        \   ^__^
+         \  (oo)\_______
+            (__)\       )\/\ 
+                ||----w |
+                ||     ||
+
+"""
+
 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
 
 	if os.path.exists("/usr/bin/install-info"):
@@ -158,11 +180,21 @@ def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
 									raise
 								del e
 					processed_count += 1
-					myso = portage.subprocess_getstatusoutput(
-						"LANG=C LANGUAGE=C /usr/bin/install-info " +
-						"--dir-file=%s/dir %s/%s" % (inforoot, inforoot, x))[1]
+					try:
+						proc = subprocess.Popen(
+							['/usr/bin/install-info',
+							'--dir-file=%s' % os.path.join(inforoot, "dir"),
+							os.path.join(inforoot, x)],
+							env=dict(os.environ, LANG="C", LANGUAGE="C"),
+							stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+					except OSError:
+						myso = None
+					else:
+						myso = _unicode_decode(
+							proc.communicate()[0]).rstrip("\n")
+						proc.wait()
 					existsstr="already exists, for file `"
-					if myso!="":
+					if myso:
 						if re.search(existsstr,myso):
 							# Already exists... Don't increment the count for this.
 							pass
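
The chk_updated_info_files hunk above replaces portage.subprocess_getstatusoutput with a direct subprocess.Popen call that forces a C locale and tolerates a missing binary. A standalone sketch of that pattern, with a hypothetical argv; stderr is folded into stdout the same way the patch does:

    import os
    import subprocess

    def run_with_c_locale(argv):
        try:
            proc = subprocess.Popen(
                argv,
                env=dict(os.environ, LANG="C", LANGUAGE="C"),
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except OSError:
            return None  # tool not installed
        out = proc.communicate()[0]
        proc.wait()
        return out.decode(errors="replace").rstrip("\n")
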
@@ -233,7 +265,6 @@ def display_preserved_libs(vardbapi, myopts):
 		linkmap = vardbapi._linkmap
 		consumer_map = {}
 		owners = {}
-		linkmap_broken = False
 
 		try:
 			linkmap.rebuild()
@@ -241,7 +272,6 @@ def display_preserved_libs(vardbapi, myopts):
 			writemsg_level("!!! Command Not Found: %s\n" % (e,),
 				level=logging.ERROR, noiselevel=-1)
 			del e
-			linkmap_broken = True
 		else:
 			search_for_owners = set()
 			for cpv in plibdata:
@@ -315,7 +345,7 @@ def post_emerge(myaction, myopts, myfiles,
 	@type myopts: dict
 	@param myfiles: emerge arguments
 	@type myfiles: list
-	@param target_root: The target ROOT for myaction
+	@param target_root: The target EROOT for myaction
 	@type target_root: String
 	@param trees: A dictionary mapping each ROOT to it's package databases
 	@type trees: dict
@@ -326,7 +356,7 @@ def post_emerge(myaction, myopts, myfiles,
 	"""
 
 	root_config = trees[target_root]["root_config"]
-	vardbapi = trees[target_root]["vartree"].dbapi
+	vardbapi = trees[target_root]['vartree'].dbapi
 	settings = vardbapi.settings
 	info_mtimes = mtimedb["info"]
 
@@ -351,7 +381,9 @@ def post_emerge(myaction, myopts, myfiles,
 	_flush_elog_mod_echo()
 
 	if not vardbapi._pkgs_changed:
-		display_news_notification(root_config, myopts)
+		# GLEP 42 says to display news *after* an emerge --pretend
+		if "--pretend" in myopts:
+			display_news_notification(root_config, myopts)
 		# If vdb state has not changed then there's nothing else to do.
 		return
 
@@ -372,11 +404,10 @@ def post_emerge(myaction, myopts, myfiles,
 			if vdb_lock:
 				vardbapi.unlock()
 
+	display_preserved_libs(vardbapi, myopts)
 	chk_updated_cfg_files(settings['EROOT'], config_protect)
 
 	display_news_notification(root_config, myopts)
-	if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
-		display_preserved_libs(vardbapi, myopts)	
 
 	postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
 		portage.USER_CONFIG_PATH, "bin", "post_emerge")
@@ -388,6 +419,8 @@ def post_emerge(myaction, myopts, myfiles,
 				" %s spawn failed of %s\n" % (bad("*"), postemerge,),
 				level=logging.ERROR, noiselevel=-1)
 
+	clean_logs(settings)
+
 	if "--quiet" not in myopts and \
 		myaction is None and "@world" in myfiles:
 		show_depclean_suggestion()
@@ -428,6 +461,8 @@ def insert_optional_args(args):
 	default_arg_opts = {
 		'--ask'                  : y_or_n,
 		'--autounmask'           : y_or_n,
+		'--autounmask-keep-masks': y_or_n,
+		'--autounmask-unrestricted-atoms' : y_or_n,
 		'--autounmask-write'     : y_or_n,
 		'--buildpkg'             : y_or_n,
 		'--complete-graph'       : y_or_n,
@@ -551,19 +586,25 @@ def insert_optional_args(args):
 
 	return new_args
 
-def _find_bad_atoms(atoms):
+def _find_bad_atoms(atoms, less_strict=False):
+	"""
+	Declares as invalid every atom that has an operator,
+	a USE dependency, a blocker, or a repo spec.
+	Atoms with wildcards are accepted.
+	In less_strict mode, operators and repo specs are also accepted.
+	"""
 	bad_atoms = []
 	for x in ' '.join(atoms).split():
 		bad_atom = False
 		try:
-			atom = portage.dep.Atom(x, allow_wildcard=True)
+			atom = portage.dep.Atom(x, allow_wildcard=True, allow_repo=less_strict)
 		except portage.exception.InvalidAtom:
 			try:
-				atom = portage.dep.Atom("*/"+x, allow_wildcard=True)
+				atom = portage.dep.Atom("*/"+x, allow_wildcard=True, allow_repo=less_strict)
 			except portage.exception.InvalidAtom:
 				bad_atom = True
 
-		if bad_atom or atom.operator or atom.blocker or atom.use:
+		if bad_atom or (atom.operator and not less_strict) or atom.blocker or atom.use:
 			bad_atoms.append(x)
 	return bad_atoms
 
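A hedged usage sketch for the relaxed validation above, assuming a Portage install; it mirrors the less_strict rules, under which operators and repo specs pass while blockers and USE dependencies are still rejected. The sample atoms are hypothetical:

    import portage.dep
    import portage.exception

    def is_bad(spec, less_strict=True):
        try:
            atom = portage.dep.Atom(spec, allow_wildcard=True,
                allow_repo=less_strict)
        except portage.exception.InvalidAtom:
            return True
        # operators only disqualify in strict mode; blockers and
        # USE deps always do
        return bool((atom.operator and not less_strict)
            or atom.blocker or atom.use)

    for spec in ("sys-apps/*", ">=dev-lang/python-3::gentoo",
            "app-misc/foo[bar]"):
        print(spec, "bad" if is_bad(spec) else "ok")
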
@@ -573,16 +614,15 @@ def parse_opts(tmpcmdline, silent=False):
 	myopts = {}
 	myfiles=[]
 
-	global options, shortmapping
-
 	actions = frozenset([
-		"clean", "config", "depclean", "help",
-		"info", "list-sets", "metadata",
+		"clean", "check-news", "config", "depclean", "help",
+		"info", "list-sets", "metadata", "moo",
 		"prune", "regen",  "search",
 		"sync",  "unmerge", "version",
 	])
 
 	longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
+	y_or_n = ("y", "n")
 	true_y_or_n = ("True", "y", "n")
 	true_y = ("True", "y")
 	argument_options = {
@@ -600,6 +640,18 @@ def parse_opts(tmpcmdline, silent=False):
 			"choices" : true_y_or_n
 		},
 
+		"--autounmask-unrestricted-atoms": {
+			"help"    : "write autounmask changes with >= atoms if possible",
+			"type"    : "choice",
+			"choices" : true_y_or_n
+		},
+
+		"--autounmask-keep-masks": {
+			"help"    : "don't add package.unmask entries",
+			"type"    : "choice",
+			"choices" : true_y_or_n
+		},
+
 		"--autounmask-write": {
 			"help"    : "write changes made by --autounmask to disk",
 			"type"    : "choice",
@@ -626,6 +678,14 @@ def parse_opts(tmpcmdline, silent=False):
 			"choices"  : true_y_or_n
 		},
 
+		"--buildpkg-exclude": {
+			"help"   :"A space separated list of package atoms for which " + \
+				"no binary packages should be built. This option overrides all " + \
+				"possible ways to enable building of binary packages.",
+
+			"action" : "append"
+		},
+
 		"--config-root": {
 			"help":"specify the location for portage configuration files",
 			"action":"store"
@@ -642,6 +702,12 @@ def parse_opts(tmpcmdline, silent=False):
 			"choices" : true_y_or_n
 		},
 
+		"--complete-graph-if-new-ver": {
+			"help"    : "trigger --complete-graph behavior if an installed package version will change (upgrade or downgrade)",
+			"type"    : "choice",
+			"choices" : y_or_n
+		},
+
 		"--deep": {
 
 			"shortopt" : "-D",
@@ -660,6 +726,12 @@ def parse_opts(tmpcmdline, silent=False):
 			"choices" : true_y_or_n
 		},
 
+		"--dynamic-deps": {
+			"help": "substitute the dependencies of installed packages with the dependencies of unbuilt ebuilds",
+			"type": "choice",
+			"choices": y_or_n
+		},
+
 		"--exclude": {
 			"help"   :"A space separated list of package names or slot atoms. " + \
 				"Emerge won't  install any ebuild or binary package that " + \
@@ -784,7 +856,7 @@ def parse_opts(tmpcmdline, silent=False):
 		"--quiet-build": {
 			"help"     : "redirect build output to logs",
 			"type"     : "choice",
-			"choices"  : true_y_or_n
+			"choices"  : true_y_or_n,
 		},
 
 		"--rebuild-if-new-rev": {
@@ -923,13 +995,23 @@ def parse_opts(tmpcmdline, silent=False):
 	if myoptions.autounmask in true_y:
 		myoptions.autounmask = True
 
+	if myoptions.autounmask_unrestricted_atoms in true_y:
+		myoptions.autounmask_unrestricted_atoms = True
+
+	if myoptions.autounmask_keep_masks in true_y:
+		myoptions.autounmask_keep_masks = True
+
 	if myoptions.autounmask_write in true_y:
 		myoptions.autounmask_write = True
 
 	if myoptions.buildpkg in true_y:
 		myoptions.buildpkg = True
-	else:
-		myoptions.buildpkg = None
+
+	if myoptions.buildpkg_exclude:
+		bad_atoms = _find_bad_atoms(myoptions.buildpkg_exclude, less_strict=True)
+		if bad_atoms and not silent:
+			parser.error("Invalid Atom(s) in --buildpkg-exclude parameter: '%s'\n" % \
+				(",".join(bad_atoms),))
 
 	if myoptions.changed_use is not False:
 		myoptions.reinstall = "changed-use"
@@ -938,10 +1020,11 @@ def parse_opts(tmpcmdline, silent=False):
 	if myoptions.deselect in true_y:
 		myoptions.deselect = True
 
-	if myoptions.binpkg_respect_use in true_y:
-		myoptions.binpkg_respect_use = True
-	else:
-		myoptions.binpkg_respect_use = None
+	if myoptions.binpkg_respect_use is not None:
+		if myoptions.binpkg_respect_use in true_y:
+			myoptions.binpkg_respect_use = 'y'
+		else:
+			myoptions.binpkg_respect_use = 'n'
 
 	if myoptions.complete_graph in true_y:
 		myoptions.complete_graph = True
@@ -1015,9 +1098,7 @@ def parse_opts(tmpcmdline, silent=False):
 		myoptions.quiet = None
 
 	if myoptions.quiet_build in true_y:
-		myoptions.quiet_build = True
-	else:
-		myoptions.quiet_build = None
+		myoptions.quiet_build = 'y'
 
 	if myoptions.rebuild_if_new_ver in true_y:
 		myoptions.rebuild_if_new_ver = True
@@ -1172,8 +1253,7 @@ def parse_opts(tmpcmdline, silent=False):
 	if myaction is None and myoptions.deselect is True:
 		myaction = 'deselect'
 
-	if myargs and sys.hexversion < 0x3000000 and \
-		not isinstance(myargs[0], unicode):
+	if myargs and isinstance(myargs[0], bytes):
 		for i in range(len(myargs)):
 			myargs[i] = portage._unicode_decode(myargs[i])
 
@@ -1222,7 +1302,6 @@ def ionice(settings):
 	if not ionice_cmd:
 		return
 
-	from portage.util import varexpand
 	variables = {"PID" : str(os.getpid())}
 	cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
 
@@ -1238,6 +1317,35 @@ def ionice(settings):
 		out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
 		out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
 
+def clean_logs(settings):
+
+	if "clean-logs" not in settings.features:
+		return
+
+	clean_cmd = settings.get("PORT_LOGDIR_CLEAN")
+	if clean_cmd:
+		clean_cmd = shlex_split(clean_cmd)
+	if not clean_cmd:
+		return
+
+	logdir = settings.get("PORT_LOGDIR")
+	if logdir is None or not os.path.isdir(logdir):
+		return
+
+	variables = {"PORT_LOGDIR" : logdir}
+	cmd = [varexpand(x, mydict=variables) for x in clean_cmd]
+
+	try:
+		rval = portage.process.spawn(cmd, env=os.environ)
+	except portage.exception.CommandNotFound:
+		rval = 127
+
+	if rval != os.EX_OK:
+		out = portage.output.EOutput()
+		out.eerror("PORT_LOGDIR_CLEAN returned %d" % (rval,))
+		out.eerror("See the make.conf(5) man page for "
+			"PORT_LOGDIR_CLEAN usage instructions.")
+
 def setconfig_fallback(root_config):
 	from portage._sets.base import DummyPackageSet
 	from portage._sets.files import WorldSelectedSet
@@ -1451,25 +1559,26 @@ def repo_name_duplicate_check(trees):
 
 def config_protect_check(trees):
 	for root, root_trees in trees.items():
-		if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
+		settings = root_trees["root_config"].settings
+		if not settings.get("CONFIG_PROTECT"):
 			msg = "!!! CONFIG_PROTECT is empty"
-			if root != "/":
+			if settings["ROOT"] != "/":
 				msg += " for '%s'" % root
 			msg += "\n"
 			writemsg_level(msg, level=logging.WARN, noiselevel=-1)
 
 def profile_check(trees, myaction):
-	if myaction in ("help", "info", "sync", "version"):
+	if myaction in ("help", "info", "search", "sync", "version"):
 		return os.EX_OK
-	for root, root_trees in trees.items():
+	for root_trees in trees.values():
 		if root_trees["root_config"].settings.profiles:
 			continue
 		# generate some profile related warning messages
 		validate_ebuild_environment(trees)
-		msg = "If you have just changed your profile configuration, you " + \
-			"should revert back to the previous configuration. Due to " + \
-			"your current profile being invalid, allowed actions are " + \
-			"limited to --help, --info, --sync, and --version."
+		msg = ("Your current profile is invalid. If you have just changed "
+			"your profile configuration, you should revert back to the "
+			"previous configuration. Allowed actions are limited to "
+			"--help, --info, --search, --sync, and --version.")
 		writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
 			level=logging.ERROR, noiselevel=-1)
 		return 1
@@ -1515,7 +1624,7 @@ def emerge_main(args=None):
 	# Portage needs to ensure a sane umask for the files it creates.
 	os.umask(0o22)
 	settings, trees, mtimedb = load_emerge_config()
-	portdb = trees[settings["ROOT"]]["porttree"].dbapi
+	portdb = trees[settings['EROOT']]['porttree'].dbapi
 	rval = profile_check(trees, myaction)
 	if rval != os.EX_OK:
 		return rval
@@ -1526,13 +1635,14 @@ def emerge_main(args=None):
 	tmpcmdline.extend(args)
 	myaction, myopts, myfiles = parse_opts(tmpcmdline)
 
-	if myaction not in ('help', 'info', 'version') and \
+	# skip global updates prior to sync, since it's called after sync
+	if myaction not in ('help', 'info', 'sync', 'version') and \
 		myopts.get('--package-moves') != 'n' and \
 		_global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
 		mtimedb.commit()
 		# Reload the whole config from scratch.
 		settings, trees, mtimedb = load_emerge_config(trees=trees)
-		portdb = trees[settings["ROOT"]]["porttree"].dbapi
+		portdb = trees[settings['EROOT']]['porttree'].dbapi
 
 	xterm_titles = "notitles" not in settings.features
 	if xterm_titles:
@@ -1543,19 +1653,24 @@ def emerge_main(args=None):
 		# Reload the whole config from scratch so that the portdbapi internal
 		# config is updated with new FEATURES.
 		settings, trees, mtimedb = load_emerge_config(trees=trees)
-		portdb = trees[settings["ROOT"]]["porttree"].dbapi
+		portdb = trees[settings['EROOT']]['porttree'].dbapi
+
+	# NOTE: adjust_configs() can map options to FEATURES, so any relevant
+	# option adjustments should be made prior to calling adjust_configs().
+	if "--buildpkgonly" in myopts:
+		myopts["--buildpkg"] = True
 
 	adjust_configs(myopts, trees)
 	apply_priorities(settings)
 
 	if myaction == 'version':
 		writemsg_stdout(getportageversion(
-			settings["PORTDIR"], settings["ROOT"],
+			settings["PORTDIR"], None,
 			settings.profile_path, settings["CHOST"],
-			trees[settings["ROOT"]]["vartree"].dbapi) + '\n', noiselevel=-1)
+			trees[settings['EROOT']]['vartree'].dbapi) + '\n', noiselevel=-1)
 		return 0
 	elif myaction == 'help':
-		_emerge.help.help(myopts, portage.output.havecolor)
+		_emerge.help.help()
 		return 0
 
 	spinner = stdout_spinner()
@@ -1587,9 +1702,6 @@ def emerge_main(args=None):
 	if "--usepkgonly" in myopts:
 		myopts["--usepkg"] = True
 
-	if "buildpkg" in settings.features or "--buildpkgonly" in myopts:
-		myopts["--buildpkg"] = True
-
 	if "--buildpkgonly" in myopts:
 		# --buildpkgonly will not merge anything, so
 		# it cancels all binary package options.
@@ -1613,20 +1725,11 @@ def emerge_main(args=None):
 	del mytrees, mydb
 
 	if "moo" in myfiles:
-		print("""
-
-  Larry loves Gentoo (""" + platform.system() + """)
-
- _______________________
-< Have you mooed today? >
- -----------------------
-        \   ^__^
-         \  (oo)\_______
-            (__)\       )\/\ 
-                ||----w |
-                ||     ||
-
-""")
+		print(COWSAY_MOO % platform.system())
+		msg = ("The above `emerge moo` display is deprecated. "
+			"Please use `emerge --moo` instead.")
+		for line in textwrap.wrap(msg, 50):
+			print(" %s %s" % (colorize("WARN", "*"), line))
 
 	for x in myfiles:
 		ext = os.path.splitext(x)[1]
@@ -1634,10 +1737,22 @@ def emerge_main(args=None):
 			print(colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n"))
 			break
 
-	root_config = trees[settings["ROOT"]]["root_config"]
-	if myaction == "list-sets":
+	root_config = trees[settings['EROOT']]['root_config']
+	if myaction == "moo":
+		print(COWSAY_MOO % platform.system())
+		return os.EX_OK
+	elif myaction == "list-sets":
 		writemsg_stdout("".join("%s\n" % s for s in sorted(root_config.sets)))
 		return os.EX_OK
+	elif myaction == "check-news":
+		news_counts = count_unread_news(
+			root_config.trees["porttree"].dbapi,
+			root_config.trees["vartree"].dbapi)
+		if any(news_counts.values()):
+			display_news_notifications(news_counts)
+		elif "--quiet" not in myopts:
+			print("", colorize("GOOD", "*"), "No news items were found.")
+		return os.EX_OK
 
 	ensure_required_sets(trees)
 
@@ -1703,7 +1818,7 @@ def emerge_main(args=None):
 		print("myopts", myopts)
 
 	if not myaction and not myfiles and "--resume" not in myopts:
-		_emerge.help.help(myopts, portage.output.havecolor)
+		_emerge.help.help()
 		return 1
 
 	pretend = "--pretend" in myopts
@@ -1735,7 +1850,7 @@ def emerge_main(args=None):
 						portage_group_warning()
 					if userquery("Would you like to add --pretend to options?",
 						"--ask-enter-invalid" in myopts) == "No":
-						return 1
+						return 128 + signal.SIGINT
 					myopts["--pretend"] = True
 					del myopts["--ask"]
 				else:
@@ -1753,7 +1868,11 @@ def emerge_main(args=None):
 		if x in myopts:
 			disable_emergelog = True
 			break
-	if myaction in ("search", "info"):
+	if disable_emergelog:
+		pass
+	elif myaction in ("search", "info"):
+		disable_emergelog = True
+	elif portage.data.secpass < 1:
 		disable_emergelog = True
 
 	_emerge.emergelog._disable = disable_emergelog
@@ -1768,8 +1887,13 @@ def emerge_main(args=None):
 					"EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
 					(settings['EMERGE_LOG_DIR'], e),
 					noiselevel=-1, level=logging.ERROR)
+				portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
 			else:
 				_emerge.emergelog._emerge_log_dir = settings["EMERGE_LOG_DIR"]
+		else:
+			_emerge.emergelog._emerge_log_dir = os.path.join(os.sep,
+				settings["EPREFIX"].lstrip(os.sep), "var", "log")
+			portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
 
 	if not "--pretend" in myopts:
 		emergelog(xterm_titles, "Started emerge on: "+\
@@ -1778,9 +1902,19 @@ def emerge_main(args=None):
 				encoding=_encodings['content'], errors='replace'))
 		myelogstr=""
 		if myopts:
-			myelogstr=" ".join(myopts)
+			opt_list = []
+			for opt, arg in myopts.items():
+				if arg is True:
+					opt_list.append(opt)
+				elif isinstance(arg, list):
+					# arguments like --exclude that use 'append' action
+					for x in arg:
+						opt_list.append("%s=%s" % (opt, x))
+				else:
+					opt_list.append("%s=%s" % (opt, arg))
+			myelogstr=" ".join(opt_list)
 		if myaction:
-			myelogstr+=" "+myaction
+			myelogstr += " --" + myaction
 		if myfiles:
 			myelogstr += " " + " ".join(oldargs)
 		emergelog(xterm_titles, " *** emerge " + myelogstr)
@@ -1824,7 +1958,7 @@ def emerge_main(args=None):
 	# SEARCH action
 	elif "search"==myaction:
 		validate_ebuild_environment(trees)
-		action_search(trees[settings["ROOT"]]["root_config"],
+		action_search(trees[settings['EROOT']]['root_config'],
 			myopts, myfiles, spinner)
 
 	elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
@@ -1832,19 +1966,19 @@ def emerge_main(args=None):
 		rval = action_uninstall(settings, trees, mtimedb["ldpath"],
 			myopts, myaction, myfiles, spinner)
 		if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
-			post_emerge(myaction, myopts, myfiles, settings["ROOT"],
+			post_emerge(myaction, myopts, myfiles, settings['EROOT'],
 				trees, mtimedb, rval)
 		return rval
 
 	elif myaction == 'info':
 
 		# Ensure atoms are valid before calling unmerge().
-		vardb = trees[settings["ROOT"]]["vartree"].dbapi
-		portdb = trees[settings["ROOT"]]["porttree"].dbapi
-		bindb = trees[settings["ROOT"]]["bintree"].dbapi
+		vardb = trees[settings['EROOT']]['vartree'].dbapi
+		portdb = trees[settings['EROOT']]['porttree'].dbapi
+		bindb = trees[settings['EROOT']]["bintree"].dbapi
 		valid_atoms = []
 		for x in myfiles:
-			if is_valid_package_atom(x):
+			if is_valid_package_atom(x, allow_repo=True):
 				try:
 					#look at the installed files first, if there is no match
 					#look at the ebuilds, since EAPI 4 allows running pkg_info
@@ -1900,11 +2034,12 @@ def emerge_main(args=None):
 				level=logging.ERROR, noiselevel=-1)
 			return 1
 
+		# GLEP 42 says to display news *after* an emerge --pretend
 		if "--pretend" not in myopts:
 			display_news_notification(root_config, myopts)
 		retval = action_build(settings, trees, mtimedb,
 			myopts, myaction, myfiles, spinner)
-		post_emerge(myaction, myopts, myfiles, settings["ROOT"],
+		post_emerge(myaction, myopts, myfiles, settings['EROOT'],
 			trees, mtimedb, retval)
 
 		return retval

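The option handling above follows one pattern throughout: a choice-type option accepts the strings "True", "y" or "n", and parse_opts() later collapses the affirmative spellings to a single canonical value (True for booleans, 'y'/'n' where downstream code expects a string). A minimal standalone sketch of that normalization; the Opts object is an illustrative stand-in, not the real optparse result:

    # Sketch of the tri-state option collapse used in parse_opts();
    # "opts" is a made-up stand-in for the parsed option object.
    true_y_or_n = ("True", "y", "n")
    true_y = ("True", "y")

    class Opts(object):
        autounmask = "y"          # simulated --autounmask=y
        quiet_build = "True"      # simulated --quiet-build=True

    opts = Opts()
    if opts.autounmask in true_y:
        opts.autounmask = True
    if opts.quiet_build in true_y:
        opts.quiet_build = 'y'    # downstream code expects 'y'/'n'

    assert opts.autounmask is True and opts.quiet_build == 'y'
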
diff --git a/portage_with_autodep/pym/_emerge/main.pyo b/portage_with_autodep/pym/_emerge/main.pyo
new file mode 100644
index 0000000..aaeb5b9
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/main.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/post_emerge.py b/portage_with_autodep/pym/_emerge/post_emerge.py
new file mode 100644
index 0000000..d5f1ba5
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/post_emerge.py
@@ -0,0 +1,165 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+import textwrap
+
+import portage
+from portage import os
+from portage.emaint.modules.logs.logs import CleanLogs
+from portage.news import count_unread_news, display_news_notifications
+from portage.output import colorize
+from portage.util._dyn_libs.display_preserved_libs import \
+	display_preserved_libs
+from portage.util._info_files import chk_updated_info_files
+
+from .chk_updated_cfg_files import chk_updated_cfg_files
+from .emergelog import emergelog
+from ._flush_elog_mod_echo import _flush_elog_mod_echo
+
+def clean_logs(settings):
+
+	if "clean-logs" not in settings.features:
+		return
+
+	logdir = settings.get("PORT_LOGDIR")
+	if logdir is None or not os.path.isdir(logdir):
+		return
+
+	cleanlogs = CleanLogs()
+	errors = cleanlogs.clean(settings=settings)
+	if errors:
+		out = portage.output.EOutput()
+		for msg in errors:
+			out.eerror(msg)
+
+def display_news_notification(root_config, myopts):
+	if "news" not in root_config.settings.features:
+		return
+	portdb = root_config.trees["porttree"].dbapi
+	vardb = root_config.trees["vartree"].dbapi
+	news_counts = count_unread_news(portdb, vardb)
+	display_news_notifications(news_counts)
+
+def show_depclean_suggestion():
+	out = portage.output.EOutput()
+	msg = "After world updates, it is important to remove " + \
+		"obsolete packages with emerge --depclean. Refer " + \
+		"to `man emerge` for more information."
+	for line in textwrap.wrap(msg, 72):
+		out.ewarn(line)
+
+def post_emerge(myaction, myopts, myfiles,
+	target_root, trees, mtimedb, retval):
+	"""
+	Misc. things to run at the end of a merge session.
+
+	Update Info Files
+	Update Config Files
+	Update News Items
+	Commit mtimeDB
+	Display preserved libs warnings
+
+	@param myaction: The action returned from parse_opts()
+	@type myaction: String
+	@param myopts: emerge options
+	@type myopts: dict
+	@param myfiles: emerge arguments
+	@type myfiles: list
+	@param target_root: The target EROOT for myaction
+	@type target_root: String
+	@param trees: A dictionary mapping each ROOT to its package databases
+	@type trees: dict
+	@param mtimedb: The mtimeDB to store data needed across merge invocations
+	@type mtimedb: MtimeDB class instance
+	@param retval: Emerge's return value
+	@type retval: Int
+	"""
+
+	root_config = trees[target_root]["root_config"]
+	vardbapi = trees[target_root]['vartree'].dbapi
+	settings = vardbapi.settings
+	info_mtimes = mtimedb["info"]
+
+	# Load the most current variables from ${ROOT}/etc/profile.env
+	settings.unlock()
+	settings.reload()
+	settings.regenerate()
+	settings.lock()
+
+	config_protect = portage.util.shlex_split(
+		settings.get("CONFIG_PROTECT", ""))
+	infodirs = settings.get("INFOPATH","").split(":") + \
+		settings.get("INFODIR","").split(":")
+
+	os.chdir("/")
+
+	if retval == os.EX_OK:
+		exit_msg = " *** exiting successfully."
+	else:
+		exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
+	emergelog("notitles" not in settings.features, exit_msg)
+
+	_flush_elog_mod_echo()
+
+	if not vardbapi._pkgs_changed:
+		# GLEP 42 says to display news *after* an emerge --pretend
+		if "--pretend" in myopts:
+			display_news_notification(root_config, myopts)
+		# If vdb state has not changed then there's nothing else to do.
+		return
+
+	vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
+	portage.util.ensure_dirs(vdb_path)
+	vdb_lock = None
+	if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
+		vardbapi.lock()
+		vdb_lock = True
+
+	if vdb_lock:
+		try:
+			if "noinfo" not in settings.features:
+				chk_updated_info_files(target_root,
+					infodirs, info_mtimes)
+			mtimedb.commit()
+		finally:
+			if vdb_lock:
+				vardbapi.unlock()
+
+	# Explicitly load and prune the PreservedLibsRegistry in order
+	# to ensure that we do not display stale data.
+	vardbapi._plib_registry.load()
+
+	if vardbapi._plib_registry.hasEntries():
+		if "--quiet" in myopts:
+			print()
+			print(colorize("WARN", "!!!") + " existing preserved libs found")
+		else:
+			print()
+			print(colorize("WARN", "!!!") + " existing preserved libs:")
+			display_preserved_libs(vardbapi)
+			print("Use " + colorize("GOOD", "emerge @preserved-rebuild") +
+				" to rebuild packages using these libraries")
+
+	chk_updated_cfg_files(settings['EROOT'], config_protect)
+
+	display_news_notification(root_config, myopts)
+
+	postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
+		portage.USER_CONFIG_PATH, "bin", "post_emerge")
+	if os.access(postemerge, os.X_OK):
+		hook_retval = portage.process.spawn(
+						[postemerge], env=settings.environ())
+		if hook_retval != os.EX_OK:
+			portage.util.writemsg_level(
+				" %s spawn failed of %s\n" %
+				(colorize("BAD", "*"), postemerge,),
+				level=logging.ERROR, noiselevel=-1)
+
+	clean_logs(settings)
+
+	if "--quiet" not in myopts and \
+		myaction is None and "@world" in myfiles:
+		show_depclean_suggestion()

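One detail worth noting in the new post_emerge module: after the config-protect and news checks it looks for a user hook at USER_CONFIG_PATH/bin/post_emerge under PORTAGE_CONFIGROOT and spawns it if executable. A hedged sketch of just that lookup, with the default paths hard-coded for illustration rather than read from a live Portage configuration:

    # Illustrative reconstruction of the hook lookup in post_emerge();
    # the defaults below are assumptions, not queried from portage.
    import os

    config_root = "/"                 # PORTAGE_CONFIGROOT default
    user_config_path = "etc/portage"  # assumed portage.USER_CONFIG_PATH

    hook = os.path.join(config_root, user_config_path, "bin", "post_emerge")
    if os.access(hook, os.X_OK):
        # the real code spawns it via portage.process.spawn() with
        # the emerge environment
        print("would run: " + hook)
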
diff --git a/portage_with_autodep/pym/_emerge/resolver/__init__.pyo b/portage_with_autodep/pym/_emerge/resolver/__init__.pyo
new file mode 100644
index 0000000..5c1b374
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/resolver/__init__.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/resolver/backtracking.py b/portage_with_autodep/pym/_emerge/resolver/backtracking.py
index dcdaee0..f2857b0 100644
--- a/portage_with_autodep/pym/_emerge/resolver/backtracking.py
+++ b/portage_with_autodep/pym/_emerge/resolver/backtracking.py
@@ -47,7 +47,7 @@ class BacktrackParameter(object):
 			self.reinstall_list == other.reinstall_list
 
 
-class _BacktrackNode:
+class _BacktrackNode(object):
 
 	__slots__ = (
 		"parameter", "depth", "mask_steps", "terminal",
@@ -84,6 +84,9 @@ class Backtracker(object):
 		Adds a newly computed backtrack parameter. Makes sure that it doesn't already exist and
 		that we don't backtrack deeper than we are allowed by --backtrack.
 		"""
+		if not self._check_runtime_pkg_mask(node.parameter.runtime_pkg_mask):
+			return
+
 		if node.mask_steps <= self._max_depth and node not in self._nodes:
 			if explore:
 				self._unexplored_nodes.append(node)
@@ -105,6 +108,28 @@ class Backtracker(object):
 	def __len__(self):
 		return len(self._unexplored_nodes)
 
+	def _check_runtime_pkg_mask(self, runtime_pkg_mask):
+		"""
+		If a package that previously caused other packages to be masked
+		gets masked itself, revert the masks on those other packages (bug 375573).
+		"""
+
+		for pkg in runtime_pkg_mask:
+
+			if "missing dependency" in runtime_pkg_mask[pkg]:
+				continue
+
+			entry_is_valid = False
+
+			for ppkg, patom in runtime_pkg_mask[pkg].get("slot conflict", set()):
+				if ppkg not in runtime_pkg_mask:
+					entry_is_valid = True
+					break
+
+			if not entry_is_valid:
+				return False
+
+		return True
 
 	def _feedback_slot_conflict(self, conflict_data):
 		for pkg, parent_atoms in conflict_data:

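The new _check_runtime_pkg_mask() encodes the bug 375573 rule: a slot-conflict mask entry is only kept if at least one of the parents that forced it is itself still unmasked; if any entry fails that test, the whole backtrack parameter is rejected so the earlier masks can be reverted. A toy reproduction of the check, with made-up package names and atoms:

    # Toy version of Backtracker._check_runtime_pkg_mask(); the
    # package names and atoms below are placeholders.
    def check_runtime_pkg_mask(runtime_pkg_mask):
        for pkg, reasons in runtime_pkg_mask.items():
            if "missing dependency" in reasons:
                continue
            entry_is_valid = False
            for parent, atom in reasons.get("slot conflict", set()):
                if parent not in runtime_pkg_mask:
                    entry_is_valid = True
                    break
            if not entry_is_valid:
                return False
        return True

    # "b" was masked only because of "a"; once "a" is masked too,
    # the parameter is rejected so b's mask can be reverted.
    mask = {
        "a": {"slot conflict": set([("root", ">=dev-libs/a-1")])},
        "b": {"slot conflict": set([("a", ">=dev-libs/b-1")])},
    }
    assert check_runtime_pkg_mask(mask) is False
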
diff --git a/portage_with_autodep/pym/_emerge/resolver/backtracking.pyo b/portage_with_autodep/pym/_emerge/resolver/backtracking.pyo
new file mode 100644
index 0000000..d989c15
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/resolver/backtracking.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py b/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py
index d113c5e..aca81fa 100644
--- a/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py
+++ b/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py
@@ -143,7 +143,8 @@ class circular_dependency_handler(object):
 
 			#If any of the flags we're going to touch is in REQUIRED_USE, add all
 			#other flags in REQUIRED_USE to affecting_use, to not lose any solution.
-			required_use_flags = get_required_use_flags(parent.metadata["REQUIRED_USE"])
+			required_use_flags = get_required_use_flags(
+				parent.metadata.get("REQUIRED_USE", ""))
 
 			if affecting_use.intersection(required_use_flags):
 				# TODO: Find out exactly which REQUIRED_USE flags are
@@ -185,7 +186,7 @@ class circular_dependency_handler(object):
 					parent_atom not in reduced_dep:
 					#We found an assignment that removes the atom from 'dep'.
 					#Make sure it doesn't conflict with REQUIRED_USE.
-					required_use = parent.metadata["REQUIRED_USE"]
+					required_use = parent.metadata.get("REQUIRED_USE", "")
 
 					if check_required_use(required_use, current_use, parent.iuse.is_valid_flag):
 						use = self.depgraph._pkg_use_enabled(parent)

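Both hunks above make the same defensive change: REQUIRED_USE only exists as of EAPI 4, so the metadata mapping may not contain the key at all and indexed access can raise KeyError. The .get() form degrades to an empty string instead:

    # REQUIRED_USE is undefined for older EAPIs; .get() keeps the
    # handler working on such metadata (illustrative dict).
    metadata = {"EAPI": "2"}
    required_use = metadata.get("REQUIRED_USE", "")
    assert required_use == ""
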
diff --git a/portage_with_autodep/pym/_emerge/resolver/circular_dependency.pyo b/portage_with_autodep/pym/_emerge/resolver/circular_dependency.pyo
new file mode 100644
index 0000000..c1f95dc
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/resolver/circular_dependency.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/resolver/output.py b/portage_with_autodep/pym/_emerge/resolver/output.py
index 05e316a..1208bf9 100644
--- a/portage_with_autodep/pym/_emerge/resolver/output.py
+++ b/portage_with_autodep/pym/_emerge/resolver/output.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 """Resolver output display operation.
@@ -13,14 +13,14 @@ import sys
 from portage import os
 from portage import _unicode_decode
 from portage.dbapi.dep_expand import dep_expand
-from portage.const import PORTAGE_PACKAGE_ATOM
-from portage.dep import cpvequal, match_from_list
-from portage.exception import InvalidDependString
-from portage.output import ( blue, bold, colorize, create_color_func,
+from portage.dep import cpvequal, _repo_separator
+from portage.exception import InvalidDependString, SignatureException
+from portage.package.ebuild._spawn_nofetch import spawn_nofetch
+from portage.output import ( blue, colorize, create_color_func,
 	darkblue, darkgreen, green, nc_len, red, teal, turquoise, yellow )
 bad = create_color_func("BAD")
-from portage.util import writemsg_stdout, writemsg_level
-from portage.versions import best, catpkgsplit, cpv_getkey
+from portage.util import writemsg_stdout
+from portage.versions import best, catpkgsplit
 
 from _emerge.Blocker import Blocker
 from _emerge.create_world_atom import create_world_atom
@@ -72,7 +72,7 @@ class Display(object):
 		"""Processes pkg for blockers and adds colorized strings to
 		self.print_msg and self.blockers
 
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		@param fetch_symbol: string
 		@rtype: bool
 		Modifies class globals: self.blocker_style, self.resolved,
@@ -121,7 +121,7 @@ class Display(object):
 	def _display_use(self, pkg, myoldbest, myinslotlist):
 		""" USE flag display
 
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		@param myoldbest: list of installed versions
 		@param myinslotlist: list of installed slots
 		Modifies class globals: self.forced_flags, self.cur_iuse,
@@ -161,7 +161,7 @@ class Display(object):
 
 	def gen_mask_str(self, pkg):
 		"""
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		"""
 		hardmasked = pkg.isHardMasked()
 		mask_str = " "
@@ -223,7 +223,7 @@ class Display(object):
 		""" Prevent USE_EXPAND_HIDDEN flags from being hidden if they
 		are the only thing that triggered reinstallation.
 
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		Modifies self.use_expand_hidden, self.use_expand, self.verboseadd
 		"""
 		reinst_flags_map = {}
@@ -302,68 +302,78 @@ class Display(object):
 	def verbose_size(self, pkg, repoadd_set, pkg_info):
 		"""Determines the size of the downloads required
 
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		@param repoadd_set: set of repos to add
 		@param pkg_info: dictionary
 		Modifies class globals: self.myfetchlist, self.counters.totalsize,
 			self.verboseadd, repoadd_set.
 		"""
 		mysize = 0
-		if pkg.type_name == "ebuild" and pkg_info.merge:
+		if pkg.type_name in ("binary", "ebuild") and pkg_info.merge:
+			db = pkg.root_config.trees[
+				pkg.root_config.pkg_tree_map[pkg.type_name]].dbapi
+			kwargs = {}
+			if pkg.type_name == "ebuild":
+				kwargs["useflags"] = pkg_info.use
+				kwargs["myrepo"] = pkg.repo
+			myfilesdict = None
 			try:
-				myfilesdict = self.portdb.getfetchsizes(pkg.cpv,
-					useflags=pkg_info.use, myrepo=pkg.repo)
+				myfilesdict = db.getfetchsizes(pkg.cpv, **kwargs)
 			except InvalidDependString as e:
 				# FIXME: validate SRC_URI earlier
-				depstr, = self.portdb.aux_get(pkg.cpv,
+				depstr, = db.aux_get(pkg.cpv,
 					["SRC_URI"], myrepo=pkg.repo)
 				show_invalid_depstring_notice(
 					pkg, depstr, str(e))
 				raise
+			except SignatureException:
+				# missing/invalid binary package SIZE signature
+				pass
 			if myfilesdict is None:
 				myfilesdict = "[empty/missing/bad digest]"
 			else:
 				for myfetchfile in myfilesdict:
 					if myfetchfile not in self.myfetchlist:
 						mysize += myfilesdict[myfetchfile]
-						self.myfetchlist.append(myfetchfile)
+						self.myfetchlist.add(myfetchfile)
 				if pkg_info.ordered:
 					self.counters.totalsize += mysize
 			self.verboseadd += _format_size(mysize)
 
-		# overlay verbose
-		# assign index for a previous version in the same slot
-		slot_matches = self.vardb.match(pkg.slot_atom)
-		if slot_matches:
-			repo_name_prev = self.vardb.aux_get(slot_matches[0],
-				["repository"])[0]
-		else:
-			repo_name_prev = None
+		if self.quiet_repo_display:
+			# overlay verbose
+			# assign index for a previous version in the same slot
+			slot_matches = self.vardb.match(pkg.slot_atom)
+			if slot_matches:
+				repo_name_prev = self.vardb.aux_get(slot_matches[0],
+					["repository"])[0]
+			else:
+				repo_name_prev = None
 
-		# now use the data to generate output
-		if pkg.installed or not slot_matches:
-			self.repoadd = self.conf.repo_display.repoStr(
-				pkg_info.repo_path_real)
-		else:
-			repo_path_prev = None
-			if repo_name_prev:
-				repo_path_prev = self.portdb.getRepositoryPath(
-					repo_name_prev)
-			if repo_path_prev == pkg_info.repo_path_real:
+			# now use the data to generate output
+			if pkg.installed or not slot_matches:
 				self.repoadd = self.conf.repo_display.repoStr(
 					pkg_info.repo_path_real)
 			else:
-				self.repoadd = "%s=>%s" % (
-					self.conf.repo_display.repoStr(repo_path_prev),
-					self.conf.repo_display.repoStr(pkg_info.repo_path_real))
-		if self.repoadd:
-			repoadd_set.add(self.repoadd)
+				repo_path_prev = None
+				if repo_name_prev:
+					repo_path_prev = self.portdb.getRepositoryPath(
+						repo_name_prev)
+				if repo_path_prev == pkg_info.repo_path_real:
+					self.repoadd = self.conf.repo_display.repoStr(
+						pkg_info.repo_path_real)
+				else:
+					self.repoadd = "%s=>%s" % (
+						self.conf.repo_display.repoStr(repo_path_prev),
+						self.conf.repo_display.repoStr(pkg_info.repo_path_real))
+			if self.repoadd:
+				repoadd_set.add(self.repoadd)
 
 
-	@staticmethod
-	def convert_myoldbest(myoldbest):
+	def convert_myoldbest(self, pkg, myoldbest):
 		"""converts and colorizes a version list to a string
 
+		@param pkg: _emerge.Package.Package instance
 		@param myoldbest: list
 		@rtype string.
 		"""
@@ -371,11 +381,13 @@ class Display(object):
 		myoldbest_str = ""
 		if myoldbest:
 			versions = []
-			for pos, pkg in enumerate(myoldbest):
-				key = catpkgsplit(pkg.cpv)[2] + \
-					"-" + catpkgsplit(pkg.cpv)[3]
+			for pos, old_pkg in enumerate(myoldbest):
+				key = catpkgsplit(old_pkg.cpv)[2] + "-" + catpkgsplit(old_pkg.cpv)[3]
 				if key[-3:] == "-r0":
 					key = key[:-3]
+				if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
+					any(x.repo != self.portdb.repositories.mainRepo().name for x in myoldbest + [pkg])):
+					key += _repo_separator + old_pkg.repo
 				versions.append(key)
 			myoldbest_str = blue("["+", ".join(versions)+"]")
 		return myoldbest_str
@@ -385,7 +397,7 @@ class Display(object):
 		"""Increments counters.interactive if the pkg is to
 		be merged and its metadata has interactive set True
 
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		@param ordered: boolean
 		@param addl: already defined string to add to
 		"""
@@ -401,13 +413,17 @@ class Display(object):
 
 		@param addl: already defined string to add to
 		@param pkg_info: dictionary
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		@rtype string
 		"""
+		ver_str = pkg_info.ver
+		if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
+			any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
+			ver_str += _repo_separator + pkg.repo
 		if self.conf.quiet:
 			myprint = addl + " " + self.indent + \
 				self.pkgprint(pkg_info.cp, pkg_info)
-			myprint = myprint+darkblue(" "+pkg_info.ver)+" "
+			myprint = myprint+darkblue(" "+ver_str)+" "
 			myprint = myprint+pkg_info.oldbest
 			myprint = myprint+darkgreen("to "+pkg.root)
 			self.verboseadd = None
@@ -422,7 +438,7 @@ class Display(object):
 					self.indent, self.pkgprint(pkg.cp, pkg_info))
 			if (self.newlp-nc_len(myprint)) > 0:
 				myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
-			myprint = myprint+"["+darkblue(pkg_info.ver)+"] "
+			myprint = myprint+" "+darkblue("["+ver_str+"]")+" "
 			if (self.oldlp-nc_len(myprint)) > 0:
 				myprint = myprint+" "*(self.oldlp-nc_len(myprint))
 			myprint = myprint+pkg_info.oldbest
@@ -435,14 +451,18 @@ class Display(object):
 
 		@param addl: already defined string to add to
 		@param pkg_info: dictionary
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		@rtype string
 		Modifies self.verboseadd
 		"""
+		ver_str = pkg_info.ver
+		if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
+			any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
+			ver_str += _repo_separator + pkg.repo
 		if self.conf.quiet:
 			myprint = addl + " " + self.indent + \
 				self.pkgprint(pkg_info.cp, pkg_info)
-			myprint = myprint+" "+green(pkg_info.ver)+" "
+			myprint = myprint+" "+green(ver_str)+" "
 			myprint = myprint+pkg_info.oldbest
 			self.verboseadd = None
 		else:
@@ -457,7 +477,7 @@ class Display(object):
 					self.indent, self.pkgprint(pkg.cp, pkg_info))
 			if (self.newlp-nc_len(myprint)) > 0:
 				myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
-			myprint = myprint+green(" ["+pkg_info.ver+"] ")
+			myprint = myprint+" "+green("["+ver_str+"]")+" "
 			if (self.oldlp-nc_len(myprint)) > 0:
 				myprint = myprint+(" "*(self.oldlp-nc_len(myprint)))
 			myprint += pkg_info.oldbest
@@ -467,31 +487,35 @@ class Display(object):
 	def _set_no_columns(self, pkg, pkg_info, addl):
 		"""prints pkg info without column indentation.
 
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		@param pkg_info: dictionary
 		@param addl: the current text to add for the next line to output
 		@rtype the updated addl
 		"""
+		pkg_str = pkg.cpv
+		if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
+			any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
+			pkg_str += _repo_separator + pkg.repo
 		if not pkg_info.merge:
 			addl = self.empty_space_in_brackets()
 			myprint = "[%s%s] %s%s %s" % \
 				(self.pkgprint(pkg_info.operation.ljust(13),
 				pkg_info), addl,
-				self.indent, self.pkgprint(pkg.cpv, pkg_info),
+				self.indent, self.pkgprint(pkg_str, pkg_info),
 				pkg_info.oldbest)
 		else:
 			myprint = "[%s %s] %s%s %s" % \
 				(self.pkgprint(pkg.type_name, pkg_info),
 				addl, self.indent,
-				self.pkgprint(pkg.cpv, pkg_info), pkg_info.oldbest)
+				self.pkgprint(pkg_str, pkg_info), pkg_info.oldbest)
 		return myprint
 
 
 	def _insert_slot(self, pkg, pkg_info, myinslotlist):
 		"""Adds slot info to the message
 
-		@returns addl: formatted slot info
-		@returns myoldbest: installed version list
+		@return addl: formatted slot info
+		@return myoldbest: installed version list
 		Modifies self.counters.downgrades, self.counters.upgrades,
 			self.counters.binary
 		"""
@@ -517,8 +541,8 @@ class Display(object):
 	def _new_slot(self, pkg, pkg_info):
 		"""New slot, mark it new.
 
-		@returns addl: formatted slot info
-		@returns myoldbest: installed version list
+		@return addl: formatted slot info
+		@return myoldbest: installed version list
 		Modifies self.counters.newslot, self.counters.binary
 		"""
 		addl = " " + green("NS") + pkg_info.fetch_symbol + "  "
@@ -574,11 +598,9 @@ class Display(object):
 	def print_changelog(self):
 		"""Prints the changelog text to std_out
 		"""
-		writemsg_stdout('\n', noiselevel=-1)
-		for revision, text in self.changelogs:
-			writemsg_stdout(bold('*'+revision) + '\n' + text,
+		for chunk in self.changelogs:
+			writemsg_stdout(chunk,
 				noiselevel=-1)
-		return
 
 
 	def get_display_list(self, mylist):
@@ -613,7 +635,7 @@ class Display(object):
 	def set_pkg_info(self, pkg, ordered):
 		"""Sets various pkg_info dictionary variables
 
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		@param ordered: bool
 		@rtype pkg_info dictionary
 		Modifies self.counters.restrict_fetch,
@@ -643,7 +665,6 @@ class Display(object):
 		pkg_info.use = list(self.conf.pkg_use_enabled(pkg))
 		if not pkg.built and pkg.operation == 'merge' and \
 			'fetch' in pkg.metadata.restrict:
-			pkg_info.fetch_symbol = red("F")
 			if pkg_info.ordered:
 				self.counters.restrict_fetch += 1
 			if not self.portdb.getfetchsizes(pkg.cpv,
@@ -651,13 +672,17 @@ class Display(object):
 				pkg_info.fetch_symbol = green("f")
 				if pkg_info.ordered:
 					self.counters.restrict_fetch_satisfied += 1
+			else:
+				pkg_info.fetch_symbol = red("F")
+				if pkg_info.ebuild_path is not None:
+					self.restrict_fetch_list[pkg] = pkg_info
 		return pkg_info
 
 
 	def do_changelog(self, pkg, pkg_info):
 		"""Processes and adds the changelog text to the master text for output
 
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		@param pkg_info: dictionary
 		Modifies self.changelogs
 		"""
@@ -676,7 +701,7 @@ class Display(object):
 	def check_system_world(self, pkg):
 		"""Checks for any occurances of the package in the system or world sets
 
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		@rtype system and world booleans
 		"""
 		root_config = self.conf.roots[pkg.root]
@@ -706,7 +731,7 @@ class Display(object):
 	@staticmethod
 	def get_ver_str(pkg):
 		"""Obtains the version string
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		@rtype string
 		"""
 		ver_str = list(catpkgsplit(pkg.cpv)[2:])
@@ -723,7 +748,7 @@ class Display(object):
 		param is used for -u, where you still *do* want to see when
 		something is being upgraded.
 
-		@param pkg: _emerge.Package instance
+		@param pkg: _emerge.Package.Package instance
 		@param pkg_info: dictionary
 		@rtype addl, myoldbest: list, myinslotlist: list
 		Modifies self.counters.reinst, self.counters.binary, self.counters.new
@@ -734,6 +759,9 @@ class Display(object):
 		installed_versions = self.vardb.match_pkgs(pkg.cp)
 		if self.vardb.cpv_exists(pkg.cpv):
 			addl = "  "+yellow("R")+pkg_info.fetch_symbol+"  "
+			installed_version = self.vardb.match_pkgs(pkg.cpv)[0]
+			if not self.quiet_repo_display and installed_version.repo != pkg.repo:
+				myoldbest = [installed_version]
 			if pkg_info.ordered:
 				if pkg_info.merge:
 					self.counters.reinst += 1
@@ -784,10 +812,16 @@ class Display(object):
 		mylist = self.get_display_list(self.conf.mylist)
 		# files to fetch list - avoids counting a same file twice
 		# in size display (verbose mode)
-		self.myfetchlist = []
-		# Use this set to detect when all the "repoadd" strings are "[0]"
-		# and disable the entire repo display in this case.
-		repoadd_set = set()
+		self.myfetchlist = set()
+
+		self.quiet_repo_display = "--quiet-repo-display" in depgraph._frozen_config.myopts
+		if self.quiet_repo_display:
+			# Use this set to detect when all the "repoadd" strings are "[0]"
+			# and disable the entire repo display in this case.
+			repoadd_set = set()
+
+		self.verbose_main_repo_display = "--verbose-main-repo-display" in depgraph._frozen_config.myopts
+		self.restrict_fetch_list = {}
 
 		for mylist_index in range(len(mylist)):
 			pkg, depth, ordered = mylist[mylist_index]
@@ -801,21 +835,25 @@ class Display(object):
 					continue
 			else:
 				pkg_info = self.set_pkg_info(pkg, ordered)
-				addl, pkg_info.oldbest, myinslotlist = \
+				addl, pkg_info.oldbest_list, myinslotlist = \
 					self._get_installed_best(pkg, pkg_info)
 				self.verboseadd = ""
-				self.repoadd = None
-				self._display_use(pkg, pkg_info.oldbest, myinslotlist)
+				if self.quiet_repo_display:
+					self.repoadd = None
+				self._display_use(pkg, pkg_info.oldbest_list, myinslotlist)
 				self.recheck_hidden(pkg)
 				if self.conf.verbosity == 3:
-					self.verbose_size(pkg, repoadd_set, pkg_info)
+					if self.quiet_repo_display:
+						self.verbose_size(pkg, repoadd_set, pkg_info)
+					else:
+						self.verbose_size(pkg, None, pkg_info)
 
 				pkg_info.cp = pkg.cp
 				pkg_info.ver = self.get_ver_str(pkg)
 
 				self.oldlp = self.conf.columnwidth - 30
 				self.newlp = self.oldlp - 30
-				pkg_info.oldbest = self.convert_myoldbest(pkg_info.oldbest)
+				pkg_info.oldbest = self.convert_myoldbest(pkg, pkg_info.oldbest_list)
 				pkg_info.system, pkg_info.world = \
 					self.check_system_world(pkg)
 				addl = self.set_interactive(pkg, pkg_info.ordered, addl)
@@ -823,13 +861,17 @@ class Display(object):
 				if self.include_mask_str():
 					addl += self.gen_mask_str(pkg)
 
-				if pkg.root != "/":
+				if pkg.root_config.settings["ROOT"] != "/":
 					if pkg_info.oldbest:
 						pkg_info.oldbest += " "
 					if self.conf.columns:
 						myprint = self._set_non_root_columns(
 							addl, pkg_info, pkg)
 					else:
+						pkg_str = pkg.cpv
+						if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
+							any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
+							pkg_str += _repo_separator + pkg.repo
 						if not pkg_info.merge:
 							addl = self.empty_space_in_brackets()
 							myprint = "[%s%s] " % (
@@ -840,7 +882,7 @@ class Display(object):
 							myprint = "[%s %s] " % (
 								self.pkgprint(pkg.type_name, pkg_info), addl)
 						myprint += self.indent + \
-							self.pkgprint(pkg.cpv, pkg_info) + " " + \
+							self.pkgprint(pkg_str, pkg_info) + " " + \
 							pkg_info.oldbest + darkgreen("to " + pkg.root)
 				else:
 					if self.conf.columns:
@@ -852,36 +894,23 @@ class Display(object):
 
 				if self.conf.columns and pkg.operation == "uninstall":
 					continue
-				self.print_msg.append((myprint, self.verboseadd, self.repoadd))
-
-				if not self.conf.tree_display \
-					and not self.conf.no_restart \
-					and pkg.root == self.conf.running_root.root \
-					and match_from_list(PORTAGE_PACKAGE_ATOM, [pkg]) \
-					and not self.conf.quiet:
-
-					if not self.vardb.cpv_exists(pkg.cpv) or \
-						'9999' in pkg.cpv or \
-						'git' in pkg.inherited or \
-						'git-2' in pkg.inherited:
-						if mylist_index < len(mylist) - 1:
-							self.print_msg.append(
-								colorize(
-									"WARN", "*** Portage will stop merging "
-									"at this point and reload itself,"
-									)
-								)
-							self.print_msg.append(
-								colorize("WARN", "    then resume the merge.")
-								)
+				if self.quiet_repo_display:
+					self.print_msg.append((myprint, self.verboseadd, self.repoadd))
+				else:
+					self.print_msg.append((myprint, self.verboseadd, None))
 
-		show_repos = repoadd_set and repoadd_set != set(["0"])
+		show_repos = self.quiet_repo_display and repoadd_set and repoadd_set != set(["0"])
 
 		# now finally print out the messages
 		self.print_messages(show_repos)
 		self.print_blockers()
 		if self.conf.verbosity == 3:
 			self.print_verbose(show_repos)
+		for pkg, pkg_info in self.restrict_fetch_list.items():
+			writemsg_stdout("\nFetch instructions for %s:\n" % (pkg.cpv,),
+							noiselevel=-1)
+			spawn_nofetch(self.conf.trees[pkg.root]["porttree"].dbapi,
+				pkg_info.ebuild_path)
 		if self.conf.changelog:
 			self.print_changelog()
 

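Much of the churn in output.py serves one display rule: at verbosity 3, when --quiet-repo-display is off and either --verbose-main-repo-display is set or an involved package comes from outside the main repository, version strings gain a ::repo suffix. A simplified sketch of that rule — the real code also inspects the repos of the installed versions in pkg_info.oldbest_list, and the repo names here are invented:

    # Simplified form of the ::repo suffix rule used in output.py;
    # "::" matches portage.dep._repo_separator.
    _repo_separator = "::"

    def decorate(ver, repo, main_repo, verbosity=3,
            quiet_repo_display=False, verbose_main_repo_display=False):
        if verbosity == 3 and not quiet_repo_display and (
                verbose_main_repo_display or repo != main_repo):
            return ver + _repo_separator + repo
        return ver

    assert decorate("1.0", "my-overlay", "gentoo") == "1.0::my-overlay"
    assert decorate("1.0", "gentoo", "gentoo") == "1.0"
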
diff --git a/portage_with_autodep/pym/_emerge/resolver/output.pyo b/portage_with_autodep/pym/_emerge/resolver/output.pyo
new file mode 100644
index 0000000..bd2ae2f
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/resolver/output.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/resolver/output_helpers.py b/portage_with_autodep/pym/_emerge/resolver/output_helpers.py
index b7e7376..e751dd8 100644
--- a/portage_with_autodep/pym/_emerge/resolver/output_helpers.py
+++ b/portage_with_autodep/pym/_emerge/resolver/output_helpers.py
@@ -14,10 +14,10 @@ import sys
 from portage import os
 from portage import _encodings, _unicode_encode
 from portage._sets.base import InternalPackageSet
-from portage.output import blue, colorize, create_color_func, green, red, \
-	teal, yellow
+from portage.output import (blue, bold, colorize, create_color_func,
+	green, red, teal, yellow)
 bad = create_color_func("BAD")
-from portage.util import writemsg
+from portage.util import shlex_split, writemsg
 from portage.versions import catpkgsplit
 
 from _emerge.Blocker import Blocker
@@ -39,7 +39,7 @@ class _RepoDisplay(object):
 				repo_paths.add(portdir)
 			overlays = root_config.settings.get("PORTDIR_OVERLAY")
 			if overlays:
-				repo_paths.update(overlays.split())
+				repo_paths.update(shlex_split(overlays))
 		repo_paths = list(repo_paths)
 		self._repo_paths = repo_paths
 		self._repo_paths_real = [ os.path.realpath(repo_path) \
@@ -198,7 +198,6 @@ class _DisplayConfig(object):
 		self.print_use_string = self.verbosity != 1 or "--verbose" in frozen_config.myopts
 		self.changelog = "--changelog" in frozen_config.myopts
 		self.edebug = frozen_config.edebug
-		self.no_restart = frozen_config._opts_no_restart.intersection(frozen_config.myopts)
 		self.unordered_display = "--unordered-display" in frozen_config.myopts
 
 		mywidth = 130
@@ -212,7 +211,8 @@ class _DisplayConfig(object):
 				del e
 		self.columnwidth = mywidth
 
-		self.repo_display = _RepoDisplay(frozen_config.roots)
+		if "--quiet-repo-display" in frozen_config.myopts:
+			self.repo_display = _RepoDisplay(frozen_config.roots)
 		self.trees = frozen_config.trees
 		self.pkgsettings = frozen_config.pkgsettings
 		self.target_root = frozen_config.target_root
@@ -500,63 +500,120 @@ def _calc_changelog(ebuildpath,current,next):
 	next = '-'.join(catpkgsplit(next)[1:])
 	if next.endswith('-r0'):
 		next = next[:-3]
-	changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
-	try:
-		changelog = io.open(_unicode_encode(changelogpath,
-			encoding=_encodings['fs'], errors='strict'),
-			mode='r', encoding=_encodings['repo.content'], errors='replace'
-		).read()
-	except SystemExit:
-		raise # Needed else can't exit
-	except:
+
+	changelogdir = os.path.dirname(ebuildpath)
+	changelogs = ['ChangeLog']
+	# ChangeLog-YYYY (see bug #389611)
+	changelogs.extend(sorted((fn for fn in os.listdir(changelogdir)
+		if fn.startswith('ChangeLog-')), reverse=True))
+
+	divisions = []
+	found_current = False
+	for fn in changelogs:
+		changelogpath = os.path.join(changelogdir, fn)
+		try:
+			with io.open(_unicode_encode(changelogpath,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['repo.content'],
+				errors='replace') as f:
+				changelog = f.read()
+		except EnvironmentError:
+			return []
+		for node in _find_changelog_tags(changelog):
+			if node[0] == current:
+				found_current = True
+				break
+			else:
+				divisions.append(node)
+		if found_current:
+			break
+
+	if not found_current:
 		return []
-	divisions = _find_changelog_tags(changelog)
+
 	#print 'XX from',current,'to',next
 	#for div,text in divisions: print 'XX',div
 	# skip entries for all revisions above the one we are about to emerge
-	for i in range(len(divisions)):
-		if divisions[i][0]==next:
-			divisions = divisions[i:]
-			break
-	# find out how many entries we are going to display
-	for i in range(len(divisions)):
-		if divisions[i][0]==current:
-			divisions = divisions[:i]
+	later_rev_index = None
+	for i, node in enumerate(divisions):
+		if node[0] == next:
+			if later_rev_index is not None:
+				first_node = divisions[later_rev_index]
+				# Discard the later revision and the first ChangeLog entry
+				# that follows it. We want to display all the entries after
+				# that first entry, as discussed in bug #373009.
+				trimmed_lines = []
+				iterator = iter(first_node[1])
+				for l in iterator:
+					if not l:
+						# end of the first entry that's discarded
+						break
+				first_node = (None, list(iterator))
+				divisions = [first_node] + divisions[later_rev_index+1:]
 			break
-	else:
-		# couldnt find the current revision in the list. display nothing
-		return []
-	return divisions
-
+		if node[0] is not None:
+			later_rev_index = i
+
+	output = []
+	prev_blank = False
+	prev_rev = False
+	for rev, lines in divisions:
+		if rev is not None:
+			if not (prev_blank or prev_rev):
+				output.append("\n")
+			output.append(bold('*' + rev) + '\n')
+			prev_rev = True
+			prev_blank = False
+		if lines:
+			prev_rev = False
+			if not prev_blank:
+				output.append("\n")
+			for l in lines:
+				output.append(l + "\n")
+			output.append("\n")
+			prev_blank = True
+	return output
+
+def _strip_header_comments(lines):
+	# strip leading and trailing blank or header/comment lines
+	i = 0
+	while i < len(lines) and (not lines[i] or lines[i][:1] == "#"):
+		i += 1
+	if i:
+		lines = lines[i:]
+	while lines and (not lines[-1] or lines[-1][:1] == "#"):
+		lines.pop()
+	return lines
 
 def _find_changelog_tags(changelog):
 	divs = []
+	if not changelog:
+		return divs
 	release = None
-	while 1:
-		match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
-		if match is None:
-			if release is not None:
-				divs.append((release,changelog))
-			return divs
-		if release is not None:
-			divs.append((release,changelog[:match.start()]))
-		changelog = changelog[match.end():]
+	release_end = 0
+	for match in re.finditer(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?$',
+		changelog, re.M):
+		divs.append((release, _strip_header_comments(
+			changelog[release_end:match.start()].splitlines())))
+		release_end = match.end()
 		release = match.group(1)
 		if release.endswith('.ebuild'):
 			release = release[:-7]
 		if release.endswith('-r0'):
 			release = release[:-3]
 
+	divs.append((release,
+		_strip_header_comments(changelog[release_end:].splitlines())))
+	return divs
 
 class PkgInfo(object):
 	"""Simple class to hold instance attributes for current
 	information about the pkg being printed.
 	"""
 	
-	__slots__ = ("ordered", "fetch_symbol", "operation", "merge",
-		"built", "cp", "ebuild_path", "repo_name", "repo_path_real", 
-		"world", "system", "use", "oldbest", "ver"
-		)
+	__slots__ = ("built", "cp", "ebuild_path", "fetch_symbol", "merge",
+		"oldbest", "oldbest_list", "operation", "ordered",
+		"repo_name", "repo_path_real", "system", "use", "ver", "world")
 
 
 	def __init__(self):
@@ -566,6 +623,7 @@ class PkgInfo(object):
 		self.fetch_symbol = ''
 		self.merge = ''
 		self.oldbest = ''
+		self.oldbest_list = []
 		self.operation = ''
 		self.ordered = False
 		self.repo_path_real = ''

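The rewritten _find_changelog_tags() no longer slices the ChangeLog string repeatedly; it walks all "*<version>" headers in a single re.finditer() pass and emits (release, lines) divisions, stripping ".ebuild" and "-r0" suffixes from the release names. A standalone sketch on invented input, without the header-comment stripping step:

    # Standalone sketch of the ChangeLog splitting done by
    # _find_changelog_tags(); the input text is made up.
    import re

    changelog = (
        "*foo-1.1 (01 Jan 2012)\n\n  Version bump.\n\n"
        "*foo-1.0-r0 (01 Jan 2011)\n\n  Initial import.\n"
    )

    divs, release, release_end = [], None, 0
    for match in re.finditer(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?$',
            changelog, re.M):
        divs.append((release,
            changelog[release_end:match.start()].splitlines()))
        release_end = match.end()
        release = match.group(1)
        if release.endswith('-r0'):
            release = release[:-3]
    divs.append((release, changelog[release_end:].splitlines()))

    assert [d[0] for d in divs] == [None, 'foo-1.1', 'foo-1.0']
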
diff --git a/portage_with_autodep/pym/_emerge/resolver/output_helpers.pyo b/portage_with_autodep/pym/_emerge/resolver/output_helpers.pyo
new file mode 100644
index 0000000..ae39dd4
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/resolver/output_helpers.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/resolver/slot_collision.py b/portage_with_autodep/pym/_emerge/resolver/slot_collision.py
index 0df8f20..a1c8714 100644
--- a/portage_with_autodep/pym/_emerge/resolver/slot_collision.py
+++ b/portage_with_autodep/pym/_emerge/resolver/slot_collision.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
@@ -80,6 +80,8 @@ class slot_conflict_handler(object):
 	the needed USE changes and prepare the message for the user.
 	"""
 
+	_check_configuration_max = 1024
+
 	def __init__(self, depgraph):
 		self.depgraph = depgraph
 		self.myopts = depgraph._frozen_config.myopts
@@ -244,7 +246,7 @@ class slot_conflict_handler(object):
 		for (slot_atom, root), pkgs \
 			in self.slot_collision_info.items():
 			msg.append(str(slot_atom))
-			if root != '/':
+			if root != self.depgraph._frozen_config._running_root.root:
 				msg.append(" for %s" % (root,))
 			msg.append("\n\n")
 
@@ -663,14 +665,24 @@ class slot_conflict_handler(object):
 
 		solutions = []
 		sol_gen = _solution_candidate_generator(all_involved_flags)
-		while(True):
+		checked = 0
+		while True:
 			candidate = sol_gen.get_candidate()
 			if not candidate:
 				break
 			solution = self._check_solution(config, candidate, all_conflict_atoms_by_slotatom)
+			checked += 1
 			if solution:
 				solutions.append(solution)
-		
+
+			if checked >= self._check_configuration_max:
+				# TODO: Implement early elimination for candidates that would
+				# change forced or masked flags, and don't count them here.
+				if self.debug:
+					writemsg("\nAborting _check_configuration due to "
+						"excessive number of candidates.\n", noiselevel=-1)
+				break
+
 		if self.debug:
 			if not solutions:
 				writemsg("No viable solutions. Rejecting configuration.\n", noiselevel=-1)
@@ -843,7 +855,7 @@ class slot_conflict_handler(object):
 
 		#Make sure the changes don't violate REQUIRED_USE
 		for pkg in required_changes:
-			required_use = pkg.metadata["REQUIRED_USE"]
+			required_use = pkg.metadata.get("REQUIRED_USE")
 			if not required_use:
 				continue
 

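The _check_configuration_max cap above guards against combinatorial blow-up: the solution-candidate generator is open-ended, so the loop now counts the candidates it has validated and bails out once 1024 have been checked. The shape of that pattern in isolation, with a plain iterator standing in for the real _solution_candidate_generator:

    # Generic form of the bounded-search loop added above.
    def bounded_search(candidates, is_solution, max_checked=1024):
        solutions, checked = [], 0
        for candidate in candidates:
            checked += 1
            if is_solution(candidate):
                solutions.append(candidate)
            if checked >= max_checked:
                break
        return solutions

    hits = bounded_search(iter(range(10000)), lambda c: c % 2 == 0)
    assert len(hits) == 512  # only the first 1024 candidates were checked
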
diff --git a/portage_with_autodep/pym/_emerge/resolver/slot_collision.pyo b/portage_with_autodep/pym/_emerge/resolver/slot_collision.pyo
new file mode 100644
index 0000000..1fc3a13
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/resolver/slot_collision.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/search.py b/portage_with_autodep/pym/_emerge/search.py
index 35f0412..5abc8a0 100644
--- a/portage_with_autodep/pym/_emerge/search.py
+++ b/portage_with_autodep/pym/_emerge/search.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
@@ -6,6 +6,7 @@ from __future__ import print_function
 import re
 import portage
 from portage import os
+from portage.dbapi.porttree import _parse_uri_map
 from portage.output import  bold, bold as white, darkgreen, green, red
 from portage.util import writemsg_stdout
 
@@ -150,7 +151,7 @@ class search(object):
 					if not result or cpv == portage.best([cpv, result]):
 						result = cpv
 				else:
-					db_keys = Package.metadata_keys
+					db_keys = list(db._aux_cache_keys)
 					# break out of this loop with highest visible
 					# match, checked in descending order
 					for cpv in reversed(db.match(atom)):
@@ -261,11 +262,13 @@ class search(object):
 		msg.append("[ Applications found : " + \
 			bold(str(self.mlen)) + " ]\n\n")
 		vardb = self.vartree.dbapi
+		metadata_keys = set(Package.metadata_keys)
+		metadata_keys.update(["DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI"])
+		metadata_keys = tuple(metadata_keys)
 		for mtype in self.matches:
 			for match,masked in self.matches[mtype]:
 				full_package = None
 				if mtype == "pkg":
-					catpack = match
 					full_package = self._xmatch(
 						"bestmatch-visible", match)
 					if not full_package:
@@ -285,11 +288,16 @@ class search(object):
 							+ "\n\n")
 				if full_package:
 					try:
-						desc, homepage, license = self._aux_get(
-							full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
+						metadata = dict(zip(metadata_keys,
+							self._aux_get(full_package, metadata_keys)))
 					except KeyError:
 						msg.append("emerge: search: aux_get() failed, skipping\n")
 						continue
+
+					desc = metadata["DESCRIPTION"]
+					homepage = metadata["HOMEPAGE"]
+					license = metadata["LICENSE"]
+
 					if masked:
 						msg.append(green("*") + "  " + \
 							white(match) + " " + red("[ Masked ]") + "\n")
@@ -304,12 +312,17 @@ class search(object):
 					mycpv = match + "-" + myversion
 					myebuild = self._findname(mycpv)
 					if myebuild:
+						pkg = Package(built=False, cpv=mycpv,
+							installed=False, metadata=metadata,
+							root_config=self.root_config, type_name="ebuild")
 						pkgdir = os.path.dirname(myebuild)
-						from portage import manifest
-						mf = manifest.Manifest(
+						mf = self.settings.repositories.get_repo_for_location(
+							os.path.dirname(os.path.dirname(pkgdir)))
+						mf = mf.load_manifest(
 							pkgdir, self.settings["DISTDIR"])
 						try:
-							uri_map = self._getFetchMap(mycpv)
+							uri_map = _parse_uri_map(mycpv, metadata,
+								use=pkg.use.enabled)
 						except portage.exception.InvalidDependString as e:
 							file_size_str = "Unknown (%s)" % (e,)
 							del e

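The search output path now fetches all of its metadata in one aux_get() round trip instead of one call per field; since aux_get() returns values positionally, zipping them back against the key tuple rebuilds a mapping. The same idea in isolation, with invented stand-in values:

    # aux_get()-style positional values rebuilt into a dict.
    metadata_keys = ("DESCRIPTION", "HOMEPAGE", "LICENSE")
    aux_values = ["An example package", "http://example.org/", "GPL-2"]
    metadata = dict(zip(metadata_keys, aux_values))
    assert metadata["LICENSE"] == "GPL-2"
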
diff --git a/portage_with_autodep/pym/_emerge/search.pyo b/portage_with_autodep/pym/_emerge/search.pyo
new file mode 100644
index 0000000..055a734
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/search.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.pyo b/portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.pyo
new file mode 100644
index 0000000..337e135
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/stdout_spinner.pyo b/portage_with_autodep/pym/_emerge/stdout_spinner.pyo
new file mode 100644
index 0000000..b091171
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/stdout_spinner.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/sync/__init__.pyo b/portage_with_autodep/pym/_emerge/sync/__init__.pyo
new file mode 100644
index 0000000..7314f80
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/sync/__init__.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.pyo b/portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.pyo
new file mode 100644
index 0000000..8e41377
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.pyo b/portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.pyo
new file mode 100644
index 0000000..5b59c5a
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/unmerge.py b/portage_with_autodep/pym/_emerge/unmerge.py
index 3db3a8b..b46b89c 100644
--- a/portage_with_autodep/pym/_emerge/unmerge.py
+++ b/portage_with_autodep/pym/_emerge/unmerge.py
@@ -1,9 +1,10 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
 
 import logging
+import signal
 import sys
 import textwrap
 import portage
@@ -12,7 +13,7 @@ from portage.dbapi._expand_new_virt import expand_new_virt
 from portage.output import bold, colorize, darkgreen, green
 from portage._sets import SETPREFIX
 from portage._sets.base import EditablePackageSet
-from portage.util import cmp_sort_key
+from portage.versions import cpv_sort_key, _pkg_str
 
 from _emerge.emergelog import emergelog
 from _emerge.Package import Package
@@ -467,20 +468,22 @@ def _unmerge_display(root_config, myopts, unmerge_action,
 			if not quiet:
 				writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
 			if pkgmap[x][mytype]:
-				sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
-				sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
-				for pn, ver, rev in sorted_pkgs:
-					if rev == "r0":
-						myversion = ver
-					else:
-						myversion = ver + "-" + rev
+				sorted_pkgs = []
+				for mypkg in pkgmap[x][mytype]:
+					try:
+						sorted_pkgs.append(mypkg.cpv)
+					except AttributeError:
+						sorted_pkgs.append(_pkg_str(mypkg))
+				sorted_pkgs.sort(key=cpv_sort_key())
+				for mypkg in sorted_pkgs:
 					if mytype == "selected":
 						writemsg_level(
-							colorize("UNMERGE_WARN", myversion + " "),
+							colorize("UNMERGE_WARN", mypkg.version + " "),
 							noiselevel=-1)
 					else:
 						writemsg_level(
-							colorize("GOOD", myversion + " "), noiselevel=-1)
+							colorize("GOOD", mypkg.version + " "),
+							noiselevel=-1)
 			else:
 				writemsg_level("none ", noiselevel=-1)
 			if not quiet:
@@ -503,7 +506,8 @@ def unmerge(root_config, myopts, unmerge_action,
 	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
 	scheduler=None, writemsg_level=portage.util.writemsg_level):
 	"""
-	Returns 1 if successful, otherwise 0.
+	Returns os.EX_OK if no errors occur, 1 if an error occurs, and
+	130 if interrupted due to a 'no' answer for --ask.
 	"""
 
 	if clean_world:
@@ -515,7 +519,7 @@ def unmerge(root_config, myopts, unmerge_action,
 		writemsg_level=writemsg_level)
 
 	if rval != os.EX_OK:
-		return 0
+		return rval
 
 	enter_invalid = '--ask-enter-invalid' in myopts
 	vartree = root_config.trees["vartree"]
@@ -526,7 +530,7 @@ def unmerge(root_config, myopts, unmerge_action,
 
 	if "--pretend" in myopts:
 		#we're done... return
-		return 0
+		return os.EX_OK
 	if "--ask" in myopts:
 		if userquery("Would you like to unmerge these packages?",
 			enter_invalid) == "No":
@@ -535,19 +539,32 @@ def unmerge(root_config, myopts, unmerge_action,
 			print()
 			print("Quitting.")
 			print()
-			return 0
+			return 128 + signal.SIGINT
 	#the real unmerging begins, after a short delay....
 	if clean_delay and not autoclean:
 		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
 
+	all_selected = set()
+	all_selected.update(*[x["selected"] for x in pkgmap])
+
+	# Set counter variables
+	curval = 1
+	maxval = len(all_selected)
+
 	for x in range(len(pkgmap)):
 		for y in pkgmap[x]["selected"]:
-			writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
 			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
+			message = ">>> Unmerging ({0} of {1}) {2}...\n".format(
+				colorize("MERGE_LIST_PROGRESS", str(curval)),
+				colorize("MERGE_LIST_PROGRESS", str(maxval)),
+				y)
+			writemsg_level(message, noiselevel=-1)
+			curval += 1
+
 			mysplit = y.split("/")
 			#unmerge...
-			retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
-				mysettings, unmerge_action not in ["clean","prune"],
+			retval = portage.unmerge(mysplit[0], mysplit[1],
+				settings=mysettings,
 				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
 				scheduler=scheduler)
 
@@ -574,5 +591,5 @@ def unmerge(root_config, myopts, unmerge_action,
 			sets["selected"].remove(SETPREFIX + s)
 		sets["selected"].unlock()
 
-	return 1
+	return os.EX_OK
 

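With this change unmerge() reports status through conventional exit codes instead of a bare 0/1: os.EX_OK on success, 1 on error, and 128 + SIGINT (130) when the user answers 'No' to --ask, matching the shell convention for a process killed by SIGINT. A quick illustration, assuming POSIX signal numbering:

    import os
    import signal

    # 128 + SIGINT(2) == 130, the conventional "interrupted" exit
    # status; callers can now tell a declined prompt apart from a
    # real failure.
    assert os.EX_OK == 0
    assert 128 + signal.SIGINT == 130
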
diff --git a/portage_with_autodep/pym/_emerge/unmerge.pyo b/portage_with_autodep/pym/_emerge/unmerge.pyo
new file mode 100644
index 0000000..c16c9ec
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/unmerge.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/userquery.py b/portage_with_autodep/pym/_emerge/userquery.py
index e7ed400..efae80a 100644
--- a/portage_with_autodep/pym/_emerge/userquery.py
+++ b/portage_with_autodep/pym/_emerge/userquery.py
@@ -1,8 +1,9 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
 
+import signal
 import sys
 
 from portage.output import bold, create_color_func
@@ -51,5 +52,4 @@ def userquery(prompt, enter_invalid, responses=None, colours=None):
 			print("Sorry, response '%s' not understood." % response, end=' ')
 	except (EOFError, KeyboardInterrupt):
 		print("Interrupted.")
-		sys.exit(1)
-
+		sys.exit(128 + signal.SIGINT)

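userquery() now applies the same exit-status convention at the prompt itself. A minimal, self-contained sketch of such a prompt loop for Python 3 (a hypothetical standalone version; the portage helper additionally handles colours and custom response lists):

    import signal
    import sys

    def ask(prompt="Would you like to continue? [Yes/No] "):
        # Loop until an understood answer; EOF or Ctrl-C exits with
        # the conventional 128 + SIGINT status instead of plain 1.
        try:
            while True:
                response = input(prompt)
                if response in ("Yes", "No"):
                    return response
                print("Sorry, response '%s' not understood." % response)
        except (EOFError, KeyboardInterrupt):
            print("Interrupted.")
            sys.exit(128 + signal.SIGINT)
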
diff --git a/portage_with_autodep/pym/_emerge/userquery.pyo b/portage_with_autodep/pym/_emerge/userquery.pyo
new file mode 100644
index 0000000..5492f90
Binary files /dev/null and b/portage_with_autodep/pym/_emerge/userquery.pyo differ

diff --git a/portage_with_autodep/pym/portage/__init__.py b/portage_with_autodep/pym/portage/__init__.py
index 2a2eb99..431dc26 100644
--- a/portage_with_autodep/pym/portage/__init__.py
+++ b/portage_with_autodep/pym/portage/__init__.py
@@ -2,7 +2,7 @@
 # Copyright 1998-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-VERSION="HEAD"
+VERSION="2.2.0_alpha108"
 
 # ===========================================================================
 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
@@ -148,20 +148,35 @@ if sys.hexversion >= 0x3000000:
 	basestring = str
 	long = int
 
-# Assume utf_8 fs encoding everywhere except in merge code, where the
-# user's locale is respected.
+# We use utf_8 encoding everywhere. Previously, we used
+# sys.getfilesystemencoding() for the 'merge' encoding, but that had
+# various problems:
+#
+#   1) If the locale is ever changed then it can cause orphan files due
+#      to changed character set translation.
+#
+#   2) Ebuilds typically install files with utf_8 encoded file names,
+#      and then portage would be forced to rename those files to match
+#      sys.getfilesystemencoding(), possibly breaking things.
+#
+#   3) Automatic translation between encodings can lead to nonsensical
+#      file names when the source encoding is unknown by portage.
+#
+#   4) It's inconvenient for ebuilds to convert the encodings of file
+#      names to match the current locale, and upstreams typically encode
+#      file names with utf_8 encoding.
+#
+# So, instead of relying on sys.getfilesystemencoding(), we avoid the above
+# problems by using a constant utf_8 'merge' encoding for all locales, as
+# discussed in bug #382199 and bug #381509.
 _encodings = {
 	'content'                : 'utf_8',
 	'fs'                     : 'utf_8',
-	'merge'                  : sys.getfilesystemencoding(),
+	'merge'                  : 'utf_8',
 	'repo.content'           : 'utf_8',
 	'stdio'                  : 'utf_8',
 }
 
-# This can happen if python is built with USE=build (stage 1).
-if _encodings['merge'] is None:
-	_encodings['merge'] = 'ascii'
-
 if sys.hexversion >= 0x3000000:
 	def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
 		if isinstance(s, str):
@@ -215,7 +230,7 @@ class _unicode_func_wrapper(object):
 		rval = self._func(*wrapped_args, **wrapped_kwargs)
 
 		# Don't use isinstance() since we don't want to convert subclasses
-		# of tuple such as posix.stat_result in python-3.2.
+		# of tuple such as posix.stat_result in Python >=3.2.
 		if rval.__class__ in (list, tuple):
 			decoded_rval = []
 			for x in rval:
@@ -320,6 +335,16 @@ _python_interpreter = os.path.realpath(sys.executable)
 _bin_path = PORTAGE_BIN_PATH
 _pym_path = PORTAGE_PYM_PATH
 
+if sys.hexversion >= 0x3030000:
+	# Workaround for http://bugs.python.org/issue14007
+	def _test_xml_etree_ElementTree_TreeBuilder_type():
+		import subprocess
+		p = subprocess.Popen([_python_interpreter, "-c",
+			"import sys, xml.etree.ElementTree; sys.exit(not isinstance(xml.etree.ElementTree.TreeBuilder, type))"])
+		if p.wait() != 0:
+			sys.modules["_elementtree"] = None
+	_test_xml_etree_ElementTree_TreeBuilder_type()
+
 def _shell_quote(s):
 	"""
 	Quote a string in double-quotes and use backslashes to
@@ -380,9 +405,12 @@ def getcwd():
 		return "/"
 getcwd()
 
-def abssymlink(symlink):
+def abssymlink(symlink, target=None):
 	"This reads symlinks, resolving the relative symlinks, and returning the absolute."
-	mylink=os.readlink(symlink)
+	if target is not None:
+		mylink = target
+	else:
+		mylink = os.readlink(symlink)
 	if mylink[0] != '/':
 		mydir=os.path.dirname(symlink)
 		mylink=mydir+"/"+mylink
@@ -417,29 +445,25 @@ def eapi_is_supported(eapi):
 		return False
 	return eapi <= portage.const.EAPI
 
-# Generally, it's best not to assume that cache entries for unsupported EAPIs
-# can be validated. However, the current package manager specification does not
-# guarantee that the EAPI can be parsed without sourcing the ebuild, so
-# it's too costly to discard existing cache entries for unsupported EAPIs.
-# Therefore, by default, assume that cache entries for unsupported EAPIs can be
-# validated. If FEATURES=parse-eapi-* is enabled, this assumption is discarded
-# since the EAPI can be determined without the incurring the cost of sourcing
-# the ebuild.
-_validate_cache_for_unsupported_eapis = True
-
-_parse_eapi_ebuild_head_re = re.compile(r'^EAPI=[\'"]?([^\'"#]*)')
-_parse_eapi_ebuild_head_max_lines = 30
+# This pattern is specified by PMS section 7.3.1.
+_pms_eapi_re = re.compile(r"^[ \t]*EAPI=(['\"]?)([A-Za-z0-9+_.-]*)\1[ \t]*([ \t]#.*)?$")
+_comment_or_blank_line = re.compile(r"^\s*(#.*)?$")
 
 def _parse_eapi_ebuild_head(f):
-	count = 0
+	eapi = None
+	eapi_lineno = None
+	lineno = 0
 	for line in f:
-		m = _parse_eapi_ebuild_head_re.match(line)
-		if m is not None:
-			return m.group(1).strip()
-		count += 1
-		if count >= _parse_eapi_ebuild_head_max_lines:
+		lineno += 1
+		m = _comment_or_blank_line.match(line)
+		if m is None:
+			eapi_lineno = lineno
+			m = _pms_eapi_re.match(line)
+			if m is not None:
+				eapi = m.group(2)
 			break
-	return '0'
+
+	return (eapi, eapi_lineno)
 
 def _movefile(src, dest, **kwargs):
 	"""Calls movefile and raises a PortageException if an error occurs."""
@@ -461,10 +485,16 @@ def portageexit():
 	if data.secpass > 1 and os.environ.get("SANDBOX_ON") != "1":
 		close_portdbapi_caches()
 
-def create_trees(config_root=None, target_root=None, trees=None):
-	if trees is None:
-		trees = {}
-	else:
+class _trees_dict(dict):
+	__slots__ = ('_running_eroot', '_target_eroot',)
+	def __init__(self, *pargs, **kargs):
+		dict.__init__(self, *pargs, **kargs)
+		self._running_eroot = None
+		self._target_eroot = None
+
+def create_trees(config_root=None, target_root=None, trees=None, env=None,
+	eprefix=None):
+	if trees is not None:
 		# clean up any existing portdbapi instances
 		for myroot in trees:
 			portdb = trees[myroot]["porttree"].dbapi
@@ -472,12 +502,25 @@ def create_trees(config_root=None, target_root=None, trees=None):
 			portdbapi.portdbapi_instances.remove(portdb)
 			del trees[myroot]["porttree"], myroot, portdb
 
+	if trees is None:
+		trees = _trees_dict()
+	elif not isinstance(trees, _trees_dict):
+		# caller passed a normal dict or something,
+		# but we need a _trees_dict instance
+		trees = _trees_dict(trees)
+
+	if env is None:
+		env = os.environ
+
 	settings = config(config_root=config_root, target_root=target_root,
-		config_incrementals=portage.const.INCREMENTALS)
+		env=env, eprefix=eprefix)
 	settings.lock()
 
-	myroots = [(settings["ROOT"], settings)]
-	if settings["ROOT"] != "/":
+	trees._target_eroot = settings['EROOT']
+	myroots = [(settings['EROOT'], settings)]
+	if settings["ROOT"] == "/":
+		trees._running_eroot = trees._target_eroot
+	else:
 
 		# When ROOT != "/" we only want overrides from the calling
 		# environment to apply to the config that's associated
@@ -485,24 +528,27 @@ def create_trees(config_root=None, target_root=None, trees=None):
 		clean_env = {}
 		for k in ('PATH', 'PORTAGE_GRPNAME', 'PORTAGE_USERNAME',
 			'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM',
-			'ftp_proxy', 'http_proxy', 'no_proxy'):
+			'ftp_proxy', 'http_proxy', 'no_proxy',
+			'__PORTAGE_TEST_HARDLINK_LOCKS'):
 			v = settings.get(k)
 			if v is not None:
 				clean_env[k] = v
-		settings = config(config_root=None, target_root="/", env=clean_env)
+		settings = config(config_root=None, target_root="/",
+			env=clean_env, eprefix=eprefix)
 		settings.lock()
-		myroots.append((settings["ROOT"], settings))
+		trees._running_eroot = settings['EROOT']
+		myroots.append((settings['EROOT'], settings))
 
 	for myroot, mysettings in myroots:
 		trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
 		trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals)
 		trees[myroot].addLazySingleton(
-			"vartree", vartree, myroot, categories=mysettings.categories,
+			"vartree", vartree, categories=mysettings.categories,
 				settings=mysettings)
 		trees[myroot].addLazySingleton("porttree",
-			portagetree, myroot, settings=mysettings)
+			portagetree, settings=mysettings)
 		trees[myroot].addLazySingleton("bintree",
-			binarytree, myroot, mysettings["PKGDIR"], settings=mysettings)
+			binarytree, pkgdir=mysettings["PKGDIR"], settings=mysettings)
 	return trees
 
 if VERSION == 'HEAD':

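The notable piece of this hunk is the PMS-conformant EAPI parser: instead of scanning the first 30 lines for anything resembling an assignment, it inspects only the first non-blank, non-comment line, per PMS section 7.3.1, and reports the line number so callers can diagnose misplaced assignments. A self-contained demonstration using the same two regular expressions (the wrapper name here is illustrative):

    import io
    import re

    _pms_eapi_re = re.compile(
        r"^[ \t]*EAPI=(['\"]?)([A-Za-z0-9+_.-]*)\1[ \t]*([ \t]#.*)?$")
    _comment_or_blank_line = re.compile(r"^\s*(#.*)?$")

    def parse_eapi_head(f):
        # Skip comments and blank lines; the first real line either is
        # the EAPI assignment or no conforming assignment exists.
        for lineno, line in enumerate(f, 1):
            if _comment_or_blank_line.match(line) is None:
                m = _pms_eapi_re.match(line)
                return (m.group(2) if m is not None else None), lineno
        return None, None

    print(parse_eapi_head(io.StringIO('# header\nEAPI="4" # ok\n')))  # ('4', 2)
    print(parse_eapi_head(io.StringIO('inherit foo\nEAPI=4\n')))      # (None, 1)
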
diff --git a/portage_with_autodep/pym/portage/__init__.pyo b/portage_with_autodep/pym/portage/__init__.pyo
new file mode 100644
index 0000000..9fc449e
Binary files /dev/null and b/portage_with_autodep/pym/portage/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/_emirrordist/Config.py b/portage_with_autodep/pym/portage/_emirrordist/Config.py
new file mode 100644
index 0000000..db4bfeb
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_emirrordist/Config.py
@@ -0,0 +1,132 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+import io
+import logging
+import shelve
+import sys
+import time
+
+import portage
+from portage import os
+from portage.util import grabdict, grablines
+from portage.util._ShelveUnicodeWrapper import ShelveUnicodeWrapper
+
+class Config(object):
+	def __init__(self, options, portdb, event_loop):
+		self.options = options
+		self.portdb = portdb
+		self.event_loop = event_loop
+		self.added_byte_count = 0
+		self.added_file_count = 0
+		self.scheduled_deletion_count = 0
+		self.delete_count = 0
+		self.file_owners = {}
+		self.file_failures = {}
+		self.start_time = time.time()
+		self._open_files = []
+
+		self.log_success = self._open_log('success', options.success_log, 'a')
+		self.log_failure = self._open_log('failure', options.failure_log, 'a')
+
+		self.distfiles = None
+		if options.distfiles is not None:
+			self.distfiles = options.distfiles
+
+		self.mirrors = copy.copy(portdb.settings.thirdpartymirrors())
+
+		if options.mirror_overrides is not None:
+			self.mirrors.update(grabdict(options.mirror_overrides))
+
+		if options.mirror_skip is not None:
+			for x in options.mirror_skip.split(","):
+				self.mirrors[x] = []
+
+		self.whitelist = None
+		if options.whitelist_from is not None:
+			self.whitelist = set()
+			for filename in options.whitelist_from:
+				for line in grablines(filename):
+					line = line.strip()
+					if line and not line.startswith("#"):
+						self.whitelist.add(line)
+
+		self.restrict_mirror_exemptions = None
+		if options.restrict_mirror_exemptions is not None:
+			self.restrict_mirror_exemptions = frozenset(
+				options.restrict_mirror_exemptions.split(","))
+
+		self.recycle_db = None
+		if options.recycle_db is not None:
+			self.recycle_db = self._open_shelve(
+				options.recycle_db, 'recycle')
+
+		self.distfiles_db = None
+		if options.distfiles_db is not None:
+			self.distfiles_db = self._open_shelve(
+				options.distfiles_db, 'distfiles')
+
+		self.deletion_db = None
+		if options.deletion_db is not None:
+			self.deletion_db = self._open_shelve(
+				options.deletion_db, 'deletion')
+
+	def _open_log(self, log_desc, log_path, mode):
+
+		if log_path is None or self.options.dry_run:
+			log_func = logging.info
+			line_format = "%s: %%s" % log_desc
+			add_newline = False
+			if log_path is not None:
+				logging.warn(("dry-run: %s log "
+					"redirected to logging.info") % log_desc)
+		else:
+			self._open_files.append(io.open(log_path, mode=mode,
+				encoding='utf_8'))
+			line_format = "%s\n"
+			log_func = self._open_files[-1].write
+
+		return self._LogFormatter(line_format, log_func)
+
+	class _LogFormatter(object):
+
+		__slots__ = ('_line_format', '_log_func')
+
+		def __init__(self, line_format, log_func):
+			self._line_format = line_format
+			self._log_func = log_func
+
+		def __call__(self, msg):
+			self._log_func(self._line_format % (msg,))
+
+	def _open_shelve(self, db_file, db_desc):
+		if self.options.dry_run:
+			open_flag = "r"
+		else:
+			open_flag = "c"
+
+		if self.options.dry_run and not os.path.exists(db_file):
+			db = {}
+		else:
+			db = shelve.open(db_file, flag=open_flag)
+			if sys.hexversion < 0x3000000:
+				db = ShelveUnicodeWrapper(db)
+
+		if self.options.dry_run:
+			logging.warn("dry-run: %s db opened in readonly mode" % db_desc)
+			if not isinstance(db, dict):
+				volatile_db = dict((k, db[k]) for k in db)
+				db.close()
+				db = volatile_db
+		else:
+			self._open_files.append(db)
+
+		return db
+
+	def __enter__(self):
+		return self
+
+	def __exit__(self, exc_type, exc_value, traceback):
+		while self._open_files:
+			self._open_files.pop().close()

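Config doubles as a context manager: every log file and shelve database it opens is appended to _open_files, and __exit__ closes them in LIFO order. The same idiom in isolation, as a minimal sketch (class name hypothetical):

    class ResourceBag(object):
        # Collect opened resources; close them all, most recent first,
        # when the with-block exits, normally or via an exception.
        def __init__(self):
            self._open_files = []

        def open(self, path, mode="a"):
            f = open(path, mode)
            self._open_files.append(f)
            return f

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            while self._open_files:
                self._open_files.pop().close()

    # usage: files are guaranteed closed even if the body raises
    # with ResourceBag() as bag:
    #     log = bag.open("/tmp/success.log")
    #     log.write("ok\n")
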
diff --git a/portage_with_autodep/pym/portage/_emirrordist/DeletionIterator.py b/portage_with_autodep/pym/portage/_emirrordist/DeletionIterator.py
new file mode 100644
index 0000000..dff52c0
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_emirrordist/DeletionIterator.py
@@ -0,0 +1,83 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import stat
+
+from portage import os
+from .DeletionTask import DeletionTask
+
+class DeletionIterator(object):
+
+	def __init__(self, config):
+		self._config = config
+
+	def __iter__(self):
+		distdir = self._config.options.distfiles
+		file_owners = self._config.file_owners
+		whitelist = self._config.whitelist
+		distfiles_local = self._config.options.distfiles_local
+		deletion_db = self._config.deletion_db
+		deletion_delay = self._config.options.deletion_delay
+		start_time = self._config.start_time
+		distfiles_set = set(os.listdir(self._config.options.distfiles))
+		for filename in distfiles_set:
+			try:
+				st = os.stat(os.path.join(distdir, filename))
+			except OSError as e:
+				logging.error("stat failed on '%s' in distfiles: %s\n" %
+					(filename, e))
+				continue
+			if not stat.S_ISREG(st.st_mode):
+				continue
+			elif filename in file_owners:
+				if deletion_db is not None:
+					try:
+						del deletion_db[filename]
+					except KeyError:
+						pass
+			elif whitelist is not None and filename in whitelist:
+				if deletion_db is not None:
+					try:
+						del deletion_db[filename]
+					except KeyError:
+						pass
+			elif distfiles_local is not None and \
+				os.path.exists(os.path.join(distfiles_local, filename)):
+				if deletion_db is not None:
+					try:
+						del deletion_db[filename]
+					except KeyError:
+						pass
+			else:
+				self._config.scheduled_deletion_count += 1
+
+				if deletion_db is None or deletion_delay is None:
+
+					yield DeletionTask(background=True,
+						distfile=filename,
+						config=self._config)
+
+				else:
+					deletion_entry = deletion_db.get(filename)
+
+					if deletion_entry is None:
+						logging.debug("add '%s' to deletion db" % filename)
+						deletion_db[filename] = start_time
+
+					elif deletion_entry + deletion_delay <= start_time:
+
+						yield DeletionTask(background=True,
+							distfile=filename,
+							config=self._config)
+
+		if deletion_db is not None:
+			for filename in list(deletion_db):
+				if filename not in distfiles_set:
+					try:
+						del deletion_db[filename]
+					except KeyError:
+						pass
+					else:
+						logging.debug("drop '%s' from deletion db" %
+							filename)

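DeletionIterator implements delayed deletion: an unreferenced distfile is first timestamped in the deletion db, and only becomes a DeletionTask once deletion_delay seconds have passed since that first sighting. The timestamp check, reduced to a standalone sketch (function name hypothetical):

    import time

    def ready_for_deletion(deletion_db, filename, deletion_delay, now=None):
        # First sighting: record it and defer; later sightings delete
        # only once the grace period has fully elapsed.
        now = time.time() if now is None else now
        first_seen = deletion_db.get(filename)
        if first_seen is None:
            deletion_db[filename] = now
            return False
        return first_seen + deletion_delay <= now

    db = {}
    print(ready_for_deletion(db, "foo.tar.gz", 86400, now=1000))          # False
    print(ready_for_deletion(db, "foo.tar.gz", 86400, now=1000 + 86400))  # True
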
diff --git a/portage_with_autodep/pym/portage/_emirrordist/DeletionTask.py b/portage_with_autodep/pym/portage/_emirrordist/DeletionTask.py
new file mode 100644
index 0000000..7d10957
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_emirrordist/DeletionTask.py
@@ -0,0 +1,129 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+
+from portage import os
+from portage.util._async.FileCopier import FileCopier
+from _emerge.CompositeTask import CompositeTask
+
+class DeletionTask(CompositeTask):
+
+	__slots__ = ('distfile', 'config')
+
+	def _start(self):
+
+		distfile_path = os.path.join(
+			self.config.options.distfiles, self.distfile)
+
+		if self.config.options.recycle_dir is not None:
+			distfile_path = os.path.join(self.config.options.distfiles, self.distfile)
+			recycle_path = os.path.join(
+				self.config.options.recycle_dir, self.distfile)
+			if self.config.options.dry_run:
+				logging.info(("dry-run: move '%s' from "
+					"distfiles to recycle") % self.distfile)
+			else:
+				logging.debug(("move '%s' from "
+					"distfiles to recycle") % self.distfile)
+				try:
+					os.rename(distfile_path, recycle_path)
+				except OSError as e:
+					if e.errno != errno.EXDEV:
+						logging.error(("rename %s from distfiles to "
+							"recycle failed: %s") % (self.distfile, e))
+				else:
+					self.returncode = os.EX_OK
+					self._async_wait()
+					return
+
+				self._start_task(
+					FileCopier(src_path=distfile_path,
+						dest_path=recycle_path,
+						background=False),
+					self._recycle_copier_exit)
+				return
+
+		success = True
+
+		if self.config.options.dry_run:
+			logging.info(("dry-run: delete '%s' from "
+				"distfiles") % self.distfile)
+		else:
+			logging.debug(("delete '%s' from "
+				"distfiles") % self.distfile)
+			try:
+				os.unlink(distfile_path)
+			except OSError as e:
+				if e.errno not in (errno.ENOENT, errno.ESTALE):
+					logging.error("%s unlink failed in distfiles: %s" %
+						(self.distfile, e))
+					success = False
+
+		if success:
+			self._success()
+			self.returncode = os.EX_OK
+		else:
+			self.returncode = 1
+
+		self._async_wait()
+
+	def _recycle_copier_exit(self, copier):
+
+		self._assert_current(copier)
+		if self._was_cancelled():
+			self.wait()
+			return
+
+		success = True
+		if copier.returncode == os.EX_OK:
+
+			try:
+				os.unlink(copier.src_path)
+			except OSError as e:
+				if e.errno not in (errno.ENOENT, errno.ESTALE):
+					logging.error("%s unlink failed in distfiles: %s" %
+						(self.distfile, e))
+					success = False
+
+		else:
+			logging.error(("%s copy from distfiles to recycle "
+				"failed with returncode %s") % (self.distfile, copier.returncode))
+			success = False
+
+		if success:
+			self._success()
+			self.returncode = os.EX_OK
+		else:
+			self.returncode = 1
+
+		self._current_task = None
+		self.wait()
+
+	def _success(self):
+
+		cpv = "unknown"
+		if self.config.distfiles_db is not None:
+			cpv = self.config.distfiles_db.get(self.distfile, cpv)
+
+		self.config.delete_count += 1
+		self.config.log_success("%s\t%s\tremoved" % (cpv, self.distfile))
+
+		if self.config.distfiles_db is not None:
+			try:
+				del self.config.distfiles_db[self.distfile]
+			except KeyError:
+				pass
+			else:
+				logging.debug(("drop '%s' from "
+					"distfiles db") % self.distfile)
+
+		if self.config.deletion_db is not None:
+			try:
+				del self.config.deletion_db[self.distfile]
+			except KeyError:
+				pass
+			else:
+				logging.debug(("drop '%s' from "
+					"deletion db") % self.distfile)

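When a recycle_dir is configured, DeletionTask prefers a cheap os.rename() into it and falls back to FileCopier only when the rename fails with EXDEV, meaning the recycle dir lives on another filesystem. The same pattern in a condensed synchronous sketch (the asynchronous task plumbing is omitted):

    import errno
    import os
    import shutil

    def move_to_recycle(src, dst):
        # Fast path: rename is atomic and free on the same filesystem.
        # EXDEV means cross-device, so copy then unlink; any other
        # OSError propagates to the caller.
        try:
            os.rename(src, dst)
        except OSError as e:
            if e.errno != errno.EXDEV:
                raise
            shutil.copy2(src, dst)
            os.unlink(src)
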
diff --git a/portage_with_autodep/pym/portage/_emirrordist/FetchIterator.py b/portage_with_autodep/pym/portage/_emirrordist/FetchIterator.py
new file mode 100644
index 0000000..16a0b04
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_emirrordist/FetchIterator.py
@@ -0,0 +1,147 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.checksum import (_apply_hash_filter,
+	_filter_unaccelarated_hashes, _hash_filter)
+from portage.dep import use_reduce
+from portage.exception import PortageException
+from .FetchTask import FetchTask
+
+class FetchIterator(object):
+
+	def __init__(self, config):
+		self._config = config
+		self._log_failure = config.log_failure
+
+	def _iter_every_cp(self):
+		# List categories individually, in order to start yielding quicker,
+		# and in order to reduce latency in case of a signal interrupt.
+		cp_all = self._config.portdb.cp_all
+		for category in sorted(self._config.portdb.categories):
+			for cp in cp_all(categories=(category,)):
+				yield cp
+
+	def __iter__(self):
+
+		portdb = self._config.portdb
+		get_repo_for_location = portdb.repositories.get_repo_for_location
+		file_owners = self._config.file_owners
+		file_failures = self._config.file_failures
+		restrict_mirror_exemptions = self._config.restrict_mirror_exemptions
+
+		hash_filter = _hash_filter(
+			portdb.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+		if hash_filter.transparent:
+			hash_filter = None
+
+		for cp in self._iter_every_cp():
+
+			for tree in portdb.porttrees:
+
+				# Reset state so the Manifest is pulled once
+				# for this cp / tree combination.
+				digests = None
+				repo_config = get_repo_for_location(tree)
+
+				for cpv in portdb.cp_list(cp, mytree=tree):
+
+					try:
+						restrict, = portdb.aux_get(cpv, ("RESTRICT",),
+							mytree=tree)
+					except (KeyError, PortageException) as e:
+						self._log_failure("%s\t\taux_get exception %s" %
+							(cpv, e))
+						continue
+
+					# Here we use matchnone=True to ignore conditional parts
+					# of RESTRICT since they don't apply unconditionally.
+					# Assume such conditionals only apply on the client side.
+					try:
+						restrict = frozenset(use_reduce(restrict,
+							flat=True, matchnone=True))
+					except PortageException as e:
+						self._log_failure("%s\t\tuse_reduce exception %s" %
+							(cpv, e))
+						continue
+
+					if "fetch" in restrict:
+						continue
+
+					try:
+						uri_map = portdb.getFetchMap(cpv)
+					except PortageException as e:
+						self._log_failure("%s\t\tgetFetchMap exception %s" %
+							(cpv, e))
+						continue
+
+					if not uri_map:
+						continue
+
+					if "mirror" in restrict:
+						skip = False
+						if restrict_mirror_exemptions is not None:
+							new_uri_map = {}
+							for filename, uri_tuple in uri_map.items():
+								for uri in uri_tuple:
+									if uri[:9] == "mirror://":
+										i = uri.find("/", 9)
+										if i != -1 and uri[9:i].strip("/") in \
+											restrict_mirror_exemptions:
+											new_uri_map[filename] = uri_tuple
+											break
+							if new_uri_map:
+								uri_map = new_uri_map
+							else:
+								skip = True
+						else:
+							skip = True
+
+						if skip:
+							continue
+
+					# Parse Manifest for this cp if we haven't yet.
+					if digests is None:
+						try:
+							digests = repo_config.load_manifest(
+								os.path.join(repo_config.location, cp)
+								).getTypeDigests("DIST")
+						except (EnvironmentError, PortageException) as e:
+							for filename in uri_map:
+								self._log_failure(
+									"%s\t%s\tManifest exception %s" %
+									(cpv, filename, e))
+								file_failures[filename] = cpv
+							continue
+
+					if not digests:
+						for filename in uri_map:
+							self._log_failure("%s\t%s\tdigest entry missing" %
+								(cpv, filename))
+							file_failures[filename] = cpv
+						continue
+
+					for filename, uri_tuple in uri_map.items():
+						file_digests = digests.get(filename)
+						if file_digests is None:
+							self._log_failure("%s\t%s\tdigest entry missing" %
+								(cpv, filename))
+							file_failures[filename] = cpv
+							continue
+						if filename in file_owners:
+							continue
+						file_owners[filename] = cpv
+
+						file_digests = \
+							_filter_unaccelarated_hashes(file_digests)
+						if hash_filter is not None:
+							file_digests = _apply_hash_filter(
+								file_digests, hash_filter)
+
+						yield FetchTask(cpv=cpv,
+							background=True,
+							digests=file_digests,
+							distfile=filename,
+							restrict=restrict,
+							uri_tuple=uri_tuple,
+							config=self._config)

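The RESTRICT="mirror" exemption check above keys on the mirror group name embedded in a mirror:// URI, i.e. the component between "mirror://" and the next slash. Isolated as a small helper (name hypothetical):

    def mirror_name(uri):
        # "mirror://gnu/hello/hello-2.8.tar.gz" -> "gnu"
        if uri[:9] == "mirror://":
            i = uri.find("/", 9)
            if i != -1:
                return uri[9:i].strip("/")
        return None

    print(mirror_name("mirror://gnu/hello/hello-2.8.tar.gz"))   # gnu
    print(mirror_name("http://example.org/hello-2.8.tar.gz"))   # None
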
diff --git a/portage_with_autodep/pym/portage/_emirrordist/FetchTask.py b/portage_with_autodep/pym/portage/_emirrordist/FetchTask.py
new file mode 100644
index 0000000..66c41c1
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_emirrordist/FetchTask.py
@@ -0,0 +1,629 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import collections
+import errno
+import logging
+import random
+import stat
+import subprocess
+import sys
+
+import portage
+from portage import _encodings, _unicode_encode
+from portage import os
+from portage.util._async.FileCopier import FileCopier
+from portage.util._async.FileDigester import FileDigester
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from _emerge.CompositeTask import CompositeTask
+
+default_hash_name = portage.const.MANIFEST2_REQUIRED_HASH
+
+# Use --no-check-certificate since Manifest digests should provide
+# enough security, and certificates can be self-signed or whatnot.
+default_fetchcommand = "wget -c -v -t 1 --passive-ftp --no-check-certificate --timeout=60 -O \"${DISTDIR}/${FILE}\" \"${URI}\""
+
+class FetchTask(CompositeTask):
+
+	__slots__ = ('distfile', 'digests', 'config', 'cpv',
+		'restrict', 'uri_tuple', '_current_mirror',
+		'_current_stat', '_fetch_tmp_dir_info', '_fetch_tmp_file',
+		'_fs_mirror_stack', '_mirror_stack',
+		'_previously_added',
+		'_primaryuri_stack', '_log_path', '_tried_uris')
+
+	def _start(self):
+
+		if self.config.options.fetch_log_dir is not None and \
+			not self.config.options.dry_run:
+			self._log_path = os.path.join(
+				self.config.options.fetch_log_dir,
+				self.distfile + '.log')
+
+		self._previously_added = True
+		if self.config.distfiles_db is not None and \
+			self.distfile not in self.config.distfiles_db:
+			self._previously_added = False
+			self.config.distfiles_db[self.distfile] = self.cpv
+
+		if not self._have_needed_digests():
+			msg = "incomplete digests: %s" % " ".join(self.digests)
+			self.scheduler.output(msg, background=self.background,
+				log_path=self._log_path)
+			self.config.log_failure("%s\t%s\t%s" %
+				(self.cpv, self.distfile, msg))
+			self.config.file_failures[self.distfile] = self.cpv
+			self.returncode = os.EX_OK
+			self._async_wait()
+			return
+
+		distfile_path = os.path.join(
+			self.config.options.distfiles, self.distfile)
+
+		st = None
+		size_ok = False
+		try:
+			st = os.stat(distfile_path)
+		except OSError as e:
+			if e.errno not in (errno.ENOENT, errno.ESTALE):
+				msg = "%s stat failed in %s: %s" % \
+					(self.distfile, "distfiles", e)
+				self.scheduler.output(msg + '\n', background=True,
+					log_path=self._log_path)
+				logging.error(msg)
+		else:
+			size_ok = st.st_size == self.digests["size"]
+
+		if not size_ok:
+			if self.config.options.dry_run:
+				if st is not None:
+					logging.info(("dry-run: delete '%s' with "
+						"wrong size from distfiles") % (self.distfile,))
+			else:
+				# Do the unlink in order to ensure that the path is clear,
+				# even if stat raised ENOENT, since a broken symlink can
+				# trigger ENOENT.
+				if self._unlink_file(distfile_path, "distfiles"):
+					if st is not None:
+						logging.debug(("delete '%s' with "
+							"wrong size from distfiles") % (self.distfile,))
+				else:
+					self.config.log_failure("%s\t%s\t%s" %
+						(self.cpv, self.distfile, "unlink failed in distfiles"))
+					self.returncode = os.EX_OK
+					self._async_wait()
+					return
+
+		if size_ok:
+			if self.config.options.verify_existing_digest:
+				self._start_task(
+					FileDigester(file_path=distfile_path,
+						hash_names=(self._select_hash(),),
+						background=self.background,
+						logfile=self._log_path), self._distfiles_digester_exit)
+				return
+
+			self._success()
+			self.returncode = os.EX_OK
+			self._async_wait()
+			return
+
+		self._start_fetch()
+
+	def _success(self):
+		if not self._previously_added:
+			size = self.digests["size"]
+			self.config.added_byte_count += size
+			self.config.added_file_count += 1
+			self.config.log_success("%s\t%s\tadded %i bytes" %
+				(self.cpv, self.distfile, size))
+
+		if self._log_path is not None:
+			if not self.config.options.dry_run:
+				try:
+					os.unlink(self._log_path)
+				except OSError:
+					pass
+
+		if self.config.options.recycle_dir is not None:
+
+			recycle_file = os.path.join(
+				self.config.options.recycle_dir, self.distfile)
+
+			if self.config.options.dry_run:
+				if os.path.exists(recycle_file):
+					logging.info("dry-run: delete '%s' from recycle" %
+						(self.distfile,))
+			else:
+				try:
+					os.unlink(recycle_file)
+				except OSError:
+					pass
+				else:
+					logging.debug("delete '%s' from recycle" %
+						(self.distfile,))
+
+	def _distfiles_digester_exit(self, digester):
+
+		self._assert_current(digester)
+		if self._was_cancelled():
+			self.wait()
+			return
+
+		if self._default_exit(digester) != os.EX_OK:
+			# IOError reading file in our main distfiles directory? This
+			# is a bad situation which normally does not occur, so
+			# skip this file and report it, in order to bring it to
+			# the administrator's attention.
+			msg = "%s distfiles digester failed unexpectedly" % \
+				(self.distfile,)
+			self.scheduler.output(msg + '\n', background=True,
+				log_path=self._log_path)
+			logging.error(msg)
+			self.config.log_failure("%s\t%s\t%s" %
+				(self.cpv, self.distfile, msg))
+			self.config.file_failures[self.distfile] = self.cpv
+			self.wait()
+			return
+
+		wrong_digest = self._find_bad_digest(digester.digests)
+		if wrong_digest is None:
+			self._success()
+			self.returncode = os.EX_OK
+			self.wait()
+			return
+
+		self._start_fetch()
+
+	_mirror_info = collections.namedtuple('_mirror_info',
+		'name location')
+
+	def _start_fetch(self):
+
+		self._previously_added = False
+		self._fs_mirror_stack = []
+		if self.config.options.distfiles_local is not None:
+			self._fs_mirror_stack.append(self._mirror_info(
+				'distfiles-local', self.config.options.distfiles_local))
+		if self.config.options.recycle_dir is not None:
+			self._fs_mirror_stack.append(self._mirror_info(
+				'recycle', self.config.options.recycle_dir))
+
+		self._primaryuri_stack = []
+		self._mirror_stack = []
+		for uri in reversed(self.uri_tuple):
+			if uri.startswith('mirror://'):
+				self._mirror_stack.append(
+					self._mirror_iterator(uri, self.config.mirrors))
+			else:
+				self._primaryuri_stack.append(uri)
+
+		self._tried_uris = set()
+		self._try_next_mirror()
+
+	@staticmethod
+	def _mirror_iterator(uri, mirrors_dict):
+
+		slash_index = uri.find("/", 9)
+		if slash_index != -1:
+			mirror_name = uri[9:slash_index].strip("/")
+			mirrors = mirrors_dict.get(mirror_name)
+			if not mirrors:
+				return
+			mirrors = list(mirrors)
+			while mirrors:
+				mirror = mirrors.pop(random.randint(0, len(mirrors) - 1))
+				yield mirror.rstrip("/") + "/" + uri[slash_index+1:]
+
+	def _try_next_mirror(self):
+		if self._fs_mirror_stack:
+			self._fetch_fs(self._fs_mirror_stack.pop())
+			return
+		else:
+			uri = self._next_uri()
+			if uri is not None:
+				self._tried_uris.add(uri)
+				self._fetch_uri(uri)
+				return
+
+		if self._tried_uris:
+			msg = "all uris failed"
+		else:
+			msg = "no fetchable uris"
+
+		self.config.log_failure("%s\t%s\t%s" %
+			(self.cpv, self.distfile, msg))
+		self.config.file_failures[self.distfile] = self.cpv
+		self.returncode = os.EX_OK
+		self.wait()
+
+	def _next_uri(self):
+		remaining_tries = self.config.options.tries - len(self._tried_uris)
+		if remaining_tries > 0:
+
+			if remaining_tries <= self.config.options.tries / 2:
+				while self._primaryuri_stack:
+					uri = self._primaryuri_stack.pop()
+					if uri not in self._tried_uris:
+						return uri
+
+			while self._mirror_stack:
+				uri = next(self._mirror_stack[-1], None)
+				if uri is None:
+					self._mirror_stack.pop()
+				else:
+					if uri not in self._tried_uris:
+						return uri
+
+			while self._primaryuri_stack:
+				uri = self._primaryuri_stack.pop()
+				if uri not in self._tried_uris:
+					return uri
+
+		return None
+
+	def _fetch_fs(self, mirror_info):
+		file_path = os.path.join(mirror_info.location, self.distfile)
+
+		st = None
+		size_ok = False
+		try:
+			st = os.stat(file_path)
+		except OSError as e:
+			if e.errno not in (errno.ENOENT, errno.ESTALE):
+				msg = "%s stat failed in %s: %s" % \
+					(self.distfile, mirror_info.name, e)
+				self.scheduler.output(msg + '\n', background=True,
+					log_path=self._log_path)
+				logging.error(msg)
+		else:
+			size_ok = st.st_size == self.digests["size"]
+			self._current_stat = st
+
+		if size_ok:
+			self._current_mirror = mirror_info
+			self._start_task(
+				FileDigester(file_path=file_path,
+					hash_names=(self._select_hash(),),
+					background=self.background,
+					logfile=self._log_path),
+				self._fs_mirror_digester_exit)
+		else:
+			self._try_next_mirror()
+
+	def _fs_mirror_digester_exit(self, digester):
+
+		self._assert_current(digester)
+		if self._was_cancelled():
+			self.wait()
+			return
+
+		current_mirror = self._current_mirror
+		if digester.returncode != os.EX_OK:
+			msg = "%s %s digester failed unexpectedly" % \
+				(self.distfile, current_mirror.name)
+			self.scheduler.output(msg + '\n', background=True,
+				log_path=self._log_path)
+			logging.error(msg)
+		else:
+			bad_digest = self._find_bad_digest(digester.digests)
+			if bad_digest is not None:
+				msg = "%s %s has bad %s digest: expected %s, got %s" % \
+					(self.distfile, current_mirror.name, bad_digest,
+					self.digests[bad_digest], digester.digests[bad_digest])
+				self.scheduler.output(msg + '\n', background=True,
+					log_path=self._log_path)
+				logging.error(msg)
+			elif self.config.options.dry_run:
+				# Report success without actually touching any files
+				if self._same_device(current_mirror.location,
+					self.config.options.distfiles):
+					logging.info(("dry-run: hardlink '%s' from %s "
+						"to distfiles") % (self.distfile, current_mirror.name))
+				else:
+					logging.info("dry-run: copy '%s' from %s to distfiles" %
+						(self.distfile, current_mirror.name))
+				self._success()
+				self.returncode = os.EX_OK
+				self.wait()
+				return
+			else:
+				src = os.path.join(current_mirror.location, self.distfile)
+				dest = os.path.join(self.config.options.distfiles, self.distfile)
+				if self._hardlink_atomic(src, dest,
+					"%s to %s" % (current_mirror.name, "distfiles")):
+					logging.debug("hardlink '%s' from %s to distfiles" %
+						(self.distfile, current_mirror.name))
+					self._success()
+					self.returncode = os.EX_OK
+					self.wait()
+					return
+				else:
+					self._start_task(
+						FileCopier(src_path=src, dest_path=dest,
+							background=(self.background and
+								self._log_path is not None),
+							logfile=self._log_path),
+						self._fs_mirror_copier_exit)
+					return
+
+		self._try_next_mirror()
+
+	def _fs_mirror_copier_exit(self, copier):
+
+		self._assert_current(copier)
+		if self._was_cancelled():
+			self.wait()
+			return
+
+		current_mirror = self._current_mirror
+		if copier.returncode != os.EX_OK:
+			msg = "%s %s copy failed unexpectedly" % \
+				(self.distfile, current_mirror.name)
+			self.scheduler.output(msg + '\n', background=True,
+				log_path=self._log_path)
+			logging.error(msg)
+		else:
+
+			logging.debug("copy '%s' from %s to distfiles" %
+				(self.distfile, current_mirror.name))
+
+			# Apply the timestamp from the source file, but
+			# just rely on umask for permissions.
+			try:
+				if sys.hexversion >= 0x3030000:
+					os.utime(copier.dest_path,
+						ns=(self._current_stat.st_mtime_ns,
+						self._current_stat.st_mtime_ns))
+				else:
+					os.utime(copier.dest_path,
+						(self._current_stat[stat.ST_MTIME],
+						self._current_stat[stat.ST_MTIME]))
+			except OSError as e:
+				msg = "%s %s utime failed unexpectedly: %s" % \
+					(self.distfile, current_mirror.name, e)
+				self.scheduler.output(msg + '\n', background=True,
+					log_path=self._log_path)
+				logging.error(msg)
+
+			self._success()
+			self.returncode = os.EX_OK
+			self.wait()
+			return
+
+		self._try_next_mirror()
+
+	def _fetch_uri(self, uri):
+
+		if self.config.options.dry_run:
+			# Simply report success.
+			logging.info("dry-run: fetch '%s' from '%s'" %
+				(self.distfile, uri))
+			self._success()
+			self.returncode = os.EX_OK
+			self.wait()
+			return
+
+		if self.config.options.temp_dir:
+			self._fetch_tmp_dir_info = 'temp-dir'
+			distdir = self.config.options.temp_dir
+		else:
+			self._fetch_tmp_dir_info = 'distfiles'
+			distdir = self.config.options.distfiles
+
+		tmp_basename = self.distfile + '._emirrordist_fetch_.%s' % os.getpid()
+
+		variables = {
+			"DISTDIR": distdir,
+			"URI":     uri,
+			"FILE":    tmp_basename
+		}
+
+		self._fetch_tmp_file = os.path.join(distdir, tmp_basename)
+
+		try:
+			os.unlink(self._fetch_tmp_file)
+		except OSError:
+			pass
+
+		args = portage.util.shlex_split(default_fetchcommand)
+		args = [portage.util.varexpand(x, mydict=variables)
+			for x in args]
+
+		if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+			not os.path.isabs(args[0]):
+			# Python 3.1 _execvp throws TypeError for non-absolute executable
+			# path passed as bytes (see http://bugs.python.org/issue8513).
+			fullname = portage.process.find_binary(args[0])
+			if fullname is None:
+				raise portage.exception.CommandNotFound(args[0])
+			args[0] = fullname
+
+		args = [_unicode_encode(x,
+			encoding=_encodings['fs'], errors='strict') for x in args]
+
+		null_fd = os.open(os.devnull, os.O_RDONLY)
+		fetcher = PopenProcess(background=self.background,
+			proc=subprocess.Popen(args, stdin=null_fd,
+			stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+			scheduler=self.scheduler)
+		os.close(null_fd)
+
+		fetcher.pipe_reader = PipeLogger(background=self.background,
+			input_fd=fetcher.proc.stdout, log_file_path=self._log_path,
+			scheduler=self.scheduler)
+
+		self._start_task(fetcher, self._fetcher_exit)
+
+	def _fetcher_exit(self, fetcher):
+
+		self._assert_current(fetcher)
+		if self._was_cancelled():
+			self.wait()
+			return
+
+		if os.path.exists(self._fetch_tmp_file):
+			self._start_task(
+				FileDigester(file_path=self._fetch_tmp_file,
+					hash_names=(self._select_hash(),),
+					background=self.background,
+					logfile=self._log_path),
+				self._fetch_digester_exit)
+		else:
+			self._try_next_mirror()
+
+	def _fetch_digester_exit(self, digester):
+
+		self._assert_current(digester)
+		if self._was_cancelled():
+			self.wait()
+			return
+
+		if digester.returncode != os.EX_OK:
+			msg = "%s %s digester failed unexpectedly" % \
+				(self.distfile, self._fetch_tmp_dir_info)
+			self.scheduler.output(msg + '\n', background=True,
+				log_path=self._log_path)
+			logging.error(msg)
+		else:
+			bad_digest = self._find_bad_digest(digester.digests)
+			if bad_digest is not None:
+				msg = "%s has bad %s digest: expected %s, got %s" % \
+					(self.distfile, bad_digest,
+					self.digests[bad_digest], digester.digests[bad_digest])
+				self.scheduler.output(msg + '\n', background=True,
+					log_path=self._log_path)
+				try:
+					os.unlink(self._fetch_tmp_file)
+				except OSError:
+					pass
+			else:
+				dest = os.path.join(self.config.options.distfiles, self.distfile)
+				try:
+					os.rename(self._fetch_tmp_file, dest)
+				except OSError:
+					self._start_task(
+						FileCopier(src_path=self._fetch_tmp_file,
+							dest_path=dest,
+							background=(self.background and
+								self._log_path is not None),
+							logfile=self._log_path),
+						self._fetch_copier_exit)
+					return
+				else:
+					self._success()
+					self.returncode = os.EX_OK
+					self.wait()
+					return
+
+		self._try_next_mirror()
+
+	def _fetch_copier_exit(self, copier):
+
+		self._assert_current(copier)
+
+		try:
+			os.unlink(self._fetch_tmp_file)
+		except OSError:
+			pass
+
+		if self._was_cancelled():
+			self.wait()
+			return
+
+		if copier.returncode == os.EX_OK:
+			self._success()
+			self.returncode = os.EX_OK
+			self.wait()
+		else:
+			# out of space?
+			msg = "%s %s copy failed unexpectedly" % \
+				(self.distfile, self._fetch_tmp_dir_info)
+			self.scheduler.output(msg + '\n', background=True,
+				log_path=self._log_path)
+			logging.error(msg)
+			self.config.log_failure("%s\t%s\t%s" %
+				(self.cpv, self.distfile, msg))
+			self.config.file_failures[self.distfile] = self.cpv
+			self.returncode = 1
+			self.wait()
+
+	def _unlink_file(self, file_path, dir_info):
+		try:
+			os.unlink(file_path)
+		except OSError as e:
+			if e.errno not in (errno.ENOENT, errno.ESTALE):
+				msg = "unlink '%s' failed in %s: %s" % \
+					(self.distfile, dir_info, e)
+				self.scheduler.output(msg + '\n', background=True,
+					log_path=self._log_path)
+				logging.error(msg)
+				return False
+		return True
+
+	def _have_needed_digests(self):
+		return "size" in self.digests and \
+			self._select_hash() is not None
+
+	def _select_hash(self):
+		if default_hash_name in self.digests:
+			return default_hash_name
+		else:
+			for hash_name in self.digests:
+				if hash_name != "size" and \
+					hash_name in portage.checksum.hashfunc_map:
+					return hash_name
+
+		return None
+
+	def _find_bad_digest(self, digests):
+		for hash_name, hash_value in digests.items():
+			if self.digests[hash_name] != hash_value:
+				return hash_name
+		return None
+
+	@staticmethod
+	def _same_device(path1, path2):
+		try:
+			st1 = os.stat(path1)
+			st2 = os.stat(path2)
+		except OSError:
+			return False
+		else:
+			return st1.st_dev == st2.st_dev
+
+	def _hardlink_atomic(self, src, dest, dir_info):
+
+		head, tail = os.path.split(dest)
+		hardlink_tmp = os.path.join(head, ".%s._mirrordist_hardlink_.%s" % \
+			(tail, os.getpid()))
+
+		try:
+			try:
+				os.link(src, hardlink_tmp)
+			except OSError as e:
+				if e.errno != errno.EXDEV:
+					msg = "hardlink %s from %s failed: %s" % \
+						(self.distfile, dir_info, e)
+					self.scheduler.output(msg + '\n', background=True,
+						log_path=self._log_path)
+					logging.error(msg)
+				return False
+
+			try:
+				os.rename(hardlink_tmp, dest)
+			except OSError as e:
+				msg = "hardlink rename '%s' from %s failed: %s" % \
+					(self.distfile, dir_info, e)
+				self.scheduler.output(msg + '\n', background=True,
+					log_path=self._log_path)
+				logging.error(msg)
+				return False
+		finally:
+			try:
+				os.unlink(hardlink_tmp)
+			except OSError:
+				pass
+
+		return True

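FetchTask._hardlink_atomic() avoids exposing a half-created distfile under its final name: it links to a unique dot-prefixed temp name in the destination directory, then rename()s over the target. A simplified sketch of the pattern (errors other than cross-device are re-raised here rather than logged, unlike the task version):

    import errno
    import os

    def hardlink_atomic(src, dest):
        head, tail = os.path.split(dest)
        tmp = os.path.join(head, ".%s._hardlink_.%s" % (tail, os.getpid()))
        try:
            try:
                os.link(src, tmp)
            except OSError as e:
                if e.errno != errno.EXDEV:
                    raise
                # different filesystem: caller falls back to a copy
                return False
            os.rename(tmp, dest)  # atomic within one filesystem
            return True
        finally:
            try:
                os.unlink(tmp)  # no-op after a successful rename
            except OSError:
                pass
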
diff --git a/portage_with_autodep/pym/portage/_emirrordist/MirrorDistTask.py b/portage_with_autodep/pym/portage/_emirrordist/MirrorDistTask.py
new file mode 100644
index 0000000..b6f875d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_emirrordist/MirrorDistTask.py
@@ -0,0 +1,218 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import sys
+import time
+
+try:
+	import threading
+except ImportError:
+	import dummy_threading as threading
+
+import portage
+from portage import os
+from portage.util._async.TaskScheduler import TaskScheduler
+from _emerge.CompositeTask import CompositeTask
+from .FetchIterator import FetchIterator
+from .DeletionIterator import DeletionIterator
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+class MirrorDistTask(CompositeTask):
+
+	__slots__ = ('_config', '_terminated', '_term_check_id')
+
+	def __init__(self, config):
+		CompositeTask.__init__(self, scheduler=config.event_loop)
+		self._config = config
+		self._terminated = threading.Event()
+
+	def _start(self):
+		self._term_check_id = self.scheduler.idle_add(self._termination_check)
+		fetch = TaskScheduler(iter(FetchIterator(self._config)),
+			max_jobs=self._config.options.jobs,
+			max_load=self._config.options.load_average,
+			event_loop=self._config.event_loop)
+		self._start_task(fetch, self._fetch_exit)
+
+	def _fetch_exit(self, fetch):
+
+		self._assert_current(fetch)
+		if self._was_cancelled():
+			self.wait()
+			return
+
+		if self._config.options.delete:
+			deletion = TaskScheduler(iter(DeletionIterator(self._config)),
+				max_jobs=self._config.options.jobs,
+				max_load=self._config.options.load_average,
+				event_loop=self._config.event_loop)
+			self._start_task(deletion, self._deletion_exit)
+			return
+
+		self._post_deletion()
+
+	def _deletion_exit(self, deletion):
+
+		self._assert_current(deletion)
+		if self._was_cancelled():
+			self.wait()
+			return
+
+		self._post_deletion()
+
+	def _post_deletion(self):
+
+		if self._config.options.recycle_db is not None:
+			self._update_recycle_db()
+
+		if self._config.options.scheduled_deletion_log is not None:
+			self._scheduled_deletion_log()
+
+		self._summary()
+
+		self.returncode = os.EX_OK
+		self._current_task = None
+		self.wait()
+
+	def _update_recycle_db(self):
+
+		start_time = self._config.start_time
+		recycle_dir = self._config.options.recycle_dir
+		recycle_db = self._config.recycle_db
+		r_deletion_delay = self._config.options.recycle_deletion_delay
+
+		# Use a dict to optimize access.
+		recycle_db_cache = dict(recycle_db.items())
+
+		for filename in os.listdir(recycle_dir):
+
+			recycle_file = os.path.join(recycle_dir, filename)
+
+			try:
+				st = os.stat(recycle_file)
+			except OSError as e:
+				if e.errno not in (errno.ENOENT, errno.ESTALE):
+					logging.error(("stat failed for '%s' in "
+						"recycle: %s") % (filename, e))
+				continue
+
+			value = recycle_db_cache.pop(filename, None)
+			if value is None:
+				logging.debug(("add '%s' to "
+					"recycle db") % filename)
+				recycle_db[filename] = (st.st_size, start_time)
+			else:
+				r_size, r_time = value
+				if long(r_size) != st.st_size:
+					recycle_db[filename] = (st.st_size, start_time)
+				elif r_time + r_deletion_delay < start_time:
+					if self._config.options.dry_run:
+						logging.info(("dry-run: delete '%s' from "
+							"recycle") % filename)
+						logging.info(("drop '%s' from "
+							"recycle db") % filename)
+					else:
+						try:
+							os.unlink(recycle_file)
+						except OSError as e:
+							if e.errno not in (errno.ENOENT, errno.ESTALE):
+								logging.error(("delete '%s' from "
+									"recycle failed: %s") % (filename, e))
+						else:
+							logging.debug(("delete '%s' from "
+								"recycle") % filename)
+							try:
+								del recycle_db[filename]
+							except KeyError:
+								pass
+							else:
+								logging.debug(("drop '%s' from "
+									"recycle db") % filename)
+
+		# Existing files were popped from recycle_db_cache,
+		# so any remaining entries are for files that no
+		# longer exist.
+		for filename in recycle_db_cache:
+			try:
+				del recycle_db[filename]
+			except KeyError:
+				pass
+			else:
+				logging.debug(("drop non-existent '%s' from "
+					"recycle db") % filename)
+
+	def _scheduled_deletion_log(self):
+
+		start_time = self._config.start_time
+		dry_run = self._config.options.dry_run
+		deletion_delay = self._config.options.deletion_delay
+		distfiles_db = self._config.distfiles_db
+
+		date_map = {}
+		for filename, timestamp in self._config.deletion_db.items():
+			date = timestamp + deletion_delay
+			if date < start_time:
+				date = start_time
+			date = time.strftime("%Y-%m-%d", time.gmtime(date))
+			date_files = date_map.get(date)
+			if date_files is None:
+				date_files = []
+				date_map[date] = date_files
+			date_files.append(filename)
+
+		if dry_run:
+			logging.warn(("dry-run: scheduled-deletions log "
+				"will be summarized via logging.info"))
+
+		lines = []
+		for date in sorted(date_map):
+			date_files = date_map[date]
+			if dry_run:
+				logging.info(("dry-run: scheduled deletions for %s: %s files") %
+					(date, len(date_files)))
+			lines.append("%s\n" % date)
+			for filename in date_files:
+				cpv = "unknown"
+				if distfiles_db is not None:
+					cpv = distfiles_db.get(filename, cpv)
+				lines.append("\t%s\t%s\n" % (filename, cpv))
+
+		if not dry_run:
+			portage.util.write_atomic(
+				self._config.options.scheduled_deletion_log,
+				"".join(lines))
+
+	def _summary(self):
+		elapsed_time = time.time() - self._config.start_time
+		fail_count = len(self._config.file_failures)
+		delete_count = self._config.delete_count
+		scheduled_deletion_count = self._config.scheduled_deletion_count - delete_count
+		added_file_count = self._config.added_file_count
+		added_byte_count = self._config.added_byte_count
+
+		logging.info("finished in %i seconds" % elapsed_time)
+		logging.info("failed to fetch %i files" % fail_count)
+		logging.info("deleted %i files" % delete_count)
+		logging.info("deletion of %i files scheduled" %
+			scheduled_deletion_count)
+		logging.info("added %i files" % added_file_count)
+		logging.info("added %i bytes total" % added_byte_count)
+
+	def terminate(self):
+		self._terminated.set()
+
+	def _termination_check(self):
+		if self._terminated.is_set():
+			self.cancel()
+			self.wait()
+		return True
+
+	def _wait(self):
+		CompositeTask._wait(self)
+		if self._term_check_id is not None:
+			self.scheduler.source_remove(self._term_check_id)
+			self._term_check_id = None

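_scheduled_deletion_log() groups pending deletions by the calendar date on which their delay expires, clamping already-overdue entries to the current run's start time. The grouping logic as a standalone sketch (function name hypothetical):

    import time

    def group_by_deletion_date(deletion_db, deletion_delay, start_time):
        # deletion_db maps file -> first-seen timestamp; the deletion
        # date is first-seen + delay, never earlier than this run.
        date_map = {}
        for filename, timestamp in deletion_db.items():
            date = max(timestamp + deletion_delay, start_time)
            key = time.strftime("%Y-%m-%d", time.gmtime(date))
            date_map.setdefault(key, []).append(filename)
        return date_map

    db = {"a.tar.gz": 0, "b.tar.gz": 10 * 86400}
    print(sorted(group_by_deletion_date(db, 5 * 86400, 7 * 86400)))
    # ['1970-01-08', '1970-01-16']: a.tar.gz is already overdue, so it
    # is clamped to the start time; b.tar.gz expires eight days later.
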
diff --git a/portage_with_autodep/pym/portage/tests/resolver/__init__.py b/portage_with_autodep/pym/portage/_emirrordist/__init__.py
similarity index 65%
rename from portage_with_autodep/pym/portage/tests/resolver/__init__.py
rename to portage_with_autodep/pym/portage/_emirrordist/__init__.py
index 21a391a..6cde932 100644
--- a/portage_with_autodep/pym/portage/tests/resolver/__init__.py
+++ b/portage_with_autodep/pym/portage/_emirrordist/__init__.py
@@ -1,2 +1,2 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2013 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/_emirrordist/main.py b/portage_with_autodep/pym/portage/_emirrordist/main.py
new file mode 100644
index 0000000..f28aad7
--- /dev/null
+++ b/portage_with_autodep/pym/portage/_emirrordist/main.py
@@ -0,0 +1,455 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import sys
+
+import portage
+from portage import os
+from portage.util import normalize_path, writemsg_level
+from portage.util._argparse import ArgumentParser
+from portage.util._async.run_main_scheduler import run_main_scheduler
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
+from .Config import Config
+from .MirrorDistTask import MirrorDistTask
+
+if sys.hexversion >= 0x3000000:
+	long = int
+
+seconds_per_day = 24 * 60 * 60
+
+common_options = (
+	{
+		"longopt"  : "--dry-run",
+		"help"     : "perform a trial run with no changes made (usually combined "
+			"with --verbose)",
+		"action"   : "store_true"
+	},
+	{
+		"longopt"  : "--verbose",
+		"shortopt" : "-v",
+		"help"     : "display extra information on stderr "
+			"(multiple occurrences increase verbosity)",
+		"action"   : "count",
+		"default"  : 0,
+	},
+	{
+		"longopt"  : "--ignore-default-opts",
+		"help"     : "do not use the EMIRRORDIST_DEFAULT_OPTS environment variable",
+		"action"   : "store_true"
+	},
+	{
+		"longopt"  : "--distfiles",
+		"help"     : "distfiles directory to use (required)",
+		"metavar"  : "DIR"
+	},
+	{
+		"longopt"  : "--jobs",
+		"shortopt" : "-j",
+		"help"     : "number of concurrent jobs to run",
+		"type"     : int
+	},
+	{
+		"longopt"  : "--load-average",
+		"shortopt" : "-l",
+		"help"     : "load average limit for spawning of new concurrent jobs",
+		"metavar"  : "LOAD",
+		"type"     : float
+	},
+	{
+		"longopt"  : "--tries",
+		"help"     : "maximum number of tries per file, 0 means unlimited (default is 10)",
+		"default"  : 10,
+		"type"     : int
+	},
+	{
+		"longopt"  : "--repo",
+		"help"     : "name of repo to operate on"
+	},
+	{
+		"longopt"  : "--config-root",
+		"help"     : "location of portage config files",
+		"metavar"  : "DIR"
+	},
+	{
+		"longopt"  : "--portdir",
+		"help"     : "override the PORTDIR variable (deprecated in favor of --repositories-configuration)",
+		"metavar"  : "DIR"
+	},
+	{
+		"longopt"  : "--portdir-overlay",
+		"help"     : "override the PORTDIR_OVERLAY variable (deprecated in favor of --repositories-configuration)"
+	},
+	{
+		"longopt"  : "--repositories-configuration",
+		"help"     : "override configuration of repositories (in format of repos.conf)"
+	},
+	{
+		"longopt"  : "--strict-manifests",
+		"help"     : "manually override \"strict\" FEATURES setting",
+		"choices"  : ("y", "n"),
+		"metavar"  : "<y|n>",
+	},
+	{
+		"longopt"  : "--failure-log",
+		"help"     : "log file for fetch failures, with tab-delimited "
+			"output, for reporting purposes",
+		"metavar"  : "FILE"
+	},
+	{
+		"longopt"  : "--success-log",
+		"help"     : "log file for fetch successes, with tab-delimited "
+			"output, for reporting purposes",
+		"metavar"  : "FILE"
+	},
+	{
+		"longopt"  : "--scheduled-deletion-log",
+		"help"     : "log file for scheduled deletions, with tab-delimited "
+			"output, for reporting purposes",
+		"metavar"  : "FILE"
+	},
+	{
+		"longopt"  : "--delete",
+		"help"     : "enable deletion of unused distfiles",
+		"action"   : "store_true"
+	},
+	{
+		"longopt"  : "--deletion-db",
+		"help"     : "database file used to track lifetime of files "
+			"scheduled for delayed deletion",
+		"metavar"  : "FILE"
+	},
+	{
+		"longopt"  : "--deletion-delay",
+		"help"     : "delay time for deletion, measured in seconds",
+		"metavar"  : "SECONDS"
+	},
+	{
+		"longopt"  : "--temp-dir",
+		"help"     : "temporary directory for downloads",
+		"metavar"  : "DIR"
+	},
+	{
+		"longopt"  : "--mirror-overrides",
+		"help"     : "file holding a list of mirror overrides",
+		"metavar"  : "FILE"
+	},
+	{
+		"longopt"  : "--mirror-skip",
+		"help"     : "comma delimited list of mirror targets to skip "
+			"when fetching"
+	},
+	{
+		"longopt"  : "--restrict-mirror-exemptions",
+		"help"     : "comma delimited list of mirror targets for which to "
+			"ignore RESTRICT=\"mirror\""
+	},
+	{
+		"longopt"  : "--verify-existing-digest",
+		"help"     : "use digest as a verification of whether existing "
+			"distfiles are valid",
+		"action"   : "store_true"
+	},
+	{
+		"longopt"  : "--distfiles-local",
+		"help"     : "distfiles-local directory to use",
+		"metavar"  : "DIR"
+	},
+	{
+		"longopt"  : "--distfiles-db",
+		"help"     : "database file used to track which ebuilds a "
+			"distfile belongs to",
+		"metavar"  : "FILE"
+	},
+	{
+		"longopt"  : "--recycle-dir",
+		"help"     : "directory for extended retention of files that "
+			"are removed from distdir with the --delete option",
+		"metavar"  : "DIR"
+	},
+	{
+		"longopt"  : "--recycle-db",
+		"help"     : "database file used to track lifetime of files "
+			"in recycle dir",
+		"metavar"  : "FILE"
+	},
+	{
+		"longopt"  : "--recycle-deletion-delay",
+		"help"     : "delay time for deletion of unused files from "
+			"recycle dir, measured in seconds (defaults to "
+			"the equivalent of 60 days)",
+		"default"  : 60 * seconds_per_day,
+		"metavar"  : "SECONDS",
+		"type"     : int
+	},
+	{
+		"longopt"  : "--fetch-log-dir",
+		"help"     : "directory for individual fetch logs",
+		"metavar"  : "DIR"
+	},
+	{
+		"longopt"  : "--whitelist-from",
+		"help"     : "specifies a file containing a list of files to "
+			"whitelist, one per line, # prefixed lines ignored",
+		"action"   : "append",
+		"metavar"  : "FILE"
+	},
+)
+
+def parse_args(args):
+	description = "emirrordist - a fetch tool for mirroring " \
+		"of package distfiles"
+	usage = "emirrordist [options] <action>"
+	parser = ArgumentParser(description=description, usage=usage)
+
+	actions = parser.add_argument_group('Actions')
+	actions.add_argument("--version",
+		action="store_true",
+		help="display portage version and exit")
+	actions.add_argument("--mirror",
+		action="store_true",
+		help="mirror distfiles for the selected repository")
+
+	common = parser.add_argument_group('Common options')
+	for opt_info in common_options:
+		opt_pargs = [opt_info["longopt"]]
+		if opt_info.get("shortopt"):
+			opt_pargs.append(opt_info["shortopt"])
+		opt_kwargs = {"help" : opt_info["help"]}
+		for k in ("action", "choices", "default", "metavar", "type"):
+			if k in opt_info:
+				opt_kwargs[k] = opt_info[k]
+		common.add_argument(*opt_pargs, **opt_kwargs)
+
+	options, args = parser.parse_known_args(args)
+
+	return (parser, options, args)
+
+def emirrordist_main(args):
+
+	# The calling environment is ignored, so the program is
+	# completely controlled by commandline arguments.
+	env = {}
+
+	if not sys.stdout.isatty():
+		portage.output.nocolor()
+		env['NOCOLOR'] = 'true'
+
+	parser, options, args = parse_args(args)
+
+	if options.version:
+		sys.stdout.write("Portage %s\n" % portage.VERSION)
+		return os.EX_OK
+
+	config_root = options.config_root
+
+	if options.portdir is not None:
+		writemsg_level("emirrordist: warning: --portdir option is deprecated in favor of --repositories-configuration option\n",
+			level=logging.WARNING, noiselevel=-1)
+	if options.portdir_overlay is not None:
+		writemsg_level("emirrordist: warning: --portdir-overlay option is deprecated in favor of --repositories-configuration option\n",
+			level=logging.WARNING, noiselevel=-1)
+
+	if options.repositories_configuration is not None:
+		env['PORTAGE_REPOSITORIES'] = options.repositories_configuration
+	elif options.portdir_overlay is not None:
+		env['PORTDIR_OVERLAY'] = options.portdir_overlay
+
+	if options.portdir is not None:
+		env['PORTDIR'] = options.portdir
+
+	settings = portage.config(config_root=config_root,
+		local_config=False, env=env)
+
+	default_opts = None
+	if not options.ignore_default_opts:
+		default_opts = settings.get('EMIRRORDIST_DEFAULT_OPTS', '').split()
+
+	if default_opts:
+		parser, options, args = parse_args(default_opts + args)
+
+		settings = portage.config(config_root=config_root,
+			local_config=False, env=env)
+
+	if options.repo is None:
+		if len(settings.repositories.prepos) == 2:
+			for repo in settings.repositories:
+				if repo.name != "DEFAULT":
+					options.repo = repo.name
+					break
+
+		if options.repo is None:
+			parser.error("--repo option is required")
+
+	repo_path = settings.repositories.treemap.get(options.repo)
+	if repo_path is None:
+		parser.error("Unable to locate repository named '%s'" % (options.repo,))
+
+	if options.jobs is not None:
+		options.jobs = int(options.jobs)
+
+	if options.load_average is not None:
+		options.load_average = float(options.load_average)
+
+	if options.failure_log is not None:
+		options.failure_log = normalize_path(
+			os.path.abspath(options.failure_log))
+
+		parent_dir = os.path.dirname(options.failure_log)
+		if not (os.path.isdir(parent_dir) and
+			os.access(parent_dir, os.W_OK|os.X_OK)):
+			parser.error(("--failure-log '%s' parent is not a "
+				"writable directory") % options.failure_log)
+
+	if options.success_log is not None:
+		options.success_log = normalize_path(
+			os.path.abspath(options.success_log))
+
+		parent_dir = os.path.dirname(options.success_log)
+		if not (os.path.isdir(parent_dir) and
+			os.access(parent_dir, os.W_OK|os.X_OK)):
+			parser.error(("--success-log '%s' parent is not a "
+				"writable directory") % options.success_log)
+
+	if options.scheduled_deletion_log is not None:
+		options.scheduled_deletion_log = normalize_path(
+			os.path.abspath(options.scheduled_deletion_log))
+
+		parent_dir = os.path.dirname(options.scheduled_deletion_log)
+		if not (os.path.isdir(parent_dir) and
+			os.access(parent_dir, os.W_OK|os.X_OK)):
+			parser.error(("--scheduled-deletion-log '%s' parent is not a "
+				"writable directory") % options.scheduled_deletion_log)
+
+		if options.deletion_db is None:
+			parser.error("--scheduled-deletion-log requires --deletion-db")
+
+	if options.deletion_delay is not None:
+		options.deletion_delay = long(options.deletion_delay)
+		if options.deletion_db is None:
+			parser.error("--deletion-delay requires --deletion-db")
+
+	if options.deletion_db is not None:
+		if options.deletion_delay is None:
+			parser.error("--deletion-db requires --deletion-delay")
+		options.deletion_db = normalize_path(
+			os.path.abspath(options.deletion_db))
+
+	if options.temp_dir is not None:
+		options.temp_dir = normalize_path(
+			os.path.abspath(options.temp_dir))
+
+		if not (os.path.isdir(options.temp_dir) and
+			os.access(options.temp_dir, os.W_OK|os.X_OK)):
+			parser.error(("--temp-dir '%s' is not a "
+				"writable directory") % options.temp_dir)
+
+	if options.distfiles is not None:
+		options.distfiles = normalize_path(
+			os.path.abspath(options.distfiles))
+
+		if not (os.path.isdir(options.distfiles) and
+			os.access(options.distfiles, os.W_OK|os.X_OK)):
+			parser.error(("--distfiles '%s' is not a "
+				"writable directory") % options.distfiles)
+	else:
+		parser.error("missing required --distfiles parameter")
+
+	if options.mirror_overrides is not None:
+		options.mirror_overrides = normalize_path(
+			os.path.abspath(options.mirror_overrides))
+
+		if not (os.access(options.mirror_overrides, os.R_OK) and
+			os.path.isfile(options.mirror_overrides)):
+			parser.error(
+				"--mirror-overrides '%s' is not a readable file" %
+				options.mirror_overrides)
+
+	if options.distfiles_local is not None:
+		options.distfiles_local = normalize_path(
+			os.path.abspath(options.distfiles_local))
+
+		if not (os.path.isdir(options.distfiles_local) and
+			os.access(options.distfiles_local, os.W_OK|os.X_OK)):
+			parser.error(("--distfiles-local '%s' is not a "
+				"writable directory") % options.distfiles_local)
+
+	if options.distfiles_db is not None:
+		options.distfiles_db = normalize_path(
+			os.path.abspath(options.distfiles_db))
+
+	if options.tries is not None:
+		options.tries = int(options.tries)
+
+	if options.recycle_dir is not None:
+		options.recycle_dir = normalize_path(
+			os.path.abspath(options.recycle_dir))
+		if not (os.path.isdir(options.recycle_dir) and
+			os.access(options.recycle_dir, os.W_OK|os.X_OK)):
+			parser.error(("--recycle-dir '%s' is not a "
+				"writable directory") % options.recycle_dir)
+
+	if options.recycle_db is not None:
+		if options.recycle_dir is None:
+			parser.error("--recycle-db requires "
+				"--recycle-dir to be specified")
+		options.recycle_db = normalize_path(
+			os.path.abspath(options.recycle_db))
+
+	if options.recycle_deletion_delay is not None:
+		options.recycle_deletion_delay = \
+			long(options.recycle_deletion_delay)
+
+	if options.fetch_log_dir is not None:
+		options.fetch_log_dir = normalize_path(
+			os.path.abspath(options.fetch_log_dir))
+
+		if not (os.path.isdir(options.fetch_log_dir) and
+			os.access(options.fetch_log_dir, os.W_OK|os.X_OK)):
+			parser.error(("--fetch-log-dir '%s' is not a "
+				"writable directory") % options.fetch_log_dir)
+
+	if options.whitelist_from:
+		normalized_paths = []
+		for x in options.whitelist_from:
+			path = normalize_path(os.path.abspath(x))
+			normalized_paths.append(path)
+			if not (os.access(path, os.R_OK) and os.path.isfile(path)):
+				parser.error(
+					"--whitelist-from '%s' is not a readable file" % x)
+		options.whitelist_from = normalized_paths
+
+	if options.strict_manifests is not None:
+		if options.strict_manifests == "y":
+			settings.features.add("strict")
+		else:
+			settings.features.discard("strict")
+
+	settings.lock()
+
+	portdb = portage.portdbapi(mysettings=settings)
+
+	# Limit ebuilds to the specified repo.
+	portdb.porttrees = [repo_path]
+
+	portage.util.initialize_logger()
+
+	if options.verbose > 0:
+		l = logging.getLogger()
+		l.setLevel(l.getEffectiveLevel() - 10 * options.verbose)
+
+	with Config(options, portdb,
+		SchedulerInterface(global_event_loop())) as config:
+
+		if not options.mirror:
+			parser.error('No action specified')
+
+		returncode = os.EX_OK
+
+		if options.mirror:
+			signum = run_main_scheduler(MirrorDistTask(config))
+			if signum is not None:
+				sys.exit(128 + signum)
+
+	return returncode

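One pattern worth calling out in main.py is the table-driven parser construction: common_options is a tuple of plain dicts, and parse_args() turns each dict into an add_argument() call, forwarding only the keys that are actually present. The same pattern reduced to a sketch against the stdlib argparse (the two-entry option_table is hypothetical):

    from argparse import ArgumentParser

    option_table = (
        {"longopt": "--jobs", "shortopt": "-j", "type": int,
         "help": "number of concurrent jobs to run"},
        {"longopt": "--dry-run", "action": "store_true",
         "help": "perform a trial run with no changes made"},
    )

    parser = ArgumentParser()
    for opt_info in option_table:
        opt_pargs = [opt_info["longopt"]]
        if opt_info.get("shortopt"):
            opt_pargs.append(opt_info["shortopt"])
        opt_kwargs = {"help": opt_info["help"]}
        # Forward only the optional argparse keywords that are present.
        for k in ("action", "type"):
            if k in opt_info:
                opt_kwargs[k] = opt_info[k]
        parser.add_argument(*opt_pargs, **opt_kwargs)

    options = parser.parse_args(["--jobs", "4", "--dry-run"])
    print("%s %s" % (options.jobs, options.dry_run))  # 4 True

Keeping the option metadata in data rather than in a wall of add_argument() calls also makes it cheap to re-run parse_args() after EMIRRORDIST_DEFAULT_OPTS has been prepended, as emirrordist_main() does above.
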
diff --git a/portage_with_autodep/pym/portage/_global_updates.py b/portage_with_autodep/pym/portage/_global_updates.py
index 868d1ee..5175043 100644
--- a/portage_with_autodep/pym/portage/_global_updates.py
+++ b/portage_with_autodep/pym/portage/_global_updates.py
@@ -15,7 +15,7 @@ from portage.update import grab_updates, parse_updates, update_config_files, upd
 from portage.util import grabfile, shlex_split, \
 	writemsg, writemsg_stdout, write_atomic
 
-def _global_updates(trees, prev_mtimes, quiet=False):
+def _global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
 	"""
 	Perform new global updates if they exist in 'profiles/updates/'
 	subdirectories of all active repositories (PORTDIR + PORTDIR_OVERLAY).
@@ -37,7 +37,7 @@ def _global_updates(trees, prev_mtimes, quiet=False):
 		"SANDBOX_ACTIVE" in os.environ or \
 		len(trees) != 1:
 		return retupd
-	root = "/"
+	root = trees._running_eroot
 	mysettings = trees[root]["vartree"].settings
 	portdb = trees[root]["porttree"].dbapi
 	vardb = trees[root]["vartree"].dbapi
@@ -73,10 +73,10 @@ def _global_updates(trees, prev_mtimes, quiet=False):
 			continue
 
 		try:
-			if mysettings.get("PORTAGE_CALLER") == "fixpackages":
-				update_data = grab_updates(updpath)
+			if if_mtime_changed:
+				update_data = grab_updates(updpath, prev_mtimes=prev_mtimes)
 			else:
-				update_data = grab_updates(updpath, prev_mtimes)
+				update_data = grab_updates(updpath)
 		except DirectoryNotFound:
 			continue
 		myupd = []
@@ -217,8 +217,7 @@ def _global_updates(trees, prev_mtimes, quiet=False):
 
 			do_upgrade_packagesmessage = False
 			# We gotta do the brute force updates for these now.
-			if mysettings.get("PORTAGE_CALLER") == "fixpackages" or \
-			"fixpackages" in mysettings.features:
+			if True:
 				def onUpdate(maxval, curval):
 					if curval > 0:
 						writemsg_stdout("#")

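The new if_mtime_changed keyword makes the mtime gate explicit: a normal run passes prev_mtimes to grab_updates() so update directories whose mtimes are unchanged can be skipped, while a fixpackages-style caller can pass if_mtime_changed=False to force a full re-read. The gating idea itself, reduced to a self-contained sketch (grab_if_changed is a hypothetical helper, not portage's grab_updates):

    import os

    def grab_if_changed(path, prev_mtimes, read_func):
        """Re-read path only when its mtime differs from the cached one."""
        mtime = os.stat(path).st_mtime
        if prev_mtimes.get(path) == mtime:
            return None  # unchanged since last run, skip the expensive read
        prev_mtimes[path] = mtime
        return read_func(path)

    mtimes = {}
    data = grab_if_changed("/etc/hostname", mtimes,
        lambda p: open(p).read())   # first call reads the file
    again = grab_if_changed("/etc/hostname", mtimes,
        lambda p: open(p).read())   # returns None, mtime unchanged
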
diff --git a/portage_with_autodep/pym/portage/_global_updates.pyo b/portage_with_autodep/pym/portage/_global_updates.pyo
new file mode 100644
index 0000000..3e2e8de
Binary files /dev/null and b/portage_with_autodep/pym/portage/_global_updates.pyo differ

diff --git a/portage_with_autodep/pym/portage/_legacy_globals.py b/portage_with_autodep/pym/portage/_legacy_globals.py
index 615591a..abffa0e 100644
--- a/portage_with_autodep/pym/portage/_legacy_globals.py
+++ b/portage_with_autodep/pym/portage/_legacy_globals.py
@@ -35,19 +35,14 @@ def _get_legacy_global(name):
 	constructed.add('db')
 	del portage._initializing_globals
 
-	settings = portage.db["/"]["vartree"].settings
-
-	for root in portage.db:
-		if root != "/":
-			settings = portage.db[root]["vartree"].settings
-			break
-
-	portage.output._init(config_root=settings['PORTAGE_CONFIGROOT'])
+	settings = portage.db[portage.db._target_eroot]["vartree"].settings
 
 	portage.settings = settings
 	constructed.add('settings')
 
-	portage.root = root
+	# Since portage.db now uses EROOT for keys instead of ROOT, we make
+	# portage.root refer to EROOT such that it continues to work as a key.
+	portage.root = portage.db._target_eroot
 	constructed.add('root')
 
 	# COMPATIBILITY

diff --git a/portage_with_autodep/pym/portage/_legacy_globals.pyo b/portage_with_autodep/pym/portage/_legacy_globals.pyo
new file mode 100644
index 0000000..2e50cbe
Binary files /dev/null and b/portage_with_autodep/pym/portage/_legacy_globals.pyo differ

diff --git a/portage_with_autodep/pym/portage/_selinux.pyo b/portage_with_autodep/pym/portage/_selinux.pyo
new file mode 100644
index 0000000..7a413e0
Binary files /dev/null and b/portage_with_autodep/pym/portage/_selinux.pyo differ

diff --git a/portage_with_autodep/pym/portage/_sets/__init__.py b/portage_with_autodep/pym/portage/_sets/__init__.py
index 1b3484e..88a4b3b 100644
--- a/portage_with_autodep/pym/portage/_sets/__init__.py
+++ b/portage_with_autodep/pym/portage/_sets/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2007 Gentoo Foundation
+# Copyright 2007-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
@@ -6,16 +6,27 @@ from __future__ import print_function
 __all__ = ["SETPREFIX", "get_boolean", "SetConfigError",
 	"SetConfig", "load_default_config"]
 
+import io
+import logging
+import sys
 try:
-	from configparser import SafeConfigParser, NoOptionError
+	from configparser import NoOptionError, ParsingError
+	if sys.hexversion >= 0x3020000:
+		from configparser import ConfigParser as SafeConfigParser
+	else:
+		from configparser import SafeConfigParser
 except ImportError:
-	from ConfigParser import SafeConfigParser, NoOptionError
+	from ConfigParser import SafeConfigParser, NoOptionError, ParsingError
 from portage import os
 from portage import load_mod
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage import _encodings
 from portage.const import USER_CONFIG_PATH, GLOBAL_CONFIG_PATH
 from portage.const import _ENABLE_SET_CONFIG
 from portage.exception import PackageSetNotFound
 from portage.localization import _
+from portage.util import writemsg_level
 
 SETPREFIX = "@"
 
@@ -43,7 +54,32 @@ class SetConfig(object):
 			})
 
 		if _ENABLE_SET_CONFIG:
-			self._parser.read(paths)
+			# use read_file/readfp in order to control decoding of unicode
+			try:
+				# Python >=3.2
+				read_file = self._parser.read_file
+			except AttributeError:
+				read_file = self._parser.readfp
+
+			for p in paths:
+				f = None
+				try:
+					f = io.open(_unicode_encode(p,
+						encoding=_encodings['fs'], errors='strict'),
+						mode='r', encoding=_encodings['repo.content'],
+						errors='replace')
+				except EnvironmentError:
+					pass
+				else:
+					try:
+						read_file(f)
+					except ParsingError as e:
+						writemsg_level(_unicode_decode(
+							_("!!! Error while reading sets config file: %s\n")
+							) % e, level=logging.ERROR, noiselevel=-1)
+				finally:
+					if f is not None:
+						f.close()
 		else:
 			self._create_default_config()
 
@@ -201,7 +237,6 @@ class SetConfig(object):
 		except KeyError:
 			raise PackageSetNotFound(setname)
 		myatoms = myset.getAtoms()
-		parser = self._parser
 
 		if ignorelist is None:
 			ignorelist = set()

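The rewritten constructor stops handing raw paths to the parser and opens each file itself, so decoding is explicit and a ParsingError in one sets file is reported without aborting the others. The version-compatibility core of that hunk, reduced to a sketch ("sets.conf" is a placeholder filename):

    import io

    try:
        from configparser import ConfigParser  # Python 3
    except ImportError:
        from ConfigParser import SafeConfigParser as ConfigParser  # Python 2

    parser = ConfigParser()

    # Prefer the modern method name, fall back to the deprecated one,
    # so the same code runs on both old and new interpreters.
    read_file = getattr(parser, "read_file", None)
    if read_file is None:
        read_file = parser.readfp

    with io.open("sets.conf", mode="r", encoding="utf-8",
        errors="replace") as f:
        read_file(f)
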
diff --git a/portage_with_autodep/pym/portage/_sets/__init__.pyo b/portage_with_autodep/pym/portage/_sets/__init__.pyo
new file mode 100644
index 0000000..5318dbe
Binary files /dev/null and b/portage_with_autodep/pym/portage/_sets/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/_sets/base.pyo b/portage_with_autodep/pym/portage/_sets/base.pyo
new file mode 100644
index 0000000..89e53be
Binary files /dev/null and b/portage_with_autodep/pym/portage/_sets/base.pyo differ

diff --git a/portage_with_autodep/pym/portage/_sets/dbapi.py b/portage_with_autodep/pym/portage/_sets/dbapi.py
index 0f238f0..4982a92 100644
--- a/portage_with_autodep/pym/portage/_sets/dbapi.py
+++ b/portage_with_autodep/pym/portage/_sets/dbapi.py
@@ -1,10 +1,10 @@
-# Copyright 2007-2010 Gentoo Foundation
+# Copyright 2007-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import time
 
 from portage import os
-from portage.versions import catpkgsplit, catsplit, pkgcmp, best
+from portage.versions import best, catsplit, vercmp
 from portage.dep import Atom
 from portage.localization import _
 from portage._sets.base import PackageSet
@@ -72,18 +72,16 @@ class OwnerSet(PackageSet):
 		aux_keys = ["SLOT"]
 		if exclude_paths is None:
 			for link, p in vardb._owners.iter_owners(paths):
-				cat, pn = catpkgsplit(link.mycpv)[:2]
 				slot, = aux_get(link.mycpv, aux_keys)
-				rValue.add("%s/%s:%s" % (cat, pn, slot))
+				rValue.add("%s:%s" % (link.mycpv.cp, slot))
 		else:
 			all_paths = set()
 			all_paths.update(paths)
 			all_paths.update(exclude_paths)
 			exclude_atoms = set()
 			for link, p in vardb._owners.iter_owners(all_paths):
-				cat, pn = catpkgsplit(link.mycpv)[:2]
 				slot, = aux_get(link.mycpv, aux_keys)
-				atom = "%s/%s:%s" % (cat, pn, slot)
+				atom = "%s:%s" % (link.mycpv.cp, slot)
 				rValue.add(atom)
 				if p in exclude_paths:
 					exclude_atoms.add(atom)
@@ -184,9 +182,7 @@ class DowngradeSet(PackageSet):
 				ebuild = xmatch(xmatch_level, slot_atom)
 				if not ebuild:
 					continue
-				ebuild_split = catpkgsplit(ebuild)[1:]
-				installed_split = catpkgsplit(cpv)[1:]
-				if pkgcmp(installed_split, ebuild_split) > 0:
+				if vercmp(cpv.version, ebuild.version) > 0:
 					atoms.append(slot_atom)
 
 		self._setAtoms(atoms)

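Both hunks replace the old catpkgsplit()/pkgcmp() tuple juggling with vercmp(), which compares two version strings directly and returns a value greater than, equal to, or less than zero. Assuming a portage checkout on PYTHONPATH, the semantics look like this:

    from portage.versions import vercmp

    # No need to split a cpv into (cat, pn, ver, rev) tuples first.
    print(vercmp("1.0-r1", "1.0"))    # > 0: left side is newer
    print(vercmp("2.4", "2.4"))       # 0: versions are equal
    print(vercmp("1.2_pre1", "1.2"))  # < 0: left side is older

The related change from "%s/%s" % (cat, pn) to link.mycpv.cp relies on the cpv objects carrying .cp and .version attributes, which is also what makes the vercmp(cpv.version, ebuild.version) comparison above possible.
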
diff --git a/portage_with_autodep/pym/portage/_sets/dbapi.pyo b/portage_with_autodep/pym/portage/_sets/dbapi.pyo
new file mode 100644
index 0000000..20bf848
Binary files /dev/null and b/portage_with_autodep/pym/portage/_sets/dbapi.pyo differ

diff --git a/portage_with_autodep/pym/portage/_sets/files.pyo b/portage_with_autodep/pym/portage/_sets/files.pyo
new file mode 100644
index 0000000..eb03c00
Binary files /dev/null and b/portage_with_autodep/pym/portage/_sets/files.pyo differ

diff --git a/portage_with_autodep/pym/portage/_sets/libs.pyo b/portage_with_autodep/pym/portage/_sets/libs.pyo
new file mode 100644
index 0000000..72fc1bb
Binary files /dev/null and b/portage_with_autodep/pym/portage/_sets/libs.pyo differ

diff --git a/portage_with_autodep/pym/portage/_sets/profiles.pyo b/portage_with_autodep/pym/portage/_sets/profiles.pyo
new file mode 100644
index 0000000..9502044
Binary files /dev/null and b/portage_with_autodep/pym/portage/_sets/profiles.pyo differ

diff --git a/portage_with_autodep/pym/portage/_sets/security.py b/portage_with_autodep/pym/portage/_sets/security.py
index 2d8fcf6..7e856bc 100644
--- a/portage_with_autodep/pym/portage/_sets/security.py
+++ b/portage_with_autodep/pym/portage/_sets/security.py
@@ -1,9 +1,9 @@
-# Copyright 2007 Gentoo Foundation
+# Copyright 2007-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import portage.glsa as glsa
 from portage._sets.base import PackageSet
-from portage.versions import catpkgsplit, pkgcmp
+from portage.versions import vercmp
 from portage._sets import get_boolean
 
 __all__ = ["SecuritySet", "NewGlsaSet", "NewAffectedSet", "AffectedSet"]
@@ -45,12 +45,12 @@ class SecuritySet(PackageSet):
 		for atom in atomlist[:]:
 			cpv = self._portdbapi.xmatch("match-all", atom)[0]
 			slot = self._portdbapi.aux_get(cpv, ["SLOT"])[0]
-			cps = "/".join(catpkgsplit(cpv)[0:2]) + ":" + slot
+			cps = "%s:%s" % (cpv.cp, slot)
 			if not cps in mydict:
 				mydict[cps] = (atom, cpv)
 			else:
 				other_cpv = mydict[cps][1]
-				if pkgcmp(catpkgsplit(cpv)[1:], catpkgsplit(other_cpv)[1:]) > 0:
+				if vercmp(cpv.version, other_cpv.version) > 0:
 					atomlist.remove(mydict[cps][0])
 					mydict[cps] = (atom, cpv)
 		return atomlist

diff --git a/portage_with_autodep/pym/portage/_sets/security.pyo b/portage_with_autodep/pym/portage/_sets/security.pyo
new file mode 100644
index 0000000..ea67514
Binary files /dev/null and b/portage_with_autodep/pym/portage/_sets/security.pyo differ

diff --git a/portage_with_autodep/pym/portage/_sets/shell.pyo b/portage_with_autodep/pym/portage/_sets/shell.pyo
new file mode 100644
index 0000000..e5e4561
Binary files /dev/null and b/portage_with_autodep/pym/portage/_sets/shell.pyo differ

diff --git a/portage_with_autodep/pym/portage/cache/__init__.pyo b/portage_with_autodep/pym/portage/cache/__init__.pyo
new file mode 100644
index 0000000..eb5a90e
Binary files /dev/null and b/portage_with_autodep/pym/portage/cache/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/cache/anydbm.pyo b/portage_with_autodep/pym/portage/cache/anydbm.pyo
new file mode 100644
index 0000000..5946da9
Binary files /dev/null and b/portage_with_autodep/pym/portage/cache/anydbm.pyo differ

diff --git a/portage_with_autodep/pym/portage/cache/cache_errors.pyo b/portage_with_autodep/pym/portage/cache/cache_errors.pyo
new file mode 100644
index 0000000..866088e
Binary files /dev/null and b/portage_with_autodep/pym/portage/cache/cache_errors.pyo differ

diff --git a/portage_with_autodep/pym/portage/cache/ebuild_xattr.py b/portage_with_autodep/pym/portage/cache/ebuild_xattr.py
index 6b388fa..0086e40 100644
--- a/portage_with_autodep/pym/portage/cache/ebuild_xattr.py
+++ b/portage_with_autodep/pym/portage/cache/ebuild_xattr.py
@@ -1,5 +1,6 @@
+# -*- coding: UTF8 -*-
 # Copyright: 2009-2011 Gentoo Foundation
-# Author(s): Petteri R&#228;ty (betelgeuse@gentoo.org)
+# Author(s): Petteri Räty (betelgeuse@gentoo.org)
 # License: GPL2
 
 __all__ = ['database']

diff --git a/portage_with_autodep/pym/portage/cache/ebuild_xattr.pyo b/portage_with_autodep/pym/portage/cache/ebuild_xattr.pyo
new file mode 100644
index 0000000..fe32dcc
Binary files /dev/null and b/portage_with_autodep/pym/portage/cache/ebuild_xattr.pyo differ

diff --git a/portage_with_autodep/pym/portage/cache/flat_hash.py b/portage_with_autodep/pym/portage/cache/flat_hash.py
index b6bc074..2eae9f6 100644
--- a/portage_with_autodep/pym/portage/cache/flat_hash.py
+++ b/portage_with_autodep/pym/portage/cache/flat_hash.py
@@ -31,7 +31,7 @@ class database(fs_template.FsBased):
 			self.label.lstrip(os.path.sep).rstrip(os.path.sep))
 		write_keys = set(self._known_keys)
 		write_keys.add("_eclasses_")
-		write_keys.add("_mtime_")
+		write_keys.add("_%s_" % (self.validation_chf,))
 		self._write_keys = sorted(write_keys)
 		if not self.readonly and not os.path.exists(self.location):
 			self._ensure_dirs()
@@ -69,7 +69,6 @@ class database(fs_template.FsBased):
 			raise cache_errors.CacheCorruption(cpv, e)
 
 	def _setitem(self, cpv, values):
-#		import pdb;pdb.set_trace()
 		s = cpv.rfind("/")
 		fp = os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
 		try:
@@ -153,3 +152,9 @@ class database(fs_template.FsBased):
 						dirs.append((depth+1, p))
 					continue
 				yield p[len_base+1:]
+
+
+class md5_database(database):
+
+	validation_chf = 'md5'
+	store_eclass_paths = False

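The md5_database subclass works because flat_hash.database now derives its validation key from the validation_chf class attribute (note the write_keys.add("_%s_" % (self.validation_chf,)) hunk above): the same code paths read and write "_mtime_" for the classic backend and "_md5_" for the md5-validated one. Distilled into a toy example:

    class BaseCache(object):
        validation_chf = "mtime"

        @property
        def validation_key(self):
            # "_mtime_" for the classic backend, "_md5_" for md5_database
            return "_%s_" % (self.validation_chf,)

    class Md5Cache(BaseCache):
        validation_chf = "md5"

    print(BaseCache().validation_key)  # _mtime_
    print(Md5Cache().validation_key)   # _md5_
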
diff --git a/portage_with_autodep/pym/portage/cache/flat_hash.pyo b/portage_with_autodep/pym/portage/cache/flat_hash.pyo
new file mode 100644
index 0000000..4f568a8
Binary files /dev/null and b/portage_with_autodep/pym/portage/cache/flat_hash.pyo differ

diff --git a/portage_with_autodep/pym/portage/cache/flat_list.pyo b/portage_with_autodep/pym/portage/cache/flat_list.pyo
new file mode 100644
index 0000000..ab7dc82
Binary files /dev/null and b/portage_with_autodep/pym/portage/cache/flat_list.pyo differ

diff --git a/portage_with_autodep/pym/portage/cache/fs_template.pyo b/portage_with_autodep/pym/portage/cache/fs_template.pyo
new file mode 100644
index 0000000..6cbbc2f
Binary files /dev/null and b/portage_with_autodep/pym/portage/cache/fs_template.pyo differ

diff --git a/portage_with_autodep/pym/portage/cache/mappings.py b/portage_with_autodep/pym/portage/cache/mappings.py
index 60a918e..bc8ce9a 100644
--- a/portage_with_autodep/pym/portage/cache/mappings.py
+++ b/portage_with_autodep/pym/portage/cache/mappings.py
@@ -316,7 +316,7 @@ def slot_dict_class(keys, prefix="_val_"):
 		attribute names from keys
 	@type prefix: String
 	@rtype: SlotDict
-	@returns: A class that constructs SlotDict instances
+	@return: A class that constructs SlotDict instances
 		having the specified keys.
 	"""
 	if isinstance(keys, frozenset):

diff --git a/portage_with_autodep/pym/portage/cache/mappings.pyo b/portage_with_autodep/pym/portage/cache/mappings.pyo
new file mode 100644
index 0000000..1eb3f4f
Binary files /dev/null and b/portage_with_autodep/pym/portage/cache/mappings.pyo differ

diff --git a/portage_with_autodep/pym/portage/cache/metadata.py b/portage_with_autodep/pym/portage/cache/metadata.py
index 4c735d7..9d2c3a5 100644
--- a/portage_with_autodep/pym/portage/cache/metadata.py
+++ b/portage_with_autodep/pym/portage/cache/metadata.py
@@ -6,6 +6,7 @@ import errno
 import re
 import stat
 import sys
+from operator import attrgetter
 from portage import os
 from portage import _encodings
 from portage import _unicode_encode
@@ -63,13 +64,14 @@ class database(flat_hash.database):
 			if "INHERITED" in d:
 				if self.ec is None:
 					self.ec = portage.eclass_cache.cache(self.location[:-15])
+				getter = attrgetter(self.validation_chf)
 				try:
-					d["_eclasses_"] = self.ec.get_eclass_data(
-						d["INHERITED"].split())
+					ec_data = self.ec.get_eclass_data(d["INHERITED"].split())
+					d["_eclasses_"] = dict((k, (v.eclass_dir, getter(v)))
+						for k,v in ec_data.items())
 				except KeyError as e:
 					# INHERITED contains a non-existent eclass.
 					raise cache_errors.CacheCorruption(cpv, e)
-				del d["INHERITED"]
 			else:
 				d["_eclasses_"] = {}
 		elif isinstance(d["_eclasses_"], basestring):

diff --git a/portage_with_autodep/pym/portage/cache/metadata.pyo b/portage_with_autodep/pym/portage/cache/metadata.pyo
new file mode 100644
index 0000000..c98445b
Binary files /dev/null and b/portage_with_autodep/pym/portage/cache/metadata.pyo differ

diff --git a/portage_with_autodep/pym/portage/cache/metadata_overlay.py b/portage_with_autodep/pym/portage/cache/metadata_overlay.py
deleted file mode 100644
index cfa0051..0000000
--- a/portage_with_autodep/pym/portage/cache/metadata_overlay.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 1999-2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.cache import template
-from portage.cache.cache_errors import CacheCorruption
-from portage.cache.flat_hash import database as db_rw
-from portage.cache.metadata import database as db_ro
-
-class database(template.database):
-
-	serialize_eclasses = False
-
-	def __init__(self, location, label, auxdbkeys, db_rw=db_rw, db_ro=db_ro,
-		*args, **config):
-		super_config = config.copy()
-		super_config.pop("gid", None)
-		super_config.pop("perms", None)
-		super(database, self).__init__(location, label, auxdbkeys,
-			*args, **super_config)
-		self.db_rw = db_rw(location, label, auxdbkeys, **config)
-		self.commit = self.db_rw.commit
-		self.autocommits = self.db_rw.autocommits
-		if isinstance(db_ro, type):
-			ro_config = config.copy()
-			ro_config["readonly"] = True
-			self.db_ro = db_ro(label, "metadata/cache", auxdbkeys, **ro_config)
-		else:
-			self.db_ro = db_ro
-
-	def __getitem__(self, cpv):
-		"""funnel whiteout validation through here, since value needs to be fetched"""
-		try:
-			value = self.db_rw[cpv]
-		except KeyError:
-			return self.db_ro[cpv] # raises a KeyError when necessary
-		except CacheCorruption:
-			del self.db_rw[cpv]
-			return self.db_ro[cpv] # raises a KeyError when necessary
-		if self._is_whiteout(value):
-			if self._is_whiteout_valid(cpv, value):
-				raise KeyError(cpv)
-			else:
-				del self.db_rw[cpv]
-				return self.db_ro[cpv] # raises a KeyError when necessary
-		else:
-			return value
-
-	def _setitem(self, name, values):
-		try:
-			value_ro = self.db_ro.get(name)
-		except CacheCorruption:
-			value_ro = None
-		if value_ro is not None and \
-			self._are_values_identical(value_ro, values):
-			# we have matching values in the underlying db_ro
-			# so it is unnecessary to store data in db_rw
-			try:
-				del self.db_rw[name] # delete unwanted whiteout when necessary
-			except KeyError:
-				pass
-			return
-		self.db_rw[name] = values
-
-	def _delitem(self, cpv):
-		value = self[cpv] # validates whiteout and/or raises a KeyError when necessary
-		if cpv in self.db_ro:
-			self.db_rw[cpv] = self._create_whiteout(value)
-		else:
-			del self.db_rw[cpv]
-
-	def __contains__(self, cpv):
-		try:
-			self[cpv] # validates whiteout when necessary
-		except KeyError:
-			return False
-		return True
-
-	def __iter__(self):
-		s = set()
-		for cpv in self.db_rw:
-			if cpv in self: # validates whiteout when necessary
-				yield cpv
-			# set includes whiteouts so they won't be yielded later
-			s.add(cpv)
-		for cpv in self.db_ro:
-			if cpv not in s:
-				yield cpv
-
-	def _is_whiteout(self, value):
-		return value["EAPI"] == "whiteout"
-
-	def _create_whiteout(self, value):
-		return {"EAPI":"whiteout","_eclasses_":value["_eclasses_"],"_mtime_":value["_mtime_"]}
-
-	def _is_whiteout_valid(self, name, value_rw):
-		try:
-			value_ro = self.db_ro[name]
-			return self._are_values_identical(value_rw,value_ro)
-		except KeyError:
-			return False
-
-	def _are_values_identical(self, value1, value2):
-		if value1['_mtime_'] != value2['_mtime_']:
-			return False
-		return value1["_eclasses_"] == value2["_eclasses_"]

diff --git a/portage_with_autodep/pym/portage/cache/sql_template.pyo b/portage_with_autodep/pym/portage/cache/sql_template.pyo
new file mode 100644
index 0000000..e2c5974
Binary files /dev/null and b/portage_with_autodep/pym/portage/cache/sql_template.pyo differ

diff --git a/portage_with_autodep/pym/portage/cache/sqlite.pyo b/portage_with_autodep/pym/portage/cache/sqlite.pyo
new file mode 100644
index 0000000..a82d25f
Binary files /dev/null and b/portage_with_autodep/pym/portage/cache/sqlite.pyo differ

diff --git a/portage_with_autodep/pym/portage/cache/template.py b/portage_with_autodep/pym/portage/cache/template.py
index f84d8f4..cf1e8ae 100644
--- a/portage_with_autodep/pym/portage/cache/template.py
+++ b/portage_with_autodep/pym/portage/cache/template.py
@@ -1,4 +1,4 @@
-# Copyright: 2005 Gentoo Foundation
+# Copyright: 2005-2012 Gentoo Foundation
 # Author(s): Brian Harring (ferringb@gentoo.org)
 # License: GPL2
 
@@ -7,10 +7,14 @@ from portage.cache.cache_errors import InvalidRestriction
 from portage.cache.mappings import ProtectedDict
 import sys
 import warnings
+import operator
 
 if sys.hexversion >= 0x3000000:
+	_unicode = str
 	basestring = str
 	long = int
+else:
+	_unicode = unicode
 
 class database(object):
 	# this is for metadata/cache transfer.
@@ -21,6 +25,8 @@ class database(object):
 	autocommits = False
 	cleanse_keys = False
 	serialize_eclasses = True
+	validation_chf = 'mtime'
+	store_eclass_paths = True
 
 	def __init__(self, location, label, auxdbkeys, readonly=False):
 		""" initialize the derived class; specifically, store label/keys"""
@@ -40,9 +46,15 @@ class database(object):
 			self.updates = 0
 		d=self._getitem(cpv)
 		if self.serialize_eclasses and "_eclasses_" in d:
-			d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"])
+			d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"],
+				self.validation_chf, paths=self.store_eclass_paths)
 		elif "_eclasses_" not in d:
 			d["_eclasses_"] = {}
+		# Never return INHERITED, since portdbapi.aux_get() will
+		# generate it automatically from _eclasses_, and we want
+		# to omit it in comparisons between cache entries like
+		# those that egencache uses to avoid redundant writes.
+		d.pop("INHERITED", None)
 		mtime = d.get('_mtime_')
 		if mtime is None:
 			raise cache_errors.CacheCorruption(cpv,
@@ -60,22 +72,46 @@ class database(object):
 		override this in derived classes"""
 		raise NotImplementedError
 
+	@staticmethod
+	def _internal_eclasses(extern_ec_dict, chf_type, paths):
+		"""
+		When serialize_eclasses is False, we have to convert an external
+		eclass dict containing hashed_path objects into an appropriate
+		internal dict containing values of chf_type (and eclass dirs
+		if store_eclass_paths is True).
+		"""
+		if not extern_ec_dict:
+			return extern_ec_dict
+		chf_getter = operator.attrgetter(chf_type)
+		if paths:
+			intern_ec_dict = dict((k, (v.eclass_dir, chf_getter(v)))
+				for k, v in extern_ec_dict.items())
+		else:
+			intern_ec_dict = dict((k, chf_getter(v))
+				for k, v in extern_ec_dict.items())
+		return intern_ec_dict
+
 	def __setitem__(self, cpv, values):
 		"""set a cpv to values
 		This shouldn't be overridden in derived classes since it handles the readonly checks"""
 		if self.readonly:
 			raise cache_errors.ReadOnlyRestriction()
+		d = None
 		if self.cleanse_keys:
 			d=ProtectedDict(values)
 			for k, v in list(d.items()):
 				if not v:
 					del d[k]
-			if self.serialize_eclasses and "_eclasses_" in values:
-				d["_eclasses_"] = serialize_eclasses(d["_eclasses_"])
-		elif self.serialize_eclasses and "_eclasses_" in values:
-			d = ProtectedDict(values)
-			d["_eclasses_"] = serialize_eclasses(d["_eclasses_"])
-		else:
+		if "_eclasses_" in values:
+			if d is None:
+				d = ProtectedDict(values)
+			if self.serialize_eclasses:
+				d["_eclasses_"] = serialize_eclasses(d["_eclasses_"],
+					self.validation_chf, paths=self.store_eclass_paths)
+			else:
+				d["_eclasses_"] = self._internal_eclasses(d["_eclasses_"],
+					self.validation_chf, self.store_eclass_paths)
+		elif d is None:
 			d = values
 		self._setitem(cpv, d)
 		if not self.autocommits:
@@ -159,6 +195,23 @@ class database(object):
 		except KeyError:
 			return x
 
+	def validate_entry(self, entry, ebuild_hash, eclass_db):
+		hash_key = '_%s_' % self.validation_chf
+		try:
+			entry_hash = entry[hash_key]
+		except KeyError:
+			return False
+		else:
+			if entry_hash != getattr(ebuild_hash, self.validation_chf):
+				return False
+		update = eclass_db.validate_and_rewrite_cache(entry['_eclasses_'], self.validation_chf,
+			self.store_eclass_paths)
+		if update is None:
+			return False
+		if update:
+			entry['_eclasses_'] = update
+		return True
+
 	def get_matches(self, match_dict):
 		"""generic function for walking the entire cache db, matching restrictions to
 		filter what cpv's are returned.  Derived classes should override this if they
@@ -195,7 +248,9 @@ class database(object):
 		keys = __iter__
 		items = iteritems
 
-def serialize_eclasses(eclass_dict):
+_keysorter = operator.itemgetter(0)
+
+def serialize_eclasses(eclass_dict, chf_type='mtime', paths=True):
 	"""takes a dict, returns a string representing said dict"""
 	"""The "new format", which causes older versions of <portage-2.1.2 to
 	traceback with a ValueError due to failed long() conversion.  This format
@@ -206,27 +261,40 @@ def serialize_eclasses(eclass_dict):
 	"""
 	if not eclass_dict:
 		return ""
-	return "\t".join(k + "\t%s\t%s" % eclass_dict[k] \
-		for k in sorted(eclass_dict))
+	getter = operator.attrgetter(chf_type)
+	if paths:
+		return "\t".join("%s\t%s\t%s" % (k, v.eclass_dir, getter(v))
+			for k, v in sorted(eclass_dict.items(), key=_keysorter))
+	return "\t".join("%s\t%s" % (k, getter(v))
+		for k, v in sorted(eclass_dict.items(), key=_keysorter))
+
 
-def reconstruct_eclasses(cpv, eclass_string):
+def reconstruct_eclasses(cpv, eclass_string, chf_type='mtime', paths=True):
 	"""returns a dict when handed a string generated by serialize_eclasses"""
 	eclasses = eclass_string.rstrip().lstrip().split("\t")
 	if eclasses == [""]:
 		# occasionally this occurs in the fs backends.  they suck.
 		return {}
-	
-	if len(eclasses) % 2 != 0 and len(eclasses) % 3 != 0:
+
+	converter = _unicode
+	if chf_type == 'mtime':
+		converter = long
+
+	if paths:
+		if len(eclasses) % 3 != 0:
+			raise cache_errors.CacheCorruption(cpv, "_eclasses_ was of invalid len %i" % len(eclasses))
+	elif len(eclasses) % 2 != 0:
 		raise cache_errors.CacheCorruption(cpv, "_eclasses_ was of invalid len %i" % len(eclasses))
 	d={}
 	try:
-		if eclasses[1].isdigit():
-			for x in range(0, len(eclasses), 2):
-				d[eclasses[x]] = ("", long(eclasses[x + 1]))
-		else:
+		i = iter(eclasses)
+		if paths:
 			# The old format contains paths that will be discarded.
-			for x in range(0, len(eclasses), 3):
-				d[eclasses[x]] = (eclasses[x + 1], long(eclasses[x + 2]))
+			for name, path, val in zip(i, i, i):
+				d[name] = (path, converter(val))
+		else:
+			for name, val in zip(i, i):
+				d[name] = converter(val)
 	except IndexError:
 		raise cache_errors.CacheCorruption(cpv,
 			"_eclasses_ was of invalid len %i" % len(eclasses))

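After this change serialize_eclasses() and reconstruct_eclasses() agree on two tab-delimited layouts: name/dir/checksum triples when eclass paths are stored, and name/checksum pairs when they are not, with the checksum pulled from whichever hashed_path attribute chf_type names. A small stand-alone re-implementation of the writer shows the wire format (Rec is a hypothetical stand-in for portage's hashed_path objects):

    import operator

    class Rec(object):
        def __init__(self, eclass_dir, mtime, md5):
            self.eclass_dir = eclass_dir
            self.mtime = mtime
            self.md5 = md5

    def serialize(eclass_dict, chf_type="mtime", paths=True):
        getter = operator.attrgetter(chf_type)
        if paths:
            return "\t".join("%s\t%s\t%s" % (k, v.eclass_dir, getter(v))
                for k, v in sorted(eclass_dict.items()))
        return "\t".join("%s\t%s" % (k, getter(v))
            for k, v in sorted(eclass_dict.items()))

    d = {"eutils": Rec("/usr/portage/eclass", 1357000000, "d41d8cd9")}
    print(serialize(d))
    # eutils<TAB>/usr/portage/eclass<TAB>1357000000
    print(serialize(d, chf_type="md5", paths=False))
    # eutils<TAB>d41d8cd9

The reader side mirrors this: reconstruct_eclasses() zips one iterator with itself two or three times to walk pairs or triples, and applies long() conversion only in the mtime case.
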
diff --git a/portage_with_autodep/pym/portage/cache/template.pyo b/portage_with_autodep/pym/portage/cache/template.pyo
new file mode 100644
index 0000000..45da015
Binary files /dev/null and b/portage_with_autodep/pym/portage/cache/template.pyo differ

diff --git a/portage_with_autodep/pym/portage/cache/util.py b/portage_with_autodep/pym/portage/cache/util.py
deleted file mode 100644
index b824689..0000000
--- a/portage_with_autodep/pym/portage/cache/util.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# Copyright: 2005 Gentoo Foundation
-# Author(s): Brian Harring (ferringb@gentoo.org)
-# License: GPL2
-
-from __future__ import print_function
-
-__all__ = ["mirror_cache", "non_quiet_mirroring", "quiet_mirroring"]
-
-from itertools import chain
-from portage.cache import cache_errors
-from portage.localization import _
-
-def mirror_cache(valid_nodes_iterable, src_cache, trg_cache, eclass_cache=None, verbose_instance=None):
-
-	from portage import eapi_is_supported, \
-		_validate_cache_for_unsupported_eapis
-	if not src_cache.complete_eclass_entries and not eclass_cache:
-		raise Exception("eclass_cache required for cache's of class %s!" % src_cache.__class__)
-
-	if verbose_instance == None:
-		noise=quiet_mirroring()
-	else:
-		noise=verbose_instance
-
-	dead_nodes = set(trg_cache)
-	count=0
-
-	if not trg_cache.autocommits:
-		trg_cache.sync(100)
-
-	for x in valid_nodes_iterable:
-#		print "processing x=",x
-		count+=1
-		dead_nodes.discard(x)
-		try:
-			entry = src_cache[x]
-		except KeyError as e:
-			noise.missing_entry(x)
-			del e
-			continue
-		except cache_errors.CacheError as ce:
-			noise.exception(x, ce)
-			del ce
-			continue
-
-		eapi = entry.get('EAPI')
-		if not eapi:
-			eapi = '0'
-		eapi = eapi.lstrip('-')
-		eapi_supported = eapi_is_supported(eapi)
-		if not eapi_supported:
-			if not _validate_cache_for_unsupported_eapis:
-				noise.misc(x, _("unable to validate cache for EAPI='%s'") % eapi)
-				continue
-
-		write_it = True
-		trg = None
-		try:
-			trg = trg_cache[x]
-		except (KeyError, cache_errors.CacheError):
-			pass
-		else:
-			if trg['_mtime_'] == entry['_mtime_'] and \
-				eclass_cache.is_eclass_data_valid(trg['_eclasses_']) and \
-				set(trg['_eclasses_']) == set(entry['_eclasses_']):
-				write_it = False
-
-		for d in (entry, trg):
-			if d is not None and d.get('EAPI') in ('', '0'):
-				del d['EAPI']
-
-		if trg and not write_it:
-			""" We don't want to skip the write unless we're really sure that
-			the existing cache is identical, so don't trust _mtime_ and
-			_eclasses_ alone."""
-			for k in set(chain(entry, trg)).difference(
-				("_mtime_", "_eclasses_")):
-				if trg.get(k, "") != entry.get(k, ""):
-					write_it = True
-					break
-
-		if write_it:
-			try:
-				inherited = entry.get("INHERITED", "")
-				eclasses = entry.get("_eclasses_")
-			except cache_errors.CacheError as ce:
-				noise.exception(x, ce)
-				del ce
-				continue
-
-			if eclasses is not None:
-				if not eclass_cache.is_eclass_data_valid(entry["_eclasses_"]):
-					noise.eclass_stale(x)
-					continue
-				inherited = eclasses
-			else:
-				inherited = inherited.split()
-
-			if inherited:
-				if src_cache.complete_eclass_entries and eclasses is None:
-					noise.corruption(x, "missing _eclasses_ field")
-					continue
-
-				# Even if _eclasses_ already exists, replace it with data from
-				# eclass_cache, in order to insert local eclass paths.
-				try:
-					eclasses = eclass_cache.get_eclass_data(inherited)
-				except KeyError:
-					# INHERITED contains a non-existent eclass.
-					noise.eclass_stale(x)
-					continue
-
-				if eclasses is None:
-					noise.eclass_stale(x)
-					continue
-				entry["_eclasses_"] = eclasses
-
-			if not eapi_supported:
-				for k in set(entry).difference(("_mtime_", "_eclasses_")):
-					entry[k] = ""
-				entry["EAPI"] = "-" + eapi
-
-			# by this time, if it reaches here, the eclass has been validated, and the entry has 
-			# been updated/translated (if needs be, for metadata/cache mainly)
-			try:
-				trg_cache[x] = entry
-			except cache_errors.CacheError as ce:
-				noise.exception(x, ce)
-				del ce
-				continue
-		if count >= noise.call_update_min:
-			noise.update(x)
-			count = 0
-
-	if not trg_cache.autocommits:
-		trg_cache.commit()
-
-	# ok.  by this time, the trg_cache is up to date, and we have a dict
-	# with a crapload of cpv's.  we now walk the target db, removing stuff if it's in the list.
-	for key in dead_nodes:
-		try:
-			del trg_cache[key]
-		except KeyError:
-			pass
-		except cache_errors.CacheError as ce:
-			noise.exception(ce)
-			del ce
-	noise.finish()
-
-
-class quiet_mirroring(object):
-	# call_update_every is used by mirror_cache to determine how often to call in.
-	# quiet defaults to 2^24 -1.  Don't call update, 'cept once every 16 million or so :)
-	call_update_min = 0xffffff
-	def update(self,key,*arg):		pass
-	def exception(self,key,*arg):	pass
-	def eclass_stale(self,*arg):	pass
-	def missing_entry(self, key):	pass
-	def misc(self,key,*arg):		pass
-	def corruption(self, key, s):	pass
-	def finish(self, *arg):			pass
-	
-class non_quiet_mirroring(quiet_mirroring):
-	call_update_min=1
-	def update(self,key,*arg):	print("processed",key)
-	def exception(self, key, *arg):	print("exec",key,arg)
-	def missing(self,key):		print("key %s is missing", key)
-	def corruption(self,key,*arg):	print("corrupt %s:" % key,arg)
-	def eclass_stale(self,key,*arg):print("stale %s:"%key,arg)
-

diff --git a/portage_with_autodep/pym/portage/cache/volatile.py b/portage_with_autodep/pym/portage/cache/volatile.py
index 0bf6bab..5516745 100644
--- a/portage_with_autodep/pym/portage/cache/volatile.py
+++ b/portage_with_autodep/pym/portage/cache/volatile.py
@@ -8,18 +8,23 @@ class database(template.database):
 
 	autocommits = True
 	serialize_eclasses = False
+	store_eclass_paths = False
 
 	def __init__(self, *args, **config):
 		config.pop("gid", None)
 		config.pop("perms", None)
 		super(database, self).__init__(*args, **config)
 		self._data = {}
-		self.__iter__ = self._data.__iter__
 		self._delitem = self._data.__delitem__
-		self.__contains__ = self._data.__contains__
 
 	def _setitem(self, name, values):
 		self._data[name] = copy.deepcopy(values)
 
-	def _getitem(self, cpv):
+	def __getitem__(self, cpv):
 		return copy.deepcopy(self._data[cpv])
+
+	def __iter__(self):
+		return iter(self._data)
+
+	def __contains__(self, key):
+		return key in self._data

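volatile.database now deep-copies entries both on the way in (_setitem) and on the way out (__getitem__), so a caller can keep mutating a dict it stored or fetched without silently corrupting the in-memory cache. The discipline in isolation:

    import copy

    class VolatileStore(object):
        """Toy in-memory store with the same copy discipline."""

        def __init__(self):
            self._data = {}

        def __setitem__(self, key, values):
            self._data[key] = copy.deepcopy(values)

        def __getitem__(self, key):
            return copy.deepcopy(self._data[key])

    store = VolatileStore()
    entry = {"KEYWORDS": ["amd64"]}
    store["app-misc/foo-1"] = entry
    entry["KEYWORDS"].append("x86")  # caller mutates its own copy...
    print(store["app-misc/foo-1"]["KEYWORDS"])  # ['amd64'] -- unaffected

Defining real __getitem__/__iter__/__contains__ methods instead of binding dict methods onto the instance in __init__ also matters because the interpreter looks special methods up on the class, not the instance, for new-style classes.
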
diff --git a/portage_with_autodep/pym/portage/cache/volatile.pyo b/portage_with_autodep/pym/portage/cache/volatile.pyo
new file mode 100644
index 0000000..fac5d55
Binary files /dev/null and b/portage_with_autodep/pym/portage/cache/volatile.pyo differ

diff --git a/portage_with_autodep/pym/portage/checksum.py b/portage_with_autodep/pym/portage/checksum.py
index 9e7e455..bd416ac 100644
--- a/portage_with_autodep/pym/portage/checksum.py
+++ b/portage_with_autodep/pym/portage/checksum.py
@@ -1,5 +1,5 @@
 # checksum.py -- core Portage functionality
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import portage
@@ -16,8 +16,31 @@ import tempfile
 hashfunc_map = {}
 hashorigin_map = {}
 
-def _generate_hash_function(hashtype, hashobject, origin="unknown"):
-	def pyhash(filename):
+def _open_file(filename):
+	try:
+		return open(_unicode_encode(filename,
+			encoding=_encodings['fs'], errors='strict'), 'rb')
+	except IOError as e:
+		func_call = "open('%s')" % filename
+		if e.errno == errno.EPERM:
+			raise portage.exception.OperationNotPermitted(func_call)
+		elif e.errno == errno.EACCES:
+			raise portage.exception.PermissionDenied(func_call)
+		elif e.errno == errno.ENOENT:
+			raise portage.exception.FileNotFound(filename)
+		else:
+			raise
+
+class _generate_hash_function(object):
+
+	__slots__ = ("_hashobject",)
+
+	def __init__(self, hashtype, hashobject, origin="unknown"):
+		self._hashobject = hashobject
+		hashfunc_map[hashtype] = self
+		hashorigin_map[hashtype] = origin
+
+	def __call__(self, filename):
 		"""
 		Run a checksum against a file.
 	
@@ -25,23 +48,11 @@ def _generate_hash_function(hashtype, hashobject, origin="unknown"):
 		@type filename: String
 		@return: The hash and size of the data
 		"""
-		try:
-			f = open(_unicode_encode(filename,
-				encoding=_encodings['fs'], errors='strict'), 'rb')
-		except IOError as e:
-			func_call = "open('%s')" % filename
-			if e.errno == errno.EPERM:
-				raise portage.exception.OperationNotPermitted(func_call)
-			elif e.errno == errno.EACCES:
-				raise portage.exception.PermissionDenied(func_call)
-			elif e.errno == errno.ENOENT:
-				raise portage.exception.FileNotFound(filename)
-			else:
-				raise
+		f = _open_file(filename)
 		blocksize = HASHING_BLOCKSIZE
 		data = f.read(blocksize)
 		size = 0
-		checksum = hashobject()
+		checksum = self._hashobject()
 		while data:
 			checksum.update(data)
 			size = size + len(data)
@@ -49,9 +60,6 @@ def _generate_hash_function(hashtype, hashobject, origin="unknown"):
 		f.close()
 
 		return (checksum.hexdigest(), size)
-	hashfunc_map[hashtype] = pyhash
-	hashorigin_map[hashtype] = origin
-	return pyhash
 
 # Define hash functions, try to use the best module available. Later definitions
 # override earlier ones
@@ -71,40 +79,72 @@ except ImportError:
 
 sha1hash = _generate_hash_function("SHA1", _new_sha1, origin="internal")
 
+# Try to use mhash if available
+# mhash causes GIL presently, so it gets less priority than hashlib and
+# pycrypto. However, it might be the only accelerated implementation of
+# WHIRLPOOL available.
+try:
+	import mhash, functools
+	md5hash = _generate_hash_function("MD5", functools.partial(mhash.MHASH, mhash.MHASH_MD5), origin="mhash")
+	sha1hash = _generate_hash_function("SHA1", functools.partial(mhash.MHASH, mhash.MHASH_SHA1), origin="mhash")
+	sha256hash = _generate_hash_function("SHA256", functools.partial(mhash.MHASH, mhash.MHASH_SHA256), origin="mhash")
+	sha512hash = _generate_hash_function("SHA512", functools.partial(mhash.MHASH, mhash.MHASH_SHA512), origin="mhash")
+	for local_name, hash_name in (("rmd160", "ripemd160"), ("whirlpool", "whirlpool")):
+	for local_name, hash_name in (("rmd160", "ripemd160"), ("whirlpool", "whirlpool")):
+		if hasattr(mhash, 'MHASH_%s' % hash_name.upper()):
+			globals()['%shash' % local_name] = \
+				_generate_hash_function(local_name.upper(), \
+				functools.partial(mhash.MHASH, getattr(mhash, 'MHASH_%s' % hash_name.upper())), \
+				origin='mhash')
+except ImportError:
+	pass
+
 # Use pycrypto when available, prefer it over the internal fallbacks
+# Check for 'new' attributes, since they can be missing if the module
+# is broken somehow.
 try:
 	from Crypto.Hash import SHA256, RIPEMD
-	sha256hash = _generate_hash_function("SHA256", SHA256.new, origin="pycrypto")
-	rmd160hash = _generate_hash_function("RMD160", RIPEMD.new, origin="pycrypto")
-except ImportError as e:
+	sha256hash = getattr(SHA256, 'new', None)
+	if sha256hash is not None:
+		sha256hash = _generate_hash_function("SHA256",
+			sha256hash, origin="pycrypto")
+	rmd160hash = getattr(RIPEMD, 'new', None)
+	if rmd160hash is not None:
+		rmd160hash = _generate_hash_function("RMD160",
+			rmd160hash, origin="pycrypto")
+except ImportError:
 	pass
 
 # Use hashlib from python-2.5 if available and prefer it over pycrypto and internal fallbacks.
-# Need special handling for RMD160 as it may not always be provided by hashlib.
+# Need special handling for RMD160/WHIRLPOOL as they may not always be provided by hashlib.
 try:
-	import hashlib
+	import hashlib, functools
 	
 	md5hash = _generate_hash_function("MD5", hashlib.md5, origin="hashlib")
 	sha1hash = _generate_hash_function("SHA1", hashlib.sha1, origin="hashlib")
 	sha256hash = _generate_hash_function("SHA256", hashlib.sha256, origin="hashlib")
-	try:
-		hashlib.new('ripemd160')
-	except ValueError:
-		pass
-	else:
-		def rmd160():
-			return hashlib.new('ripemd160')
-		rmd160hash = _generate_hash_function("RMD160", rmd160, origin="hashlib")
-except ImportError as e:
+	sha512hash = _generate_hash_function("SHA512", hashlib.sha512, origin="hashlib")
+	for local_name, hash_name in (("rmd160", "ripemd160"), ("whirlpool", "whirlpool")):
+		try:
+			hashlib.new(hash_name)
+		except ValueError:
+			pass
+		else:
+			globals()['%shash' % local_name] = \
+				_generate_hash_function(local_name.upper(), \
+				functools.partial(hashlib.new, hash_name), \
+				origin='hashlib')
+
+except ImportError:
 	pass
-	
+
+if "WHIRLPOOL" not in hashfunc_map:
+	# Bundled WHIRLPOOL implementation
+	from portage.util.whirlpool import new as _new_whirlpool
+	whirlpoolhash = _generate_hash_function("WHIRLPOOL", _new_whirlpool, origin="bundled")
 
 # Use python-fchksum if available, prefer it over all other MD5 implementations
 try:
-	import fchksum
-	
-	def md5hash(filename):
-		return fchksum.fmd5t(filename)
+	from fchksum import fmd5t as md5hash
 	hashfunc_map["MD5"] = md5hash
 	hashorigin_map["MD5"] = "python-fchksum"
 
@@ -127,6 +167,15 @@ if os.path.exists(PRELINK_BINARY):
 		prelink_capable=1
 	del results
 
+def is_prelinkable_elf(filename):
+	f = _open_file(filename)
+	try:
+		magic = f.read(17)
+	finally:
+		f.close()
+	return (len(magic) == 17 and magic.startswith(b'\x7fELF') and
+		magic[16:17] in (b'\x02', b'\x03')) # 2=ET_EXEC, 3=ET_DYN
+
 def perform_md5(x, calc_prelink=0):
 	return perform_checksum(x, "MD5", calc_prelink)[0]
 
@@ -137,7 +186,7 @@ def _perform_md5_merge(x, **kwargs):
 def perform_all(x, calc_prelink=0):
 	mydict = {}
 	for k in hashfunc_map:
-		mydict[k] = perform_checksum(x, hashfunc_map[k], calc_prelink)[0]
+		mydict[k] = perform_checksum(x, k, calc_prelink)[0]
 	return mydict
 
 def get_valid_checksum_keys():
@@ -234,7 +283,8 @@ def perform_checksum(filename, hashname="MD5", calc_prelink=0):
 	myfilename = filename
 	prelink_tmpfile = None
 	try:
-		if calc_prelink and prelink_capable:
+		if (calc_prelink and prelink_capable and
+		    is_prelinkable_elf(filename)):
 			# Create non-prelinked temporary file to checksum.
 			# Files rejected by prelink are summed in place.
 			try:
@@ -255,8 +305,10 @@ def perform_checksum(filename, hashname="MD5", calc_prelink=0):
 					" hash function not available (needs dev-python/pycrypto)")
 			myhash, mysize = hashfunc_map[hashname](myfilename)
 		except (OSError, IOError) as e:
-			if e.errno == errno.ENOENT:
+			if e.errno in (errno.ENOENT, errno.ESTALE):
 				raise portage.exception.FileNotFound(myfilename)
+			elif e.errno == portage.exception.PermissionDenied.errno:
+				raise portage.exception.PermissionDenied(myfilename)
 			raise
 		return myhash, mysize
 	finally:

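Turning _generate_hash_function from a closure factory into a callable class keeps the registration side effect (constructing an instance inserts it into hashfunc_map/hashorigin_map) while giving the hash functions a real type with __slots__. A toy registry in the same style (registry and HashFunc are illustrative, and hashing a whole buffer in one call simplifies away the real class's HASHING_BLOCKSIZE read loop):

    import hashlib

    registry = {}

    class HashFunc(object):
        """Callable object in the style of _generate_hash_function."""

        __slots__ = ("_factory",)

        def __init__(self, name, factory):
            self._factory = factory
            registry[name] = self  # constructing the object registers it

        def __call__(self, data):
            h = self._factory()
            h.update(data)
            return h.hexdigest()

    HashFunc("MD5", hashlib.md5)
    HashFunc("SHA256", hashlib.sha256)
    print(registry["SHA256"](b"distfile contents"))
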
diff --git a/portage_with_autodep/pym/portage/checksum.pyo b/portage_with_autodep/pym/portage/checksum.pyo
new file mode 100644
index 0000000..00231af
Binary files /dev/null and b/portage_with_autodep/pym/portage/checksum.pyo differ

diff --git a/portage_with_autodep/pym/portage/const.py b/portage_with_autodep/pym/portage/const.py
index 2a391db..614dcdb 100644
--- a/portage_with_autodep/pym/portage/const.py
+++ b/portage_with_autodep/pym/portage/const.py
@@ -67,8 +67,7 @@ FAKEROOT_BINARY          = "/usr/bin/fakeroot"
 BASH_BINARY              = "/bin/bash"
 MOVE_BINARY              = "/bin/mv"
 PRELINK_BINARY           = "/usr/sbin/prelink"
-AUTODEP_LIBRARY			 = "/usr/lib/file_hook.so"
-#AUTODEP_LIBRARY			 = "/home/bay/autodep/src/hook_lib/file_hook.so"
+AUTODEP_LIBRARY          = "/usr/lib/file_hook.so"
 
 
 INVALID_ENV_FILE         = "/etc/spork/is/not/valid/profile.env"
@@ -89,12 +88,12 @@ EBUILD_PHASES            = ("pretend", "setup", "unpack", "prepare", "configure"
                            "package", "preinst", "postinst","prerm", "postrm",
                            "nofetch", "config", "info", "other")
 SUPPORTED_FEATURES       = frozenset([
-                           "allow-missing-manifests",
                            "assume-digests", "binpkg-logs", "buildpkg", "buildsyspkg", "candy",
-                           "ccache", "chflags", "collision-protect", "compress-build-logs", 
-                           "depcheck", "depcheckstrict",
+                           "ccache", "chflags", "clean-logs",
+                           "collision-protect", "compress-build-logs", "compressdebug",
+                           "config-protect-if-modified", "depcheck", "depcheckstrict",
                            "digest", "distcc", "distcc-pump", "distlocks", "ebuild-locks", "fakeroot",
-                           "fail-clean", "fixpackages", "force-mirror", "getbinpkg",
+                           "fail-clean", "force-mirror", "force-prefix", "getbinpkg",
                            "installsources", "keeptemp", "keepwork", "fixlafiles", "lmirror",
                            "metadata-transfer", "mirror", "multilib-strict", "news",
                            "noauto", "noclean", "nodoc", "noinfo", "noman",
@@ -107,18 +106,57 @@ SUPPORTED_FEATURES       = frozenset([
                            "strict", "stricter", "suidctl", "test", "test-fail-continue",
                            "unknown-features-filter", "unknown-features-warn",
                            "unmerge-logs", "unmerge-orphans", "userfetch", "userpriv",
-                           "usersandbox", "usersync", "webrsync-gpg"])
+                           "usersandbox", "usersync", "webrsync-gpg", "xattr"])
 
 EAPI                     = 4
 
 HASHING_BLOCKSIZE        = 32768
 MANIFEST1_HASH_FUNCTIONS = ("MD5", "SHA256", "RMD160")
-MANIFEST2_HASH_FUNCTIONS = ("SHA1", "SHA256", "RMD160")
-
 MANIFEST1_REQUIRED_HASH  = "MD5"
-MANIFEST2_REQUIRED_HASH  = "SHA1"
+
+# Future events:
+#
+# After WHIRLPOOL is supported in stable portage:
+# - Add SHA512 and WHIRLPOOL to MANIFEST2_HASH_DEFAULTS.
+# - Remove SHA1 and RMD160 from MANIFEST2_HASH_*.
+# - Set manifest-hashes in gentoo-x86/metadata/layout.conf as follows:
+#     manifest-hashes = SHA256 SHA512 WHIRLPOOL
+#
+# After WHIRLPOOL is supported in stable portage for at least 1 year:
+# - Change MANIFEST2_REQUIRED_HASH to WHIRLPOOL.
+# - Remove SHA256 from MANIFEST2_HASH_*.
+# - Set manifest-hashes in gentoo-x86/metadata/layout.conf as follows:
+#     manifest-hashes = SHA512 WHIRLPOOL
+#
+# After SHA-3 is approved:
+# - Add new hashes to MANIFEST2_HASH_*.
+#
+# After SHA-3 is supported in stable portage:
+# - Set manifest-hashes in gentoo-x86/metadata/layout.conf as follows:
+#     manifest-hashes = SHA3 SHA512 WHIRLPOOL
+#
+# After layout.conf settings correspond to defaults in stable portage:
+# - Remove redundant settings from gentoo-x86/metadata/layout.conf.
+
+MANIFEST2_HASH_FUNCTIONS = ("RMD160", "SHA1", "SHA256", "SHA512", "WHIRLPOOL")
+MANIFEST2_HASH_DEFAULTS = frozenset(["SHA1", "SHA256", "RMD160"])
+MANIFEST2_REQUIRED_HASH  = "SHA256"
 
 MANIFEST2_IDENTIFIERS    = ("AUX", "MISC", "DIST", "EBUILD")
+
+# The EPREFIX for the current install is hardcoded here, but access to this
+# constant should be minimal, in favor of access via the EPREFIX setting of
+# a config instance (since it's possible to construct a config instance with
+# a different EPREFIX). Therefore, the EPREFIX constant should *NOT* be used
+# in the definition of any other constants within this file.
+EPREFIX=""
+
+# pick up EPREFIX from the environment if set
+if "PORTAGE_OVERRIDE_EPREFIX" in os.environ:
+	EPREFIX = os.environ["PORTAGE_OVERRIDE_EPREFIX"]
+	if EPREFIX:
+		EPREFIX = os.path.normpath(EPREFIX)
+
 # ===========================================================================
 # END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT
 # ===========================================================================
@@ -129,7 +167,6 @@ _ENABLE_DYN_LINK_MAP    = True
 _ENABLE_PRESERVE_LIBS   = True
 _ENABLE_REPO_NAME_WARN  = True
 _ENABLE_SET_CONFIG      = True
-_SANDBOX_COMPAT_LEVEL   = "22"
 
 
 # The definitions above will differ between branches, so it's useful to have

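The PORTAGE_OVERRIDE_EPREFIX hook above normalizes whatever the environment supplies, so later comparisons against EPREFIX always see a canonical path. A quick illustration of what os.path.normpath contributes here (hypothetical override value):

import os
import os.path

os.environ["PORTAGE_OVERRIDE_EPREFIX"] = "/usr/local/gentoo/./prefix/"

EPREFIX = ""
if "PORTAGE_OVERRIDE_EPREFIX" in os.environ:
	EPREFIX = os.environ["PORTAGE_OVERRIDE_EPREFIX"]
	if EPREFIX:
		# Collapses "." segments and trailing slashes; an empty
		# override leaves the compiled-in default untouched.
		EPREFIX = os.path.normpath(EPREFIX)

print(EPREFIX)  # -> /usr/local/gentoo/prefix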
diff --git a/portage_with_autodep/pym/portage/const.py.rej b/portage_with_autodep/pym/portage/const.py.rej
new file mode 100644
index 0000000..9fe70f8
--- /dev/null
+++ b/portage_with_autodep/pym/portage/const.py.rej
@@ -0,0 +1,12 @@
+--- pym/portage/const.py
++++ pym/portage/const.py
+@@ -90,7 +92,8 @@
+ SUPPORTED_FEATURES       = frozenset([
+                            "allow-missing-manifests",
+                            "assume-digests", "binpkg-logs", "buildpkg", "buildsyspkg", "candy",
+-                           "ccache", "chflags", "collision-protect", "compress-build-logs",
++                           "ccache", "chflags", "collision-protect", "compress-build-logs", 
++                           "depcheck", "depcheckstrict",
+                            "digest", "distcc", "distcc-pump", "distlocks", "ebuild-locks", "fakeroot",
+                            "fail-clean", "fixpackages", "force-mirror", "getbinpkg",
+                            "installsources", "keeptemp", "keepwork", "fixlafiles", "lmirror",

diff --git a/portage_with_autodep/pym/portage/const.pyo b/portage_with_autodep/pym/portage/const.pyo
new file mode 100644
index 0000000..804420f
Binary files /dev/null and b/portage_with_autodep/pym/portage/const.pyo differ

diff --git a/portage_with_autodep/pym/portage/cvstree.py b/portage_with_autodep/pym/portage/cvstree.py
index 9ba22f3..3680ae4 100644
--- a/portage_with_autodep/pym/portage/cvstree.py
+++ b/portage_with_autodep/pym/portage/cvstree.py
@@ -248,11 +248,13 @@ def getentries(mydir,recursive=0):
 			if entries["files"][mysplit[1]]["revision"][0]=="-":
 				entries["files"][mysplit[1]]["status"]+=["removed"]
 
-	for file in apply_cvsignore_filter(os.listdir(mydir)):
+	for file in os.listdir(mydir):
 		if file=="CVS":
 			continue
 		if os.path.isdir(mydir+"/"+file):
 			if file not in entries["dirs"]:
+				if ignore_list.match(file) is not None:
+					continue
 				entries["dirs"][file]={"dirs":{},"files":{}}
 				# It's normal for a directory to be unlisted in Entries
 				# when checked out without -P (see bug #257660).
@@ -266,6 +268,8 @@ def getentries(mydir,recursive=0):
 				entries["dirs"][file]["status"]=["exists"]
 		elif os.path.isfile(mydir+"/"+file):
 			if file not in entries["files"]:
+				if ignore_list.match(file) is not None:
+					continue
 				entries["files"][file]={"revision":"","date":"","flags":"","tags":""}
 			if "status" in entries["files"][file]:
 				if "exists" not in entries["files"][file]["status"]:
@@ -285,7 +289,9 @@ def getentries(mydir,recursive=0):
 				print("failed to stat",file)
 				print(e)
 				return
-				
+
+		elif ignore_list.match(file) is not None:
+			pass
 		else:
 			print()
 			print("File of unknown type:",mydir+"/"+file)

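The getentries() change drops the wholesale apply_cvsignore_filter() pass: entries already recorded in CVS/Entries are always processed, and the ignore pattern is consulted only for unknown files and directories. A simplified sketch of that control flow (the regex is a stand-in, not cvstree's real ignore_list):

import re

ignore_list = re.compile(r'(\.#.*|.*~|core)$')

def classify(names, known):
	# Known entries bypass the ignore filter, exactly as the new
	# per-branch checks in getentries() arrange; only unknown names
	# are matched against ignore_list.
	for name in names:
		if name == "CVS":
			continue
		if name in known or ignore_list.match(name) is None:
			yield name

print(list(classify(["foo.c", "foo.c~", "bar.c~", "CVS"], {"foo.c~"})))
# -> ['foo.c', 'foo.c~']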
diff --git a/portage_with_autodep/pym/portage/cvstree.pyo b/portage_with_autodep/pym/portage/cvstree.pyo
new file mode 100644
index 0000000..4719daf
Binary files /dev/null and b/portage_with_autodep/pym/portage/cvstree.pyo differ

diff --git a/portage_with_autodep/pym/portage/data.py b/portage_with_autodep/pym/portage/data.py
index c38fa17..c4d967a 100644
--- a/portage_with_autodep/pym/portage/data.py
+++ b/portage_with_autodep/pym/portage/data.py
@@ -1,5 +1,5 @@
 # data.py -- Calculated/Discovered Data Values
-# Copyright 1998-2010 Gentoo Foundation
+# Copyright 1998-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import os, pwd, grp, platform
@@ -58,65 +58,165 @@ def portage_group_warning():
 # If the "wheel" group does not exist then wheelgid falls back to 0.
 # If the "portage" group does not exist then portage_uid falls back to wheelgid.
 
-secpass=0
-
 uid=os.getuid()
 wheelgid=0
 
-if uid==0:
-	secpass=2
 try:
 	wheelgid=grp.getgrnam("wheel")[2]
 except KeyError:
 	pass
 
-# Allow the overriding of the user used for 'userpriv' and 'userfetch'
-_portage_uname = os.environ.get('PORTAGE_USERNAME', 'portage')
-_portage_grpname = os.environ.get('PORTAGE_GRPNAME', 'portage')
+# The portage_uid and portage_gid global constants, and others that
+# depend on them are initialized lazily, in order to allow configuration
+# via make.conf. Eventually, these constants may be deprecated in favor
+# of config attributes, since it's conceivable that multiple
+# configurations with different constants could be used simultaneously.
+_initialized_globals = set()
 
-#Discover the uid and gid of the portage user/group
-try:
-	portage_uid = pwd.getpwnam(_portage_uname)[2]
-	portage_gid = grp.getgrnam(_portage_grpname)[2]
-	if secpass < 1 and portage_gid in os.getgroups():
-		secpass=1
-except KeyError:
-	portage_uid=0
-	portage_gid=0
-	userpriv_groups = [portage_gid]
-	writemsg(colorize("BAD",
-		_("portage: 'portage' user or group missing.")) + "\n", noiselevel=-1)
-	writemsg(_(
-		"         For the defaults, line 1 goes into passwd, "
-		"and 2 into group.\n"), noiselevel=-1)
-	writemsg(colorize("GOOD",
-		"         portage:x:250:250:portage:/var/tmp/portage:/bin/false") \
-		+ "\n", noiselevel=-1)
-	writemsg(colorize("GOOD", "         portage::250:portage") + "\n",
-		noiselevel=-1)
-	portage_group_warning()
-else:
-	userpriv_groups = [portage_gid]
-	if secpass >= 2:
-		class _LazyUserprivGroups(portage.proxy.objectproxy.ObjectProxy):
-			def _get_target(self):
-				global userpriv_groups
-				if userpriv_groups is not self:
-					return userpriv_groups
-				userpriv_groups = _userpriv_groups
-				# Get a list of group IDs for the portage user. Do not use
-				# grp.getgrall() since it is known to trigger spurious
-				# SIGPIPE problems with nss_ldap.
-				mystatus, myoutput = \
-					portage.subprocess_getstatusoutput("id -G %s" % _portage_uname)
-				if mystatus == os.EX_OK:
-					for x in myoutput.split():
-						try:
-							userpriv_groups.append(int(x))
-						except ValueError:
-							pass
-					userpriv_groups[:] = sorted(set(userpriv_groups))
-				return userpriv_groups
-
-		_userpriv_groups = userpriv_groups
-		userpriv_groups = _LazyUserprivGroups()
+def _get_global(k):
+	if k in _initialized_globals:
+		return globals()[k]
+
+	if k in ('portage_gid', 'portage_uid', 'secpass'):
+		global portage_gid, portage_uid, secpass
+		secpass = 0
+		if uid == 0:
+			secpass = 2
+		elif portage.const.EPREFIX:
+			secpass = 2
+		#Discover the uid and gid of the portage user/group
+		try:
+			portage_uid = pwd.getpwnam(_get_global('_portage_username')).pw_uid
+			_portage_grpname = _get_global('_portage_grpname')
+			if platform.python_implementation() == 'PyPy':
+				# Somehow this prevents "TypeError: expected string" errors
+				# from grp.getgrnam() with PyPy 1.7
+				_portage_grpname = str(_portage_grpname)
+			portage_gid = grp.getgrnam(_portage_grpname).gr_gid
+			if secpass < 1 and portage_gid in os.getgroups():
+				secpass = 1
+		except KeyError:
+			portage_uid = 0
+			portage_gid = 0
+			writemsg(colorize("BAD",
+				_("portage: 'portage' user or group missing.")) + "\n", noiselevel=-1)
+			writemsg(_(
+				"         For the defaults, line 1 goes into passwd, "
+				"and 2 into group.\n"), noiselevel=-1)
+			writemsg(colorize("GOOD",
+				"         portage:x:250:250:portage:/var/tmp/portage:/bin/false") \
+				+ "\n", noiselevel=-1)
+			writemsg(colorize("GOOD", "         portage::250:portage") + "\n",
+				noiselevel=-1)
+			portage_group_warning()
+
+		_initialized_globals.add('portage_gid')
+		_initialized_globals.add('portage_uid')
+		_initialized_globals.add('secpass')
+
+		if k == 'portage_gid':
+			return portage_gid
+		elif k == 'portage_uid':
+			return portage_uid
+		elif k == 'secpass':
+			return secpass
+		else:
+			raise AssertionError('unknown name: %s' % k)
+
+	elif k == 'userpriv_groups':
+		v = [portage_gid]
+		if secpass >= 2:
+			# Get a list of group IDs for the portage user. Do not use
+			# grp.getgrall() since it is known to trigger spurious
+			# SIGPIPE problems with nss_ldap.
+			mystatus, myoutput = \
+				portage.subprocess_getstatusoutput("id -G %s" % _portage_username)
+			if mystatus == os.EX_OK:
+				for x in myoutput.split():
+					try:
+						v.append(int(x))
+					except ValueError:
+						pass
+				v = sorted(set(v))
+
+	# Avoid instantiating portage.settings when the desired
+	# variable is set in os.environ.
+	elif k in ('_portage_grpname', '_portage_username'):
+		v = None
+		if k == '_portage_grpname':
+			env_key = 'PORTAGE_GRPNAME'
+		else:
+			env_key = 'PORTAGE_USERNAME'
+
+		if env_key in os.environ:
+			v = os.environ[env_key]
+		elif hasattr(portage, 'settings'):
+			v = portage.settings.get(env_key)
+		elif portage.const.EPREFIX:
+			# For prefix environments, default to the UID and GID of
+			# the top-level EROOT directory. The config class has
+			# equivalent code, but we also need to do it here if
+			# _disable_legacy_globals() has been called.
+			eroot = os.path.join(os.environ.get('ROOT', os.sep),
+				portage.const.EPREFIX.lstrip(os.sep))
+			try:
+				eroot_st = os.stat(eroot)
+			except OSError:
+				pass
+			else:
+				if k == '_portage_grpname':
+					try:
+						grp_struct = grp.getgrgid(eroot_st.st_gid)
+					except KeyError:
+						pass
+					else:
+						v = grp_struct.gr_name
+				else:
+					try:
+						pwd_struct = pwd.getpwuid(eroot_st.st_uid)
+					except KeyError:
+						pass
+					else:
+						v = pwd_struct.pw_name
+
+		if v is None:
+			v = 'portage'
+	else:
+		raise AssertionError('unknown name: %s' % k)
+
+	globals()[k] = v
+	_initialized_globals.add(k)
+	return v
+
+class _GlobalProxy(portage.proxy.objectproxy.ObjectProxy):
+
+	__slots__ = ('_name',)
+
+	def __init__(self, name):
+		portage.proxy.objectproxy.ObjectProxy.__init__(self)
+		object.__setattr__(self, '_name', name)
+
+	def _get_target(self):
+		return _get_global(object.__getattribute__(self, '_name'))
+
+for k in ('portage_gid', 'portage_uid', 'secpass', 'userpriv_groups',
+	'_portage_grpname', '_portage_username'):
+	globals()[k] = _GlobalProxy(k)
+del k
+
+def _init(settings):
+	"""
+	Use config variables like PORTAGE_GRPNAME and PORTAGE_USERNAME to
+	initialize global variables. This allows settings to come from make.conf
+	instead of requiring them to be set in the calling environment.
+	"""
+	if '_portage_grpname' not in _initialized_globals and \
+		'_portage_username' not in _initialized_globals:
+
+		v = settings.get('PORTAGE_GRPNAME', 'portage')
+		globals()['_portage_grpname'] = v
+		_initialized_globals.add('_portage_grpname')
+
+		v = settings.get('PORTAGE_USERNAME', 'portage')
+		globals()['_portage_username'] = v
+		_initialized_globals.add('_portage_username')

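The proxy scheme above defers pwd/grp lookups to first use: each name is bound to an ObjectProxy whose target is computed once by _get_global() and memoized in globals(). The core of the pattern, reduced to a toy (portage's ObjectProxy forwards essentially every special method; this sketch forwards only what the demo needs):

_initialized = set()

def _get_global(name):
	# Compute on first access and memoize into globals(), the way
	# data.py initializes portage_uid/portage_gid/secpass lazily.
	if name in _initialized:
		return globals()[name]
	if name == 'answer':
		value = 42  # stand-in for an expensive pwd/grp lookup
	else:
		raise AssertionError('unknown name: %s' % name)
	globals()[name] = value
	_initialized.add(name)
	return value

class _GlobalProxy(object):
	__slots__ = ('_name',)

	def __init__(self, name):
		object.__setattr__(self, '_name', name)

	def _target(self):
		return _get_global(object.__getattribute__(self, '_name'))

	def __int__(self):
		return int(self._target())

	def __repr__(self):
		return repr(self._target())

answer = _GlobalProxy('answer')
print(int(answer))  # the lookup runs here, not at import time

Note that _init(settings) only seeds _portage_grpname/_portage_username when neither has been resolved yet, so make.conf values win only if the environment and earlier accesses did not already force them.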
diff --git a/portage_with_autodep/pym/portage/data.pyo b/portage_with_autodep/pym/portage/data.pyo
new file mode 100644
index 0000000..7f749e0
Binary files /dev/null and b/portage_with_autodep/pym/portage/data.pyo differ

diff --git a/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py
index 34ed031..b5f6a0b 100644
--- a/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py
+++ b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.py
@@ -1,22 +1,16 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import io
-import shutil
 import signal
-import tempfile
+import sys
 import traceback
 
 import errno
 import fcntl
 import portage
 from portage import os, _unicode_decode
-from portage.const import PORTAGE_PACKAGE_ATOM
-from portage.dep import match_from_list
 import portage.elog.messages
-from portage.elog import _preload_elog_modules
-from portage.util import ensure_dirs
-from _emerge.PollConstants import PollConstants
 from _emerge.SpawnProcess import SpawnProcess
 
 class MergeProcess(SpawnProcess):
@@ -26,7 +20,7 @@ class MergeProcess(SpawnProcess):
 	"""
 
 	__slots__ = ('mycat', 'mypkg', 'settings', 'treetype',
-		'vartree', 'scheduler', 'blockers', 'pkgloc', 'infloc', 'myebuild',
+		'vartree', 'blockers', 'pkgloc', 'infloc', 'myebuild',
 		'mydbapi', 'prev_mtimes', 'unmerge', '_elog_reader_fd', '_elog_reg_id',
 		'_buf', '_elog_keys', '_locked_vdb')
 
@@ -46,8 +40,12 @@ class MergeProcess(SpawnProcess):
 			settings.reset()
 			settings.setcpv(cpv, mydb=self.mydbapi)
 
-		if not self.unmerge:
-			self._handle_self_reinstall()
+		# Inherit stdin by default, so that the pdb SIGUSR1
+		# handler is usable for the subprocess.
+		if self.fd_pipes is None:
+			self.fd_pipes = {}
+		self.fd_pipes.setdefault(0, sys.stdin.fileno())
+
 		super(MergeProcess, self)._start()
 
 	def _lock_vdb(self):
@@ -69,59 +67,9 @@ class MergeProcess(SpawnProcess):
 			self.vartree.dbapi.unlock()
 			self._locked_vdb = False
 
-	def _handle_self_reinstall(self):
-		"""
-		If portage is reinstalling itself, create temporary
-		copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH in order
-		to avoid relying on the new versions which may be
-		incompatible. Register an atexit hook to clean up the
-		temporary directories. Pre-load elog modules here since
-		we won't be able to later if they get unmerged (happens
-		when namespace changes).
-		"""
-
-		settings = self.settings
-		cpv = settings.mycpv
-		reinstall_self = False
-		if self.settings["ROOT"] == "/" and \
-			match_from_list(PORTAGE_PACKAGE_ATOM, [cpv]):
-			inherited = frozenset(self.settings.get('INHERITED', '').split())
-			if not self.vartree.dbapi.cpv_exists(cpv) or \
-				'9999' in cpv or \
-				'git' in inherited or \
-				'git-2' in inherited:
-				reinstall_self = True
-
-		if reinstall_self:
-			# Load lazily referenced portage submodules into memory,
-			# so imports won't fail during portage upgrade/downgrade.
-			_preload_elog_modules(self.settings)
-			portage.proxy.lazyimport._preload_portage_submodules()
-
-			# Make the temp directory inside $PORTAGE_TMPDIR/portage, since
-			# it's common for /tmp and /var/tmp to be mounted with the
-			# "noexec" option (see bug #346899).
-			build_prefix = os.path.join(settings["PORTAGE_TMPDIR"], "portage")
-			ensure_dirs(build_prefix)
-			base_path_tmp = tempfile.mkdtemp(
-				"", "._portage_reinstall_.", build_prefix)
-			portage.process.atexit_register(shutil.rmtree, base_path_tmp)
-			dir_perms = 0o755
-			for subdir in "bin", "pym":
-				var_name = "PORTAGE_%s_PATH" % subdir.upper()
-				var_orig = settings[var_name]
-				var_new = os.path.join(base_path_tmp, subdir)
-				settings[var_name] = var_new
-				settings.backup_changes(var_name)
-				shutil.copytree(var_orig, var_new, symlinks=True)
-				os.chmod(var_new, dir_perms)
-			portage._bin_path = settings['PORTAGE_BIN_PATH']
-			portage._pym_path = settings['PORTAGE_PYM_PATH']
-			os.chmod(base_path_tmp, dir_perms)
-
 	def _elog_output_handler(self, fd, event):
 		output = None
-		if event & PollConstants.POLLIN:
+		if event & self.scheduler.IO_IN:
 			try:
 				output = os.read(fd, self._bufsize)
 			except OSError as e:
@@ -141,6 +89,15 @@ class MergeProcess(SpawnProcess):
 					reporter = getattr(portage.elog.messages, funcname)
 					reporter(msg, phase=phase, key=key, out=out)
 
+		if event & self.scheduler.IO_HUP:
+			self.scheduler.unregister(self._elog_reg_id)
+			self._elog_reg_id = None
+			os.close(self._elog_reader_fd)
+			self._elog_reader_fd = None
+			return False
+
+		return True
+
 	def _spawn(self, args, fd_pipes, **kwargs):
 		"""
 		Fork a subprocess, apply local settings, and call
@@ -178,6 +135,10 @@ class MergeProcess(SpawnProcess):
 
 		pid = os.fork()
 		if pid != 0:
+			if not isinstance(pid, int):
+				raise AssertionError(
+					"fork returned non-integer: %s" % (repr(pid),))
+
 			os.close(elog_writer_fd)
 			self._elog_reader_fd = elog_reader_fd
 			self._buf = ""
@@ -193,7 +154,9 @@ class MergeProcess(SpawnProcess):
 			return [pid]
 
 		os.close(elog_reader_fd)
-		portage.process._setup_pipes(fd_pipes)
+		portage.locks._close_fds()
+		# Disable close_fds since we don't exec (see _setup_pipes docstring).
+		portage.process._setup_pipes(fd_pipes, close_fds=False)
 
 		# Use default signal handlers since the ones inherited
 		# from the parent process are irrelevant here.
@@ -270,7 +233,7 @@ class MergeProcess(SpawnProcess):
 		if self._elog_reg_id is not None:
 			self.scheduler.unregister(self._elog_reg_id)
 			self._elog_reg_id = None
-		if self._elog_reader_fd:
+		if self._elog_reader_fd is not None:
 			os.close(self._elog_reader_fd)
 			self._elog_reader_fd = None
 		if self._elog_keys is not None:

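The rewritten _elog_output_handler follows the usual io-watch contract: return True to stay registered, drain on IO_IN, and on IO_HUP tear the reader down before returning False so the scheduler drops the watch. A self-contained sketch of that contract against a plain pipe (IO_IN/IO_HUP are stand-ins for the scheduler's flags):

import os

IO_IN, IO_HUP = 1, 2  # stand-ins for self.scheduler.IO_IN / IO_HUP

def make_handler(fd, sink):
	def handler(event):
		if event & IO_IN:
			sink.append(os.read(fd, 4096))
		if event & IO_HUP:
			# Writer is gone: close our end and return False so
			# the event loop unregisters this callback.
			os.close(fd)
			return False
		return True
	return handler

r, w = os.pipe()
buf = []
handler = make_handler(r, buf)
os.write(w, b"LOG einfo hello\n")
os.close(w)
print(handler(IO_IN))           # True: keep the watch
print(handler(IO_IN | IO_HUP))  # False: EOF drained, watch dropped
print(b"".join(buf))            # b'LOG einfo hello\n'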
diff --git a/portage_with_autodep/pym/portage/dbapi/_MergeProcess.pyo b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.pyo
new file mode 100644
index 0000000..5839ad8
Binary files /dev/null and b/portage_with_autodep/pym/portage/dbapi/_MergeProcess.pyo differ

diff --git a/portage_with_autodep/pym/portage/dbapi/_SyncfsProcess.py b/portage_with_autodep/pym/portage/dbapi/_SyncfsProcess.py
new file mode 100644
index 0000000..7518214
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/_SyncfsProcess.py
@@ -0,0 +1,53 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.util._ctypes import find_library, LoadLibrary
+from portage.util._async.ForkProcess import ForkProcess
+
+class SyncfsProcess(ForkProcess):
+	"""
+	Isolate ctypes usage in a subprocess, in order to avoid
+	potential problems with stale cached libraries as
+	described in bug #448858, comment #14 (also see
+	http://bugs.python.org/issue14597).
+	"""
+
+	__slots__ = ('paths',)
+
+	@staticmethod
+	def _get_syncfs():
+
+		filename = find_library("c")
+		if filename is not None:
+			library = LoadLibrary(filename)
+			if library is not None:
+				try:
+					return library.syncfs
+				except AttributeError:
+					pass
+
+		return None
+
+	def _run(self):
+
+		syncfs_failed = False
+		syncfs = self._get_syncfs()
+
+		if syncfs is not None:
+			for path in self.paths:
+				try:
+					fd = os.open(path, os.O_RDONLY)
+				except OSError:
+					pass
+				else:
+					try:
+						if syncfs(fd) != 0:
+							# Happens with PyPy (bug #446610)
+							syncfs_failed = True
+					finally:
+						os.close(fd)
+
+		if syncfs is None or syncfs_failed:
+			return 1
+		return os.EX_OK

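_get_syncfs() above resolves syncfs(2) from libc at runtime because older glibc (and non-Linux libc) simply lacks the symbol. The lookup is plain ctypes; roughly (Linux-only, and a None result is a legitimate outcome the caller must handle):

import ctypes
import ctypes.util
import os

def get_syncfs():
	name = ctypes.util.find_library("c")  # e.g. "libc.so.6"
	if name is None:
		return None
	libc = ctypes.CDLL(name, use_errno=True)
	# getattr() with a default returns None instead of raising
	# AttributeError when libc has no syncfs symbol.
	return getattr(libc, "syncfs", None)

syncfs = get_syncfs()
if syncfs is not None:
	fd = os.open("/", os.O_RDONLY)
	try:
		if syncfs(fd) != 0:
			print("syncfs failed, errno=%d" % ctypes.get_errno())
	finally:
		os.close(fd)

Running this in a forked subprocess, as SyncfsProcess does, also sidesteps the stale-library ctypes caching issue cited in the module docstring.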
diff --git a/portage_with_autodep/pym/portage/dbapi/__init__.py b/portage_with_autodep/pym/portage/dbapi/__init__.py
index e386faa..a1c5c56 100644
--- a/portage_with_autodep/pym/portage/dbapi/__init__.py
+++ b/portage_with_autodep/pym/portage/dbapi/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = ["dbapi"]
@@ -11,7 +11,7 @@ portage.proxy.lazyimport.lazyimport(globals(),
 	'portage.dep:match_from_list',
 	'portage.output:colorize',
 	'portage.util:cmp_sort_key,writemsg',
-	'portage.versions:catsplit,catpkgsplit,vercmp',
+	'portage.versions:catsplit,catpkgsplit,vercmp,_pkg_str',
 )
 
 from portage import os
@@ -46,7 +46,12 @@ class dbapi(object):
 	def cp_list(self, cp, use_cache=1):
 		raise NotImplementedError(self)
 
-	def _cpv_sort_ascending(self, cpv_list):
+	@staticmethod
+	def _cmp_cpv(cpv1, cpv2):
+		return vercmp(cpv1.version, cpv2.version)
+
+	@staticmethod
+	def _cpv_sort_ascending(cpv_list):
 		"""
 		Use this to sort self.cp_list() results in ascending
 		order. It sorts in place and returns None.
@@ -55,12 +60,7 @@ class dbapi(object):
 			# If the cpv includes explicit -r0, it has to be preserved
 			# for consistency in findname and aux_get calls, so use a
 			# dict to map strings back to their original values.
-			ver_map = {}
-			for cpv in cpv_list:
-				ver_map[cpv] = '-'.join(catpkgsplit(cpv)[2:])
-			def cmp_cpv(cpv1, cpv2):
-				return vercmp(ver_map[cpv1], ver_map[cpv2])
-			cpv_list.sort(key=cmp_sort_key(cmp_cpv))
+			cpv_list.sort(key=cmp_sort_key(dbapi._cmp_cpv))
 
 	def cpv_all(self):
 		"""Return all CPVs in the db
@@ -155,64 +155,74 @@ class dbapi(object):
 		2) Check enabled/disabled flag states.
 		"""
 
-		iuse_implicit_match = self.settings._iuse_implicit_match
+		aux_keys = ["IUSE", "SLOT", "USE", "repository"]
 		for cpv in cpv_iter:
 			try:
-				iuse, slot, use = self.aux_get(cpv, ["IUSE", "SLOT", "USE"], myrepo=atom.repo)
+				metadata = dict(zip(aux_keys,
+					self.aux_get(cpv, aux_keys, myrepo=atom.repo)))
 			except KeyError:
 				continue
-			iuse = frozenset(x.lstrip('+-') for x in iuse.split())
-			missing_iuse = False
-			for x in atom.unevaluated_atom.use.required:
-				if x not in iuse and not iuse_implicit_match(x):
-					missing_iuse = True
-					break
-			if missing_iuse:
+
+			if not self._match_use(atom, cpv, metadata):
 				continue
-			if not atom.use:
-				pass
-			elif not self._use_mutable:
-				# Use IUSE to validate USE settings for built packages,
-				# in case the package manager that built this package
-				# failed to do that for some reason (or in case of
-				# data corruption).
-				use = frozenset(x for x in use.split() if x in iuse or \
-					iuse_implicit_match(x))
-				missing_enabled = atom.use.missing_enabled.difference(iuse)
-				missing_disabled = atom.use.missing_disabled.difference(iuse)
-
-				if atom.use.enabled:
-					if atom.use.enabled.intersection(missing_disabled):
-						continue
-					need_enabled = atom.use.enabled.difference(use)
+
+			yield cpv
+
+	def _match_use(self, atom, cpv, metadata):
+		iuse_implicit_match = self.settings._iuse_implicit_match
+		iuse = frozenset(x.lstrip('+-') for x in metadata["IUSE"].split())
+
+		for x in atom.unevaluated_atom.use.required:
+			if x not in iuse and not iuse_implicit_match(x):
+				return False
+
+		if atom.use is None:
+			pass
+
+		elif not self._use_mutable:
+			# Use IUSE to validate USE settings for built packages,
+			# in case the package manager that built this package
+			# failed to do that for some reason (or in case of
+			# data corruption).
+			use = frozenset(x for x in metadata["USE"].split()
+				if x in iuse or iuse_implicit_match(x))
+			missing_enabled = atom.use.missing_enabled.difference(iuse)
+			missing_disabled = atom.use.missing_disabled.difference(iuse)
+
+			if atom.use.enabled:
+				if atom.use.enabled.intersection(missing_disabled):
+					return False
+				need_enabled = atom.use.enabled.difference(use)
+				if need_enabled:
+					need_enabled = need_enabled.difference(missing_enabled)
 					if need_enabled:
-						need_enabled = need_enabled.difference(missing_enabled)
-						if need_enabled:
-							continue
+						return False
 
-				if atom.use.disabled:
-					if atom.use.disabled.intersection(missing_enabled):
-						continue
-					need_disabled = atom.use.disabled.intersection(use)
+			if atom.use.disabled:
+				if atom.use.disabled.intersection(missing_enabled):
+					return False
+				need_disabled = atom.use.disabled.intersection(use)
+				if need_disabled:
+					need_disabled = need_disabled.difference(missing_disabled)
 					if need_disabled:
-						need_disabled = need_disabled.difference(missing_disabled)
-						if need_disabled:
-							continue
-			else:
-				# Check masked and forced flags for repoman.
-				mysettings = getattr(self, 'settings', None)
-				if mysettings is not None and not mysettings.local_config:
+						return False
 
-					pkg = "%s:%s" % (cpv, slot)
-					usemask = mysettings._getUseMask(pkg)
-					if usemask.intersection(atom.use.enabled):
-						continue
+		elif not self.settings.local_config:
+			# Check masked and forced flags for repoman.
+			if hasattr(cpv, 'slot'):
+				pkg = cpv
+			else:
+				pkg = _pkg_str(cpv, slot=metadata["SLOT"],
+					repo=metadata.get("repository"))
+			usemask = self.settings._getUseMask(pkg)
+			if usemask.intersection(atom.use.enabled):
+				return False
 
-					useforce = mysettings._getUseForce(pkg).difference(usemask)
-					if useforce.intersection(atom.use.disabled):
-						continue
+			useforce = self.settings._getUseForce(pkg).difference(usemask)
+			if useforce.intersection(atom.use.disabled):
+				return False
 
-			yield cpv
+		return True
 
 	def invalidentry(self, mypath):
 		if '/-MERGING-' in mypath:

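_cpv_sort_ascending() can become a staticmethod above because cp_list() now hands back _pkg_str objects that carry their parsed version, so the comparator reads cpv.version instead of re-splitting every string. The idiom, with a toy version compare in place of portage.versions.vercmp:

from functools import cmp_to_key

class pkg_str(str):
	# Minimal stand-in for portage.versions._pkg_str: a string
	# that caches its parsed version as an attribute.
	def __new__(cls, s, version):
		self = str.__new__(cls, s)
		self.version = version
		return self

def vercmp(a, b):
	# Toy comparison for dotted integer versions only.
	pa = [int(x) for x in a.split(".")]
	pb = [int(x) for x in b.split(".")]
	return (pa > pb) - (pa < pb)

def cmp_cpv(c1, c2):
	return vercmp(c1.version, c2.version)

cpvs = [pkg_str("app-misc/foo-1.10", "1.10"),
	pkg_str("app-misc/foo-1.2", "1.2")]
cpvs.sort(key=cmp_to_key(cmp_cpv))
print(cpvs)  # ['app-misc/foo-1.2', 'app-misc/foo-1.10']

A plain string sort would put 1.10 before 1.2, which is why the cmp_sort_key/vercmp pairing is kept.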
diff --git a/portage_with_autodep/pym/portage/dbapi/__init__.pyo b/portage_with_autodep/pym/portage/dbapi/__init__.pyo
new file mode 100644
index 0000000..e7b494d
Binary files /dev/null and b/portage_with_autodep/pym/portage/dbapi/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py
index 6d6a27d..d379b4c 100644
--- a/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py
+++ b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.py
@@ -63,7 +63,8 @@ def expand_new_virt(vardb, atom):
 
 		success, atoms = portage.dep_check(rdepend,
 			None, vardb.settings, myuse=valid_use,
-			myroot=vardb.root, trees={vardb.root:{"porttree":vardb.vartree,
+			myroot=vardb.settings['EROOT'],
+			trees={vardb.settings['EROOT']:{"porttree":vardb.vartree,
 			"vartree":vardb.vartree}})
 
 		if success:

diff --git a/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.pyo b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.pyo
new file mode 100644
index 0000000..6c23a7e
Binary files /dev/null and b/portage_with_autodep/pym/portage/dbapi/_expand_new_virt.pyo differ

diff --git a/portage_with_autodep/pym/portage/dbapi/_similar_name_search.py b/portage_with_autodep/pym/portage/dbapi/_similar_name_search.py
new file mode 100644
index 0000000..b6e4a1f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dbapi/_similar_name_search.py
@@ -0,0 +1,57 @@
+# Copyright 2011-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import difflib
+
+from portage.versions import catsplit
+
+def similar_name_search(dbs, atom):
+
+	cp_lower = atom.cp.lower()
+	cat, pkg = catsplit(cp_lower)
+	if cat == "null":
+		cat = None
+
+	all_cp = set()
+	for db in dbs:
+		all_cp.update(db.cp_all())
+
+	# discard dir containing no ebuilds
+	all_cp.discard(atom.cp)
+
+	orig_cp_map = {}
+	for cp_orig in all_cp:
+		orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
+	all_cp = set(orig_cp_map)
+
+	if cat:
+		matches = difflib.get_close_matches(cp_lower, all_cp)
+	else:
+		pkg_to_cp = {}
+		for other_cp in list(all_cp):
+			other_pkg = catsplit(other_cp)[1]
+			if other_pkg == pkg:
+				# Check for non-identical package that
+				# differs only by upper/lower case.
+				identical = True
+				for cp_orig in orig_cp_map[other_cp]:
+					if catsplit(cp_orig)[1] != \
+						catsplit(atom.cp)[1]:
+						identical = False
+						break
+				if identical:
+					# discard dir containing no ebuilds
+					all_cp.discard(other_cp)
+					continue
+			pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
+
+		pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
+		matches = []
+		for pkg_match in pkg_matches:
+			matches.extend(pkg_to_cp[pkg_match])
+
+	matches_orig_case = []
+	for cp in matches:
+		matches_orig_case.extend(orig_cp_map[cp])
+
+	return matches_orig_case

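similar_name_search() leans on difflib's ratio-based matching once the candidate set has been lowercased; difflib.get_close_matches() does the heavy lifting (default cutoff 0.6, best matches first):

import difflib

candidates = ["app-editors/vim", "app-editors/gvim", "dev-vcs/git"]
print(difflib.get_close_matches("app-editors/vmi", candidates))
# -> ['app-editors/vim', 'app-editors/gvim'] (closest first)

The orig_cp_map indirection in the module exists only to map the lowercased matches back to their original-case category/package names.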
diff --git a/portage_with_autodep/pym/portage/dbapi/bintree.py b/portage_with_autodep/pym/portage/dbapi/bintree.py
index 62fc623..a8027ee 100644
--- a/portage_with_autodep/pym/portage/dbapi/bintree.py
+++ b/portage_with_autodep/pym/portage/dbapi/bintree.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = ["bindbapi", "binarytree"]
@@ -11,19 +11,20 @@ portage.proxy.lazyimport.lazyimport(globals(),
 	'portage.output:EOutput,colorize',
 	'portage.locks:lockfile,unlockfile',
 	'portage.package.ebuild.doebuild:_vdb_use_conditional_atoms',
-	'portage.package.ebuild.fetch:_check_distfile',
+	'portage.package.ebuild.fetch:_check_distfile,_hide_url_passwd',
 	'portage.update:update_dbentries',
 	'portage.util:atomic_ofstream,ensure_dirs,normalize_path,' + \
 		'writemsg,writemsg_stdout',
 	'portage.util.listdir:listdir',
-	'portage.versions:best,catpkgsplit,catsplit',
+	'portage.util._urlopen:urlopen@_urlopen',
+	'portage.versions:best,catpkgsplit,catsplit,_pkg_str',
 )
 
 from portage.cache.mappings import slot_dict_class
 from portage.const import CACHE_PATH
 from portage.dbapi.virtual import fakedbapi
 from portage.dep import Atom, use_reduce, paren_enclose
-from portage.exception import AlarmSignal, InvalidPackageName, \
+from portage.exception import AlarmSignal, InvalidData, InvalidPackageName, \
 	PermissionDenied, PortageException
 from portage.localization import _
 from portage import _movefile
@@ -35,19 +36,17 @@ from portage import _unicode_encode
 import codecs
 import errno
 import io
-import re
 import stat
 import subprocess
 import sys
 import tempfile
 import textwrap
+import warnings
 from itertools import chain
 try:
 	from urllib.parse import urlparse
-	from urllib.request import urlopen as urllib_request_urlopen
 except ImportError:
 	from urlparse import urlparse
-	from urllib import urlopen as urllib_request_urlopen
 
 if sys.hexversion >= 0x3000000:
 	basestring = str
@@ -67,7 +66,7 @@ class bindbapi(fakedbapi):
 			["BUILD_TIME", "CHOST", "DEPEND", "EAPI", "IUSE", "KEYWORDS",
 			"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE",
 			"RDEPEND", "repository", "RESTRICT", "SLOT", "USE", "DEFINED_PHASES",
-			"REQUIRED_USE"])
+			])
 		self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
 		self._aux_cache = {}
 
@@ -177,6 +176,34 @@ class bindbapi(fakedbapi):
 			self.bintree.populate()
 		return fakedbapi.cpv_all(self)
 
+	def getfetchsizes(self, pkg):
+		"""
+		This will raise MissingSignature if SIZE signature is not available,
+		or InvalidSignature if SIZE signature is invalid.
+		"""
+
+		if not self.bintree.populated:
+			self.bintree.populate()
+
+		pkg = getattr(pkg, 'cpv', pkg)
+
+		filesdict = {}
+		if not self.bintree.isremote(pkg):
+			pass
+		else:
+			metadata = self.bintree._remotepkgs[pkg]
+			try:
+				size = int(metadata["SIZE"])
+			except KeyError:
+				raise portage.exception.MissingSignature("SIZE")
+			except ValueError:
+				raise portage.exception.InvalidSignature(
+					"SIZE: %s" % metadata["SIZE"])
+			else:
+				filesdict[os.path.basename(self.bintree.getname(pkg))] = size
+
+		return filesdict
+
 def _pkgindex_cpv_map_latest_build(pkgindex):
 	"""
 	Given a PackageIndex instance, create a dict of cpv -> metadata map.
@@ -185,13 +212,20 @@ def _pkgindex_cpv_map_latest_build(pkgindex):
 	@param pkgindex: A PackageIndex instance.
 	@type pkgindex: PackageIndex
 	@rtype: dict
-	@returns: a dict containing entry for the give cpv.
+	@return: a dict containing an entry for the given cpv.
 	"""
 	cpv_map = {}
 
 	for d in pkgindex.packages:
 		cpv = d["CPV"]
 
+		try:
+			cpv = _pkg_str(cpv)
+		except InvalidData:
+			writemsg(_("!!! Invalid remote binary package: %s\n") % cpv,
+				noiselevel=-1)
+			continue
+
 		btime = d.get('BUILD_TIME', '')
 		try:
 			btime = int(btime)
@@ -208,16 +242,35 @@ def _pkgindex_cpv_map_latest_build(pkgindex):
 			if other_btime and (not btime or other_btime > btime):
 				continue
 
-		cpv_map[cpv] = d
+		cpv_map[_pkg_str(cpv)] = d
 
 	return cpv_map
 
 class binarytree(object):
 	"this tree scans for a list of all packages available in PKGDIR"
-	def __init__(self, root, pkgdir, virtual=None, settings=None):
+	def __init__(self, _unused=None, pkgdir=None,
+		virtual=DeprecationWarning, settings=None):
+
+		if pkgdir is None:
+			raise TypeError("pkgdir parameter is required")
+
+		if settings is None:
+			raise TypeError("settings parameter is required")
+
+		if _unused is not None and _unused != settings['ROOT']:
+			warnings.warn("The root parameter of the "
+				"portage.dbapi.bintree.binarytree"
+				" constructor is now unused. Use "
+				"settings['ROOT'] instead.",
+				DeprecationWarning, stacklevel=2)
+
+		if virtual is not DeprecationWarning:
+			warnings.warn("The 'virtual' parameter of the "
+				"portage.dbapi.bintree.binarytree"
+				" constructor is unused",
+				DeprecationWarning, stacklevel=2)
+
 		if True:
-			self.root = root
-			#self.pkgdir=settings["PKGDIR"]
 			self.pkgdir = normalize_path(pkgdir)
 			self.dbapi = bindbapi(self, settings=settings)
 			self.update_ents = self.dbapi.update_ents
@@ -242,7 +295,7 @@ class binarytree(object):
 				["BUILD_TIME", "CHOST", "DEPEND", "DESCRIPTION", "EAPI",
 				"IUSE", "KEYWORDS", "LICENSE", "PDEPEND", "PROPERTIES",
 				"PROVIDE", "RDEPEND", "repository", "SLOT", "USE", "DEFINED_PHASES",
-				"REQUIRED_USE", "BASE_URI"]
+				"BASE_URI"]
 			self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
 			self._pkgindex_use_evaluated_keys = \
 				("LICENSE", "RDEPEND", "DEPEND",
@@ -268,7 +321,6 @@ class binarytree(object):
 				"SLOT"    : "0",
 				"USE"     : "",
 				"DEFINED_PHASES" : "",
-				"REQUIRED_USE" : ""
 			}
 			self._pkgindex_inherited_keys = ["CHOST", "repository"]
 
@@ -302,6 +354,15 @@ class binarytree(object):
 				chain(*self._pkgindex_translated_keys)
 			))
 
+	@property
+	def root(self):
+		warnings.warn("The root attribute of "
+			"portage.dbapi.bintree.binarytree"
+			" is deprecated. Use "
+			"settings['ROOT'] instead.",
+			DeprecationWarning, stacklevel=3)
+		return self.settings['ROOT']
+
 	def move_ent(self, mylist, repo_match=None):
 		if not self.populated:
 			self.populate()
@@ -603,6 +664,7 @@ class binarytree(object):
 							if mycpv in pkg_paths:
 								# discard duplicates (All/ is preferred)
 								continue
+							mycpv = _pkg_str(mycpv)
 							pkg_paths[mycpv] = mypath
 							# update the path if the package has been moved
 							oldpath = d.get("PATH")
@@ -678,6 +740,7 @@ class binarytree(object):
 							(mycpv, self.settings["PORTAGE_CONFIGROOT"]),
 							noiselevel=-1)
 						continue
+					mycpv = _pkg_str(mycpv)
 					pkg_paths[mycpv] = mypath
 					self.dbapi.cpv_inject(mycpv)
 					update_pkgindex = True
@@ -787,7 +850,7 @@ class binarytree(object):
 				# slash, so join manually...
 				url = base_url.rstrip("/") + "/Packages"
 				try:
-					f = urllib_request_urlopen(url)
+					f = _urlopen(url)
 				except IOError:
 					path = parsed_url.path.rstrip("/") + "/Packages"
 
@@ -859,7 +922,7 @@ class binarytree(object):
 							noiselevel=-1)
 			except EnvironmentError as e:
 				writemsg(_("\n\n!!! Error fetching binhost package" \
-					" info from '%s'\n") % base_url)
+					" info from '%s'\n") % _hide_url_passwd(base_url))
 				writemsg("!!! %s\n\n" % str(e))
 				del e
 				pkgindex = None
@@ -935,7 +998,7 @@ class binarytree(object):
 			writemsg_stdout("\n")
 			writemsg_stdout(
 				colorize("GOOD", _("Fetching bininfo from ")) + \
-				re.sub(r'//(.+):.+@(.+)/', r'//\1:*password*@\2/', base_url) + "\n")
+				_hide_url_passwd(base_url) + "\n")
 			remotepkgs = portage.getbinpkg.dir_get_metadata(
 				base_url, chunk_size=chunk_size)
 
@@ -947,7 +1010,12 @@ class binarytree(object):
 						noiselevel=-1)
 					continue
 				mycat = mycat.strip()
-				fullpkg = mycat+"/"+mypkg[:-5]
+				try:
+					fullpkg = _pkg_str(mycat+"/"+mypkg[:-5])
+				except InvalidData:
+					writemsg(_("!!! Invalid remote binary package: %s\n") % mypkg,
+						noiselevel=-1)
+					continue
 
 				if fullpkg in metadata:
 					# When using this old protocol, comparison with the remote
@@ -1101,7 +1169,7 @@ class binarytree(object):
 		Performs checksums and evaluates USE flag conditionals.
 		Raises InvalidDependString if necessary.
 		@rtype: dict
-		@returns: a dict containing entry for the give cpv.
+	@return: a dict containing an entry for the given cpv.
 		"""
 
 		pkg_path = self.getname(cpv)
@@ -1307,7 +1375,7 @@ class binarytree(object):
 		Verify digests for the given package and raise DigestException
 		if verification fails.
 		@rtype: bool
-		@returns: True if digests could be located, False otherwise.
+		@return: True if digests could be located, False otherwise.
 		"""
 		cpv = pkg
 		if not isinstance(cpv, basestring):

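Both binhost code paths above now share one password-masking helper instead of the inline re.sub(); the masking itself stays a one-liner. A sketch of the idea (the real _hide_url_passwd lives in portage.package.ebuild.fetch):

import re

def hide_url_passwd(url):
	# Mask the password of user:pass@host URLs before they are
	# echoed into logs or error messages.
	return re.sub(r'//(\S+):\S+@', r'//\1:*password*@', url)

print(hide_url_passwd("ftp://user:s3cret@binhost.example.org/packages/"))
# -> ftp://user:*password*@binhost.example.org/packages/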
diff --git a/portage_with_autodep/pym/portage/dbapi/bintree.pyo b/portage_with_autodep/pym/portage/dbapi/bintree.pyo
new file mode 100644
index 0000000..f99f377
Binary files /dev/null and b/portage_with_autodep/pym/portage/dbapi/bintree.pyo differ

diff --git a/portage_with_autodep/pym/portage/dbapi/cpv_expand.pyo b/portage_with_autodep/pym/portage/dbapi/cpv_expand.pyo
new file mode 100644
index 0000000..cf1a428
Binary files /dev/null and b/portage_with_autodep/pym/portage/dbapi/cpv_expand.pyo differ

diff --git a/portage_with_autodep/pym/portage/dbapi/dep_expand.pyo b/portage_with_autodep/pym/portage/dbapi/dep_expand.pyo
new file mode 100644
index 0000000..b323f5b
Binary files /dev/null and b/portage_with_autodep/pym/portage/dbapi/dep_expand.pyo differ

diff --git a/portage_with_autodep/pym/portage/dbapi/porttree.py b/portage_with_autodep/pym/portage/dbapi/porttree.py
index ecf275c..c5ee770 100644
--- a/portage_with_autodep/pym/portage/dbapi/porttree.py
+++ b/portage_with_autodep/pym/portage/dbapi/porttree.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = [
@@ -14,20 +14,19 @@ portage.proxy.lazyimport.lazyimport(globals(),
 	'portage.package.ebuild.doebuild:doebuild',
 	'portage.util:ensure_dirs,shlex_split,writemsg,writemsg_level',
 	'portage.util.listdir:listdir',
-	'portage.versions:best,catpkgsplit,_pkgsplit@pkgsplit,ver_regexp',
+	'portage.versions:best,catpkgsplit,_pkgsplit@pkgsplit,ver_regexp,_pkg_str',
 )
 
-from portage.cache import metadata_overlay, volatile
+from portage.cache import volatile
 from portage.cache.cache_errors import CacheError
 from portage.cache.mappings import Mapping
 from portage.dbapi import dbapi
 from portage.exception import PortageException, \
 	FileNotFound, InvalidAtom, InvalidDependString, InvalidPackageName
 from portage.localization import _
-from portage.manifest import Manifest
 
-from portage import eclass_cache, auxdbkeys, \
-	eapi_is_supported, dep_check, \
+from portage import eclass_cache, \
+	eapi_is_supported, \
 	_eapi_is_deprecated
 from portage import os
 from portage import _encodings
@@ -37,8 +36,6 @@ from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
 from _emerge.PollScheduler import PollScheduler
 
 import os as _os
-import io
-import stat
 import sys
 import traceback
 import warnings
@@ -47,15 +44,6 @@ if sys.hexversion >= 0x3000000:
 	basestring = str
 	long = int
 
-class _repo_info(object):
-	__slots__ = ('name', 'path', 'eclass_db', 'portdir', 'portdir_overlay')
-	def __init__(self, name, path, eclass_db):
-		self.name = name
-		self.path = path
-		self.eclass_db = eclass_db
-		self.portdir = eclass_db.porttrees[0]
-		self.portdir_overlay = ' '.join(eclass_db.porttrees[1:])
-
 class portdbapi(dbapi):
 	"""this tree will scan a portage directory located at root (passed to init)"""
 	portdbapi_instances = []
@@ -69,6 +57,13 @@ class portdbapi(dbapi):
 	def porttree_root(self):
 		return self.settings.repositories.mainRepoLocation()
 
+	@property
+	def eclassdb(self):
+		main_repo = self.repositories.mainRepo()
+		if main_repo is None:
+			return None
+		return main_repo.eclass_db
+
 	def __init__(self, _unused_param=None, mysettings=None):
 		"""
 		@param _unused_param: deprecated, use mysettings['PORTDIR'] instead
@@ -100,6 +95,7 @@ class portdbapi(dbapi):
 		# this purpose because doebuild makes many changes to the config
 		# instance that is passed in.
 		self.doebuild_settings = config(clone=self.settings)
+		self._scheduler = PollScheduler().sched_iface
 		self.depcachedir = os.path.realpath(self.settings.depcachedir)
 		
 		if os.environ.get("SANDBOX_ON") == "1":
@@ -112,7 +108,6 @@ class portdbapi(dbapi):
 					":".join(filter(None, sandbox_write))
 
 		self.porttrees = list(self.settings.repositories.repoLocationList())
-		self.eclassdb = eclass_cache.cache(self.settings.repositories.mainRepoLocation())
 
 		# This is used as sanity check for aux_get(). If there is no
 		# root eclass dir, we assume that PORTDIR is invalid or
@@ -121,86 +116,74 @@ class portdbapi(dbapi):
 		self._have_root_eclass_dir = os.path.isdir(
 			os.path.join(self.settings.repositories.mainRepoLocation(), "eclass"))
 
-		self.metadbmodule = self.settings.load_best_module("portdbapi.metadbmodule")
-
 		#if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
 		self.xcache = {}
 		self.frozen = 0
 
-		#Create eclass dbs
-		self._repo_info = {}
-		eclass_dbs = {self.settings.repositories.mainRepoLocation() : self.eclassdb}
-		for repo in self.repositories:
-			if repo.location in self._repo_info:
-				continue
-
-			eclass_db = None
-			for eclass_location in repo.eclass_locations:
-				tree_db = eclass_dbs.get(eclass_location)
-				if tree_db is None:
-					tree_db = eclass_cache.cache(eclass_location)
-					eclass_dbs[eclass_location] = tree_db
-				if eclass_db is None:
-					eclass_db = tree_db.copy()
-				else:
-					eclass_db.append(tree_db)
-
-			self._repo_info[repo.location] = _repo_info(repo.name, repo.location, eclass_db)
-
 		#Keep a list of repo names, sorted by priority (highest priority first).
 		self._ordered_repo_name_list = tuple(reversed(self.repositories.prepos_order))
 
 		self.auxdbmodule = self.settings.load_best_module("portdbapi.auxdbmodule")
 		self.auxdb = {}
 		self._pregen_auxdb = {}
+		# If the current user doesn't have depcachedir write permission,
+		# then the depcachedir cache is kept here read-only access.
+		self._ro_auxdb = {}
 		self._init_cache_dirs()
-		depcachedir_w_ok = os.access(self.depcachedir, os.W_OK)
-		cache_kwargs = {
-			'gid'     : portage_gid,
-			'perms'   : 0o664
-		}
-
-		# XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
-		# ~harring
-		filtered_auxdbkeys = [x for x in auxdbkeys if not x.startswith("UNUSED_0")]
-		filtered_auxdbkeys.sort()
+		try:
+			depcachedir_st = os.stat(self.depcachedir)
+			depcachedir_w_ok = os.access(self.depcachedir, os.W_OK)
+		except OSError:
+			depcachedir_st = None
+			depcachedir_w_ok = False
+
+		cache_kwargs = {}
+
+		depcachedir_unshared = False
+		if portage.data.secpass < 1 and \
+			depcachedir_w_ok and \
+			depcachedir_st is not None and \
+			os.getuid() == depcachedir_st.st_uid and \
+			os.getgid() == depcachedir_st.st_gid:
+			# If this user owns depcachedir and is not in the
+			# portage group, then don't bother to set permissions
+			# on cache entries. This makes it possible to run
+			# egencache without any need to be a member of the
+			# portage group.
+			depcachedir_unshared = True
+		else:
+			cache_kwargs.update({
+				'gid'     : portage_gid,
+				'perms'   : 0o664
+			})
+
 		# If secpass < 1, we don't want to write to the cache
 		# since then we won't be able to apply group permissions
 		# to the cache entries/directories.
-		if secpass < 1 or not depcachedir_w_ok:
+		if (secpass < 1 and not depcachedir_unshared) or not depcachedir_w_ok:
 			for x in self.porttrees:
+				self.auxdb[x] = volatile.database(
+					self.depcachedir, x, self._known_keys,
+					**cache_kwargs)
 				try:
-					db_ro = self.auxdbmodule(self.depcachedir, x,
-						filtered_auxdbkeys, readonly=True, **cache_kwargs)
+					self._ro_auxdb[x] = self.auxdbmodule(self.depcachedir, x,
+						self._known_keys, readonly=True, **cache_kwargs)
 				except CacheError:
-					self.auxdb[x] = volatile.database(
-						self.depcachedir, x, filtered_auxdbkeys,
-						**cache_kwargs)
-				else:
-					self.auxdb[x] = metadata_overlay.database(
-						self.depcachedir, x, filtered_auxdbkeys,
-						db_rw=volatile.database, db_ro=db_ro,
-						**cache_kwargs)
+					pass
 		else:
 			for x in self.porttrees:
 				if x in self.auxdb:
 					continue
 				# location, label, auxdbkeys
 				self.auxdb[x] = self.auxdbmodule(
-					self.depcachedir, x, filtered_auxdbkeys, **cache_kwargs)
-				if self.auxdbmodule is metadata_overlay.database:
-					self.auxdb[x].db_ro.ec = self._repo_info[x].eclass_db
+					self.depcachedir, x, self._known_keys, **cache_kwargs)
 		if "metadata-transfer" not in self.settings.features:
 			for x in self.porttrees:
 				if x in self._pregen_auxdb:
 					continue
-				if os.path.isdir(os.path.join(x, "metadata", "cache")):
-					self._pregen_auxdb[x] = self.metadbmodule(
-						x, "metadata/cache", filtered_auxdbkeys, readonly=True)
-					try:
-						self._pregen_auxdb[x].ec = self._repo_info[x].eclass_db
-					except AttributeError:
-						pass
+				cache = self._create_pregen_cache(x)
+				if cache is not None:
+					self._pregen_auxdb[x] = cache
 		# Selectively cache metadata in order to optimize dep matching.
 		self._aux_cache_keys = set(
 			["DEPEND", "EAPI", "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
@@ -210,18 +193,28 @@ class portdbapi(dbapi):
 		self._aux_cache = {}
 		self._broken_ebuilds = set()
 
+	def _create_pregen_cache(self, tree):
+		conf = self.repositories.get_repo_for_location(tree)
+		cache = conf.get_pregenerated_cache(
+			self._known_keys, readonly=True)
+		if cache is not None:
+			try:
+				cache.ec = self.repositories.get_repo_for_location(tree).eclass_db
+			except AttributeError:
+				pass
+		return cache
+
 	def _init_cache_dirs(self):
 		"""Create /var/cache/edb/dep and adjust permissions for the portage
 		group."""
 
 		dirmode  = 0o2070
-		filemode =   0o60
 		modemask =    0o2
 
 		try:
 			ensure_dirs(self.depcachedir, gid=portage_gid,
 				mode=dirmode, mask=modemask)
-		except PortageException as e:
+		except PortageException:
 			pass
 
 	def close_caches(self):
@@ -260,7 +253,7 @@ class portdbapi(dbapi):
 		@param canonical_repo_path: the canonical path of a repository, as
 			resolved by os.path.realpath()
 		@type canonical_repo_path: String
-		@returns: The repo_name for the corresponding repository, or None
+		@return: The repo_name for the corresponding repository, or None
 			if the path does not correspond a known repository
 		@rtype: String or None
 		"""
@@ -332,63 +325,33 @@ class portdbapi(dbapi):
 				return (filename, x)
 		return (None, 0)
 
-	def _metadata_process(self, cpv, ebuild_path, repo_path):
-		"""
-		Create an EbuildMetadataPhase instance to generate metadata for the
-		give ebuild.
-		@rtype: EbuildMetadataPhase
-		@returns: A new EbuildMetadataPhase instance, or None if the
-			metadata cache is already valid.
-		"""
-		metadata, st, emtime = self._pull_valid_cache(cpv, ebuild_path, repo_path)
-		if metadata is not None:
-			return None
-
-		process = EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
-			ebuild_mtime=emtime, metadata_callback=self._metadata_callback,
-			portdb=self, repo_path=repo_path, settings=self.doebuild_settings)
-		return process
-
-	def _metadata_callback(self, cpv, ebuild_path, repo_path, metadata, mtime):
-
-		i = metadata
-		if hasattr(metadata, "items"):
-			i = iter(metadata.items())
-		metadata = dict(i)
-
-		if metadata.get("INHERITED", False):
-			metadata["_eclasses_"] = self._repo_info[repo_path
-				].eclass_db.get_eclass_data(metadata["INHERITED"].split())
-		else:
-			metadata["_eclasses_"] = {}
-
-		metadata.pop("INHERITED", None)
-		metadata["_mtime_"] = mtime
-
-		eapi = metadata.get("EAPI")
-		if not eapi or not eapi.strip():
-			eapi = "0"
-			metadata["EAPI"] = eapi
-		if not eapi_is_supported(eapi):
-			for k in set(metadata).difference(("_mtime_", "_eclasses_")):
-				metadata[k] = ""
-			metadata["EAPI"] = "-" + eapi.lstrip("-")
+	def _write_cache(self, cpv, repo_path, metadata, ebuild_hash):
 
 		try:
-			self.auxdb[repo_path][cpv] = metadata
+			cache = self.auxdb[repo_path]
+			chf = cache.validation_chf
+			metadata['_%s_' % chf] = getattr(ebuild_hash, chf)
 		except CacheError:
 			# Normally this shouldn't happen, so we'll show
 			# a traceback for debugging purposes.
 			traceback.print_exc()
-		return metadata
+			cache = None
+
+		if cache is not None:
+			try:
+				cache[cpv] = metadata
+			except CacheError:
+				# Normally this shouldn't happen, so we'll show
+				# a traceback for debugging purposes.
+				traceback.print_exc()
 
 	def _pull_valid_cache(self, cpv, ebuild_path, repo_path):
 		try:
-			# Don't use unicode-wrapped os module, for better performance.
-			st = _os.stat(_unicode_encode(ebuild_path,
-				encoding=_encodings['fs'], errors='strict'))
-			emtime = st[stat.ST_MTIME]
-		except OSError:
+			ebuild_hash = eclass_cache.hashed_path(ebuild_path)
+			# snag mtime since we use it later, and to trigger stat failure
+			# if it doesn't exist
+			ebuild_hash.mtime
+		except FileNotFound:
 			writemsg(_("!!! aux_get(): ebuild for " \
 				"'%s' does not exist at:\n") % (cpv,), noiselevel=-1)
 			writemsg("!!!            %s\n" % ebuild_path, noiselevel=-1)
@@ -401,39 +364,39 @@ class portdbapi(dbapi):
 		pregen_auxdb = self._pregen_auxdb.get(repo_path)
 		if pregen_auxdb is not None:
 			auxdbs.append(pregen_auxdb)
+		ro_auxdb = self._ro_auxdb.get(repo_path)
+		if ro_auxdb is not None:
+			auxdbs.append(ro_auxdb)
 		auxdbs.append(self.auxdb[repo_path])
-		eclass_db = self._repo_info[repo_path].eclass_db
+		eclass_db = self.repositories.get_repo_for_location(repo_path).eclass_db
 
-		doregen = True
 		for auxdb in auxdbs:
 			try:
 				metadata = auxdb[cpv]
 			except KeyError:
-				pass
+				continue
 			except CacheError:
-				if auxdb is not pregen_auxdb:
+				if not auxdb.readonly:
 					try:
 						del auxdb[cpv]
-					except KeyError:
-						pass
-					except CacheError:
+					except (KeyError, CacheError):
 						pass
-			else:
-				eapi = metadata.get('EAPI', '').strip()
-				if not eapi:
-					eapi = '0'
-				if not (eapi[:1] == '-' and eapi_is_supported(eapi[1:])) and \
-					emtime == metadata['_mtime_'] and \
-					eclass_db.is_eclass_data_valid(metadata['_eclasses_']):
-					doregen = False
-
-			if not doregen:
+				continue
+			eapi = metadata.get('EAPI', '').strip()
+			if not eapi:
+				eapi = '0'
+				metadata['EAPI'] = eapi
+			if not eapi_is_supported(eapi):
+				# Since we're supposed to be able to efficiently obtain the
+				# EAPI from _parse_eapi_ebuild_head, we disregard cache entries
+				# for unsupported EAPIs.
+				continue
+			if auxdb.validate_entry(metadata, ebuild_hash, eclass_db):
 				break
-
-		if doregen:
+		else:
 			metadata = None
 
-		return (metadata, st, emtime)
+		return (metadata, ebuild_hash)
 
 	def aux_get(self, mycpv, mylist, mytree=None, myrepo=None):
 		"stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
@@ -445,15 +408,22 @@ class portdbapi(dbapi):
 			if mytree is None:
 				raise KeyError(myrepo)
 
-		if not mytree:
+		if mytree is not None and len(self.porttrees) == 1 \
+			and mytree == self.porttrees[0]:
+			# mytree matches our only tree, so it's safe to
+			# ignore mytree and cache the result
+			mytree = None
+			myrepo = None
+
+		if mytree is None:
 			cache_me = True
-		if not mytree and not self._known_keys.intersection(
+		if mytree is None and not self._known_keys.intersection(
 			mylist).difference(self._aux_cache_keys):
 			aux_cache = self._aux_cache.get(mycpv)
 			if aux_cache is not None:
 				return [aux_cache.get(x, "") for x in mylist]
 			cache_me = True
-		global auxdbkeys, auxdbkeylen
+
 		try:
 			cat, pkg = mycpv.split("/", 1)
 		except ValueError:
@@ -467,60 +437,35 @@ class portdbapi(dbapi):
 				_("ebuild not found for '%s'") % mycpv, noiselevel=1)
 			raise KeyError(mycpv)
 
-		mydata, st, emtime = self._pull_valid_cache(mycpv, myebuild, mylocation)
+		mydata, ebuild_hash = self._pull_valid_cache(mycpv, myebuild, mylocation)
 		doregen = mydata is None
 
 		if doregen:
 			if myebuild in self._broken_ebuilds:
 				raise KeyError(mycpv)
 
-			self.doebuild_settings.setcpv(mycpv)
-			eapi = None
-
-			if eapi is None and \
-				'parse-eapi-ebuild-head' in self.doebuild_settings.features:
-				eapi = portage._parse_eapi_ebuild_head(io.open(
-					_unicode_encode(myebuild,
-					encoding=_encodings['fs'], errors='strict'),
-					mode='r', encoding=_encodings['repo.content'],
-					errors='replace'))
-
-			if eapi is not None:
-				self.doebuild_settings.configdict['pkg']['EAPI'] = eapi
-
-			if eapi is not None and not portage.eapi_is_supported(eapi):
-				mydata = self._metadata_callback(
-					mycpv, myebuild, mylocation, {'EAPI':eapi}, emtime)
-			else:
-				proc = EbuildMetadataPhase(cpv=mycpv, ebuild_path=myebuild,
-					ebuild_mtime=emtime,
-					metadata_callback=self._metadata_callback, portdb=self,
-					repo_path=mylocation,
-					scheduler=PollScheduler().sched_iface,
-					settings=self.doebuild_settings)
+			proc = EbuildMetadataPhase(cpv=mycpv,
+				ebuild_hash=ebuild_hash, portdb=self,
+				repo_path=mylocation, scheduler=self._scheduler,
+				settings=self.doebuild_settings)
 
-				proc.start()
-				proc.wait()
+			proc.start()
+			proc.wait()
 
-				if proc.returncode != os.EX_OK:
-					self._broken_ebuilds.add(myebuild)
-					raise KeyError(mycpv)
+			if proc.returncode != os.EX_OK:
+				self._broken_ebuilds.add(myebuild)
+				raise KeyError(mycpv)
 
-				mydata = proc.metadata
+			mydata = proc.metadata
 
-		# do we have a origin repository name for the current package
 		mydata["repository"] = self.repositories.get_name_for_location(mylocation)
-		mydata["INHERITED"] = ' '.join(mydata.get("_eclasses_", []))
-		mydata["_mtime_"] = st[stat.ST_MTIME]
-
+		mydata["_mtime_"] = ebuild_hash.mtime
 		eapi = mydata.get("EAPI")
 		if not eapi:
 			eapi = "0"
 			mydata["EAPI"] = eapi
-		if not eapi_is_supported(eapi):
-			for k in set(mydata).difference(("_mtime_", "_eclasses_")):
-				mydata[k] = ""
-			mydata["EAPI"] = "-" + eapi.lstrip("-")
+		if eapi_is_supported(eapi):
+			mydata["INHERITED"] = " ".join(mydata.get("_eclasses_", []))
 
 		#finally, we look at our internal cache entry and return the requested data.
 		returnme = [mydata.get(x, "") for x in mylist]
@@ -546,7 +491,7 @@ class portdbapi(dbapi):
 		@param mytree: The canonical path of the tree in which the ebuild
 			is located, or None for automatic lookup
 		@type mytree: String
-		@returns: A dict which maps each file name to a set of alternative
+		@return: A dict which maps each file name to a set of alternative
 			URIs.
 		@rtype: dict
 		"""
@@ -565,7 +510,7 @@ class portdbapi(dbapi):
 			# since callers already handle it.
 			raise portage.exception.InvalidDependString(
 				"getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
-				(mypkg, eapi.lstrip("-")))
+				(mypkg, eapi))
 
 		return _parse_uri_map(mypkg, {'EAPI':eapi,'SRC_URI':myuris},
 			use=useflags)
@@ -576,7 +521,9 @@ class portdbapi(dbapi):
 		if myebuild is None:
 			raise AssertionError(_("ebuild not found for '%s'") % mypkg)
 		pkgdir = os.path.dirname(myebuild)
-		mf = Manifest(pkgdir, self.settings["DISTDIR"])
+		mf = self.repositories.get_repo_for_location(
+			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
+				pkgdir, self.settings["DISTDIR"])
 		checksums = mf.getDigests()
 		if not checksums:
 			if debug: 
@@ -597,7 +544,7 @@ class portdbapi(dbapi):
 			mystat = None
 			try:
 				mystat = os.stat(file_path)
-			except OSError as e:
+			except OSError:
 				pass
 			if mystat is None:
 				existing_size = 0
@@ -644,7 +591,9 @@ class portdbapi(dbapi):
 		if myebuild is None:
 			raise AssertionError(_("ebuild not found for '%s'") % mypkg)
 		pkgdir = os.path.dirname(myebuild)
-		mf = Manifest(pkgdir, self.settings["DISTDIR"])
+		mf = self.repositories.get_repo_for_location(
+			os.path.dirname(os.path.dirname(pkgdir)))
+		mf = mf.load_manifest(pkgdir, self.settings["DISTDIR"])
 		mysums = mf.getDigests()
 
 		failures = {}
@@ -706,15 +655,22 @@ class portdbapi(dbapi):
 		return l
 
 	def cp_list(self, mycp, use_cache=1, mytree=None):
+		# NOTE: Cache can be safely shared with the match cache, since the
+		# match cache uses the result from dep_expand for the cache_key.
+		if self.frozen and mytree is not None \
+			and len(self.porttrees) == 1 \
+			and mytree == self.porttrees[0]:
+			# mytree matches our only tree, so it's safe to
+			# ignore mytree and cache the result
+			mytree = None
+
 		if self.frozen and mytree is None:
 			cachelist = self.xcache["cp-list"].get(mycp)
 			if cachelist is not None:
 				# Try to propagate this to the match-all cache here for
 				# repoman since it uses separate match-all caches for each
-				# profile (due to old-style virtuals). Do not propagate
-				# old-style virtuals since cp_list() doesn't expand them.
-				if not (not cachelist and mycp.startswith("virtual/")):
-					self.xcache["match-all"][mycp] = cachelist
+				# profile (due to differences in _get_implicit_iuse).
+				self.xcache["match-all"][(mycp, mycp)] = cachelist
 				return cachelist[:]
 		mysplit = mycp.split("/")
 		invalid_category = mysplit[0] not in self._categories
@@ -752,7 +708,7 @@ class portdbapi(dbapi):
 						writemsg(_("\nInvalid ebuild version: %s\n") % \
 							os.path.join(oroot, mycp, x), noiselevel=-1)
 						continue
-					d[mysplit[0]+"/"+pf] = None
+					d[_pkg_str(mysplit[0]+"/"+pf)] = None
 		if invalid_category and d:
 			writemsg(_("\n!!! '%s' has a category that is not listed in " \
 				"%setc/portage/categories\n") % \
@@ -766,14 +722,11 @@ class portdbapi(dbapi):
 		if self.frozen and mytree is None:
 			cachelist = mylist[:]
 			self.xcache["cp-list"][mycp] = cachelist
-			# Do not propagate old-style virtuals since
-			# cp_list() doesn't expand them.
-			if not (not cachelist and mycp.startswith("virtual/")):
-				self.xcache["match-all"][mycp] = cachelist
+			self.xcache["match-all"][(mycp, mycp)] = cachelist
 		return mylist
 
 	def freeze(self):
-		for x in "bestmatch-visible", "cp-list", "list-visible", "match-all", \
+		for x in "bestmatch-visible", "cp-list", "match-all", \
 			"match-all-cpv-only", "match-visible", "minimum-all", \
 			"minimum-visible":
 			self.xcache[x]={}
@@ -785,12 +738,12 @@ class portdbapi(dbapi):
 
 	def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
 		"caching match function; very trick stuff"
-		#if no updates are being made to the tree, we can consult our xcache...
-		if self.frozen:
-			try:
-				return self.xcache[level][origdep][:]
-			except KeyError:
-				pass
+		if level == "list-visible":
+			level = "match-visible"
+			warnings.warn("The 'list-visible' mode of "
+				"portage.dbapi.porttree.portdbapi.xmatch "
+				"has been renamed to match-visible",
+				DeprecationWarning, stacklevel=2)
 
 		if mydep is None:
 			#this stuff only runs on first call of xmatch()
@@ -798,12 +751,24 @@ class portdbapi(dbapi):
 			mydep = dep_expand(origdep, mydb=self, settings=self.settings)
 			mykey = mydep.cp
 
+		#if no updates are being made to the tree, we can consult our xcache...
+		cache_key = None
+		if self.frozen:
+			cache_key = (mydep, mydep.unevaluated_atom)
+			try:
+				return self.xcache[level][cache_key][:]
+			except KeyError:
+				pass
+
 		myval = None
 		mytree = None
 		if mydep.repo is not None:
 			mytree = self.treemap.get(mydep.repo)
 			if mytree is None:
-				myval = []
+				if level.startswith("match-"):
+					myval = []
+				else:
+					myval = ""
 
 		if myval is not None:
 			# Unknown repo, empty result.
@@ -822,27 +787,8 @@ class portdbapi(dbapi):
 				myval = match_from_list(mydep,
 					self.cp_list(mykey, mytree=mytree))
 
-		elif level == "list-visible":
-			#a list of all visible packages, not called directly (just by xmatch())
-			#myval = self.visible(self.cp_list(mykey))
-
-			myval = self.gvisible(self.visible(
-				self.cp_list(mykey, mytree=mytree)))
-		elif level == "minimum-all":
-			# Find the minimum matching version. This is optimized to
-			# minimize the number of metadata accesses (improves performance
-			# especially in cases where metadata needs to be generated).
-			if mydep == mykey:
-				cpv_iter = iter(self.cp_list(mykey, mytree=mytree))
-			else:
-				cpv_iter = self._iter_match(mydep,
-					self.cp_list(mykey, mytree=mytree))
-			try:
-				myval = next(cpv_iter)
-			except StopIteration:
-				myval = ""
-
-		elif level in ("minimum-visible", "bestmatch-visible"):
+		elif level in ("bestmatch-visible", "match-all", "match-visible",
+			"minimum-all", "minimum-visible"):
 			# Find the minimum matching visible version. This is optimized to
 			# minimize the number of metadata accesses (improves performance
 			# especially in cases where metadata needs to be generated).
@@ -851,158 +797,172 @@ class portdbapi(dbapi):
 			else:
 				mylist = match_from_list(mydep,
 					self.cp_list(mykey, mytree=mytree))
-			myval = ""
-			settings = self.settings
-			local_config = settings.local_config
+
+			visibility_filter = level not in ("match-all", "minimum-all")
+			single_match = level not in ("match-all", "match-visible")
+			myval = []
 			aux_keys = list(self._aux_cache_keys)
-			if level == "minimum-visible":
+			if level == "bestmatch-visible":
+				iterfunc = reversed
+			else:
 				iterfunc = iter
+
+			if mydep.repo is not None:
+				repos = [mydep.repo]
 			else:
-				iterfunc = reversed
+				# We iterate over self.porttrees, since it's common to
+				# tweak this attribute in order to adjust match behavior.
+				repos = []
+				for tree in reversed(self.porttrees):
+					repos.append(self.repositories.get_name_for_location(tree))
+
 			for cpv in iterfunc(mylist):
-				try:
-					metadata = dict(zip(aux_keys,
-						self.aux_get(cpv, aux_keys)))
-				except KeyError:
-					# ebuild masked by corruption
-					continue
-				if not eapi_is_supported(metadata["EAPI"]):
-					continue
-				if mydep.slot and mydep.slot != metadata["SLOT"]:
-					continue
-				if settings._getMissingKeywords(cpv, metadata):
-					continue
-				if settings._getMaskAtom(cpv, metadata):
-					continue
-				if settings._getProfileMaskAtom(cpv, metadata):
-					continue
-				if local_config:
-					metadata["USE"] = ""
-					if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]:
-						self.doebuild_settings.setcpv(cpv, mydb=metadata)
-						metadata["USE"] = self.doebuild_settings.get("USE", "")
+				for repo in repos:
 					try:
-						if settings._getMissingLicenses(cpv, metadata):
-							continue
-						if settings._getMissingProperties(cpv, metadata):
-							continue
-					except InvalidDependString:
+						metadata = dict(zip(aux_keys,
+							self.aux_get(cpv, aux_keys, myrepo=repo)))
+					except KeyError:
+						# ebuild not in this repo, or masked by corruption
 						continue
-				if mydep.use:
-					has_iuse = False
-					for has_iuse in self._iter_match_use(mydep, [cpv]):
-						break
-					if not has_iuse:
+
+					if visibility_filter and not self._visible(cpv, metadata):
 						continue
-				myval = cpv
-				break
+
+					if mydep.slot is not None and \
+						mydep.slot != metadata["SLOT"]:
+						continue
+
+					if mydep.unevaluated_atom.use is not None and \
+						not self._match_use(mydep, cpv, metadata):
+						continue
+
+					myval.append(cpv)
+					# only yield a given cpv once
+					break
+
+				if myval and single_match:
+					break
+
+			if single_match:
+				if myval:
+					myval = myval[0]
+				else:
+					myval = ""
+
 		elif level == "bestmatch-list":
 			#dep match -- find best match but restrict search to sublist
-			#no point in calling xmatch again since we're not caching list deps
-
+			warnings.warn("The 'bestmatch-list' mode of "
+				"portage.dbapi.porttree.portdbapi.xmatch is deprecated",
+				DeprecationWarning, stacklevel=2)
 			myval = best(list(self._iter_match(mydep, mylist)))
 		elif level == "match-list":
 			#dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
-
+			warnings.warn("The 'match-list' mode of "
+				"portage.dbapi.porttree.portdbapi.xmatch is deprecated",
+				DeprecationWarning, stacklevel=2)
 			myval = list(self._iter_match(mydep, mylist))
-		elif level == "match-visible":
-			#dep match -- find all visible matches
-			#get all visible packages, then get the matching ones
-			myval = list(self._iter_match(mydep,
-				self.xmatch("list-visible", mykey, mydep=Atom(mykey), mykey=mykey)))
-		elif level == "match-all":
-			#match *all* visible *and* masked packages
-			if mydep == mykey:
-				myval = self.cp_list(mykey, mytree=mytree)
-			else:
-				myval = list(self._iter_match(mydep,
-					self.cp_list(mykey, mytree=mytree)))
 		else:
 			raise AssertionError(
 				"Invalid level argument: '%s'" % level)
 
-		if self.frozen and (level not in ["match-list", "bestmatch-list"]):
-			self.xcache[level][mydep] = myval
-			if origdep and origdep != mydep:
-				self.xcache[level][origdep] = myval
-		return myval[:]
+		if self.frozen:
+			xcache_this_level = self.xcache.get(level)
+			if xcache_this_level is not None:
+				xcache_this_level[cache_key] = myval
+				if not isinstance(myval, _pkg_str):
+					myval = myval[:]
+
+		return myval
 
 	def match(self, mydep, use_cache=1):
 		return self.xmatch("match-visible", mydep)
 
-	def visible(self, mylist):
-		"""two functions in one.  Accepts a list of cpv values and uses the package.mask *and*
-		packages file to remove invisible entries, returning remaining items.  This function assumes
-		that all entries in mylist have the same category and package name."""
-		if not mylist:
-			return []
-
-		db_keys = ["SLOT"]
-		visible = []
-		getMaskAtom = self.settings._getMaskAtom
-		getProfileMaskAtom = self.settings._getProfileMaskAtom
-		for cpv in mylist:
-			try:
-				metadata = dict(zip(db_keys, self.aux_get(cpv, db_keys)))
-			except KeyError:
-				# masked by corruption
-				continue
-			if not metadata["SLOT"]:
-				continue
-			if getMaskAtom(cpv, metadata):
-				continue
-			if getProfileMaskAtom(cpv, metadata):
-				continue
-			visible.append(cpv)
-		return visible
-
-	def gvisible(self,mylist):
-		"strip out group-masked (not in current group) entries"
+	def gvisible(self, mylist):
+		warnings.warn("The 'gvisible' method of "
+			"portage.dbapi.porttree.portdbapi "
+			"is deprecated",
+			DeprecationWarning, stacklevel=2)
+		return list(self._iter_visible(iter(mylist)))
 
-		if mylist is None:
+	def visible(self, cpv_iter):
+		warnings.warn("The 'visible' method of "
+			"portage.dbapi.porttree.portdbapi "
+			"is deprecated",
+			DeprecationWarning, stacklevel=2)
+		if cpv_iter is None:
 			return []
-		newlist=[]
+		return list(self._iter_visible(iter(cpv_iter)))
+
+	def _iter_visible(self, cpv_iter, myrepo=None):
+		"""
+		Return a new list containing only visible packages.
+		"""
 		aux_keys = list(self._aux_cache_keys)
 		metadata = {}
-		local_config = self.settings.local_config
-		chost = self.settings.get('CHOST', '')
-		accept_chost = self.settings._accept_chost
-		for mycpv in mylist:
-			metadata.clear()
-			try:
-				metadata.update(zip(aux_keys, self.aux_get(mycpv, aux_keys)))
-			except KeyError:
-				continue
-			except PortageException as e:
-				writemsg("!!! Error: aux_get('%s', %s)\n" % (mycpv, aux_keys),
-					noiselevel=-1)
-				writemsg("!!! %s\n" % (e,), noiselevel=-1)
-				del e
-				continue
-			eapi = metadata["EAPI"]
-			if not eapi_is_supported(eapi):
-				continue
-			if _eapi_is_deprecated(eapi):
-				continue
-			if self.settings._getMissingKeywords(mycpv, metadata):
-				continue
-			if local_config:
-				metadata['CHOST'] = chost
-				if not accept_chost(mycpv, metadata):
-					continue
-				metadata["USE"] = ""
-				if "?" in metadata["LICENSE"] or "?" in metadata["PROPERTIES"]:
-					self.doebuild_settings.setcpv(mycpv, mydb=metadata)
-					metadata['USE'] = self.doebuild_settings['PORTAGE_USE']
+
+		if myrepo is not None:
+			repos = [myrepo]
+		else:
+			# We iterate over self.porttrees, since it's common to
+			# tweak this attribute in order to adjust match behavior.
+			repos = []
+			for tree in reversed(self.porttrees):
+				repos.append(self.repositories.get_name_for_location(tree))
+
+		for mycpv in cpv_iter:
+			for repo in repos:
+				metadata.clear()
 				try:
-					if self.settings._getMissingLicenses(mycpv, metadata):
-						continue
-					if self.settings._getMissingProperties(mycpv, metadata):
-						continue
-				except InvalidDependString:
+					metadata.update(zip(aux_keys,
+						self.aux_get(mycpv, aux_keys, myrepo=repo)))
+				except KeyError:
+					continue
+				except PortageException as e:
+					writemsg("!!! Error: aux_get('%s', %s)\n" %
+						(mycpv, aux_keys), noiselevel=-1)
+					writemsg("!!! %s\n" % (e,), noiselevel=-1)
+					del e
 					continue
-			newlist.append(mycpv)
-		return newlist
+
+				if not self._visible(mycpv, metadata):
+					continue
+
+				yield mycpv
+				# only yield a given cpv once
+				break
+
+	def _visible(self, cpv, metadata):
+		eapi = metadata["EAPI"]
+		if not eapi_is_supported(eapi):
+			return False
+		if _eapi_is_deprecated(eapi):
+			return False
+		if not metadata["SLOT"]:
+			return False
+
+		settings = self.settings
+		if settings._getMaskAtom(cpv, metadata):
+			return False
+		if settings._getMissingKeywords(cpv, metadata):
+			return False
+		if settings.local_config:
+			metadata['CHOST'] = settings.get('CHOST', '')
+			if not settings._accept_chost(cpv, metadata):
+				return False
+			metadata["USE"] = ""
+			if "?" in metadata["LICENSE"] or \
+				"?" in metadata["PROPERTIES"]:
+				self.doebuild_settings.setcpv(cpv, mydb=metadata)
+				metadata['USE'] = self.doebuild_settings['PORTAGE_USE']
+			try:
+				if settings._getMissingLicenses(cpv, metadata):
+					return False
+				if settings._getMissingProperties(cpv, metadata):
+					return False
+			except InvalidDependString:
+				return False
+
+		return True
 
 def close_portdbapi_caches():
 	for i in portdbapi.portdbapi_instances:
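
A standalone sketch of the new xmatch cache keying (the class and atoms below are hypothetical, and the motivation is a guess from the "uses the result from dep_expand for the cache_key" note above): the key pairs the expanded dep with its unevaluated form, so two atoms that evaluate to the same string but came from different USE-conditional sources keep separate cache slots.

    class FakeAtom(str):
        # stand-in for a portage Atom: compares as its evaluated
        # string but remembers the unevaluated form it came from
        def __new__(cls, evaluated, unevaluated):
            obj = str.__new__(cls, evaluated)
            obj.unevaluated_atom = unevaluated
            return obj

    xcache = {"match-visible": {}}
    a1 = FakeAtom("dev-libs/foo", "dev-libs/foo[ssl?]")
    a2 = FakeAtom("dev-libs/foo", "dev-libs/foo[-ssl?]")
    xcache["match-visible"][(a1, a1.unevaluated_atom)] = ["dev-libs/foo-1"]
    # a1 == a2 as strings, yet their cache keys stay distinct:
    assert (a2, a2.unevaluated_atom) not in xcache["match-visible"]
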
@@ -1011,7 +971,7 @@ def close_portdbapi_caches():
 portage.process.atexit_register(portage.portageexit)
 
 class portagetree(object):
-	def __init__(self, root=None, virtual=None, settings=None):
+	def __init__(self, root=None, virtual=DeprecationWarning, settings=None):
 		"""
 		Constructor for a PortageTree
 		
@@ -1034,8 +994,14 @@ class portagetree(object):
 				"settings['ROOT'] instead.",
 				DeprecationWarning, stacklevel=2)
 
+		if virtual is not DeprecationWarning:
+			warnings.warn("The 'virtual' parameter of the "
+				"portage.dbapi.porttree.portagetree"
+				" constructor is unused",
+				DeprecationWarning, stacklevel=2)
+
 		self.portroot = settings["PORTDIR"]
-		self.virtual = virtual
+		self.__virtual = virtual
 		self.dbapi = portdbapi(mysettings=settings)
 
 	@property
@@ -1044,9 +1010,17 @@ class portagetree(object):
 			"portage.dbapi.porttree.portagetree" + \
 			" is deprecated. Use " + \
 			"settings['ROOT'] instead.",
-			DeprecationWarning, stacklevel=2)
+			DeprecationWarning, stacklevel=3)
 		return self.settings['ROOT']
 
+	@property
+	def virtual(self):
+		warnings.warn("The 'virtual' attribute of " + \
+			"portage.dbapi.porttree.portagetree" + \
+			" is deprecated.",
+			DeprecationWarning, stacklevel=3)
+		return self.__virtual
+
 	def dep_bestmatch(self,mydep):
 		"compatibility method"
 		mymatch = self.dbapi.xmatch("bestmatch-visible",mydep)
@@ -1077,17 +1051,14 @@ class portagetree(object):
 		psplit = pkgsplit(mysplit[1])
 		return "/".join([self.portroot, mysplit[0], psplit[0], mysplit[1]])+".ebuild"
 
-	def depcheck(self, mycheck, use="yes", myusesplit=None):
-		return dep_check(mycheck, self.dbapi, use=use, myuse=myusesplit)
-
 	def getslot(self,mycatpkg):
 		"Get a slot for a catpkg; assume it exists."
 		myslot = ""
 		try:
 			myslot = self.dbapi.aux_get(mycatpkg, ["SLOT"])[0]
-		except SystemExit as e:
+		except SystemExit:
 			raise
-		except Exception as e:
+		except Exception:
 			pass
 		return myslot
 
@@ -1148,7 +1119,7 @@ def _parse_uri_map(cpv, metadata, use=None):
 	while myuris:
 		uri = myuris.pop()
 		if myuris and myuris[-1] == "->":
-			operator = myuris.pop()
+			myuris.pop()
 			distfile = myuris.pop()
 		else:
 			distfile = os.path.basename(uri)
@@ -1163,6 +1134,5 @@ def _parse_uri_map(cpv, metadata, use=None):
 			uri_map[distfile] = uri_set
 		uri_set.add(uri)
 		uri = None
-		operator = None
 
 	return uri_map
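
The final porttree.py hunk drops the unused 'operator' variable from the SRC_URI rename handling. A standalone simplification of that parsing loop (plain whitespace tokens, no use_reduce or EAPI checks):

    def parse_uri_map(src_uri):
        # minimal stand-in for _parse_uri_map
        uri_map = {}
        tokens = src_uri.split()
        tokens.reverse()            # so pop() consumes left to right
        while tokens:
            uri = tokens.pop()
            if tokens and tokens[-1] == "->":
                tokens.pop()              # discard the "->" operator
                distfile = tokens.pop()   # explicit local file name
            else:
                distfile = uri.rsplit("/", 1)[-1]
            uri_map.setdefault(distfile, set()).add(uri)
        return uri_map

    m = parse_uri_map("http://a/x.tgz http://b/y.tgz -> z.tgz")
    assert m == {"x.tgz": {"http://a/x.tgz"},
                 "z.tgz": {"http://b/y.tgz"}}
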

diff --git a/portage_with_autodep/pym/portage/dbapi/porttree.pyo b/portage_with_autodep/pym/portage/dbapi/porttree.pyo
new file mode 100644
index 0000000..fb57919
Binary files /dev/null and b/portage_with_autodep/pym/portage/dbapi/porttree.pyo differ

diff --git a/portage_with_autodep/pym/portage/dbapi/vartree.py b/portage_with_autodep/pym/portage/dbapi/vartree.py
index 7f7873b..517c873 100644
--- a/portage_with_autodep/pym/portage/dbapi/vartree.py
+++ b/portage_with_autodep/pym/portage/dbapi/vartree.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = [
@@ -18,7 +18,7 @@ portage.proxy.lazyimport.lazyimport(globals(),
 	'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
 	'portage.output:bold,colorize',
 	'portage.package.ebuild.doebuild:doebuild_environment,' + \
-		'_spawn_phase',
+		'_merge_unicode_error', '_spawn_phase',
 	'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
 	'portage.update:fixdbentries',
 	'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
@@ -27,10 +27,13 @@ portage.proxy.lazyimport.lazyimport(globals(),
 	'portage.util.digraph:digraph',
 	'portage.util.env_update:env_update',
 	'portage.util.listdir:dircache,listdir',
+	'portage.util.movefile:movefile',
 	'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
 	'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
-	'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,pkgcmp,' + \
-		'_pkgsplit@pkgsplit',
+	'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,vercmp,' + \
+		'_pkgsplit@pkgsplit,_pkg_str',
+	'subprocess',
+	'tarfile',
 )
 
 from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
@@ -41,12 +44,12 @@ from portage.exception import CommandNotFound, \
 	InvalidData, InvalidLocation, InvalidPackageName, \
 	FileNotFound, PermissionDenied, UnsupportedAPIException
 from portage.localization import _
-from portage.util.movefile import movefile
 
 from portage import abssymlink, _movefile, bsd_chflags
 
 # This is a special version of the os module, wrapped for unicode support.
 from portage import os
+from portage import shutil
 from portage import _encodings
 from portage import _os_merge
 from portage import _selinux_merge
@@ -60,13 +63,15 @@ from _emerge.PollScheduler import PollScheduler
 from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
 
 import errno
+import fnmatch
 import gc
+import grp
 import io
 from itertools import chain
 import logging
 import os as _os
+import pwd
 import re
-import shutil
 import stat
 import sys
 import tempfile
@@ -82,6 +87,9 @@ except ImportError:
 if sys.hexversion >= 0x3000000:
 	basestring = str
 	long = int
+	_unicode = str
+else:
+	_unicode = unicode
 
 class vardbapi(dbapi):
 
@@ -129,12 +137,11 @@ class vardbapi(dbapi):
 		if settings is None:
 			settings = portage.settings
 		self.settings = settings
-		self.root = settings['ROOT']
 
-		if _unused_param is not None and _unused_param != self.root:
-			warnings.warn("The first parameter of the " + \
-				"portage.dbapi.vartree.vardbapi" + \
-				" constructor is now unused. Use " + \
+		if _unused_param is not None and _unused_param != settings['ROOT']:
+			warnings.warn("The first parameter of the "
+				"portage.dbapi.vartree.vardbapi"
+				" constructor is now unused. Use "
 				"settings['ROOT'] instead.",
 				DeprecationWarning, stacklevel=2)
 
@@ -148,14 +155,14 @@ class vardbapi(dbapi):
 		self._fs_lock_count = 0
 
 		if vartree is None:
-			vartree = portage.db[self.root]["vartree"]
+			vartree = portage.db[settings['EROOT']]['vartree']
 		self.vartree = vartree
 		self._aux_cache_keys = set(
 			["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
 			"EAPI", "HOMEPAGE", "IUSE", "KEYWORDS",
 			"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
 			"repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
-			"REQUIRED_USE"])
+			])
 		self._aux_cache_obj = None
 		self._aux_cache_filename = os.path.join(self._eroot,
 			CACHE_PATH, "vdb_metadata.pickle")
@@ -164,7 +171,7 @@ class vardbapi(dbapi):
 
 		self._plib_registry = None
 		if _ENABLE_PRESERVE_LIBS:
-			self._plib_registry = PreservedLibsRegistry(self.root,
+			self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
 				os.path.join(self._eroot, PRIVATE_PATH,
 				"preserved_libs_registry"))
 
@@ -175,6 +182,15 @@ class vardbapi(dbapi):
 
 		self._cached_counter = None
 
+	@property
+	def root(self):
+		warnings.warn("The root attribute of "
+			"portage.dbapi.vartree.vardbapi"
+			" is deprecated. Use "
+			"settings['ROOT'] instead.",
+			DeprecationWarning, stacklevel=3)
+		return self.settings['ROOT']
+
 	def getpath(self, mykey, filename=None):
 		# This is an optimized hotspot, so don't use unicode-wrapped
 		# os module and don't use os.path.join().
@@ -373,7 +389,7 @@ class vardbapi(dbapi):
 				continue
 			if len(mysplit) > 1:
 				if ps[0] == mysplit[1]:
-					returnme.append(mysplit[0]+"/"+x)
+					returnme.append(_pkg_str(mysplit[0]+"/"+x))
 		self._cpv_sort_ascending(returnme)
 		if use_cache:
 			self.cpcache[mycp] = [mystat, returnme[:]]
@@ -472,6 +488,7 @@ class vardbapi(dbapi):
 		"caching match function"
 		mydep = dep_expand(
 			origdep, mydb=self, use_cache=use_cache, settings=self.settings)
+		cache_key = (mydep, mydep.unevaluated_atom)
 		mykey = dep_getkey(mydep)
 		mycat = catsplit(mykey)[0]
 		if not use_cache:
@@ -493,8 +510,8 @@ class vardbapi(dbapi):
 		if mydep not in self.matchcache[mycat]:
 			mymatch = list(self._iter_match(mydep,
 				self.cp_list(mydep.cp, use_cache=use_cache)))
-			self.matchcache[mycat][mydep] = mymatch
-		return self.matchcache[mycat][mydep][:]
+			self.matchcache[mycat][cache_key] = mymatch
+		return self.matchcache[mycat][cache_key][:]
 
 	def findname(self, mycpv, myrepo=None):
 		return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
@@ -555,8 +572,11 @@ class vardbapi(dbapi):
 			aux_cache = mypickle.load()
 			f.close()
 			del f
-		except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
-			if isinstance(e, pickle.UnpicklingError):
+		except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError) as e:
+			if isinstance(e, EnvironmentError) and \
+				getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
+				pass
+			else:
 				writemsg(_unicode_decode(_("!!! Error loading '%s': %s\n")) % \
 					(self._aux_cache_filename, e), noiselevel=-1)
 			del e
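
A standalone sketch of the widened cache-load handling above: a missing or unreadable pickle is expected and stays silent, while anything else is reported but still non-fatal (the function name is illustrative).

    import errno
    import pickle

    def load_aux_cache(path):
        try:
            with open(path, 'rb') as f:
                return pickle.load(f)
        except (AttributeError, EOFError, EnvironmentError, ValueError,
                pickle.UnpicklingError) as e:
            if isinstance(e, EnvironmentError) and \
                getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
                pass  # absent or unreadable cache: stay silent
            else:
                print("!!! Error loading '%s': %s" % (path, e))
            return None
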
@@ -610,7 +630,8 @@ class vardbapi(dbapi):
 				cache_these_wants.add(x)
 
 		if not cache_these_wants:
-			return self._aux_get(mycpv, wants)
+			mydata = self._aux_get(mycpv, wants)
+			return [mydata[x] for x in wants]
 
 		cache_these = set(self._aux_cache_keys)
 		cache_these.update(cache_these_wants)
@@ -655,16 +676,15 @@ class vardbapi(dbapi):
 		if pull_me:
 			# pull any needed data and cache it
 			aux_keys = list(pull_me)
-			for k, v in zip(aux_keys,
-				self._aux_get(mycpv, aux_keys, st=mydir_stat)):
-				mydata[k] = v
+			mydata.update(self._aux_get(mycpv, aux_keys, st=mydir_stat))
 			if not cache_valid or cache_these.difference(metadata):
 				cache_data = {}
 				if cache_valid and metadata:
 					cache_data.update(metadata)
 				for aux_key in cache_these:
 					cache_data[aux_key] = mydata[aux_key]
-				self._aux_cache["packages"][mycpv] = (mydir_mtime, cache_data)
+				self._aux_cache["packages"][_unicode(mycpv)] = \
+					(mydir_mtime, cache_data)
 				self._aux_cache["modified"].add(mycpv)
 
 		if _slot_re.match(mydata['SLOT']) is None:
@@ -688,10 +708,11 @@ class vardbapi(dbapi):
 					raise
 		if not stat.S_ISDIR(st.st_mode):
 			raise KeyError(mycpv)
-		results = []
+		results = {}
+		env_keys = []
 		for x in wants:
 			if x == "_mtime_":
-				results.append(st[stat.ST_MTIME])
+				results[x] = st[stat.ST_MTIME]
 				continue
 			try:
 				myf = io.open(
@@ -703,16 +724,103 @@ class vardbapi(dbapi):
 					myd = myf.read()
 				finally:
 					myf.close()
-				# Preserve \n for metadata that is known to
-				# contain multiple lines.
-				if self._aux_multi_line_re.match(x) is None:
-					myd = " ".join(myd.split())
 			except IOError:
+				if x not in self._aux_cache_keys and \
+					self._aux_cache_keys_re.match(x) is None:
+					env_keys.append(x)
+					continue
 				myd = _unicode_decode('')
-			if x == "EAPI" and not myd:
-				results.append(_unicode_decode('0'))
-			else:
-				results.append(myd)
+
+			# Preserve \n for metadata that is known to
+			# contain multiple lines.
+			if self._aux_multi_line_re.match(x) is None:
+				myd = " ".join(myd.split())
+
+			results[x] = myd
+
+		if env_keys:
+			env_results = self._aux_env_search(mycpv, env_keys)
+			for k in env_keys:
+				v = env_results.get(k)
+				if v is None:
+					v = _unicode_decode('')
+				if self._aux_multi_line_re.match(k) is None:
+					v = " ".join(v.split())
+				results[k] = v
+
+		if results.get("EAPI") == "":
+			results[_unicode_decode("EAPI")] = _unicode_decode('0')
+
+		return results
+
+	def _aux_env_search(self, cpv, variables):
+		"""
+		Search environment.bz2 for the specified variables. Returns
+		a dict mapping variables to values, and any variables not
+		found in the environment will not be included in the dict.
+		This is useful for querying variables like ${SRC_URI} and
+		${A}, which are not saved in separate files but are available
+		in environment.bz2 (see bug #395463).
+		"""
+		env_file = self.getpath(cpv, filename="environment.bz2")
+		if not os.path.isfile(env_file):
+			return {}
+		bunzip2_cmd = portage.util.shlex_split(
+			self.settings.get("PORTAGE_BUNZIP2_COMMAND", ""))
+		if not bunzip2_cmd:
+			bunzip2_cmd = portage.util.shlex_split(
+				self.settings["PORTAGE_BZIP2_COMMAND"])
+			bunzip2_cmd.append("-d")
+		args = bunzip2_cmd + ["-c", env_file]
+		try:
+			proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+		except EnvironmentError as e:
+			if e.errno != errno.ENOENT:
+				raise
+			raise portage.exception.CommandNotFound(args[0])
+
+		# Parts of the following code are borrowed from
+		# filter-bash-environment.py (keep them in sync).
+		var_assign_re = re.compile(r'(^|^declare\s+-\S+\s+|^declare\s+|^export\s+)([^=\s]+)=("|\')?(.*)$')
+		close_quote_re = re.compile(r'(\\"|"|\')\s*$')
+		def have_end_quote(quote, line):
+			close_quote_match = close_quote_re.search(line)
+			return close_quote_match is not None and \
+				close_quote_match.group(1) == quote
+
+		variables = frozenset(variables)
+		results = {}
+		for line in proc.stdout:
+			line = _unicode_decode(line,
+				encoding=_encodings['content'], errors='replace')
+			var_assign_match = var_assign_re.match(line)
+			if var_assign_match is not None:
+				key = var_assign_match.group(2)
+				quote = var_assign_match.group(3)
+				if quote is not None:
+					if have_end_quote(quote,
+						line[var_assign_match.end(2)+2:]):
+						value = var_assign_match.group(4)
+					else:
+						value = [var_assign_match.group(4)]
+						for line in proc.stdout:
+							line = _unicode_decode(line,
+								encoding=_encodings['content'],
+								errors='replace')
+							value.append(line)
+							if have_end_quote(quote, line):
+								break
+						value = ''.join(value)
+					# remove trailing quote and whitespace
+					value = value.rstrip()[:-1]
+				else:
+					value = var_assign_match.group(4).rstrip()
+
+				if key in variables:
+					results[key] = value
+
+		proc.wait()
+		proc.stdout.close()
 		return results
 
 	def aux_update(self, cpv, values):
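
A much simplified standalone sketch of the _aux_env_search idea above, using the bz2 module instead of a bunzip2 subprocess and handling only one-line KEY="value" assignments (the real code also handles quoting and continuation lines):

    import bz2
    import re

    _assign_re = re.compile(
        r'^(?:declare\s+\S+\s+|declare\s+|export\s+)?([^=\s]+)="(.*)"\s*$')

    def env_search(path, wanted):
        # scan a saved environment for simple one-line assignments
        results = {}
        with bz2.BZ2File(path) as f:
            for line in f:
                m = _assign_re.match(line.decode("utf-8", "replace"))
                if m is not None and m.group(1) in wanted:
                    results[m.group(1)] = m.group(2)
        return results

    # e.g.: env_search("/var/db/pkg/app-misc/foo-1/environment.bz2",
    #                  frozenset(["SRC_URI", "A"]))
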
@@ -758,8 +866,7 @@ class vardbapi(dbapi):
 
 		@param myroot: ignored, self._eroot is used instead
 		"""
-		myroot = None
-		new_vdb = False
+		del myroot
 		counter = -1
 		try:
 			cfile = io.open(
@@ -768,8 +875,9 @@ class vardbapi(dbapi):
 				mode='r', encoding=_encodings['repo.content'],
 				errors='replace')
 		except EnvironmentError as e:
-			new_vdb = not bool(self.cpv_all())
-			if not new_vdb:
+			# Silently allow ENOENT since files under
+			# /var/cache/ are allowed to disappear.
+			if e.errno != errno.ENOENT:
 				writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
 					self._counter_path, noiselevel=-1)
 				writemsg("!!! %s\n" % str(e), noiselevel=-1)
@@ -806,10 +914,6 @@ class vardbapi(dbapi):
 				if pkg_counter > max_counter:
 					max_counter = pkg_counter
 
-		if counter < 0 and not new_vdb:
-			writemsg(_("!!! Initializing COUNTER to " \
-				"value of %d\n") % max_counter, noiselevel=-1)
-
 		return max_counter + 1
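
A standalone sketch of the COUNTER-file reading change above: a missing file is now silently tolerated, since files under /var/cache/ may legitimately disappear (function name is illustrative).

    import errno
    import io

    def read_counter(path):
        # absence is normal; any other error is worth a warning
        try:
            with io.open(path, mode='r') as f:
                return int(f.readline().strip())
        except EnvironmentError as e:
            if e.errno != errno.ENOENT:
                print("!!! Unable to read COUNTER file: '%s'" % path)
            return -1
        except ValueError:
            return -1
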
 
 	def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
@@ -823,7 +927,7 @@ class vardbapi(dbapi):
 		@param myroot: ignored, self._eroot is used instead
 		@param mycpv: ignored
 		@rtype: int
-		@returns: new counter value
+		@return: new counter value
 		"""
 		myroot = None
 		mycpv = None
@@ -959,7 +1063,7 @@ class vardbapi(dbapi):
 				counter = int(counter)
 			except ValueError:
 				counter = 0
-			return (cpv, counter, mtime)
+			return (_unicode(cpv), counter, mtime)
 
 	class _owners_db(object):
 
@@ -1149,24 +1253,38 @@ class vardbapi(dbapi):
 
 class vartree(object):
 	"this tree will scan a var/db/pkg database located at root (passed to init)"
-	def __init__(self, root=None, virtual=None, categories=None,
+	def __init__(self, root=None, virtual=DeprecationWarning, categories=None,
 		settings=None):
 
 		if settings is None:
 			settings = portage.settings
-		self.root = settings['ROOT']
 
-		if root is not None and root != self.root:
-			warnings.warn("The 'root' parameter of the " + \
-				"portage.dbapi.vartree.vartree" + \
-				" constructor is now unused. Use " + \
+		if root is not None and root != settings['ROOT']:
+			warnings.warn("The 'root' parameter of the "
+				"portage.dbapi.vartree.vartree"
+				" constructor is now unused. Use "
 				"settings['ROOT'] instead.",
 				DeprecationWarning, stacklevel=2)
 
+		if virtual is not DeprecationWarning:
+			warnings.warn("The 'virtual' parameter of the "
+				"portage.dbapi.vartree.vartree"
+				" constructor is unused",
+				DeprecationWarning, stacklevel=2)
+
 		self.settings = settings
 		self.dbapi = vardbapi(settings=settings, vartree=self)
 		self.populated = 1
 
+	@property
+	def root(self):
+		warnings.warn("The root attribute of "
+			"portage.dbapi.vartree.vartree"
+			" is deprecated. Use "
+			"settings['ROOT'] instead.",
+			DeprecationWarning, stacklevel=3)
+		return self.settings['ROOT']
+
 	def getpath(self, mykey, filename=None):
 		return self.dbapi.getpath(mykey, filename=filename)
 
@@ -1276,6 +1394,20 @@ class dblink(object):
 		r')$'
 	)
 
+	# These files are generated by emerge, so we need to remove
+	# them when they are the only thing left in a directory.
+	_infodir_cleanup = frozenset(["dir", "dir.old"])
+
+	_ignored_unlink_errnos = (
+		errno.EBUSY, errno.ENOENT,
+		errno.ENOTDIR, errno.EISDIR)
+
+	_ignored_rmdir_errnos = (
+		errno.EEXIST, errno.ENOTEMPTY,
+		errno.EBUSY, errno.ENOENT,
+		errno.ENOTDIR, errno.EISDIR,
+		errno.EPERM)
+
 	def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
 		vartree=None, blockers=None, scheduler=None, pipe=None):
 		"""
@@ -1300,22 +1432,23 @@ class dblink(object):
 			raise TypeError("settings argument is required")
 
 		mysettings = settings
-		myroot = settings['ROOT']
+		self._eroot = mysettings['EROOT']
 		self.cat = cat
 		self.pkg = pkg
 		self.mycpv = self.cat + "/" + self.pkg
-		self.mysplit = list(catpkgsplit(self.mycpv)[1:])
-		self.mysplit[0] = "%s/%s" % (self.cat, self.mysplit[0])
+		if self.mycpv == settings.mycpv and \
+			isinstance(settings.mycpv, _pkg_str):
+			self.mycpv = settings.mycpv
+		else:
+			self.mycpv = _pkg_str(self.mycpv)
+		self.mysplit = list(self.mycpv.cpv_split[1:])
+		self.mysplit[0] = self.mycpv.cp
 		self.treetype = treetype
 		if vartree is None:
-			vartree = portage.db[myroot]["vartree"]
+			vartree = portage.db[self._eroot]["vartree"]
 		self.vartree = vartree
 		self._blockers = blockers
 		self._scheduler = scheduler
-
-		# WARNING: EROOT support is experimental and may be incomplete
-		# for cases in which EPREFIX is non-empty.
-		self._eroot = mysettings['EROOT']
 		self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
 		self.dbcatdir = self.dbroot+"/"+cat
 		self.dbpkgdir = self.dbcatdir+"/"+pkg
@@ -1324,14 +1457,14 @@ class dblink(object):
 		self.settings = mysettings
 		self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
 
-		self.myroot=myroot
+		self.myroot = self.settings['ROOT']
 		self._installed_instance = None
 		self.contentscache = None
 		self._contents_inodes = None
 		self._contents_basenames = None
 		self._linkmap_broken = False
-		self._md5_merge_map = {}
-		self._hash_key = (self.myroot, self.mycpv)
+		self._hardlink_merge_map = {}
+		self._hash_key = (self._eroot, self.mycpv)
 		self._protect_obj = None
 		self._pipe = pipe
 
@@ -1610,7 +1743,7 @@ class dblink(object):
 			PreservedLibsRegistry yet.
 		@type preserve_paths: set
 		@rtype: Integer
-		@returns:
+		@return:
 		1. os.EX_OK if everything went well.
 		2. return code of the failed phase (for prerm, postrm, cleanrm)
 		"""
@@ -1839,16 +1972,19 @@ class dblink(object):
 		else:
 			self.settings.pop("PORTAGE_LOG_FILE", None)
 
-		# Lock the config memory file to prevent symlink creation
-		# in merge_contents from overlapping with env-update.
-		self.vartree.dbapi._fs_lock()
-		try:
-			env_update(target_root=self.settings['ROOT'],
-				prev_mtimes=ldpath_mtimes,
-				contents=contents, env=self.settings.environ(),
-				writemsg_level=self._display_merge)
-		finally:
-			self.vartree.dbapi._fs_unlock()
+		env_update(target_root=self.settings['ROOT'],
+			prev_mtimes=ldpath_mtimes,
+			contents=contents, env=self.settings,
+			writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
+
+		unmerge_with_replacement = preserve_paths is not None
+		if not unmerge_with_replacement:
+			# When there's a replacement package which calls us via treewalk,
+			# treewalk will automatically call _prune_plib_registry for us.
+			# Otherwise, we need to call _prune_plib_registry ourselves.
+			# Don't pass in the "unmerge=True" flag here, since that flag
+			# is intended to be used _prior_ to unmerge, not after.
+			self._prune_plib_registry()
 
 		return os.EX_OK
 
@@ -1871,6 +2007,10 @@ class dblink(object):
 					log_path=log_path, background=background,
 					level=level, noiselevel=noiselevel)
 
+	def _show_unmerge(self, zing, desc, file_type, file_name):
+		self._display_merge("%s %s %s %s\n" % \
+			(zing, desc.ljust(8), file_type, file_name))
+
 	def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
 		"""
 		
@@ -1887,6 +2027,9 @@ class dblink(object):
 		os = _os_merge
 		perf_md5 = perform_md5
 		showMessage = self._display_merge
+		show_unmerge = self._show_unmerge
+		ignored_unlink_errnos = self._ignored_unlink_errnos
+		ignored_rmdir_errnos = self._ignored_rmdir_errnos
 
 		if not pkgfiles:
 			showMessage(_("No package files given... Grabbing a set.\n"))
@@ -1904,9 +2047,6 @@ class dblink(object):
 					settings=self.settings,
 					vartree=self.vartree, treetype="vartree", pipe=self._pipe))
 
-		dest_root = self._eroot
-		dest_root_len = len(dest_root) - 1
-
 		cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
 		stale_confmem = []
 		protected_symlinks = {}
@@ -1922,14 +2062,6 @@ class dblink(object):
 
 			#process symlinks second-to-last, directories last.
 			mydirs = set()
-			ignored_unlink_errnos = (
-				errno.EBUSY, errno.ENOENT,
-				errno.ENOTDIR, errno.EISDIR)
-			ignored_rmdir_errnos = (
-				errno.EEXIST, errno.ENOTEMPTY,
-				errno.EBUSY, errno.ENOENT,
-				errno.ENOTDIR, errno.EISDIR,
-				errno.EPERM)
 			modprotect = os.path.join(self._eroot, "lib/modules/")
 
 			def unlink(file_name, lstatobj):
@@ -1965,10 +2097,6 @@ class dblink(object):
 						# Restore the parent flags we saved before unlinking
 						bsd_chflags.chflags(parent_name, pflags)
 
-			def show_unmerge(zing, desc, file_type, file_name):
-					showMessage("%s %s %s %s\n" % \
-						(zing, desc.ljust(8), file_type, file_name))
-
 			unmerge_desc = {}
 			unmerge_desc["cfgpro"] = _("cfgpro")
 			unmerge_desc["replaced"] = _("replaced")
@@ -1980,14 +2108,12 @@ class dblink(object):
 			unmerge_desc["!mtime"] = _("!mtime")
 			unmerge_desc["!obj"] = _("!obj")
 			unmerge_desc["!sym"] = _("!sym")
+			unmerge_desc["!prefix"] = _("!prefix")
 
 			real_root = self.settings['ROOT']
 			real_root_len = len(real_root) - 1
-			eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
+			eroot = self.settings["EROOT"]
 
-			# These files are generated by emerge, so we need to remove
-			# them when they are the only thing left in a directory.
-			infodir_cleanup = frozenset(["dir", "dir.old"])
 			infodirs = frozenset(infodir for infodir in chain(
 				self.settings.get("INFOPATH", "").split(":"),
 				self.settings.get("INFODIR", "").split(":")) if infodir)
@@ -2023,6 +2149,12 @@ class dblink(object):
 
 				file_data = pkgfiles[objkey]
 				file_type = file_data[0]
+
+				# don't try to unmerge the prefix offset itself
+				if len(obj) <= len(eroot) or not obj.startswith(eroot):
+					show_unmerge("---", unmerge_desc["!prefix"], file_type, obj)
+					continue
+
 				statobj = None
 				try:
 					statobj = os.stat(obj)
@@ -2216,78 +2348,13 @@ class dblink(object):
 				elif pkgfiles[objkey][0] == "dev":
 					show_unmerge("---", "", file_type, obj)
 
-			mydirs = sorted(mydirs)
-			mydirs.reverse()
+			self._unmerge_dirs(mydirs, infodirs_inodes,
+				protected_symlinks, unmerge_desc, unlink, os)
+			mydirs.clear()
 
-			for obj, inode_key in mydirs:
-				# Treat any directory named "info" as a candidate here,
-				# since it might have been in INFOPATH previously even
-				# though it may not be there now.
-				if inode_key in infodirs_inodes or \
-					os.path.basename(obj) == "info":
-					try:
-						remaining = os.listdir(obj)
-					except OSError:
-						pass
-					else:
-						cleanup_info_dir = ()
-						if remaining and \
-							len(remaining) <= len(infodir_cleanup):
-							if not set(remaining).difference(infodir_cleanup):
-								cleanup_info_dir = remaining
-
-						for child in cleanup_info_dir:
-							child = os.path.join(obj, child)
-							try:
-								lstatobj = os.lstat(child)
-								if stat.S_ISREG(lstatobj.st_mode):
-									unlink(child, lstatobj)
-									show_unmerge("<<<", "", "obj", child)
-							except EnvironmentError as e:
-								if e.errno not in ignored_unlink_errnos:
-									raise
-								del e
-								show_unmerge("!!!", "", "obj", child)
-				try:
-					if bsd_chflags:
-						lstatobj = os.lstat(obj)
-						if lstatobj.st_flags != 0:
-							bsd_chflags.lchflags(obj, 0)
-						parent_name = os.path.dirname(obj)
-						# Use normal stat/chflags for the parent since we want to
-						# follow any symlinks to the real parent directory.
-						pflags = os.stat(parent_name).st_flags
-						if pflags != 0:
-							bsd_chflags.chflags(parent_name, 0)
-					try:
-						os.rmdir(obj)
-					finally:
-						if bsd_chflags and pflags != 0:
-							# Restore the parent flags we saved before unlinking
-							bsd_chflags.chflags(parent_name, pflags)
-					show_unmerge("<<<", "", "dir", obj)
-				except EnvironmentError as e:
-					if e.errno not in ignored_rmdir_errnos:
-						raise
-					if e.errno != errno.ENOENT:
-						show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
-					del e
-				else:
-					# When a directory is successfully removed, there's
-					# no need to protect symlinks that point to it.
-					unmerge_syms = protected_symlinks.pop(inode_key, None)
-					if unmerge_syms is not None:
-						for relative_path in unmerge_syms:
-							obj = os.path.join(real_root,
-								relative_path.lstrip(os.sep))
-							try:
-								unlink(obj, os.lstat(obj))
-								show_unmerge("<<<", "", "sym", obj)
-							except (OSError, IOError) as e:
-								if e.errno not in ignored_unlink_errnos:
-									raise
-								del e
-								show_unmerge("!!!", "", "sym", obj)
+		if protected_symlinks:
+			self._unmerge_protected_symlinks(others_in_slot, infodirs_inodes,
+				protected_symlinks, unmerge_desc, unlink, os)
 
 		if protected_symlinks:
 			msg = "One or more symlinks to directories have been " + \
@@ -2313,6 +2380,168 @@ class dblink(object):
 		#remove self from vartree database so that our own virtual gets zapped if we're the last node
 		self.vartree.zap(self.mycpv)
 
+	def _unmerge_protected_symlinks(self, others_in_slot, infodirs_inodes,
+		protected_symlinks, unmerge_desc, unlink, os):
+
+		real_root = self.settings['ROOT']
+		show_unmerge = self._show_unmerge
+		ignored_unlink_errnos = self._ignored_unlink_errnos
+
+		flat_list = set()
+		flat_list.update(*protected_symlinks.values())
+		flat_list = sorted(flat_list)
+
+		for f in flat_list:
+			for dblnk in others_in_slot:
+				if dblnk.isowner(f):
+					# If another package in the same slot installed
+					# a file via a protected symlink, return early
+					# and don't bother searching for any other owners.
+					return
+
+		msg = []
+		msg.append("")
+		msg.append(_("Directory symlink(s) may need protection:"))
+		msg.append("")
+
+		for f in flat_list:
+			msg.append("\t%s" % \
+				os.path.join(real_root, f.lstrip(os.path.sep)))
+
+		msg.append("")
+		msg.append(_("Searching all installed"
+			" packages for files installed via above symlink(s)..."))
+		msg.append("")
+		self._elog("elog", "postrm", msg)
+
+		self.lockdb()
+		try:
+			owners = self.vartree.dbapi._owners.get_owners(flat_list)
+			self.vartree.dbapi.flush_cache()
+		finally:
+			self.unlockdb()
+
+		for owner in list(owners):
+			if owner.mycpv == self.mycpv:
+				owners.pop(owner, None)
+
+		if not owners:
+			msg = []
+			msg.append(_("The above directory symlink(s) are all "
+				"safe to remove. Removing them now..."))
+			msg.append("")
+			self._elog("elog", "postrm", msg)
+			dirs = set()
+			for unmerge_syms in protected_symlinks.values():
+				for relative_path in unmerge_syms:
+					obj = os.path.join(real_root,
+						relative_path.lstrip(os.sep))
+					parent = os.path.dirname(obj)
+					while len(parent) > len(self._eroot):
+						try:
+							lstatobj = os.lstat(parent)
+						except OSError:
+							break
+						else:
+							dirs.add((parent,
+								(lstatobj.st_dev, lstatobj.st_ino)))
+							parent = os.path.dirname(parent)
+					try:
+						unlink(obj, os.lstat(obj))
+						show_unmerge("<<<", "", "sym", obj)
+					except (OSError, IOError) as e:
+						if e.errno not in ignored_unlink_errnos:
+							raise
+						del e
+						show_unmerge("!!!", "", "sym", obj)
+
+			protected_symlinks.clear()
+			self._unmerge_dirs(dirs, infodirs_inodes,
+				protected_symlinks, unmerge_desc, unlink, os)
+			dirs.clear()
+
+	def _unmerge_dirs(self, dirs, infodirs_inodes,
+		protected_symlinks, unmerge_desc, unlink, os):
+
+		show_unmerge = self._show_unmerge
+		infodir_cleanup = self._infodir_cleanup
+		ignored_unlink_errnos = self._ignored_unlink_errnos
+		ignored_rmdir_errnos = self._ignored_rmdir_errnos
+		real_root = self.settings['ROOT']
+
+		dirs = sorted(dirs)
+		dirs.reverse()
+
+		for obj, inode_key in dirs:
+			# Treat any directory named "info" as a candidate here,
+			# since it might have been in INFOPATH previously even
+			# though it may not be there now.
+			if inode_key in infodirs_inodes or \
+				os.path.basename(obj) == "info":
+				try:
+					remaining = os.listdir(obj)
+				except OSError:
+					pass
+				else:
+					cleanup_info_dir = ()
+					if remaining and \
+						len(remaining) <= len(infodir_cleanup):
+						if not set(remaining).difference(infodir_cleanup):
+							cleanup_info_dir = remaining
+
+					for child in cleanup_info_dir:
+						child = os.path.join(obj, child)
+						try:
+							lstatobj = os.lstat(child)
+							if stat.S_ISREG(lstatobj.st_mode):
+								unlink(child, lstatobj)
+								show_unmerge("<<<", "", "obj", child)
+						except EnvironmentError as e:
+							if e.errno not in ignored_unlink_errnos:
+								raise
+							del e
+							show_unmerge("!!!", "", "obj", child)
+			try:
+				if bsd_chflags:
+					lstatobj = os.lstat(obj)
+					if lstatobj.st_flags != 0:
+						bsd_chflags.lchflags(obj, 0)
+					parent_name = os.path.dirname(obj)
+					# Use normal stat/chflags for the parent since we want to
+					# follow any symlinks to the real parent directory.
+					pflags = os.stat(parent_name).st_flags
+					if pflags != 0:
+						bsd_chflags.chflags(parent_name, 0)
+				try:
+					os.rmdir(obj)
+				finally:
+					if bsd_chflags and pflags != 0:
+						# Restore the parent flags we saved before unlinking
+						bsd_chflags.chflags(parent_name, pflags)
+				show_unmerge("<<<", "", "dir", obj)
+			except EnvironmentError as e:
+				if e.errno not in ignored_rmdir_errnos:
+					raise
+				if e.errno != errno.ENOENT:
+					show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
+				del e
+			else:
+				# When a directory is successfully removed, there's
+				# no need to protect symlinks that point to it.
+				unmerge_syms = protected_symlinks.pop(inode_key, None)
+				if unmerge_syms is not None:
+					for relative_path in unmerge_syms:
+						obj = os.path.join(real_root,
+							relative_path.lstrip(os.sep))
+						try:
+							unlink(obj, os.lstat(obj))
+							show_unmerge("<<<", "", "sym", obj)
+						except (OSError, IOError) as e:
+							if e.errno not in ignored_unlink_errnos:
+								raise
+							del e
+							show_unmerge("!!!", "", "sym", obj)
+
 	def isowner(self, filename, destroot=None):
 		""" 
 		Check if a file belongs to this package. This may
@@ -2328,7 +2557,7 @@ class dblink(object):
 		@param destroot:
 		@type destroot:
 		@rtype: Boolean
-		@returns:
+		@return:
 		1. True if this package owns the file.
 		2. False if this package does not own the file.
 		"""
@@ -2857,9 +3086,13 @@ class dblink(object):
 
 			os = _os_merge
 
-			collision_ignore = set([normalize_path(myignore) for myignore in \
-				portage.util.shlex_split(
-				self.settings.get("COLLISION_IGNORE", ""))])
+			collision_ignore = []
+			for x in portage.util.shlex_split(
+				self.settings.get("COLLISION_IGNORE", "")):
+				if os.path.isdir(os.path.join(self._eroot, x.lstrip(os.sep))):
+					x = normalize_path(x)
+					x += "/*"
+				collision_ignore.append(x)
 
 			# For collisions with preserved libraries, the current package
 			# will assume ownership and the libraries will be unregistered.
@@ -2960,15 +3193,12 @@ class dblink(object):
 				if not isowned and self.isprotected(full_path):
 					isowned = True
 				if not isowned:
+					f_match = full_path[len(self._eroot)-1:]
 					stopmerge = True
-					if collision_ignore:
-						if f in collision_ignore:
+					for pattern in collision_ignore:
+						if fnmatch.fnmatch(f_match, pattern):
 							stopmerge = False
-						else:
-							for myignore in collision_ignore:
-								if f.startswith(myignore + os.path.sep):
-									stopmerge = False
-									break
+							break
 					if stopmerge:
 						collisions.append(f)
 			return collisions, symlink_collisions, plib_collisions
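
A standalone sketch of the new COLLISION_IGNORE matching shown in the hunk above: directory entries become "<dir>/*" globs and every colliding path is tested with fnmatch, so both exact files and whole trees can be ignored (helper names are illustrative).

    import fnmatch

    def build_collision_ignore(entries, isdir):
        # directories become "<dir>/*" globs, as in the hunk above
        patterns = []
        for x in entries:
            if isdir(x):
                x = x.rstrip("/") + "/*"
            patterns.append(x)
        return patterns

    patterns = build_collision_ignore(
        ["/lib/modules", "/etc/ld.so.cache"],
        isdir=lambda p: p == "/lib/modules")
    assert any(fnmatch.fnmatch("/lib/modules/4.9/a.ko", p)
        for p in patterns)
    assert any(fnmatch.fnmatch("/etc/ld.so.cache", p) for p in patterns)
    assert not any(fnmatch.fnmatch("/etc/passwd", p) for p in patterns)
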
@@ -3121,9 +3351,10 @@ class dblink(object):
 					if isinstance(lines, basestring):
 						lines = [lines]
 					for line in lines:
-						fields = (funcname, phase, cpv, line.rstrip('\n'))
-						str_buffer.append(' '.join(fields))
-						str_buffer.append('\n')
+						for line in line.split('\n'):
+							fields = (funcname, phase, cpv, line)
+							str_buffer.append(' '.join(fields))
+							str_buffer.append('\n')
 			if str_buffer:
 				os.write(self._pipe, _unicode_encode(''.join(str_buffer)))
 
@@ -3157,7 +3388,7 @@ class dblink(object):
 		@param prev_mtimes: { Filename:mtime } mapping for env_update
 		@type prev_mtimes: Dictionary
 		@rtype: Boolean
-		@returns:
+		@return:
 		1. 0 on success
 		2. 1 on failure
 		
@@ -3192,17 +3423,22 @@ class dblink(object):
 					pass
 				continue
 
+			f = None
 			try:
-				val = io.open(_unicode_encode(
+				f = io.open(_unicode_encode(
 					os.path.join(inforoot, var_name),
 					encoding=_encodings['fs'], errors='strict'),
 					mode='r', encoding=_encodings['repo.content'],
-					errors='replace').readline().strip()
+					errors='replace')
+				val = f.readline().strip()
 			except EnvironmentError as e:
 				if e.errno != errno.ENOENT:
 					raise
 				del e
 				val = ''
+			finally:
+				if f is not None:
+					f.close()
 
 			if var_name == 'SLOT':
 				slot = val
@@ -3226,10 +3462,6 @@ class dblink(object):
 		if not os.path.exists(self.dbcatdir):
 			ensure_dirs(self.dbcatdir)
 
-		otherversions = []
-		for v in self.vartree.dbapi.cp_list(self.mysplit[0]):
-			otherversions.append(v.split("/")[1])
-
 		cp = self.mysplit[0]
 		slot_atom = "%s:%s" % (cp, slot)
 
@@ -3270,22 +3502,49 @@ class dblink(object):
 					max_dblnk = dblnk
 			self._installed_instance = max_dblnk
 
+		if self.settings.get("INSTALL_MASK") or \
+			"nodoc" in self.settings.features or \
+			"noinfo" in self.settings.features or \
+			"noman" in self.settings.features:
+			# Apply INSTALL_MASK before collision-protect, since it may
+			# be useful to avoid collisions in some scenarios.
+			phase = MiscFunctionsProcess(background=False,
+				commands=["preinst_mask"], phase="preinst",
+				scheduler=self._scheduler, settings=self.settings)
+			phase.start()
+			phase.wait()
+
 		# We check for unicode encoding issues after src_install. However,
 		# the check must be repeated here for binary packages (it's
 		# inexpensive since we call os.walk() here anyway).
 		unicode_errors = []
+		line_ending_re = re.compile('[\n\r]')
+		srcroot_len = len(srcroot)
+		ed_len = len(self.settings["ED"])
 
 		while True:
 
 			unicode_error = False
+			eagain_error = False
 
 			myfilelist = []
 			mylinklist = []
 			paths_with_newlines = []
-			srcroot_len = len(srcroot)
 			def onerror(e):
 				raise
-			for parent, dirs, files in os.walk(srcroot, onerror=onerror):
+			walk_iter = os.walk(srcroot, onerror=onerror)
+			while True:
+				try:
+					parent, dirs, files = next(walk_iter)
+				except StopIteration:
+					break
+				except OSError as e:
+					if e.errno != errno.EAGAIN:
+						raise
+					# Observed with PyPy 1.8.
+					eagain_error = True
+					break
+
 				try:
 					parent = _unicode_decode(parent,
 						encoding=_encodings['merge'], errors='strict')
@@ -3293,12 +3552,12 @@ class dblink(object):
 					new_parent = _unicode_decode(parent,
 						encoding=_encodings['merge'], errors='replace')
 					new_parent = _unicode_encode(new_parent,
-						encoding=_encodings['merge'], errors='backslashreplace')
+						encoding='ascii', errors='backslashreplace')
 					new_parent = _unicode_decode(new_parent,
 						encoding=_encodings['merge'], errors='replace')
 					os.rename(parent, new_parent)
 					unicode_error = True
-					unicode_errors.append(new_parent[srcroot_len:])
+					unicode_errors.append(new_parent[ed_len:])
 					break
 
 				for fname in files:
@@ -3311,13 +3570,13 @@ class dblink(object):
 						new_fname = _unicode_decode(fname,
 							encoding=_encodings['merge'], errors='replace')
 						new_fname = _unicode_encode(new_fname,
-							encoding=_encodings['merge'], errors='backslashreplace')
+							encoding='ascii', errors='backslashreplace')
 						new_fname = _unicode_decode(new_fname,
 							encoding=_encodings['merge'], errors='replace')
 						new_fpath = os.path.join(parent, new_fname)
 						os.rename(fpath, new_fpath)
 						unicode_error = True
-						unicode_errors.append(new_fpath[srcroot_len:])
+						unicode_errors.append(new_fpath[ed_len:])
 						fname = new_fname
 						fpath = new_fpath
 					else:
@@ -3325,7 +3584,7 @@ class dblink(object):
 
 					relative_path = fpath[srcroot_len:]
 
-					if "\n" in relative_path:
+					if line_ending_re.search(relative_path) is not None:
 						paths_with_newlines.append(relative_path)
 
 					file_mode = os.lstat(fpath).st_mode
@@ -3340,19 +3599,20 @@ class dblink(object):
 				if unicode_error:
 					break
 
-			if not unicode_error:
+			if not (unicode_error or eagain_error):
 				break
 
 		if unicode_errors:
-			eerror(portage._merge_unicode_error(unicode_errors))
+			self._elog("eqawarn", "preinst",
+				_merge_unicode_error(unicode_errors))
 
 		if paths_with_newlines:
 			msg = []
-			msg.append(_("This package installs one or more files containing a newline (\\n) character:"))
+			msg.append(_("This package installs one or more files containing line ending characters:"))
 			msg.append("")
 			paths_with_newlines.sort()
 			for f in paths_with_newlines:
-				msg.append("\t/%s" % (f.replace("\n", "\\n")))
+				msg.append("\t/%s" % (f.replace("\n", "\\n").replace("\r", "\\r")))
 			msg.append("")
 			msg.append(_("package %s NOT merged") % self.mycpv)
 			msg.append("")
@@ -3394,14 +3654,6 @@ class dblink(object):
 			if installed_files:
 				return 1
 
-		# check for package collisions
-		blockers = self._blockers
-		if blockers is None:
-			blockers = []
-		collisions, symlink_collisions, plib_collisions = \
-			self._collision_protect(srcroot, destroot,
-			others_in_slot + blockers, myfilelist, mylinklist)
-
 		# Make sure the ebuild environment is initialized and that ${T}/elog
 		# exists for logging of collision-protect eerror messages.
 		if myebuild is None:
@@ -3413,6 +3665,29 @@ class dblink(object):
 			for other in others_in_slot])
 		prepare_build_dirs(settings=self.settings, cleanup=cleanup)
 
+		# check for package collisions
+		blockers = self._blockers
+		if blockers is None:
+			blockers = []
+		collisions, symlink_collisions, plib_collisions = \
+			self._collision_protect(srcroot, destroot,
+			others_in_slot + blockers, myfilelist, mylinklist)
+
+		if symlink_collisions:
+			# Symlink collisions need to be distinguished from other types
+			# of collisions, in order to avoid confusion (see bug #409359).
+			msg = _("Package '%s' has one or more collisions "
+				"between symlinks and directories, which is explicitly "
+				"forbidden by PMS section 13.4 (see bug #326685):") % \
+				(self.settings.mycpv,)
+			msg = textwrap.wrap(msg, 70)
+			msg.append("")
+			for f in symlink_collisions:
+				msg.append("\t%s" % os.path.join(destroot,
+					f.lstrip(os.path.sep)))
+			msg.append("")
+			self._elog("eerror", "preinst", msg)
+
 		if collisions:
 			collision_protect = "collision-protect" in self.settings.features
 			protect_owned = "protect-owned" in self.settings.features
@@ -3494,12 +3769,20 @@ class dblink(object):
 					eerror([_("None of the installed"
 						" packages claim the file(s)."), ""])
 
+			symlink_abort_msg = _("Package '%s' NOT merged since it has "
+				"one or more collisions between symlinks and directories, "
+				"which is explicitly forbidden by PMS section 13.4 "
+				"(see bug #326685).")
+
 			# The explanation about the collision and how to solve
 			# it may not be visible via a scrollback buffer, especially
 			# if the number of file collisions is large. Therefore,
 			# show a summary at the end.
 			abort = False
-			if collision_protect:
+			if symlink_collisions:
+				abort = True
+				msg = symlink_abort_msg % (self.settings.mycpv,)
+			elif collision_protect:
 				abort = True
 				msg = _("Package '%s' NOT merged due to file collisions.") % \
 					self.settings.mycpv
@@ -3507,12 +3790,6 @@ class dblink(object):
 				abort = True
 				msg = _("Package '%s' NOT merged due to file collisions.") % \
 					self.settings.mycpv
-			elif symlink_collisions:
-				abort = True
-				msg = _("Package '%s' NOT merged due to collision " + \
-				"between a symlink and a directory which is explicitly " + \
-				"forbidden by PMS (see bug #326685).") % \
-				(self.settings.mycpv,)
 			else:
 				msg = _("Package '%s' merged despite file collisions.") % \
 					self.settings.mycpv
@@ -3558,10 +3835,12 @@ class dblink(object):
 		# write local package counter for recording
 		if counter is None:
 			counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
-		io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
+		f = io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
 			encoding=_encodings['fs'], errors='strict'),
 			mode='w', encoding=_encodings['repo.content'],
-			errors='backslashreplace').write(_unicode_decode(str(counter)))
+			errors='backslashreplace')
+		f.write(_unicode_decode(str(counter)))
+		f.close()
 
 		self.updateprotect()
 
@@ -3577,9 +3856,8 @@ class dblink(object):
 			# Always behave like --noconfmem is enabled for downgrades
 			# so that people who don't know about this option are less
 			# likely to get confused when doing upgrade/downgrade cycles.
-			pv_split = catpkgsplit(self.mycpv)[1:]
 			for other in others_in_slot:
-				if pkgcmp(pv_split, catpkgsplit(other.mycpv)[1:]) < 0:
+				if vercmp(self.mycpv.version, other.mycpv.version) < 0:
 					cfgfiledict["IGNORE"] = 1
 					break
 
@@ -3798,22 +4076,11 @@ class dblink(object):
 			showMessage(_("!!! FAILED postinst: ")+str(a)+"\n",
 				level=logging.ERROR, noiselevel=-1)
 
-		downgrade = False
-		for v in otherversions:
-			if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
-				downgrade = True
-
-		# Lock the config memory file to prevent symlink creation
-		# in merge_contents from overlapping with env-update.
-		self.vartree.dbapi._fs_lock()
-		try:
-			#update environment settings, library paths. DO NOT change symlinks.
-			env_update(makelinks=(not downgrade),
-				target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
-				contents=contents, env=self.settings.environ(),
-				writemsg_level=self._display_merge)
-		finally:
-			self.vartree.dbapi._fs_unlock()
+		#update environment settings, library paths. DO NOT change symlinks.
+		env_update(
+			target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
+			contents=contents, env=self.settings,
+			writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
 
 		# For gcc upgrades, preserved libs have to be removed after the
 		# library path has been updated.
@@ -3867,7 +4134,8 @@ class dblink(object):
 
 		# we do a first merge; this will recurse through all files in our srcroot but also build up a
 		# "second hand" of symlinks to merge later
-		if self.mergeme(srcroot, destroot, outfile, secondhand, "", cfgfiledict, mymtime):
+		if self.mergeme(srcroot, destroot, outfile, secondhand,
+			self.settings["EPREFIX"].lstrip(os.sep), cfgfiledict, mymtime):
 			return 1
 
 		# now it's time to deal with our second hand; we'll loop until we can't merge anymore. The rest are
@@ -3936,7 +4204,7 @@ class dblink(object):
 		@param thismtime: The current time (typically long(time.time()))
 		@type thismtime: Long
 		@rtype: None or Boolean
-		@returns:
+		@return:
 		1. True on failure
 		2. None otherwise
 		
@@ -3952,6 +4220,10 @@ class dblink(object):
 		destroot = normalize_path(destroot).rstrip(sep) + sep
 		calc_prelink = "prelink-checksums" in self.settings.features
 
+		protect_if_modified = \
+			"config-protect-if-modified" in self.settings.features and \
+			self._installed_instance is not None
+
 		# this is supposed to merge a list of files. There will be two forms of argument passing.
 		if isinstance(stufftomerge, basestring):
 			#A directory is specified.  Figure out protection paths, listdir() it and process it.
@@ -3985,14 +4257,37 @@ class dblink(object):
 
 			if stat.S_ISLNK(mymode):
 				# we are merging a symbolic link
-				myabsto = abssymlink(mysrc)
+				# The file name of mysrc and the actual file that it points to
+				# will have earlier been forcefully converted to the 'merge'
+				# encoding if necessary, but the content of the symbolic link
+				# may need to be forcefully converted here.
+				myto = _os.readlink(_unicode_encode(mysrc,
+					encoding=_encodings['merge'], errors='strict'))
+				try:
+					myto = _unicode_decode(myto,
+						encoding=_encodings['merge'], errors='strict')
+				except UnicodeDecodeError:
+					myto = _unicode_decode(myto, encoding=_encodings['merge'],
+						errors='replace')
+					myto = _unicode_encode(myto, encoding='ascii',
+						errors='backslashreplace')
+					myto = _unicode_decode(myto, encoding=_encodings['merge'],
+						errors='replace')
+					os.unlink(mysrc)
+					os.symlink(myto, mysrc)
+
+				# Pass in the symlink target in order to bypass the
+				# os.readlink() call inside abssymlink(), since that
+				# call is unsafe if the merge encoding is not ascii
+				# or utf_8 (see bug #382021).
+				myabsto = abssymlink(mysrc, target=myto)
+
 				if myabsto.startswith(srcroot):
 					myabsto = myabsto[len(srcroot):]
 				myabsto = myabsto.lstrip(sep)
-				myto = os.readlink(mysrc)
 				if self.settings and self.settings["D"]:
 					if myto.startswith(self.settings["D"]):
-						myto = myto[len(self.settings["D"]):]
+						myto = myto[len(self.settings["D"])-1:]
 				# myrealto contains the path of the real file to which this symlink points.
 				# we can simply test for existence of this file to see if the target has been merged yet
 				myrealto = normalize_path(os.path.join(destroot, myabsto))
@@ -4170,9 +4465,18 @@ class dblink(object):
 						# now, config file management may come into play.
 						# we only need to tweak mydest if cfg file management is in play.
 						if protected:
+							destmd5 = perform_md5(mydest, calc_prelink=calc_prelink)
+							if protect_if_modified:
+								contents_key = \
+									self._installed_instance._match_contents(myrealdest)
+								if contents_key:
+									inst_info = self._installed_instance.getcontents()[contents_key]
+									if inst_info[0] == "obj" and inst_info[2] == destmd5:
+										protected = False
+
+						if protected:
 							# we have a protection path; enable config file management.
 							cfgprot = 0
-							destmd5 = perform_md5(mydest, calc_prelink=calc_prelink)
 							if mymd5 == destmd5:
 								#file already in place; simply update mtimes of destination
 								moveme = 1
@@ -4207,10 +4511,10 @@ class dblink(object):
 					# as hardlinks (having identical st_dev and st_ino).
 					hardlink_key = (mystat.st_dev, mystat.st_ino)
 
-					hardlink_candidates = self._md5_merge_map.get(hardlink_key)
+					hardlink_candidates = self._hardlink_merge_map.get(hardlink_key)
 					if hardlink_candidates is None:
 						hardlink_candidates = []
-						self._md5_merge_map[hardlink_key] = hardlink_candidates
+						self._hardlink_merge_map[hardlink_key] = hardlink_candidates
 
 					mymtime = movefile(mysrc, mydest, newmtime=thismtime,
 						sstat=mystat, mysettings=self.settings,
@@ -4218,8 +4522,7 @@ class dblink(object):
 						encoding=_encodings['merge'])
 					if mymtime is None:
 						return 1
-					if hardlink_candidates is not None:
-						hardlink_candidates.append(mydest)
+					hardlink_candidates.append(mydest)
 					zing = ">>>"
 
 				if mymtime != None:
@@ -4445,6 +4748,7 @@ def write_contents(contents, root, f):
 
 def tar_contents(contents, root, tar, protect=None, onProgress=None):
 	os = _os_merge
+	encoding = _encodings['merge']
 
 	try:
 		for x in contents:
@@ -4464,7 +4768,9 @@ def tar_contents(contents, root, tar, protect=None, onProgress=None):
 			pass
 		else:
 			os = portage.os
+			encoding = _encodings['fs']
 
+	tar.encoding = encoding
 	root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
 	id_strings = {}
 	maxval = len(contents)
@@ -4486,7 +4792,7 @@ def tar_contents(contents, root, tar, protect=None, onProgress=None):
 			continue
 		contents_type = contents[path][0]
 		if path.startswith(root):
-			arcname = path[len(root):]
+			arcname = "./" + path[len(root):]
 		else:
 			raise ValueError("invalid root argument: '%s'" % root)
 		live_path = path
@@ -4498,7 +4804,51 @@ def tar_contents(contents, root, tar, protect=None, onProgress=None):
 			# recorded as a real directory in the tar file to ensure that tar
 			# can properly extract its children.
 			live_path = os.path.realpath(live_path)
-		tarinfo = tar.gettarinfo(live_path, arcname)
+			lst = os.lstat(live_path)
+
+		# Since os.lstat() inside TarFile.gettarinfo() can trigger a
+		# UnicodeEncodeError when sys.getfilesystemencoding() returns
+		# something other than utf_8 (as in bug #388773),
+		# we implement the needed functionality here, using the result
+		# of our successful lstat call. An alternative to this would be
+		# to pass in the fileobj argument to TarFile.gettarinfo(), so
+		# that it could use fstat instead of lstat. However, that would
+		# have the unwanted effect of dereferencing symlinks.
+
+		tarinfo = tar.tarinfo()
+		tarinfo.name = arcname
+		tarinfo.mode = lst.st_mode
+		tarinfo.uid = lst.st_uid
+		tarinfo.gid = lst.st_gid
+		tarinfo.size = 0
+		tarinfo.mtime = lst.st_mtime
+		tarinfo.linkname = ""
+		if stat.S_ISREG(lst.st_mode):
+			inode = (lst.st_ino, lst.st_dev)
+			if (lst.st_nlink > 1 and
+				inode in tar.inodes and
+				arcname != tar.inodes[inode]):
+				tarinfo.type = tarfile.LNKTYPE
+				tarinfo.linkname = tar.inodes[inode]
+			else:
+				tar.inodes[inode] = arcname
+				tarinfo.type = tarfile.REGTYPE
+				tarinfo.size = lst.st_size
+		elif stat.S_ISDIR(lst.st_mode):
+			tarinfo.type = tarfile.DIRTYPE
+		elif stat.S_ISLNK(lst.st_mode):
+			tarinfo.type = tarfile.SYMTYPE
+			tarinfo.linkname = os.readlink(live_path)
+		else:
+			continue
+		try:
+			tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
+		except KeyError:
+			pass
+		try:
+			tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
+		except KeyError:
+			pass
 
 		if stat.S_ISREG(lst.st_mode):
 			if protect and protect(path):
@@ -4515,7 +4865,7 @@ def tar_contents(contents, root, tar, protect=None, onProgress=None):
 				f.close()
 			else:
 				f = open(_unicode_encode(path,
-					encoding=object.__getattribute__(os, '_encoding'),
+					encoding=encoding,
 					errors='strict'), 'rb')
 				try:
 					tar.addfile(tarinfo, f)
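
The gettarinfo() replacement above can be read as a standalone recipe. A
minimal, self-contained sketch of the same idea (hardlink bookkeeping and
uname/gname lookups omitted; the function name and arguments are made up):

    import os
    import stat
    import tarfile

    def tarinfo_from_lstat(tar, path, arcname):
        # Build a TarInfo from os.lstat() results, bypassing
        # TarFile.gettarinfo() and the os.lstat() call inside it that
        # can raise UnicodeEncodeError under a non-utf_8 filesystem
        # encoding.
        lst = os.lstat(path)
        ti = tar.tarinfo()      # honor a TarFile subclass's tarinfo class
        ti.name = arcname
        ti.mode = lst.st_mode
        ti.uid = lst.st_uid
        ti.gid = lst.st_gid
        ti.mtime = lst.st_mtime
        ti.size = 0
        if stat.S_ISREG(lst.st_mode):
            ti.type = tarfile.REGTYPE
            ti.size = lst.st_size
        elif stat.S_ISDIR(lst.st_mode):
            ti.type = tarfile.DIRTYPE
        elif stat.S_ISLNK(lst.st_mode):
            ti.type = tarfile.SYMTYPE
            ti.linkname = os.readlink(path)
        return ti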

diff --git a/portage_with_autodep/pym/portage/dbapi/vartree.pyo b/portage_with_autodep/pym/portage/dbapi/vartree.pyo
new file mode 100644
index 0000000..7c186cf
Binary files /dev/null and b/portage_with_autodep/pym/portage/dbapi/vartree.pyo differ

diff --git a/portage_with_autodep/pym/portage/dbapi/virtual.py b/portage_with_autodep/pym/portage/dbapi/virtual.py
index ec97ffe..da15983 100644
--- a/portage_with_autodep/pym/portage/dbapi/virtual.py
+++ b/portage_with_autodep/pym/portage/dbapi/virtual.py
@@ -1,9 +1,10 @@
-# Copyright 1998-2007 Gentoo Foundation
+# Copyright 1998-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 
 from portage.dbapi import dbapi
-from portage import cpv_getkey
+from portage.dbapi.dep_expand import dep_expand
+from portage.versions import cpv_getkey, _pkg_str
 
 class fakedbapi(dbapi):
 	"""A fake dbapi that allows consumers to inject/remove packages to/from it
@@ -31,27 +32,30 @@ class fakedbapi(dbapi):
 			self._match_cache = {}
 
 	def match(self, origdep, use_cache=1):
-		result = self._match_cache.get(origdep, None)
+		atom = dep_expand(origdep, mydb=self, settings=self.settings)
+		cache_key = (atom, atom.unevaluated_atom)
+		result = self._match_cache.get(cache_key)
 		if result is not None:
 			return result[:]
-		result = dbapi.match(self, origdep, use_cache=use_cache)
-		self._match_cache[origdep] = result
+		result = list(self._iter_match(atom, self.cp_list(atom.cp)))
+		self._match_cache[cache_key] = result
 		return result[:]
 
 	def cpv_exists(self, mycpv, myrepo=None):
 		return mycpv in self.cpvdict
 
 	def cp_list(self, mycp, use_cache=1, myrepo=None):
-		cachelist = self._match_cache.get(mycp)
-		# cp_list() doesn't expand old-style virtuals
-		if cachelist and cachelist[0].startswith(mycp):
+		# NOTE: Cache can be safely shared with the match cache, since the
+		# match cache uses the result from dep_expand for the cache_key.
+		cache_key = (mycp, mycp)
+		cachelist = self._match_cache.get(cache_key)
+		if cachelist is not None:
 			return cachelist[:]
 		cpv_list = self.cpdict.get(mycp)
 		if cpv_list is None:
 			cpv_list = []
 		self._cpv_sort_ascending(cpv_list)
-		if not (not cpv_list and mycp.startswith("virtual/")):
-			self._match_cache[mycp] = cpv_list
+		self._match_cache[cache_key] = cpv_list
 		return cpv_list[:]
 
 	def cp_all(self):
@@ -70,7 +74,13 @@ class fakedbapi(dbapi):
 		@param metadata: dict
 		"""
 		self._clear_cache()
-		mycp = cpv_getkey(mycpv)
+		if not hasattr(mycpv, 'cp'):
+			if metadata is None:
+				mycpv = _pkg_str(mycpv)
+			else:
+				mycpv = _pkg_str(mycpv, slot=metadata.get('SLOT'),
+					repo=metadata.get('repository'))
+		mycp = mycpv.cp
 		self.cpvdict[mycpv] = metadata
 		myslot = None
 		if self._exclusive_slots and metadata:
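
One detail worth noting in the fakedbapi changes above: match() and
cp_list() both hand out result[:] instead of the cached list itself, so
caller-side mutation can never poison the shared _match_cache. A small
illustration (the key and values here are made up):

    cache = {"sys-apps/portage": ["sys-apps/portage-2.1.10"]}

    hits = cache["sys-apps/portage"][:]   # defensive copy on the way out
    hits.append("garbage")                # caller mutates its own copy
    assert cache["sys-apps/portage"] == ["sys-apps/portage-2.1.10"]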

diff --git a/portage_with_autodep/pym/portage/dbapi/virtual.pyo b/portage_with_autodep/pym/portage/dbapi/virtual.pyo
new file mode 100644
index 0000000..9f7c667
Binary files /dev/null and b/portage_with_autodep/pym/portage/dbapi/virtual.pyo differ

diff --git a/portage_with_autodep/pym/portage/debug.py b/portage_with_autodep/pym/portage/debug.py
index ce642fe..ebf1a13 100644
--- a/portage_with_autodep/pym/portage/debug.py
+++ b/portage_with_autodep/pym/portage/debug.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import os
@@ -26,7 +26,7 @@ class trace_handler(object):
 	def __init__(self):
 		python_system_paths = []
 		for x in sys.path:
-			if os.path.basename(x).startswith("python2."):
+			if os.path.basename(x) == "python%s.%s" % sys.version_info[:2]:
 				python_system_paths.append(x)
 
 		self.ignore_prefixes = []
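
The old test, startswith("python2."), could never match on Python 3, so
trace output was not filtered there. The new comparison names the running
interpreter's own lib directory exactly; for example:

    import sys

    libdir = "python%s.%s" % sys.version_info[:2]
    print(libdir)   # e.g. "python2.7" or "python3.2", whichever is running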

diff --git a/portage_with_autodep/pym/portage/debug.pyo b/portage_with_autodep/pym/portage/debug.pyo
new file mode 100644
index 0000000..82a5e8f
Binary files /dev/null and b/portage_with_autodep/pym/portage/debug.pyo differ

diff --git a/portage_with_autodep/pym/portage/dep/__init__.py b/portage_with_autodep/pym/portage/dep/__init__.py
index fd5ad30..152af0a 100644
--- a/portage_with_autodep/pym/portage/dep/__init__.py
+++ b/portage_with_autodep/pym/portage/dep/__init__.py
@@ -1,5 +1,5 @@
 # deps.py -- Portage dependency resolution functions
-# Copyright 2003-2011 Gentoo Foundation
+# Copyright 2003-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = [
@@ -30,13 +30,20 @@ __all__ = [
 import re, sys
 import warnings
 from itertools import chain
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.util:cmp_sort_key,writemsg',
+)
+
 from portage import _unicode_decode
 from portage.eapi import eapi_has_slot_deps, eapi_has_src_uri_arrows, \
-	eapi_has_use_deps, eapi_has_strong_blocks, eapi_has_use_dep_defaults
+	eapi_has_use_deps, eapi_has_strong_blocks, eapi_has_use_dep_defaults, \
+	eapi_has_repo_deps, eapi_allows_dots_in_PN, eapi_allows_dots_in_use_flags
 from portage.exception import InvalidAtom, InvalidData, InvalidDependString
 from portage.localization import _
 from portage.versions import catpkgsplit, catsplit, \
-	pkgcmp, ververify, _cp, _cpv
+	vercmp, ververify, _cp, _cpv, _pkg_str, _unknown_repo
 import portage.cache.mappings
 
 if sys.hexversion >= 0x3000000:
@@ -55,7 +62,7 @@ def cpvequal(cpv1, cpv2):
 	@param cpv2: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1"
 	@type cpv2: String
 	@rtype: Boolean
-	@returns:
+	@return:
 	1.  True if cpv1 = cpv2
 	2.  False Otherwise
 	3.  Throws PortageException if cpv1 or cpv2 is not a CPV
@@ -67,16 +74,27 @@ def cpvequal(cpv1, cpv2):
 
 	"""
 
-	split1 = catpkgsplit(cpv1)
-	split2 = catpkgsplit(cpv2)
-	
-	if not split1 or not split2:
+	try:
+		try:
+			split1 = cpv1.cpv_split
+		except AttributeError:
+			cpv1 = _pkg_str(cpv1)
+			split1 = cpv1.cpv_split
+
+		try:
+			split2 = cpv2.cpv_split
+		except AttributeError:
+			cpv2 = _pkg_str(cpv2)
+			split2 = cpv2.cpv_split
+
+	except InvalidData:
 		raise portage.exception.PortageException(_("Invalid data '%s, %s', parameter was not a CPV") % (cpv1, cpv2))
-	
-	if split1[0] != split2[0]:
+
+	if split1[0] != split2[0] or \
+		split1[1] != split2[1]:
 		return False
-	
-	return (pkgcmp(split1[1:], split2[1:]) == 0)
+
+	return vercmp(cpv1.version, cpv2.version) == 0
 
 def strip_empty(myarr):
 	"""
@@ -635,8 +653,8 @@ def flatten(mylist):
 
 
 _usedep_re = {
-	"0":        re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"),
-	"4-python": re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@.-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"),
+	"dots_disallowed_in_use_flags": re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"),
+	"dots_allowed_in_use_flags":    re.compile("^(?P<prefix>[!-]?)(?P<flag>[A-Za-z0-9][A-Za-z0-9+_@.-]*)(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$"),
 }
 
 def _get_usedep_re(eapi):
@@ -649,10 +667,10 @@ def _get_usedep_re(eapi):
 	@return: A regular expression object that matches valid USE deps for the
 		given eapi.
 	"""
-	if eapi in (None, "4-python",):
-		return _usedep_re["4-python"]
+	if eapi is None or eapi_allows_dots_in_use_flags(eapi):
+		return _usedep_re["dots_allowed_in_use_flags"]
 	else:
-		return _usedep_re["0"]
+		return _usedep_re["dots_disallowed_in_use_flags"]
 
 class _use_dep(object):
 
@@ -1068,6 +1086,10 @@ class Atom(_atom_base):
 
 		_atom_base.__init__(s)
 
+		atom_re = _get_atom_re(eapi)
+		if eapi_has_repo_deps(eapi):
+			allow_repo = True
+
 		if "!" == s[:1]:
 			blocker = self._blocker(forbid_overlap=("!" == s[1:2]))
 			if blocker.overlap.forbid:
@@ -1077,11 +1099,11 @@ class Atom(_atom_base):
 		else:
 			blocker = False
 		self.__dict__['blocker'] = blocker
-		m = _atom_re.match(s)
+		m = atom_re.match(s)
 		extended_syntax = False
 		if m is None:
 			if allow_wildcard:
-				m = _atom_wildcard_re.match(s)
+				m = _get_atom_wildcard_re(eapi).match(s)
 				if m is None:
 					raise InvalidAtom(self)
 				op = None
@@ -1096,38 +1118,44 @@ class Atom(_atom_base):
 			else:
 				raise InvalidAtom(self)
 		elif m.group('op') is not None:
-			base = _atom_re.groupindex['op']
+			base = atom_re.groupindex['op']
 			op = m.group(base + 1)
 			cpv = m.group(base + 2)
 			cp = m.group(base + 3)
-			slot = m.group(_atom_re.groups - 2)
-			repo = m.group(_atom_re.groups - 1)
-			use_str = m.group(_atom_re.groups)
+			slot = m.group(atom_re.groups - 2)
+			repo = m.group(atom_re.groups - 1)
+			use_str = m.group(atom_re.groups)
 			if m.group(base + 4) is not None:
 				raise InvalidAtom(self)
 		elif m.group('star') is not None:
-			base = _atom_re.groupindex['star']
+			base = atom_re.groupindex['star']
 			op = '=*'
 			cpv = m.group(base + 1)
 			cp = m.group(base + 2)
-			slot = m.group(_atom_re.groups - 2)
-			repo = m.group(_atom_re.groups - 1)
-			use_str = m.group(_atom_re.groups)
+			slot = m.group(atom_re.groups - 2)
+			repo = m.group(atom_re.groups - 1)
+			use_str = m.group(atom_re.groups)
 			if m.group(base + 3) is not None:
 				raise InvalidAtom(self)
 		elif m.group('simple') is not None:
 			op = None
-			cpv = cp = m.group(_atom_re.groupindex['simple'] + 1)
-			slot = m.group(_atom_re.groups - 2)
-			repo = m.group(_atom_re.groups - 1)
-			use_str = m.group(_atom_re.groups)
-			if m.group(_atom_re.groupindex['simple'] + 2) is not None:
+			cpv = cp = m.group(atom_re.groupindex['simple'] + 1)
+			slot = m.group(atom_re.groups - 2)
+			repo = m.group(atom_re.groups - 1)
+			use_str = m.group(atom_re.groups)
+			if m.group(atom_re.groupindex['simple'] + 2) is not None:
 				raise InvalidAtom(self)
 
 		else:
 			raise AssertionError(_("required group not found in atom: '%s'") % self)
 		self.__dict__['cp'] = cp
-		self.__dict__['cpv'] = cpv
+		try:
+			self.__dict__['cpv'] = _pkg_str(cpv)
+			self.__dict__['version'] = self.cpv.version
+		except InvalidData:
+			# plain cp, wildcard, or something
+			self.__dict__['cpv'] = cpv
+			self.__dict__['version'] = None
 		self.__dict__['repo'] = repo
 		self.__dict__['slot'] = slot
 		self.__dict__['operator'] = op
@@ -1216,6 +1244,23 @@ class Atom(_atom_base):
 		return Atom(self.replace(_slot_separator + self.slot, '', 1),
 			allow_repo=True, allow_wildcard=True)
 
+	def with_repo(self, repo):
+		atom = remove_slot(self)
+		if self.slot is not None:
+			atom += _slot_separator + self.slot
+		atom += _repo_separator + repo
+		if self.use is not None:
+			atom += str(self.use)
+		return Atom(atom, allow_repo=True, allow_wildcard=True)
+
+	def with_slot(self, slot):
+		atom = remove_slot(self) + _slot_separator + slot
+		if self.repo is not None:
+			atom += _repo_separator + self.repo
+		if self.use is not None:
+			atom += str(self.use)
+		return Atom(atom, allow_repo=True, allow_wildcard=True)
+
 	def __setattr__(self, name, value):
 		raise AttributeError("Atom instances are immutable",
 			self.__class__, name, value)
@@ -1353,10 +1398,13 @@ class ExtendedAtomDict(portage.cache.mappings.MutableMapping):
 			yield k
 
 	def iteritems(self):
-		for item in self._normal.items():
-			yield item
-		for item in self._extended.items():
-			yield item
+		try:
+			for item in self._normal.items():
+				yield item
+			for item in self._extended.items():
+				yield item
+		except AttributeError:
+			pass # FEATURES=python-trace
 
 	def __delitem__(self, cp):
 		if "*" in cp:
@@ -1610,20 +1658,45 @@ _repo_separator = "::"
 _repo_name = r'[\w][\w-]*'
 _repo = r'(?:' + _repo_separator + '(' + _repo_name + ')' + ')?'
 
-_atom_re = re.compile('^(?P<without_use>(?:' +
-	'(?P<op>' + _op + _cpv + ')|' +
-	'(?P<star>=' + _cpv + r'\*)|' +
-	'(?P<simple>' + _cp + '))' + 
-	'(' + _slot_separator + _slot + ')?' + _repo + ')(' + _use + ')?$', re.VERBOSE)
+_atom_re = {
+	"dots_disallowed_in_PN": re.compile('^(?P<without_use>(?:' +
+		'(?P<op>' + _op + _cpv['dots_disallowed_in_PN'] + ')|' +
+		'(?P<star>=' + _cpv['dots_disallowed_in_PN'] + r'\*)|' +
+		'(?P<simple>' + _cp['dots_disallowed_in_PN'] + '))' + 
+		'(' + _slot_separator + _slot + ')?' + _repo + ')(' + _use + ')?$', re.VERBOSE),
+	"dots_allowed_in_PN": re.compile('^(?P<without_use>(?:' +
+		'(?P<op>' + _op + _cpv['dots_allowed_in_PN'] + ')|' +
+		'(?P<star>=' + _cpv['dots_allowed_in_PN'] + r'\*)|' +
+		'(?P<simple>' + _cp['dots_allowed_in_PN'] + '))' + 
+		'(' + _slot_separator + _slot + ')?' + _repo + ')(' + _use + ')?$', re.VERBOSE),
+}
+
+def _get_atom_re(eapi):
+	if eapi is None or eapi_allows_dots_in_PN(eapi):
+		return _atom_re["dots_allowed_in_PN"]
+	else:
+		return _atom_re["dots_disallowed_in_PN"]
 	
 _extended_cat = r'[\w+*][\w+.*-]*'
-_extended_pkg = r'[\w+*][\w+*-]*?'
+_extended_pkg = {
+	"dots_disallowed_in_PN": r'[\w+*][\w+*-]*?',
+	"dots_allowed_in_PN":    r'[\w+*][\w+.*-]*?',
+}
 
-_atom_wildcard_re = re.compile('(?P<simple>(' + _extended_cat + ')/(' + _extended_pkg + '))(:(?P<slot>' + _slot + '))?(' + _repo_separator + '(?P<repo>' + _repo_name + '))?$')
+_atom_wildcard_re = {
+	"dots_disallowed_in_PN": re.compile('(?P<simple>(' + _extended_cat + ')/(' + _extended_pkg['dots_disallowed_in_PN'] + '))(:(?P<slot>' + _slot + '))?(' + _repo_separator + '(?P<repo>' + _repo_name + '))?$'),
+	"dots_allowed_in_PN":    re.compile('(?P<simple>(' + _extended_cat + ')/(' + _extended_pkg['dots_allowed_in_PN'] + '))(:(?P<slot>' + _slot + '))?(' + _repo_separator + '(?P<repo>' + _repo_name + '))?$'),
+}
+
+def _get_atom_wildcard_re(eapi):
+	if eapi is None or eapi_allows_dots_in_PN(eapi):
+		return _atom_wildcard_re["dots_allowed_in_PN"]
+	else:
+		return _atom_wildcard_re["dots_disallowed_in_PN"]
 
 _useflag_re = {
-	"0":        re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@-]*$'),
-	"4-python": re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@.-]*$'),
+	"dots_disallowed_in_use_flags": re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@-]*$'),
+	"dots_allowed_in_use_flags":    re.compile(r'^[A-Za-z0-9][A-Za-z0-9+_@.-]*$'),
 }
 
 def _get_useflag_re(eapi):
@@ -1636,10 +1709,10 @@ def _get_useflag_re(eapi):
 	@return: A regular expression object that matches valid USE flags for the
 		given eapi.
 	"""
-	if eapi in (None, "4-python",):
-		return _useflag_re["4-python"]
+	if eapi is None or eapi_allows_dots_in_use_flags(eapi):
+		return _useflag_re["dots_allowed_in_use_flags"]
 	else:
-		return _useflag_re["0"]
+		return _useflag_re["dots_disallowed_in_use_flags"]
 
 def isvalidatom(atom, allow_blockers=False, allow_wildcard=False, allow_repo=False):
 	"""
@@ -1753,7 +1826,14 @@ def match_to_list(mypkg, mylist):
 	@rtype: List
 	@return: A unique list of package atoms that match the given package atom
 	"""
-	return [ x for x in set(mylist) if match_from_list(x, [mypkg]) ]
+	matches = set()
+	result = []
+	pkgs = [mypkg]
+	for x in mylist:
+		if x not in matches and match_from_list(x, pkgs):
+			matches.add(x)
+			result.append(x)
+	return result
 
 def best_match_to_list(mypkg, mylist):
 	"""
@@ -1781,6 +1861,7 @@ def best_match_to_list(mypkg, mylist):
 		'>':2, '<':2, '>=':2, '<=':2, None:1}
 	maxvalue = -2
 	bestm  = None
+	mypkg_cpv = None
 	for x in match_to_list(mypkg, mylist):
 		if x.extended_syntax:
 			if dep_getslot(x) is not None:
@@ -1800,6 +1881,31 @@ def best_match_to_list(mypkg, mylist):
 		if op_val > maxvalue:
 			maxvalue = op_val
 			bestm  = x
+		elif op_val == maxvalue and op_val == 2:
+			# For >, <, >=, and <=, the one with the version
+			# closest to mypkg is the best match.
+			if mypkg_cpv is None:
+				try:
+					mypkg_cpv = mypkg.cpv
+				except AttributeError:
+					mypkg_cpv = _pkg_str(remove_slot(mypkg))
+			if bestm.cpv == mypkg_cpv or bestm.cpv == x.cpv:
+				pass
+			elif x.cpv == mypkg_cpv:
+				bestm = x
+			else:
+				# Sort the cpvs to find the one closest to mypkg_cpv
+				cpv_list = [bestm.cpv, mypkg_cpv, x.cpv]
+				def cmp_cpv(cpv1, cpv2):
+					return vercmp(cpv1.version, cpv2.version)
+				cpv_list.sort(key=cmp_sort_key(cmp_cpv))
+				if cpv_list[0] is mypkg_cpv or cpv_list[-1] is mypkg_cpv:
+					if cpv_list[1] is x.cpv:
+						bestm = x
+				else:
+					# TODO: handle the case where mypkg_cpv is in the middle
+					pass
+
 	return bestm
 
 def match_from_list(mydep, candidate_list):
@@ -1817,7 +1923,6 @@ def match_from_list(mydep, candidate_list):
 	if not candidate_list:
 		return []
 
-	from portage.util import writemsg
 	if "!" == mydep[:1]:
 		if "!" == mydep[1:2]:
 			mydep = mydep[2:]
@@ -1882,7 +1987,7 @@ def match_from_list(mydep, candidate_list):
 		myver = mysplit[2].lstrip("0")
 		if not myver or not myver[0].isdigit():
 			myver = "0"+myver
-		mycpv = mysplit[0]+"/"+mysplit[1]+"-"+myver
+		mycpv_cmp = mysplit[0]+"/"+mysplit[1]+"-"+myver
 		for x in candidate_list:
 			xs = getattr(x, "cpv_split", None)
 			if xs is None:
@@ -1891,7 +1996,7 @@ def match_from_list(mydep, candidate_list):
 			if not myver or not myver[0].isdigit():
 				myver = "0"+myver
 			xcpv = xs[0]+"/"+xs[1]+"-"+myver
-			if xcpv.startswith(mycpv):
+			if xcpv.startswith(mycpv_cmp):
 				mylist.append(x)
 
 	elif operator == "~": # version, any revision, match
@@ -1908,15 +2013,19 @@ def match_from_list(mydep, candidate_list):
 			mylist.append(x)
 
 	elif operator in [">", ">=", "<", "<="]:
-		mysplit = ["%s/%s" % (cat, pkg), ver, rev]
 		for x in candidate_list:
-			xs = getattr(x, "cpv_split", None)
-			if xs is None:
-				xs = catpkgsplit(remove_slot(x))
-			xcat, xpkg, xver, xrev = xs
-			xs = ["%s/%s" % (xcat, xpkg), xver, xrev]
+			if hasattr(x, 'cp'):
+				pkg = x
+			else:
+				try:
+					pkg = _pkg_str(remove_slot(x))
+				except InvalidData:
+					continue
+
+			if pkg.cp != mydep.cp:
+				continue
 			try:
-				result = pkgcmp(xs, mysplit)
+				result = vercmp(pkg.version, mydep.version)
 			except ValueError: # vercmp may raise ValueError during int() conversion
 				writemsg(_("\nInvalid package name: %s\n") % x, noiselevel=-1)
 				raise
@@ -1993,7 +2102,8 @@ def match_from_list(mydep, candidate_list):
 			repo = getattr(x, "repo", False)
 			if repo is False:
 				repo = dep_getrepo(x)
-			if repo is not None and repo != mydep.repo:
+			if repo is not None and repo != _unknown_repo and \
+				repo != mydep.repo:
 				continue
 			mylist.append(x)
 

diff --git a/portage_with_autodep/pym/portage/dep/__init__.pyo b/portage_with_autodep/pym/portage/dep/__init__.pyo
new file mode 100644
index 0000000..c78bb23
Binary files /dev/null and b/portage_with_autodep/pym/portage/dep/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/dep/_slot_operator.py b/portage_with_autodep/pym/portage/dep/_slot_operator.py
new file mode 100644
index 0000000..7b64444
--- /dev/null
+++ b/portage_with_autodep/pym/portage/dep/_slot_operator.py
@@ -0,0 +1,97 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+from portage.dep import Atom, paren_enclose, use_reduce
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidData
+from _emerge.Package import Package
+
+def find_built_slot_operator_atoms(pkg):
+	atoms = {}
+	for k in Package._dep_keys:
+		atom_list = list(_find_built_slot_operator(use_reduce(pkg._metadata[k],
+			uselist=pkg.use.enabled, eapi=pkg.eapi,
+			token_class=Atom)))
+		if atom_list:
+			atoms[k] = atom_list
+	return atoms
+
+def _find_built_slot_operator(dep_struct):
+	for x in dep_struct:
+		if isinstance(x, list):
+			for atom in _find_built_slot_operator(x):
+				yield atom
+		elif isinstance(x, Atom) and x.slot_operator_built:
+			yield x
+
+def ignore_built_slot_operator_deps(dep_struct):
+	for i, x in enumerate(dep_struct):
+		if isinstance(x, list):
+			ignore_built_slot_operator_deps(x)
+		elif isinstance(x, Atom) and x.slot_operator_built:
+			# There's no way of knowing here whether the SLOT
+			# part of the slot/sub-slot pair should be kept, so we
+			# ignore both parts.
+			dep_struct[i] = x.without_slot
+
+def evaluate_slot_operator_equal_deps(settings, use, trees):
+
+	metadata = settings.configdict['pkg']
+	eapi = metadata['EAPI']
+	eapi_attrs = _get_eapi_attrs(eapi)
+	running_vardb = trees[trees._running_eroot]["vartree"].dbapi
+	target_vardb = trees[trees._target_eroot]["vartree"].dbapi
+	vardbs = [target_vardb]
+	deps = {}
+	for k in Package._dep_keys:
+		deps[k] = use_reduce(metadata[k],
+			uselist=use, eapi=eapi, token_class=Atom)
+
+	for k in Package._runtime_keys:
+		_eval_deps(deps[k], vardbs)
+
+	if eapi_attrs.hdepend:
+		_eval_deps(deps["HDEPEND"], [running_vardb])
+		_eval_deps(deps["DEPEND"], [target_vardb])
+	else:
+		if running_vardb is not target_vardb:
+			vardbs.append(running_vardb)
+		_eval_deps(deps["DEPEND"], vardbs)
+
+	result = {}
+	for k, v in deps.items():
+		result[k] = paren_enclose(v)
+
+	return result
+
+def _eval_deps(dep_struct, vardbs):
+	for i, x in enumerate(dep_struct):
+		if isinstance(x, list):
+			_eval_deps(x, vardbs)
+		elif isinstance(x, Atom) and x.slot_operator == "=":
+			for vardb in vardbs:
+				best_version = vardb.match(x)
+				if best_version:
+					best_version = best_version[-1]
+					try:
+						best_version = \
+							vardb._pkg_str(best_version, None)
+					except (KeyError, InvalidData):
+						pass
+					else:
+						slot_part = "%s/%s=" % \
+							(best_version.slot, best_version.sub_slot)
+						x = x.with_slot(slot_part)
+						dep_struct[i] = x
+						break
+			else:
+				# this dep could not be resolved, so remove the operator
+				# (user may be using package.provided and managing rebuilds
+				# manually)
+				if x.slot:
+					x = x.with_slot(x.slot)
+				else:
+					x = x.without_slot
+				dep_struct[i] = x
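
The loop in _eval_deps() leans on Python's for/else: the else suite runs
only when no vardb produced a usable best_version, i.e. the loop finished
without break. A minimal model of that control flow (plain dicts stand in
for the vardbs, and the values are made up):

    result = None
    for db in ({}, {"dev-libs/foo": "2/2.2"}):   # stand-ins for vardbs
        match = db.get("dev-libs/foo")
        if match:
            result = "slot-locked to :%s=" % match
            break
    else:
        result = "operator removed (package.provided case)"
    print(result)   # slot-locked to :2/2.2=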

diff --git a/portage_with_autodep/pym/portage/dep/dep_check.py b/portage_with_autodep/pym/portage/dep/dep_check.py
index 01d5021..99a5eb0 100644
--- a/portage_with_autodep/pym/portage/dep/dep_check.py
+++ b/portage_with_autodep/pym/portage/dep/dep_check.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = ['dep_check', 'dep_eval', 'dep_wordreduce', 'dep_zapdeps']
@@ -11,7 +11,7 @@ from portage.dep import Atom, match_from_list, use_reduce
 from portage.exception import InvalidDependString, ParseError
 from portage.localization import _
 from portage.util import writemsg, writemsg_level
-from portage.versions import catpkgsplit, cpv_getkey, pkgcmp
+from portage.versions import vercmp, _pkg_str
 
 def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
 	trees=None, use_mask=None, use_force=None, **kwargs):
@@ -39,14 +39,12 @@ def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
 	parent = mytrees.get("parent")
 	virt_parent = mytrees.get("virt_parent")
 	graph_parent = None
-	eapi = None
 	if parent is not None:
 		if virt_parent is not None:
 			graph_parent = virt_parent
 			parent = virt_parent
 		else:
 			graph_parent = parent
-		eapi = parent.metadata["EAPI"]
 	repoman = not mysettings.local_config
 	if kwargs["use_binaries"]:
 		portdb = trees[myroot]["bintree"].dbapi
@@ -352,8 +350,14 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
 			avail_pkg = mydbapi.match(atom.without_use)
 			if avail_pkg:
 				avail_pkg = avail_pkg[-1] # highest (ascending order)
-				avail_slot = Atom("%s:%s" % (atom.cp,
-					mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
+				try:
+					slot = avail_pkg.slot
+				except AttributeError:
+					eapi, slot, repo = mydbapi.aux_get(avail_pkg,
+						["EAPI", "SLOT", "repository"])
+					avail_pkg = _pkg_str(avail_pkg, eapi=eapi,
+						slot=slot, repo=repo)
+				avail_slot = Atom("%s:%s" % (atom.cp, slot))
 			if not avail_pkg:
 				all_available = False
 				all_use_satisfied = False
@@ -368,16 +372,19 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
 					avail_pkg_use = avail_pkg_use[-1]
 					if avail_pkg_use != avail_pkg:
 						avail_pkg = avail_pkg_use
-						avail_slot = Atom("%s:%s" % (atom.cp,
-							mydbapi.aux_get(avail_pkg, ["SLOT"])[0]))
+						try:
+							slot = avail_pkg.slot
+						except AttributeError:
+							eapi, slot, repo = mydbapi.aux_get(avail_pkg,
+								["EAPI", "SLOT", "repository"])
+							avail_pkg = _pkg_str(avail_pkg,
+								eapi=eapi, slot=slot, repo=repo)
 
 			slot_map[avail_slot] = avail_pkg
-			pkg_cp = cpv_getkey(avail_pkg)
-			highest_cpv = cp_map.get(pkg_cp)
+			highest_cpv = cp_map.get(avail_pkg.cp)
 			if highest_cpv is None or \
-				pkgcmp(catpkgsplit(avail_pkg)[1:],
-				catpkgsplit(highest_cpv)[1:]) > 0:
-				cp_map[pkg_cp] = avail_pkg
+				vercmp(avail_pkg.version, highest_cpv.version) > 0:
+				cp_map[avail_pkg.cp] = avail_pkg
 
 		this_choice = (atoms, slot_map, cp_map, all_available)
 		if all_available:
@@ -515,8 +522,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
 				for cp in intersecting_cps:
 					version_1 = cp_map_1[cp]
 					version_2 = cp_map_2[cp]
-					difference = pkgcmp(catpkgsplit(version_1)[1:],
-						catpkgsplit(version_2)[1:])
+					difference = vercmp(version_1.version, version_2.version)
 					if difference != 0:
 						if difference > 0:
 							has_upgrade = True
@@ -539,8 +545,12 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None):
 	assert(False) # This point should not be reachable
 
 def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
-	use_cache=1, use_binaries=0, myroot="/", trees=None):
-	"""Takes a depend string and parses the condition."""
+	use_cache=1, use_binaries=0, myroot=None, trees=None):
+	"""
+	Takes a depend string, parses it, and selects atoms.
+	The myroot parameter is unused (use mysettings['EROOT'] instead).
+	"""
+	myroot = mysettings['EROOT']
 	edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
 	#check_config_instance(mysettings)
 	if trees is None:
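
The pkgcmp(catpkgsplit(...)[1:], ...) idiom replaced above collapses to a
single vercmp() call on the packages' version strings. vercmp() behaves
like a classic cmp(): negative, zero, or positive. For example (assuming a
portage environment on the import path):

    from portage.versions import vercmp

    print(vercmp("2.1.10", "2.1.10-r1"))   # negative: -r1 is newer
    print(vercmp("2.2_alpha1", "2.2"))     # negative: _alpha sorts first
    print(vercmp("1.0", "1.0"))            # 0: equal versions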

diff --git a/portage_with_autodep/pym/portage/dep/dep_check.pyo b/portage_with_autodep/pym/portage/dep/dep_check.pyo
new file mode 100644
index 0000000..1b9e03f
Binary files /dev/null and b/portage_with_autodep/pym/portage/dep/dep_check.pyo differ

diff --git a/portage_with_autodep/pym/portage/dispatch_conf.py b/portage_with_autodep/pym/portage/dispatch_conf.py
index 4991020..4c68dfc 100644
--- a/portage_with_autodep/pym/portage/dispatch_conf.py
+++ b/portage_with_autodep/pym/portage/dispatch_conf.py
@@ -1,5 +1,5 @@
 # archive_conf.py -- functionality common to archive-conf and dispatch-conf
-# Copyright 2003-2011 Gentoo Foundation
+# Copyright 2003-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 
@@ -8,11 +8,12 @@
 
 from __future__ import print_function
 
-import os, sys, shutil
+import os, shutil, subprocess, sys
 
 import portage
 from portage.env.loaders import KeyValuePairFileLoader
 from portage.localization import _
+from portage.util import shlex_split, varexpand
 
 RCS_BRANCH = '1.1.1'
 RCS_LOCK = 'rcs -ko -M -l'
@@ -22,24 +23,29 @@ RCS_MERGE = "rcsmerge -p -r" + RCS_BRANCH + " '%s' > '%s'"
 
 DIFF3_MERGE = "diff3 -mE '%s' '%s' '%s' > '%s'"
 
-def diffstatusoutput_len(cmd):
+def diffstatusoutput(cmd, file1, file2):
     """
     Execute the string cmd in a shell with getstatusoutput() and return a
-    2-tuple (status, output_length). If getstatusoutput() raises
-    UnicodeDecodeError (known to happen with python3.1), return a
-    2-tuple (1, 1). This provides a simple way to check for non-zero
-    output length of diff commands, while providing simple handling of
-    UnicodeDecodeError when necessary.
+    2-tuple (status, output).
     """
-    try:
-        status, output = portage.subprocess_getstatusoutput(cmd)
-        return (status, len(output))
-    except UnicodeDecodeError:
-        return (1, 1)
+    # Use Popen to emulate getstatusoutput(), since getstatusoutput() may
+    # raise a UnicodeDecodeError which makes the output inaccessible.
+    args = shlex_split(cmd % (file1, file2))
+    if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
+        # Python 3.1 does not support bytes in Popen args.
+        args = [portage._unicode_encode(x, errors='strict') for x in args]
+    proc = subprocess.Popen(args,
+        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    output = portage._unicode_decode(proc.communicate()[0])
+    if output and output[-1] == "\n":
+        # getstatusoutput strips one newline
+        output = output[:-1]
+    return (proc.wait(), output)
 
 def read_config(mandatory_opts):
-    loader = KeyValuePairFileLoader(
-        '/etc/dispatch-conf.conf', None)
+    eprefix = portage.const.EPREFIX
+    config_path = os.path.join(eprefix or os.sep, "etc/dispatch-conf.conf")
+    loader = KeyValuePairFileLoader(config_path, None)
     opts, errors = loader.load()
     if not opts:
         print(_('dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'), file=sys.stderr)
@@ -58,6 +64,10 @@ def read_config(mandatory_opts):
             else:
                 print(_('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal') % (key,), file=sys.stderr)
 
+    # archive-dir supports ${EPREFIX} expansion, in order to avoid hardcoding
+    variables = {"EPREFIX": eprefix}
+    opts['archive-dir'] = varexpand(opts['archive-dir'], mydict=variables)
+
     if not os.path.exists(opts['archive-dir']):
         os.mkdir(opts['archive-dir'])
         # Use restrictive permissions by default, in order to protect
@@ -132,7 +142,7 @@ def file_archive(archive, curconf, newconf, mrgconf):
 
     # Archive the current config file if it isn't already saved
     if os.path.exists(archive) \
-     and diffstatusoutput_len("diff -aq '%s' '%s'" % (curconf,archive))[1] != 0:
+     and len(diffstatusoutput("diff -aq '%s' '%s'", curconf, archive)[1]) != 0:
         suf = 1
         while suf < 9 and os.path.exists(archive + '.' + str(suf)):
             suf += 1
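
A minimal model of the new diffstatusoutput() above: run the command
without a shell, capture stdout and stderr together as bytes, and decode
with a tolerant error handler so undecodable diff output cannot raise
UnicodeDecodeError (the file paths and codec arguments here are made up):

    import subprocess

    proc = subprocess.Popen(
        ["diff", "-aq", "/etc/one.conf", "/etc/two.conf"],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = proc.communicate()[0].decode("utf_8", "replace")
    if output and output[-1] == "\n":
        output = output[:-1]   # getstatusoutput() strips one newline
    status = proc.wait()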

diff --git a/portage_with_autodep/pym/portage/dispatch_conf.pyo b/portage_with_autodep/pym/portage/dispatch_conf.pyo
new file mode 100644
index 0000000..6239859
Binary files /dev/null and b/portage_with_autodep/pym/portage/dispatch_conf.pyo differ

diff --git a/portage_with_autodep/pym/portage/eapi.py b/portage_with_autodep/pym/portage/eapi.py
index da5fd8c..79cf891 100644
--- a/portage_with_autodep/pym/portage/eapi.py
+++ b/portage_with_autodep/pym/portage/eapi.py
@@ -1,4 +1,4 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 def eapi_has_iuse_defaults(eapi):
@@ -34,6 +34,9 @@ def eapi_exports_merge_type(eapi):
 def eapi_exports_replace_vars(eapi):
 	return eapi not in ("0", "1", "2", "3")
 
+def eapi_exports_REPOSITORY(eapi):
+	return eapi in ("4-python",)
+
 def eapi_has_pkg_pretend(eapi):
 	return eapi not in ("0", "1", "2", "3")
 
@@ -48,3 +51,12 @@ def eapi_has_required_use(eapi):
 
 def eapi_has_use_dep_defaults(eapi):
 	return eapi not in ("0", "1", "2", "3")
+
+def eapi_has_repo_deps(eapi):
+	return eapi in ("4-python",)
+
+def eapi_allows_dots_in_PN(eapi):
+	return eapi in ("4-python",)
+
+def eapi_allows_dots_in_use_flags(eapi):
+	return eapi in ("4-python",)

diff --git a/portage_with_autodep/pym/portage/eapi.pyo b/portage_with_autodep/pym/portage/eapi.pyo
new file mode 100644
index 0000000..ce67ab1
Binary files /dev/null and b/portage_with_autodep/pym/portage/eapi.pyo differ

diff --git a/portage_with_autodep/pym/portage/eclass_cache.py b/portage_with_autodep/pym/portage/eclass_cache.py
index 1374f1d..cb2cf8a 100644
--- a/portage_with_autodep/pym/portage/eclass_cache.py
+++ b/portage_with_autodep/pym/portage/eclass_cache.py
@@ -6,21 +6,59 @@ __all__ = ["cache"]
 
 import stat
 import sys
+import operator
 from portage.util import normalize_path
 import errno
-from portage.exception import PermissionDenied
+from portage.exception import FileNotFound, PermissionDenied
 from portage import os
+from portage import checksum
 
 if sys.hexversion >= 0x3000000:
 	long = int
 
+
+class hashed_path(object):
+
+	def __init__(self, location):
+		self.location = location
+
+	def __getattr__(self, attr):
+		if attr == 'mtime':
+			# use stat.ST_MTIME; accessing .st_mtime gets you a float
+			# depending on the python version, and long(float) introduces
+			# some rounding issues that aren't present for people using
+			# the straight c api.
+			# thus use the de facto python compatibility workaround;
+			# access via index, which guarantees you get the raw long.
+			try:
+				self.mtime = obj = os.stat(self.location)[stat.ST_MTIME]
+			except OSError as e:
+				if e.errno in (errno.ENOENT, errno.ESTALE):
+					raise FileNotFound(self.location)
+				elif e.errno == PermissionDenied.errno:
+					raise PermissionDenied(self.location)
+				raise
+			return obj
+		if not attr.islower():
+			# we don't care to allow .mD5 as an alias for .md5
+			raise AttributeError(attr)
+		hashname = attr.upper()
+		if hashname not in checksum.hashfunc_map:
+			raise AttributeError(attr)
+		val = checksum.perform_checksum(self.location, hashname)[0]
+		setattr(self, attr, val)
+		return val
+
+	def __repr__(self):
+		return "<portage.eclass_cache.hashed_path('%s')>" % (self.location,)
+
 class cache(object):
 	"""
 	Maintains the cache information about eclasses used in ebuild.
 	"""
 	def __init__(self, porttree_root, overlays=[]):
 
-		self.eclasses = {} # {"Name": ("location","_mtime_")}
+		self.eclasses = {} # {"Name": hashed_path}
 		self._eclass_locations = {}
 
 		# screw with the porttree ordering, w/out having bash inherit match it, and I'll hurt you.
@@ -80,14 +118,16 @@ class cache(object):
 			for y in eclass_filenames:
 				if not y.endswith(".eclass"):
 					continue
+				obj = hashed_path(os.path.join(x, y))
+				obj.eclass_dir = x
 				try:
-					mtime = os.stat(os.path.join(x, y))[stat.ST_MTIME]
-				except OSError:
+					mtime = obj.mtime
+				except FileNotFound:
 					continue
 				ys=y[:-eclass_len]
 				if x == self._master_eclass_root:
 					master_eclasses[ys] = mtime
-					self.eclasses[ys] = (x, mtime)
+					self.eclasses[ys] = obj
 					self._eclass_locations[ys] = x
 					continue
 
@@ -98,22 +138,30 @@ class cache(object):
 						# so prefer the master entry.
 						continue
 
-				self.eclasses[ys] = (x, mtime)
+				self.eclasses[ys] = obj
 				self._eclass_locations[ys] = x
 
-	def is_eclass_data_valid(self, ec_dict):
+	def validate_and_rewrite_cache(self, ec_dict, chf_type, stores_paths):
+		"""
+		This will return an empty dict if the ec_dict parameter happens
+		to be empty, therefore callers must take care to distinguish
+		between empty dict and None return values.
+		"""
 		if not isinstance(ec_dict, dict):
-			return False
-		for eclass, tup in ec_dict.items():
-			cached_data = self.eclasses.get(eclass, None)
-			""" Only use the mtime for validation since the probability of a
-			collision is small and, depending on the cache implementation, the
-			path may not be specified (cache from rsync mirrors, for example).
-			"""
-			if cached_data is None or tup[1] != cached_data[1]:
-				return False
-
-		return True
+			return None
+		our_getter = operator.attrgetter(chf_type)
+		cache_getter = lambda x:x
+		if stores_paths:
+			cache_getter = operator.itemgetter(1)
+		d = {}
+		for eclass, ec_data in ec_dict.items():
+			cached_data = self.eclasses.get(eclass)
+			if cached_data is None:
+				return None
+			if cache_getter(ec_data) != our_getter(cached_data):
+				return None
+			d[eclass] = cached_data
+		return d
 
 	def get_eclass_data(self, inherits):
 		ec_dict = {}
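
A hypothetical use of the hashed_path class added above (the path is made
up; assumes the patched portage.eclass_cache is importable):

    from portage.eclass_cache import hashed_path

    hp = hashed_path("/usr/portage/eclass/eutils.eclass")
    print(hp.mtime)   # raw integer mtime via os.stat(...)[stat.ST_MTIME]
    print(hp.md5)     # perform_checksum() runs on this first access...
    print(hp.md5)     # ...not here: __getattr__ cached it via setattr()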

diff --git a/portage_with_autodep/pym/portage/eclass_cache.pyo b/portage_with_autodep/pym/portage/eclass_cache.pyo
new file mode 100644
index 0000000..ebe3463
Binary files /dev/null and b/portage_with_autodep/pym/portage/eclass_cache.pyo differ

diff --git a/portage_with_autodep/pym/portage/elog/__init__.py b/portage_with_autodep/pym/portage/elog/__init__.py
index 1a8309d..33dac17 100644
--- a/portage_with_autodep/pym/portage/elog/__init__.py
+++ b/portage_with_autodep/pym/portage/elog/__init__.py
@@ -1,7 +1,11 @@
 # elog/__init__.py - elog core functions
-# Copyright 2006-2009 Gentoo Foundation
+# Copyright 2006-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
+import sys
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
 import portage
 portage.proxy.lazyimport.lazyimport(globals(),
 	'portage.util:writemsg',
@@ -52,11 +56,15 @@ def _combine_logentries(logentries):
 		for msgtype, msgcontent in logentries[phase]:
 			if previous_type != msgtype:
 				previous_type = msgtype
-				rValue.append("%s: %s\n" % (msgtype, phase))
-			for line in msgcontent:
-				rValue.append(line)
-			rValue.append("\n")
-	return "".join(rValue)
+				rValue.append("%s: %s" % (msgtype, phase))
+			if isinstance(msgcontent, basestring):
+				rValue.append(msgcontent.rstrip("\n"))
+			else:
+				for line in msgcontent:
+					rValue.append(line.rstrip("\n"))
+	if rValue:
+		rValue.append("")
+	return "\n".join(rValue)
 
 _elog_mod_imports = {}
 def _load_mod(name):

diff --git a/portage_with_autodep/pym/portage/elog/__init__.pyo b/portage_with_autodep/pym/portage/elog/__init__.pyo
new file mode 100644
index 0000000..39dc595
Binary files /dev/null and b/portage_with_autodep/pym/portage/elog/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/elog/filtering.pyo b/portage_with_autodep/pym/portage/elog/filtering.pyo
new file mode 100644
index 0000000..3a040cc
Binary files /dev/null and b/portage_with_autodep/pym/portage/elog/filtering.pyo differ

diff --git a/portage_with_autodep/pym/portage/elog/messages.py b/portage_with_autodep/pym/portage/elog/messages.py
index 6c1580a..a4897d8 100644
--- a/portage_with_autodep/pym/portage/elog/messages.py
+++ b/portage_with_autodep/pym/portage/elog/messages.py
@@ -18,6 +18,14 @@ from portage import _unicode_decode
 import io
 import sys
 
+_log_levels = frozenset([
+	"ERROR",
+	"INFO",
+	"LOG",
+	"QA",
+	"WARN",
+])
+
 def collect_ebuild_messages(path):
 	""" Collect elog messages generated by the bash logging function stored 
 		at 'path'.
@@ -43,16 +51,21 @@ def collect_ebuild_messages(path):
 			logentries[msgfunction] = []
 		lastmsgtype = None
 		msgcontent = []
-		for l in io.open(_unicode_encode(filename,
+		f = io.open(_unicode_encode(filename,
 			encoding=_encodings['fs'], errors='strict'),
-			mode='r', encoding=_encodings['repo.content'], errors='replace'):
+			mode='r', encoding=_encodings['repo.content'], errors='replace')
+		# Use split('\n') since normal line iteration or readlines() will
+		# split on \r characters as shown in bug #390833.
+		for l in f.read().split('\n'):
 			if not l:
 				continue
 			try:
 				msgtype, msg = l.split(" ", 1)
+				if msgtype not in _log_levels:
+					raise ValueError(msgtype)
 			except ValueError:
 				writemsg(_("!!! malformed entry in "
-					"log file: '%s'\n") % filename, noiselevel=-1)
+					"log file: '%s': %s\n") % (filename, l), noiselevel=-1)
 				continue
 
 			if lastmsgtype is None:
@@ -65,6 +78,7 @@ def collect_ebuild_messages(path):
 					logentries[msgfunction].append((lastmsgtype, msgcontent))
 				msgcontent = [msg]
 			lastmsgtype = msgtype
+		f.close()
 		if msgcontent:
 			logentries[msgfunction].append((lastmsgtype, msgcontent))
 
@@ -159,13 +173,17 @@ _functions = { "einfo": ("INFO", "GOOD"),
 		"eerror": ("ERROR", "BAD"),
 }
 
-def _make_msgfunction(level, color):
-	def _elog(msg, phase="other", key=None, out=None):
-		""" Display and log a message assigned to the given key/cpv 
-		    (or unassigned if no key is given).
+class _make_msgfunction(object):
+	__slots__ = ('_color', '_level')
+	def __init__(self, level, color):
+		self._level = level
+		self._color = color
+	def __call__(self, msg, phase="other", key=None, out=None):
+		"""
+		Display and log a message assigned to the given key/cpv.
 		""" 
-		_elog_base(level, msg,  phase=phase, key=key, color=color, out=out)
-	return _elog
+		_elog_base(self._level, msg,  phase=phase,
+			key=key, color=self._color, out=out)
 
 for f in _functions:
 	setattr(sys.modules[__name__], f, _make_msgfunction(_functions[f][0], _functions[f][1]))
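
Why collect_ebuild_messages() now reads the whole file and splits on '\n'
(bug #390833): universal-newline line iteration, like str.splitlines(),
also breaks on bare '\r', which would split a single logged message in
two. A quick comparison:

    data = "INFO downloading... 50%\r100%\nLOG done\n"
    print(data.splitlines())   # ['INFO downloading... 50%', '100%', 'LOG done']
    print(data.split('\n'))    # ['INFO downloading... 50%\r100%', 'LOG done', '']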

diff --git a/portage_with_autodep/pym/portage/elog/messages.pyo b/portage_with_autodep/pym/portage/elog/messages.pyo
new file mode 100644
index 0000000..c033f55
Binary files /dev/null and b/portage_with_autodep/pym/portage/elog/messages.pyo differ

diff --git a/portage_with_autodep/pym/portage/elog/mod_custom.pyo b/portage_with_autodep/pym/portage/elog/mod_custom.pyo
new file mode 100644
index 0000000..317fab4
Binary files /dev/null and b/portage_with_autodep/pym/portage/elog/mod_custom.pyo differ

diff --git a/portage_with_autodep/pym/portage/elog/mod_echo.py b/portage_with_autodep/pym/portage/elog/mod_echo.py
index 5de25bf..59117be 100644
--- a/portage_with_autodep/pym/portage/elog/mod_echo.py
+++ b/portage_with_autodep/pym/portage/elog/mod_echo.py
@@ -18,6 +18,19 @@ def process(mysettings, key, logentries, fulltext):
 	_items.append((mysettings["ROOT"], key, logentries))
 
 def finalize():
+	# For consistency, send all message types to stdout.
+	sys.stdout.flush()
+	sys.stderr.flush()
+	stderr = sys.stderr
+	try:
+		sys.stderr = sys.stdout
+		_finalize()
+	finally:
+		sys.stderr = stderr
+		sys.stdout.flush()
+		sys.stderr.flush()
+
+def _finalize():
 	global _items
 	printer = EOutput()
 	for root, key, logentries in _items:

diff --git a/portage_with_autodep/pym/portage/elog/mod_echo.pyo b/portage_with_autodep/pym/portage/elog/mod_echo.pyo
new file mode 100644
index 0000000..6a00d4c
Binary files /dev/null and b/portage_with_autodep/pym/portage/elog/mod_echo.pyo differ

diff --git a/portage_with_autodep/pym/portage/elog/mod_mail.pyo b/portage_with_autodep/pym/portage/elog/mod_mail.pyo
new file mode 100644
index 0000000..5d87aa6
Binary files /dev/null and b/portage_with_autodep/pym/portage/elog/mod_mail.pyo differ

diff --git a/portage_with_autodep/pym/portage/elog/mod_mail_summary.pyo b/portage_with_autodep/pym/portage/elog/mod_mail_summary.pyo
new file mode 100644
index 0000000..d7306b5
Binary files /dev/null and b/portage_with_autodep/pym/portage/elog/mod_mail_summary.pyo differ

diff --git a/portage_with_autodep/pym/portage/elog/mod_save.py b/portage_with_autodep/pym/portage/elog/mod_save.py
index 9350a6e..c69f4a3 100644
--- a/portage_with_autodep/pym/portage/elog/mod_save.py
+++ b/portage_with_autodep/pym/portage/elog/mod_save.py
@@ -4,20 +4,22 @@
 
 import io
 import time
+import portage
 from portage import os
 from portage import _encodings
 from portage import _unicode_decode
 from portage import _unicode_encode
 from portage.data import portage_gid, portage_uid
 from portage.package.ebuild.prepare_build_dirs import _ensure_log_subdirs
-from portage.util import ensure_dirs, normalize_path
+from portage.util import apply_permissions, ensure_dirs, normalize_path
 
 def process(mysettings, key, logentries, fulltext):
 
 	if mysettings.get("PORT_LOGDIR"):
 		logdir = normalize_path(mysettings["PORT_LOGDIR"])
 	else:
-		logdir = os.path.join(os.sep, "var", "log", "portage")
+		logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
+			"var", "log", "portage")
 
 	if not os.path.isdir(logdir):
 		# Only initialize group/mode if the directory doesn't
@@ -25,7 +27,10 @@ def process(mysettings, key, logentries, fulltext):
 		# were previously set by the administrator.
 		# NOTE: These permissions should be compatible with our
 		# default logrotate config as discussed in bug 374287.
-		ensure_dirs(logdir, uid=portage_uid, gid=portage_gid, mode=0o2770)
+		uid = -1
+		if portage.data.secpass >= 2:
+			uid = portage_uid
+		ensure_dirs(logdir, uid=uid, gid=portage_gid, mode=0o2770)
 
 	cat = mysettings['CATEGORY']
 	pf = mysettings['PF']
@@ -48,4 +53,21 @@ def process(mysettings, key, logentries, fulltext):
 	elogfile.write(_unicode_decode(fulltext))
 	elogfile.close()
 
+	# Copy group permission bits from parent directory.
+	elogdir_st = os.stat(log_subdir)
+	elogdir_gid = elogdir_st.st_gid
+	elogdir_grp_mode = 0o060 & elogdir_st.st_mode
+
+	# Copy the uid from the parent directory if we have privileges
+	# to do so, for compatibility with our default logrotate
+	# config (see bug 378451). With the "su portage portage"
+	# directive and logrotate-3.8.0, logrotate's chown call during
+	# the compression phase will only succeed if the log file's uid
+	# is portage_uid.
+	logfile_uid = -1
+	if portage.data.secpass >= 2:
+		logfile_uid = elogdir_st.st_uid
+	apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
+		mode=elogdir_grp_mode, mask=0)
+
 	return elogfilename

diff --git a/portage_with_autodep/pym/portage/elog/mod_save.pyo b/portage_with_autodep/pym/portage/elog/mod_save.pyo
new file mode 100644
index 0000000..fb28b76
Binary files /dev/null and b/portage_with_autodep/pym/portage/elog/mod_save.pyo differ

diff --git a/portage_with_autodep/pym/portage/elog/mod_save_summary.py b/portage_with_autodep/pym/portage/elog/mod_save_summary.py
index 4adc6f3..347f66e 100644
--- a/portage_with_autodep/pym/portage/elog/mod_save_summary.py
+++ b/portage_with_autodep/pym/portage/elog/mod_save_summary.py
@@ -4,6 +4,7 @@
 
 import io
 import time
+import portage
 from portage import os
 from portage import _encodings
 from portage import _unicode_decode
@@ -17,7 +18,8 @@ def process(mysettings, key, logentries, fulltext):
 	if mysettings.get("PORT_LOGDIR"):
 		logdir = normalize_path(mysettings["PORT_LOGDIR"])
 	else:
-		logdir = os.path.join(os.sep, "var", "log", "portage")
+		logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
+			"var", "log", "portage")
 
 	if not os.path.isdir(logdir):
 		# Only initialize group/mode if the directory doesn't
@@ -25,7 +27,10 @@ def process(mysettings, key, logentries, fulltext):
 		# were previously set by the administrator.
 		# NOTE: These permissions should be compatible with our
 		# default logrotate config as discussed in bug 374287.
-		ensure_dirs(logdir, uid=portage_uid, gid=portage_gid, mode=0o2770)
+		logdir_uid = -1
+		if portage.data.secpass >= 2:
+			logdir_uid = portage_uid
+		ensure_dirs(logdir, uid=logdir_uid, gid=portage_gid, mode=0o2770)
 
 	elogdir = os.path.join(logdir, "elog")
 	_ensure_log_subdirs(logdir, elogdir)
@@ -40,7 +45,17 @@ def process(mysettings, key, logentries, fulltext):
 	elogdir_st = os.stat(elogdir)
 	elogdir_gid = elogdir_st.st_gid
 	elogdir_grp_mode = 0o060 & elogdir_st.st_mode
-	apply_permissions(elogfilename, gid=elogdir_gid,
+
+	# Copy the uid from the parent directory if we have privileges
+	# to do so, for compatibility with our default logrotate
+	# config (see bug 378451). With the "su portage portage"
+	# directive and logrotate-3.8.0, logrotate's chown call during
+	# the compression phase will only succeed if the log file's uid
+	# is portage_uid.
+	logfile_uid = -1
+	if portage.data.secpass >= 2:
+		logfile_uid = elogdir_st.st_uid
+	apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
 		mode=elogdir_grp_mode, mask=0)
 
 	time_str = time.strftime("%Y-%m-%d %H:%M:%S %Z",

diff --git a/portage_with_autodep/pym/portage/elog/mod_save_summary.pyo b/portage_with_autodep/pym/portage/elog/mod_save_summary.pyo
new file mode 100644
index 0000000..8f99c51
Binary files /dev/null and b/portage_with_autodep/pym/portage/elog/mod_save_summary.pyo differ

diff --git a/portage_with_autodep/pym/portage/elog/mod_syslog.py b/portage_with_autodep/pym/portage/elog/mod_syslog.py
index d71dab4..c8bf441 100644
--- a/portage_with_autodep/pym/portage/elog/mod_syslog.py
+++ b/portage_with_autodep/pym/portage/elog/mod_syslog.py
@@ -1,5 +1,5 @@
 # elog/mod_syslog.py - elog dispatch module
-# Copyright 2006-2007 Gentoo Foundation
+# Copyright 2006-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import sys
@@ -7,6 +7,9 @@ import syslog
 from portage.const import EBUILD_PHASES
 from portage import _encodings
 
+if sys.hexversion >= 0x3000000:
+	basestring = str
+
 _pri = {
 	"INFO"   : syslog.LOG_INFO, 
 	"WARN"   : syslog.LOG_WARNING, 
@@ -21,12 +24,13 @@ def process(mysettings, key, logentries, fulltext):
 		if not phase in logentries:
 			continue
 		for msgtype,msgcontent in logentries[phase]:
-			msgtext = "".join(msgcontent)
-			for line in msgtext.splitlines():
+			if isinstance(msgcontent, basestring):
+				msgcontent = [msgcontent]
+			for line in msgcontent:
 				line = "%s: %s: %s" % (key, phase, line)
-				if sys.hexversion < 0x3000000 and isinstance(msgtext, unicode):
+				if sys.hexversion < 0x3000000 and not isinstance(line, bytes):
 					# Avoid TypeError from syslog.syslog()
 					line = line.encode(_encodings['content'], 
 						'backslashreplace')
-				syslog.syslog(_pri[msgtype], line)
+				syslog.syslog(_pri[msgtype], line.rstrip("\n"))
 	syslog.closelog()

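Outside of portage, the reworked dispatch loop reduces to the sketch below. The logentries layout it assumes (phase -> list of (msgtype, msgcontent) pairs, where msgcontent may be a bare string or a list of lines) is exactly what the loop above normalizes:

    import syslog

    _pri = {"INFO": syslog.LOG_INFO, "WARN": syslog.LOG_WARNING,
            "ERROR": syslog.LOG_ERR, "LOG": syslog.LOG_NOTICE,
            "QA": syslog.LOG_WARNING}

    def emit(key, phase, msgtype, msgcontent):
        if isinstance(msgcontent, str):
            msgcontent = [msgcontent]       # normalize, as in the hunk above
        syslog.openlog("portage")
        for line in msgcontent:
            syslog.syslog(_pri[msgtype],
                "%s: %s: %s" % (key, phase, line.rstrip("\n")))
        syslog.closelog()

    emit("app-misc/foo-1.0", "postinst", "WARN", "example warning\n")
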
diff --git a/portage_with_autodep/pym/portage/elog/mod_syslog.pyo b/portage_with_autodep/pym/portage/elog/mod_syslog.pyo
new file mode 100644
index 0000000..c7b4248
Binary files /dev/null and b/portage_with_autodep/pym/portage/elog/mod_syslog.pyo differ

diff --git a/portage_with_autodep/pym/portage/emaint/__init__.py b/portage_with_autodep/pym/portage/emaint/__init__.py
new file mode 100644
index 0000000..48bc6e2
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/__init__.py
@@ -0,0 +1,5 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""System health checks and maintenance utilities.
+"""

diff --git a/portage_with_autodep/pym/portage/emaint/defaults.py b/portage_with_autodep/pym/portage/emaint/defaults.py
new file mode 100644
index 0000000..30f36af
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/defaults.py
@@ -0,0 +1,25 @@
+# Copyright 2005-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# parser option data
+CHECK = {"short": "-c", "long": "--check",
+	"help": "Check for problems (a default option for most modules)",
+	'status': "Checking %s for problems",
+	'action': 'store_true',
+	'func': 'check'
+	}
+
+FIX = {"short": "-f", "long": "--fix",
+	"help": "Attempt to fix problems (a default option for most modules)",
+	'status': "Attempting to fix %s",
+	'action': 'store_true',
+	'func': 'fix'
+	}
+
+VERSION = {"long": "--version",
+	"help": "show program's version number and exit",
+	'action': 'store_true',
+	}
+
+# parser options
+DEFAULT_OPTIONS = {'check': CHECK, 'fix': FIX, 'version': VERSION}

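Each of these dictionaries is later unpacked into an ArgumentParser.add_argument() call by the OptionItem class in main.py below; the translation is mechanical, as this stock-argparse sketch shows:

    import argparse

    CHECK = {"short": "-c", "long": "--check",
        "help": "Check for problems", "action": "store_true"}

    parser = argparse.ArgumentParser()
    pargs = [CHECK[k] for k in ("short", "long") if k in CHECK]
    kwargs = {k: CHECK[k] for k in ("help", "action") if k in CHECK}
    parser.add_argument(*pargs, **kwargs)
    print(parser.parse_args(["--check"]))   # Namespace(check=True)
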
diff --git a/portage_with_autodep/pym/portage/emaint/main.py b/portage_with_autodep/pym/portage/emaint/main.py
new file mode 100644
index 0000000..9f987fa
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/main.py
@@ -0,0 +1,222 @@
+# Copyright 2005-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+
+import sys
+import textwrap
+
+import portage
+from portage import os
+from portage.emaint.module import Modules
+from portage.emaint.progress import ProgressBar
+from portage.emaint.defaults import DEFAULT_OPTIONS
+from portage.util._argparse import ArgumentParser
+
+class OptionItem(object):
+	"""class to hold module ArgumentParser options data
+	"""
+
+	def __init__(self, opt):
+		"""
+		@type opt: dictionary
+		@param opt: options parser options
+		"""
+		self.short = opt.get('short')
+		self.long = opt.get('long')
+		self.help = opt.get('help')
+		self.status = opt.get('status')
+		self.func = opt.get('func')
+		self.action = opt.get('action')
+		self.type = opt.get('type')
+		self.dest = opt.get('dest')
+
+	@property
+	def pargs(self):
+		pargs = []
+		if self.short is not None:
+			pargs.append(self.short)
+		if self.long is not None:
+			pargs.append(self.long)
+		return pargs
+
+	@property
+	def kwargs(self):
+		# Support for keyword arguments varies depending on the action,
+		# so only pass in the keywords that are needed, in order
+		# to avoid a TypeError.
+		kwargs = {}
+		if self.help is not None:
+			kwargs['help'] = self.help
+		if self.action is not None:
+			kwargs['action'] = self.action
+		if self.type is not None:
+			kwargs['type'] = self.type
+		if self.dest is not None:
+			kwargs['dest'] = self.dest
+		return kwargs
+
+def usage(module_controller):
+		_usage = "usage: emaint [options] COMMAND"
+
+		desc = "The emaint program provides an interface to system health " + \
+			"checks and maintenance. See the emaint(1) man page " + \
+			"for additional information about the following commands:"
+
+		_usage += "\n\n"
+		for line in textwrap.wrap(desc, 65):
+			_usage += "%s\n" % line
+		_usage += "\nCommands:\n"
+		_usage += "  %s" % "all".ljust(15) + \
+			"Perform all supported commands\n"
+		# continuation lines are indented by hand in the loop below
+		for mod in module_controller.module_names:
+			desc = textwrap.wrap(module_controller.get_description(mod), 65)
+			_usage += "  %s%s\n" % (mod.ljust(15), desc[0])
+			for d in desc[1:]:
+				_usage += "  %s%s\n" % (' '.ljust(15), d)
+		return _usage
+
+
+def module_opts(module_controller, module):
+	_usage = " %s module options:\n" % module
+	opts = module_controller.get_func_descriptions(module)
+	if opts == {}:
+		opts = DEFAULT_OPTIONS
+	for opt in sorted(opts):
+		optd = opts[opt]
+		opto = "  %s, %s" %(optd['short'], optd['long'])
+		_usage += '%s %s\n' % (opto.ljust(15),optd['help'])
+	_usage += '\n'
+	return _usage
+
+
+class TaskHandler(object):
+	"""Handles the running of the tasks it is given
+	"""
+
+	def __init__(self, show_progress_bar=True, verbose=True, callback=None):
+		self.show_progress_bar = show_progress_bar
+		self.verbose = verbose
+		self.callback = callback
+		self.isatty = os.environ.get('TERM') != 'dumb' and sys.stdout.isatty()
+		self.progress_bar = ProgressBar(self.isatty, title="Emaint", max_desc_length=27)
+
+
+	def run_tasks(self, tasks, func, status=None, verbose=True, options=None):
+		"""Runs the module tasks"""
+		if tasks is None or func is None:
+			return
+		for task in tasks:
+			inst = task()
+			show_progress = self.show_progress_bar and self.isatty
+			# check whether the function supports a progress bar
+			# and, if not, override it off
+			if show_progress and hasattr(inst, 'can_progressbar'):
+				show_progress = inst.can_progressbar(func)
+			if show_progress:
+				self.progress_bar.reset()
+				self.progress_bar.set_label(func + " " + inst.name())
+				onProgress = self.progress_bar.start()
+			else:
+				onProgress = None
+			kwargs = {
+				'onProgress': onProgress,
+				# pass in a copy of the options so a module cannot pollute or
+				# change them for other tasks if there is more to do.
+				'options': options.copy()
+				}
+			result = getattr(inst, func)(**kwargs)
+			if show_progress:
+				# make sure the final progress is displayed
+				self.progress_bar.display()
+				print()
+				self.progress_bar.stop()
+			if self.callback:
+				self.callback(result)
+
+
+def print_results(results):
+	if results:
+		print()
+		print("\n".join(results))
+		print("\n")
+
+
+def emaint_main(myargv):
+
+	# Similar to emerge, emaint needs a default umask so that created
+	# files (such as the world file) have sane permissions.
+	os.umask(0o22)
+
+	module_controller = Modules(namepath="portage.emaint.modules")
+	module_names = module_controller.module_names[:]
+	module_names.insert(0, "all")
+
+
+	parser = ArgumentParser(usage=usage(module_controller))
+	# add default options
+	parser_options = []
+	for opt in DEFAULT_OPTIONS:
+		parser_options.append(OptionItem(DEFAULT_OPTIONS[opt]))
+	for mod in module_names[1:]:
+		desc = module_controller.get_func_descriptions(mod)
+		if desc:
+			for opt in desc:
+				parser_options.append(OptionItem(desc[opt]))
+	for opt in parser_options:
+		parser.add_argument(*opt.pargs, **opt.kwargs)
+
+	options, args = parser.parse_known_args(args=myargv)
+
+	if options.version:
+		print(portage.VERSION)
+		return os.EX_OK
+
+	if len(args) != 1:
+		parser.error("Incorrect number of arguments")
+	if args[0] not in module_names:
+		parser.error("%s target is not a known target" % args[0])
+
+	check_opt = None
+	func = status = long_action = None
+	for opt in parser_options:
+		if opt.long == '--check':
+			# Default action
+			check_opt = opt
+		if opt.status and getattr(options, opt.long.lstrip("-"), False):
+			if long_action is not None:
+				parser.error("--%s and %s are exclusive options" %
+					(long_action, opt.long))
+			status = opt.status
+			func = opt.func
+			long_action = opt.long.lstrip('-')
+
+	if long_action is None:
+		long_action = 'check'
+		func = check_opt.func
+		status = check_opt.status
+
+	if args[0] == "all":
+		tasks = []
+		for m in module_names[1:]:
+			#print("DEBUG: module: %s, functions: " %(m, str(module_controller.get_functions(m))))
+			if long_action in module_controller.get_functions(m):
+				tasks.append(module_controller.get_class(m))
+	elif long_action in module_controller.get_functions(args[0]):
+		tasks = [module_controller.get_class(args[0])]
+	else:
+		portage.util.writemsg(
+			"\nERROR: module '%s' does not have option '--%s'\n\n" %
+			(args[0], long_action), noiselevel=-1)
+		portage.util.writemsg(module_opts(module_controller, args[0]),
+			noiselevel=-1)
+		sys.exit(1)
+
+	# need to pass the parser options dict to the modules
+	# so they are available if needed.
+	task_opts = options.__dict__
+	taskmaster = TaskHandler(callback=print_results)
+	taskmaster.run_tasks(tasks, func, status, options=task_opts)
+

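With portage installed and importable, the entry point can be driven directly; the following is equivalent to running `emaint --check world` from the shell:

    from portage.emaint.main import emaint_main

    emaint_main(["--check", "world"])
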
diff --git a/portage_with_autodep/pym/portage/emaint/module.py b/portage_with_autodep/pym/portage/emaint/module.py
new file mode 100644
index 0000000..64b0c64
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/module.py
@@ -0,0 +1,194 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+from __future__ import print_function
+
+from portage import os
+from portage.exception import PortageException
+from portage.cache.mappings import ProtectedDict
+
+
+class InvalidModuleName(PortageException):
+	"""An invalid or unknown module name."""
+
+
+class Module(object):
+	"""Class to define and hold our plug-in module
+
+	@type name: string
+	@param name: the module name
+	@param namepath: the python import path to the module's package
+	"""
+
+	def __init__(self, name, namepath):
+		"""Some variables initialization"""
+		self.name = name
+		self._namepath = namepath
+		self.kids_names = []
+		self.kids = {}
+		self.initialized = self._initialize()
+
+	def _initialize(self):
+		"""Initialize the plug-in module
+
+		@rtype: boolean
+		"""
+		self.valid = False
+		try:
+			mod_name = ".".join([self._namepath, self.name])
+			self._module = __import__(mod_name, [],[], ["not empty"])
+			self.valid = True
+		except ImportError as e:
+			print("MODULE; failed import", mod_name, "  error was:",e)
+			return False
+		self.module_spec = self._module.module_spec
+		for submodule in self.module_spec['provides']:
+			kid = self.module_spec['provides'][submodule]
+			kidname = kid['name']
+			kid['module_name'] = '.'.join([mod_name, self.name])
+			kid['is_imported'] = False
+			self.kids[kidname] = kid
+			self.kids_names.append(kidname)
+		return True
+
+	def get_class(self, name):
+		if not name or name not in self.kids_names:
+			raise InvalidModuleName("Module name '%s' was invalid or not"
+				%name + "part of the module '%s'" %self.name)
+		kid = self.kids[name]
+		if kid['is_imported']:
+			module = kid['instance']
+		else:
+			try:
+				module = __import__(kid['module_name'], [],[], ["not empty"])
+				kid['instance'] = module
+				kid['is_imported'] = True
+			except ImportError:
+				raise
+		mod_class = getattr(module, kid['class'])
+		return mod_class
+
+
+class Modules(object):
+	"""Dynamic modules system for loading and retrieving any of the
+	installed emaint modules and/or provided class's
+
+	@param path: Optional path to the "modules" directory or
+			defaults to the directory of this file + '/modules'
+	@param namepath: Optional python import path to the "modules" directory or
+			defaults to the directory name of this file + '.modules'
+	"""
+
+	def __init__(self, path=None, namepath=None):
+		if path:
+			self._module_path = path
+		else:
+			self._module_path = os.path.join((
+				os.path.dirname(os.path.realpath(__file__))), "modules")
+		if namepath:
+			self._namepath = namepath
+		else:
+			self._namepath = '.'.join([os.path.basename(os.path.dirname(
+				os.path.realpath(__file__))), "modules"])
+		self._modules = self._get_all_modules()
+		self.modules = ProtectedDict(self._modules)
+		self.module_names = sorted(self._modules)
+		#self.modules = {}
+		#for mod in self.module_names:
+			#self.module[mod] = LazyLoad(
+
+	def _get_all_modules(self):
+		"""scans the emaint modules dir for loadable modules
+
+		@rtype: dictionary of module_plugins
+		"""
+		module_dir =  self._module_path
+		importables = []
+		names = os.listdir(module_dir)
+		for entry in names:
+			# skip any __init__ or __pycache__ files or directories
+			if entry.startswith('__'):
+				continue
+			try:
+				# stat the package's __init__.py to ensure it is a real module;
+				# the lstat call raises if it is missing
+				os.lstat(os.path.join(module_dir, entry, '__init__.py'))
+				importables.append(entry)
+			except EnvironmentError:
+				pass
+		kids = {}
+		for entry in importables:
+			new_module = Module(entry, self._namepath)
+			for module_name in new_module.kids:
+				kid = new_module.kids[module_name]
+				kid['parent'] = new_module
+				kids[kid['name']] = kid
+		return kids
+
+	def get_module_names(self):
+		"""Convienence function to return the list of installed modules
+		available
+
+		@rtype: list
+		@return: the installed module names available
+		"""
+		return self.module_names
+
+	def get_class(self, modname):
+		"""Retrieves a module class desired
+
+		@type modname: string
+		@param modname: the module class name
+		"""
+		if modname and modname in self.module_names:
+			mod = self._modules[modname]['parent'].get_class(modname)
+		else:
+			raise InvalidModuleName("Module name '%s' was invalid or not"
+				%modname + "found")
+		return mod
+
+	def get_description(self, modname):
+		"""Retrieves the module class decription
+
+		@type modname: string
+		@param modname: the module class name
+		@type string
+		@return: the modules class decription
+		"""
+		if modname and modname in self.module_names:
+			mod = self._modules[modname]['description']
+		else:
+			raise InvalidModuleName("Module name '%s' was invalid or not"
+				%modname + "found")
+		return mod
+
+	def get_functions(self, modname):
+		"""Retrieves the module class  exported function names
+
+		@type modname: string
+		@param modname: the module class name
+		@type list
+		@return: the modules class exported function names
+		"""
+		if modname and modname in self.module_names:
+			mod = self._modules[modname]['functions']
+		else:
+			raise InvalidModuleName("Module name '%s' was invalid or not"
+				%modname + "found")
+		return mod
+
+	def get_func_descriptions(self, modname):
+		"""Retrieves the module class  exported functions descriptions
+
+		@type modname: string
+		@param modname: the module class name
+		@type dictionary
+		@return: the modules class exported functions descriptions
+		"""
+		if modname and modname in self.module_names:
+			desc = self._modules[modname]['func_desc']
+		else:
+			raise InvalidModuleName("Module name '%s' was invalid or not"
+				%modname + "found")
+		return desc

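The loader expects every plug-in package to export a module_spec dictionary from its __init__.py, with a same-named submodule providing the handler class. A hypothetical minimal plug-in (all names invented for illustration), loadable via Modules(path=..., namepath=...):

    # mymodules/hello/__init__.py
    module_spec = {
        'name': 'hello',
        'description': "Say hello",
        'provides': {
            'module1': {
                'name': "hello",
                'class': "HelloHandler",
                'description': "Say hello",
                'functions': ['check'],
                'func_desc': {},
                },
            },
        }

    # mymodules/hello/hello.py
    class HelloHandler(object):

        short_desc = "Say hello"

        @staticmethod
        def name():
            return "hello"

        def check(self, **kwargs):
            return ["hello from the plug-in"]
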
diff --git a/portage_with_autodep/pym/portage/emaint/modules/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/__init__.py
new file mode 100644
index 0000000..f67197d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/__init__.py
@@ -0,0 +1,5 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Plug-in modules for system health checks and maintenance.
+"""

diff --git a/portage_with_autodep/pym/portage/emaint/modules/binhost/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/binhost/__init__.py
new file mode 100644
index 0000000..c60e8bc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/binhost/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Scan and generate metadata indexes for binary packages.
+"""
+
+
+module_spec = {
+	'name': 'binhost',
+	'description': __doc__,
+	'provides':{
+		'module1': {
+			'name': "binhost",
+			'class': "BinhostHandler",
+			'description': __doc__,
+			'functions': ['check', 'fix'],
+			'func_desc': {}
+			}
+		}
+	}

diff --git a/portage_with_autodep/pym/portage/emaint/modules/binhost/binhost.py b/portage_with_autodep/pym/portage/emaint/modules/binhost/binhost.py
new file mode 100644
index 0000000..c297545
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/binhost/binhost.py
@@ -0,0 +1,163 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import stat
+
+import portage
+from portage import os
+from portage.util import writemsg
+
+import sys
+if sys.hexversion >= 0x3000000:
+	long = int
+
+class BinhostHandler(object):
+
+	short_desc = "Generate a metadata index for binary packages"
+
+	def name():
+		return "binhost"
+	name = staticmethod(name)
+
+	def __init__(self):
+		eroot = portage.settings['EROOT']
+		self._bintree = portage.db[eroot]["bintree"]
+		self._bintree.populate()
+		self._pkgindex_file = self._bintree._pkgindex_file
+		self._pkgindex = self._bintree._load_pkgindex()
+
+	def _need_update(self, cpv, data):
+
+		if "MD5" not in data:
+			return True
+
+		size = data.get("SIZE")
+		if size is None:
+			return True
+
+		mtime = data.get("MTIME")
+		if mtime is None:
+			return True
+
+		pkg_path = self._bintree.getname(cpv)
+		try:
+			s = os.lstat(pkg_path)
+		except OSError as e:
+			if e.errno not in (errno.ENOENT, errno.ESTALE):
+				raise
+			# We can't update the index for this one because
+			# it disappeared.
+			return False
+
+		try:
+			if long(mtime) != s[stat.ST_MTIME]:
+				return True
+			if long(size) != long(s.st_size):
+				return True
+		except ValueError:
+			return True
+
+		return False
+
+	def check(self, **kwargs):
+		onProgress = kwargs.get('onProgress', None)
+		missing = []
+		cpv_all = self._bintree.dbapi.cpv_all()
+		cpv_all.sort()
+		maxval = len(cpv_all)
+		if onProgress:
+			onProgress(maxval, 0)
+		pkgindex = self._pkgindex
+		missing = []
+		metadata = {}
+		for d in pkgindex.packages:
+			metadata[d["CPV"]] = d
+		for i, cpv in enumerate(cpv_all):
+			d = metadata.get(cpv)
+			if not d or self._need_update(cpv, d):
+				missing.append(cpv)
+			if onProgress:
+				onProgress(maxval, i+1)
+		errors = ["'%s' is not in Packages" % cpv for cpv in missing]
+		stale = set(metadata).difference(cpv_all)
+		for cpv in stale:
+			errors.append("'%s' is not in the repository" % cpv)
+		return errors
+
+	def fix(self, **kwargs):
+		onProgress = kwargs.get('onProgress', None)
+		bintree = self._bintree
+		cpv_all = self._bintree.dbapi.cpv_all()
+		cpv_all.sort()
+		missing = []
+		maxval = 0
+		if onProgress:
+			onProgress(maxval, 0)
+		pkgindex = self._pkgindex
+		missing = []
+		metadata = {}
+		for d in pkgindex.packages:
+			metadata[d["CPV"]] = d
+
+		for i, cpv in enumerate(cpv_all):
+			d = metadata.get(cpv)
+			if not d or self._need_update(cpv, d):
+				missing.append(cpv)
+
+		stale = set(metadata).difference(cpv_all)
+		if missing or stale:
+			from portage import locks
+			pkgindex_lock = locks.lockfile(
+				self._pkgindex_file, wantnewlockfile=1)
+			try:
+				# Repopulate with lock held.
+				bintree._populate()
+				cpv_all = self._bintree.dbapi.cpv_all()
+				cpv_all.sort()
+
+				pkgindex = bintree._load_pkgindex()
+				self._pkgindex = pkgindex
+
+				metadata = {}
+				for d in pkgindex.packages:
+					metadata[d["CPV"]] = d
+
+				# Recount missing packages, with lock held.
+				del missing[:]
+				for i, cpv in enumerate(cpv_all):
+					d = metadata.get(cpv)
+					if not d or self._need_update(cpv, d):
+						missing.append(cpv)
+
+				maxval = len(missing)
+				for i, cpv in enumerate(missing):
+					try:
+						metadata[cpv] = bintree._pkgindex_entry(cpv)
+					except portage.exception.InvalidDependString:
+						writemsg("!!! Invalid binary package: '%s'\n" % \
+							bintree.getname(cpv), noiselevel=-1)
+
+					if onProgress:
+						onProgress(maxval, i+1)
+
+				for cpv in set(metadata).difference(
+					self._bintree.dbapi.cpv_all()):
+					del metadata[cpv]
+
+				# We've updated the pkgindex, so set it to
+				# repopulate when necessary.
+				bintree.populated = False
+
+				del pkgindex.packages[:]
+				pkgindex.packages.extend(metadata.values())
+				bintree._pkgindex_write(self._pkgindex)
+
+			finally:
+				locks.unlockfile(pkgindex_lock)
+
+		if onProgress:
+			if maxval == 0:
+				maxval = 1
+			onProgress(maxval, maxval)
+		return None

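The heart of _need_update() is a size/mtime comparison between a Packages index entry and the package file on disk; as a standalone sketch (the function name and the entry dict layout mirror the method above):

    import os
    import stat

    def needs_update(entry, pkg_path):
        """entry holds 'MD5', 'SIZE' and 'MTIME' strings from the index."""
        if not all(k in entry for k in ("MD5", "SIZE", "MTIME")):
            return True
        try:
            s = os.lstat(pkg_path)
        except OSError:
            return False        # the package vanished; nothing to refresh
        try:
            return (int(entry["MTIME"]) != s[stat.ST_MTIME]
                or int(entry["SIZE"]) != s.st_size)
        except ValueError:
            return True         # corrupt index fields count as stale
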
diff --git a/portage_with_autodep/pym/portage/emaint/modules/config/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/config/__init__.py
new file mode 100644
index 0000000..f0585b3
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/config/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Check and clean the config tracker list for uninstalled packages.
+"""
+
+
+module_spec = {
+	'name': 'config',
+	'description': __doc__,
+	'provides':{
+		'module1': {
+			'name': "cleanconfmem",
+			'class': "CleanConfig",
+			'description': __doc__,
+			'functions': ['check', 'fix'],
+			'func_desc': {}
+			}
+		}
+	}

diff --git a/portage_with_autodep/pym/portage/emaint/modules/config/config.py b/portage_with_autodep/pym/portage/emaint/modules/config/config.py
new file mode 100644
index 0000000..dad024b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/config/config.py
@@ -0,0 +1,79 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.const import PRIVATE_PATH
+from portage.util import grabdict, writedict
+
+class CleanConfig(object):
+
+	short_desc = "Discard any no longer installed configs from emerge's tracker list"
+
+	def __init__(self):
+		self._root = portage.settings["ROOT"]
+		self.target = os.path.join(portage.settings["EROOT"], PRIVATE_PATH, 'config')
+
+	def name():
+		return "cleanconfmem"
+	name = staticmethod(name)
+
+	def load_configlist(self):
+		return grabdict(self.target)
+
+	def check(self, **kwargs):
+		onProgress = kwargs.get('onProgress', None)
+		configs = self.load_configlist()
+		messages = []
+		maxval = len(configs)
+		if onProgress:
+			onProgress(maxval, 0)
+			i = 0
+		keys = sorted(configs)
+		for config in keys:
+			if not os.path.exists(config):
+				messages.append("  %s" % config)
+			if onProgress:
+				onProgress(maxval, i+1)
+				i += 1
+		return self._format_output(messages)
+
+	def fix(self, **kwargs):
+		onProgress = kwargs.get('onProgress', None)
+		configs = self.load_configlist()
+		messages = []
+		maxval = len(configs)
+		if onProgress:
+			onProgress(maxval, 0)
+			i = 0
+
+		root = self._root
+		if root == "/":
+			root = None
+		modified = False
+		for config in sorted(configs):
+			if root is None:
+				full_path = config
+			else:
+				full_path = os.path.join(root, config.lstrip(os.sep))
+			if not os.path.exists(full_path):
+				modified = True
+				configs.pop(config)
+				messages.append("  %s" % config)
+			if onProgress:
+				onProgress(maxval, i+1)
+				i += 1
+		if modified:
+			writedict(configs, self.target)
+		return self._format_output(messages, True)
+
+	def _format_output(self, messages=[], cleaned=False):
+		output = []
+		if messages:
+			output.append('Not Installed:')
+			output += messages
+			tot = '------------------------------------\n  Total %i Not installed'
+			if cleaned:
+				tot += ' ...Cleaned'
+			output.append(tot  % len(messages))
+		return output

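Stripped of portage's grabdict/writedict serialization, fix() amounts to pruning a path-keyed dict of entries whose files no longer exist under ROOT; a self-contained equivalent (paths are illustrative):

    import os

    def clean_tracker(configs, root="/"):
        """Drop entries whose config file is gone; return the removed paths."""
        removed = []
        for path in sorted(configs):
            full = path if root == "/" else os.path.join(
                root, path.lstrip(os.sep))
            if not os.path.exists(full):
                del configs[path]
                removed.append(path)
        return removed

    configs = {"/etc/foo.conf": [], "/etc/gone.conf": []}
    print(clean_tracker(configs))
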
diff --git a/portage_with_autodep/pym/portage/emaint/modules/logs/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/logs/__init__.py
new file mode 100644
index 0000000..0407efe
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/logs/__init__.py
@@ -0,0 +1,45 @@
+# Copyright 2005-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Check and clean old logs in the PORT_LOGDIR.
+"""
+
+
+module_spec = {
+	'name': 'logs',
+	'description': __doc__,
+	'provides':{
+		'module1': {
+			'name': "logs",
+			'class': "CleanLogs",
+			'description': __doc__,
+			'functions': ['check','clean'],
+			'func_desc': {
+				'clean': {
+					"short": "-C", "long": "--clean",
+					"help": "Cleans out logs more than 7 days old (cleanlogs only)" + \
+								 "   module-options: -t, -p",
+					'status': "Cleaning %s",
+					'action': 'store_true',
+					'func': 'clean',
+					},
+				'time': {
+					"short": "-t", "long": "--time",
+					"help": "(cleanlogs only): -t, --time   Delete logs older than NUM of days",
+					'status': "",
+					'type': int,
+					'dest': 'NUM',
+					'func': 'clean'
+					},
+				'pretend': {
+					"short": "-p", "long": "--pretend",
+					"help": "(cleanlogs only): -p, --pretend   Output logs that would be deleted",
+					'status': "",
+					'action': 'store_true',
+					'dest': 'pretend',
+					'func': 'clean'
+					}
+				}
+			}
+		}
+	}

diff --git a/portage_with_autodep/pym/portage/emaint/modules/logs/logs.py b/portage_with_autodep/pym/portage/emaint/modules/logs/logs.py
new file mode 100644
index 0000000..fe65cf5
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/logs/logs.py
@@ -0,0 +1,103 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.util import shlex_split, varexpand
+
+## default clean command from make.globals
+## PORT_LOGDIR_CLEAN = 'find "${PORT_LOGDIR}" -type f ! -name "summary.log*" -mtime +7 -delete'
+
+class CleanLogs(object):
+
+	short_desc = "Clean PORT_LOGDIR logs"
+
+	def name():
+		return "logs"
+	name = staticmethod(name)
+
+
+	def can_progressbar(self, func):
+		return False
+
+
+	def check(self, **kwargs):
+		if kwargs:
+			options = kwargs.get('options', None)
+			if options:
+				options['pretend'] = True
+		return self.clean(**kwargs)
+
+
+	def clean(self, **kwargs):
+		"""Log directory cleaning function
+		
+		@param **kwargs: optional dictionary of values; those used here are:
+			settings: portage settings instance: defaults to portage.settings
+				"PORT_LOGDIR": directory to clean
+				"PORT_LOGDIR_CLEAN": command for cleaning the logs.
+			options: dict: 
+				'NUM': int: number of days
+				'pretend': boolean
+		"""
+		messages = []
+		num_of_days = None
+		pretend = False
+		if kwargs:
+			# convoluted, I know, but 'settings' may be absent from kwargs
+			# when this is called from _emerge.main.clean_logs()
+			settings = kwargs.get('settings', None)
+			if not settings:
+				settings = portage.settings
+			options = kwargs.get('options', None)
+			if options:
+				num_of_days = options.get('NUM', None)
+				pretend = options.get('pretend', False)
+
+		clean_cmd = settings.get("PORT_LOGDIR_CLEAN")
+		if clean_cmd:
+			clean_cmd = shlex_split(clean_cmd)
+			if '-mtime' in clean_cmd and num_of_days is not None:
+				if num_of_days == 0:
+					i = clean_cmd.index('-mtime')
+					clean_cmd.remove('-mtime')
+					clean_cmd.pop(i)
+				else:
+					clean_cmd[clean_cmd.index('-mtime') +1] = \
+						'+%s' % str(num_of_days)
+			if pretend:
+				if "-delete" in clean_cmd:
+					clean_cmd.remove("-delete")
+
+		if not clean_cmd:
+			return []
+		rval = self._clean_logs(clean_cmd, settings)
+		messages += self._convert_errors(rval)
+		return messages
+
+
+	@staticmethod
+	def _clean_logs(clean_cmd, settings):
+		logdir = settings.get("PORT_LOGDIR")
+		if logdir is None or not os.path.isdir(logdir):
+			return
+
+		variables = {"PORT_LOGDIR" : logdir}
+		cmd = [varexpand(x, mydict=variables) for x in clean_cmd]
+
+		try:
+			rval = portage.process.spawn(cmd, env=os.environ)
+		except portage.exception.CommandNotFound:
+			rval = 127
+		return rval
+
+
+	@staticmethod
+	def _convert_errors(rval):
+		msg = []
+		if rval != os.EX_OK:
+			msg.append("PORT_LOGDIR_CLEAN command returned %s"
+				% ("%d" % rval if rval else "None"))
+			msg.append("See the make.conf(5) man page for "
+				"PORT_LOGDIR_CLEAN usage instructions.")
+		return msg

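The option handling above rewrites the default find invocation from make.globals; exercising it with NUM=3 and pretend enabled (the stdlib shlex stands in for portage's shlex_split):

    import shlex

    clean_cmd = shlex.split('find "${PORT_LOGDIR}" -type f '
        '! -name "summary.log*" -mtime +7 -delete')
    num_of_days, pretend = 3, True
    if '-mtime' in clean_cmd and num_of_days is not None:
        clean_cmd[clean_cmd.index('-mtime') + 1] = '+%d' % num_of_days
    if pretend and '-delete' in clean_cmd:
        clean_cmd.remove('-delete')
    print(' '.join(clean_cmd))
    # find ${PORT_LOGDIR} -type f ! -name summary.log* -mtime +3
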
diff --git a/portage_with_autodep/pym/portage/emaint/modules/move/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/move/__init__.py
new file mode 100644
index 0000000..d31d7b3
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/move/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Perform package move updates for installed and binary packages.
+"""
+
+
+module_spec = {
+	'name': 'move',
+	'description': __doc__,
+	'provides':{
+		'module1': {
+			'name': "moveinst",
+			'class': "MoveInstalled",
+			'description': __doc__,
+			'options': ['check', 'fix'],
+			'functions': ['check', 'fix'],
+			'func_desc': {
+				}
+			},
+		'module2':{
+			'name': "movebin",
+			'class': "MoveBinary",
+			'description': "Perform package move updates for binary packages",
+			'functions': ['check', 'fix'],
+			'func_desc': {
+				}
+			}
+		}
+	}

diff --git a/portage_with_autodep/pym/portage/emaint/modules/move/move.py b/portage_with_autodep/pym/portage/emaint/modules/move/move.py
new file mode 100644
index 0000000..ef674d4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/move/move.py
@@ -0,0 +1,180 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.exception import InvalidData
+from _emerge.Package import Package
+from portage.versions import _pkg_str
+
+class MoveHandler(object):
+
+	def __init__(self, tree, porttree):
+		self._tree = tree
+		self._portdb = porttree.dbapi
+		self._update_keys = Package._dep_keys + ("PROVIDE",)
+		self._master_repo = \
+			self._portdb.getRepositoryName(self._portdb.porttree_root)
+
+	def _grab_global_updates(self):
+		from portage.update import grab_updates, parse_updates
+		retupdates = {}
+		errors = []
+
+		for repo_name in self._portdb.getRepositories():
+			repo = self._portdb.getRepositoryPath(repo_name)
+			updpath = os.path.join(repo, "profiles", "updates")
+			if not os.path.isdir(updpath):
+				continue
+
+			try:
+				rawupdates = grab_updates(updpath)
+			except portage.exception.DirectoryNotFound:
+				rawupdates = []
+			upd_commands = []
+			for mykey, mystat, mycontent in rawupdates:
+				commands, errors_ = parse_updates(mycontent)
+				upd_commands.extend(commands)
+				errors.extend(errors_)
+			retupdates[repo_name] = upd_commands
+
+		if self._master_repo in retupdates:
+			retupdates['DEFAULT'] = retupdates[self._master_repo]
+
+		return retupdates, errors
+
+	def check(self, **kwargs):
+		onProgress = kwargs.get('onProgress', None)
+		allupdates, errors = self._grab_global_updates()
+		# Matching packages and moving them is relatively fast, so the
+		# progress bar is updated in indeterminate mode.
+		match = self._tree.dbapi.match
+		aux_get = self._tree.dbapi.aux_get
+		pkg_str = self._tree.dbapi._pkg_str
+		settings = self._tree.dbapi.settings
+		if onProgress:
+			onProgress(0, 0)
+		for repo, updates in allupdates.items():
+			if repo == 'DEFAULT':
+				continue
+			if not updates:
+				continue
+
+			def repo_match(repository):
+				return repository == repo or \
+					(repo == self._master_repo and \
+					repository not in allupdates)
+
+			for i, update_cmd in enumerate(updates):
+				if update_cmd[0] == "move":
+					origcp, newcp = update_cmd[1:]
+					for cpv in match(origcp):
+						try:
+							cpv = pkg_str(cpv, origcp.repo)
+						except (KeyError, InvalidData):
+							continue
+						if repo_match(cpv.repo):
+							errors.append("'%s' moved to '%s'" % (cpv, newcp))
+				elif update_cmd[0] == "slotmove":
+					pkg, origslot, newslot = update_cmd[1:]
+					atom = pkg.with_slot(origslot)
+					for cpv in match(atom):
+						try:
+							cpv = pkg_str(cpv, atom.repo)
+						except (KeyError, InvalidData):
+							continue
+						if repo_match(cpv.repo):
+							errors.append("'%s' slot moved from '%s' to '%s'" % \
+								(cpv, origslot, newslot))
+				if onProgress:
+					onProgress(0, 0)
+
+		# Searching for updates in all the metadata is relatively slow, so this
+		# is where the progress bar comes out of indeterminate mode.
+		cpv_all = self._tree.dbapi.cpv_all()
+		cpv_all.sort()
+		maxval = len(cpv_all)
+		meta_keys = self._update_keys + self._portdb._pkg_str_aux_keys
+		if onProgress:
+			onProgress(maxval, 0)
+		for i, cpv in enumerate(cpv_all):
+			try:
+				metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+			except KeyError:
+				continue
+			try:
+				pkg = _pkg_str(cpv, metadata=metadata, settings=settings)
+			except InvalidData:
+				continue
+			metadata = dict((k, metadata[k]) for k in self._update_keys)
+			try:
+				updates = allupdates[pkg.repo]
+			except KeyError:
+				try:
+					updates = allupdates['DEFAULT']
+				except KeyError:
+					continue
+			if not updates:
+				continue
+			metadata_updates = \
+				portage.update_dbentries(updates, metadata, parent=pkg)
+			if metadata_updates:
+				errors.append("'%s' has outdated metadata" % cpv)
+			if onProgress:
+				onProgress(maxval, i+1)
+		return errors
+
+	def fix(self, **kwargs):
+		onProgress = kwargs.get('onProgress', None)
+		allupdates, errors = self._grab_global_updates()
+		# Matching packages and moving them is relatively fast, so the
+		# progress bar is updated in indeterminate mode.
+		move = self._tree.dbapi.move_ent
+		slotmove = self._tree.dbapi.move_slot_ent
+		if onProgress:
+			onProgress(0, 0)
+		for repo, updates in allupdates.items():
+			if repo == 'DEFAULT':
+				continue
+			if not updates:
+				continue
+
+			def repo_match(repository):
+				return repository == repo or \
+					(repo == self._master_repo and \
+					repository not in allupdates)
+
+			for i, update_cmd in enumerate(updates):
+				if update_cmd[0] == "move":
+					move(update_cmd, repo_match=repo_match)
+				elif update_cmd[0] == "slotmove":
+					slotmove(update_cmd, repo_match=repo_match)
+				if onProgress:
+					onProgress(0, 0)
+
+		# Searching for updates in all the metadata is relatively slow, so this
+		# is where the progress bar comes out of indeterminate mode.
+		self._tree.dbapi.update_ents(allupdates, onProgress=onProgress)
+		return errors
+
+class MoveInstalled(MoveHandler):
+
+	short_desc = "Perform package move updates for installed packages"
+
+	def name():
+		return "moveinst"
+	name = staticmethod(name)
+	def __init__(self):
+		eroot = portage.settings['EROOT']
+		MoveHandler.__init__(self, portage.db[eroot]["vartree"], portage.db[eroot]["porttree"])
+
+class MoveBinary(MoveHandler):
+
+	short_desc = "Perform package move updates for binary packages"
+
+	def name():
+		return "movebin"
+	name = staticmethod(name)
+	def __init__(self):
+		eroot = portage.settings['EROOT']
+		MoveHandler.__init__(self, portage.db[eroot]["bintree"], portage.db[eroot]['porttree'])

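The repo_match closure built in both check() and fix() encodes one rule: a repository's own updates apply to its packages, and the master repository's updates additionally cover repositories that ship no updates of their own. In isolation:

    def make_repo_match(repo, master_repo, allupdates):
        def repo_match(repository):
            return repository == repo or (
                repo == master_repo and repository not in allupdates)
        return repo_match

    allupdates = {"gentoo": ["..."], "myoverlay": ["..."]}
    match = make_repo_match("gentoo", "gentoo", allupdates)
    print(match("gentoo"))      # True: the repo's own packages
    print(match("myoverlay"))   # False: that overlay ships its own updates
    print(match("other"))       # True: covered by the master repo
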
diff --git a/portage_with_autodep/pym/portage/emaint/modules/resume/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/resume/__init__.py
new file mode 100644
index 0000000..965e8f9
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/resume/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Check and fix problems in the resume and/or resume_backup files.
+"""
+
+
+module_spec = {
+	'name': 'resume',
+	'description': __doc__,
+	'provides':{
+		'module1': {
+			'name': "cleanresume",
+			'class': "CleanResume",
+			'description':  "Discard emerge --resume merge lists",
+			'functions': ['check', 'fix'],
+			'func_desc': {}
+			}
+		}
+	}

diff --git a/portage_with_autodep/pym/portage/emaint/modules/resume/resume.py b/portage_with_autodep/pym/portage/emaint/modules/resume/resume.py
new file mode 100644
index 0000000..1bada52
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/resume/resume.py
@@ -0,0 +1,58 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+
+
+class CleanResume(object):
+
+	short_desc = "Discard emerge --resume merge lists"
+
+	def name():
+		return "cleanresume"
+	name = staticmethod(name)
+
+	def check(self, **kwargs):
+		onProgress = kwargs.get('onProgress', None)
+		messages = []
+		mtimedb = portage.mtimedb
+		resume_keys = ("resume", "resume_backup")
+		maxval = len(resume_keys)
+		if onProgress:
+			onProgress(maxval, 0)
+		for i, k in enumerate(resume_keys):
+			try:
+				d = mtimedb.get(k)
+				if d is None:
+					continue
+				if not isinstance(d, dict):
+					messages.append("unrecognized resume list: '%s'" % k)
+					continue
+				mergelist = d.get("mergelist")
+				if mergelist is None or not hasattr(mergelist, "__len__"):
+					messages.append("unrecognized resume list: '%s'" % k)
+					continue
+				messages.append("resume list '%s' contains %d packages" % \
+					(k, len(mergelist)))
+			finally:
+				if onProgress:
+					onProgress(maxval, i+1)
+		return messages
+
+	def fix(self, **kwargs):
+		onProgress = kwargs.get('onProgress', None)
+		delete_count = 0
+		mtimedb = portage.mtimedb
+		resume_keys = ("resume", "resume_backup")
+		maxval = len(resume_keys)
+		if onProgress:
+			onProgress(maxval, 0)
+		for i, k in enumerate(resume_keys):
+			try:
+				if mtimedb.pop(k, None) is not None:
+					delete_count += 1
+			finally:
+				if onProgress:
+					onProgress(maxval, i+1)
+		if delete_count:
+			mtimedb.commit()

diff --git a/portage_with_autodep/pym/portage/emaint/modules/world/__init__.py b/portage_with_autodep/pym/portage/emaint/modules/world/__init__.py
new file mode 100644
index 0000000..3f62270
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/world/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Check and fix problems in the world file.
+"""
+
+
+module_spec = {
+	'name': 'world',
+	'description': __doc__,
+	'provides':{
+		'module1':{
+			'name': "world",
+			'class': "WorldHandler",
+			'description': __doc__,
+			'functions': ['check', 'fix'],
+			'func_desc': {}
+			}
+		}
+	}

diff --git a/portage_with_autodep/pym/portage/emaint/modules/world/world.py b/portage_with_autodep/pym/portage/emaint/modules/world/world.py
new file mode 100644
index 0000000..2c9dbff
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/modules/world/world.py
@@ -0,0 +1,89 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+
+
+class WorldHandler(object):
+
+	short_desc = "Fix problems in the world file"
+
+	def name():
+		return "world"
+	name = staticmethod(name)
+
+	def __init__(self):
+		self.invalid = []
+		self.not_installed = []
+		self.okay = []
+		from portage._sets import load_default_config
+		setconfig = load_default_config(portage.settings,
+			portage.db[portage.settings['EROOT']])
+		self._sets = setconfig.getSets()
+
+	def _check_world(self, onProgress):
+		eroot = portage.settings['EROOT']
+		self.world_file = os.path.join(eroot, portage.const.WORLD_FILE)
+		self.found = os.access(self.world_file, os.R_OK)
+		vardb = portage.db[eroot]["vartree"].dbapi
+
+		from portage._sets import SETPREFIX
+		sets = self._sets
+		world_atoms = list(sets["selected"])
+		maxval = len(world_atoms)
+		if onProgress:
+			onProgress(maxval, 0)
+		for i, atom in enumerate(world_atoms):
+			if not isinstance(atom, portage.dep.Atom):
+				if atom.startswith(SETPREFIX):
+					s = atom[len(SETPREFIX):]
+					if s in sets:
+						self.okay.append(atom)
+					else:
+						self.not_installed.append(atom)
+				else:
+					self.invalid.append(atom)
+				if onProgress:
+					onProgress(maxval, i+1)
+				continue
+			okay = True
+			if not vardb.match(atom):
+				self.not_installed.append(atom)
+				okay = False
+			if okay:
+				self.okay.append(atom)
+			if onProgress:
+				onProgress(maxval, i+1)
+
+	def check(self, **kwargs):
+		onProgress = kwargs.get('onProgress', None)
+		self._check_world(onProgress)
+		errors = []
+		if self.found:
+			errors += ["'%s' is not a valid atom" % x for x in self.invalid]
+			errors += ["'%s' is not installed" % x for x in self.not_installed]
+		else:
+			errors.append(self.world_file + " could not be opened for reading")
+		return errors
+
+	def fix(self, **kwargs):
+		onProgress = kwargs.get('onProgress', None)
+		world_set = self._sets["selected"]
+		world_set.lock()
+		try:
+			world_set.load() # maybe it's changed on disk
+			before = set(world_set)
+			self._check_world(onProgress)
+			after = set(self.okay)
+			errors = []
+			if before != after:
+				try:
+					world_set.replace(self.okay)
+				except portage.exception.PortageException:
+					errors.append("%s could not be opened for writing" % \
+						self.world_file)
+			return errors
+		finally:
+			world_set.unlock()
+

diff --git a/portage_with_autodep/pym/portage/emaint/progress.py b/portage_with_autodep/pym/portage/emaint/progress.py
new file mode 100644
index 0000000..e43c2af
--- /dev/null
+++ b/portage_with_autodep/pym/portage/emaint/progress.py
@@ -0,0 +1,61 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+import signal
+
+import portage
+
+
+class ProgressHandler(object):
+	def __init__(self):
+		self.reset()
+
+	def reset(self):
+		self.curval = 0
+		self.maxval = 0
+		self.last_update = 0
+		self.min_display_latency = 0.2
+
+	def onProgress(self, maxval, curval):
+		self.maxval = maxval
+		self.curval = curval
+		cur_time = time.time()
+		if cur_time - self.last_update >= self.min_display_latency:
+			self.last_update = cur_time
+			self.display()
+
+	def display(self):
+		raise NotImplementedError(self)
+
+
+class ProgressBar(ProgressHandler):
+	"""Class to set up and return a Progress Bar"""
+
+	def __init__(self, isatty, **kwargs):
+		self.isatty = isatty
+		self.kwargs = kwargs
+		ProgressHandler.__init__(self)
+		self.progressBar = None
+
+	def start(self):
+		if self.isatty:
+			self.progressBar = portage.output.TermProgressBar(**self.kwargs)
+			signal.signal(signal.SIGWINCH, self.sigwinch_handler)
+		else:
+			self.onProgress = None
+		return self.onProgress
+
+	def set_label(self, _label):
+		self.kwargs['label'] = _label
+
+	def display(self):
+		self.progressBar.set(self.curval, self.maxval)
+
+	def sigwinch_handler(self, signum, frame):
+		lines, self.progressBar.term_columns = \
+			portage.output.get_term_size()
+
+	def stop(self):
+		signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+

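The ProgressHandler contract is small: callers feed onProgress(maxval, curval) as often as they like, and display() runs at most once per min_display_latency interval. A dependency-free stand-in illustrating the same throttling:

    import sys
    import time

    class DotProgress(object):
        """Minimal ProgressHandler analogue with throttled redraws."""

        def __init__(self, latency=0.2):
            self.latency = latency
            self.last_update = 0.0

        def onProgress(self, maxval, curval):
            now = time.time()
            if now - self.last_update >= self.latency or curval == maxval:
                self.last_update = now
                sys.stderr.write("\r%d/%d" % (curval, maxval))

    progress = DotProgress()
    for i in range(1000):
        progress.onProgress(1000, i + 1)
    sys.stderr.write("\n")
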
diff --git a/portage_with_autodep/pym/portage/env/__init__.pyo b/portage_with_autodep/pym/portage/env/__init__.pyo
new file mode 100644
index 0000000..846aea3
Binary files /dev/null and b/portage_with_autodep/pym/portage/env/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/env/config.pyo b/portage_with_autodep/pym/portage/env/config.pyo
new file mode 100644
index 0000000..13c2e86
Binary files /dev/null and b/portage_with_autodep/pym/portage/env/config.pyo differ

diff --git a/portage_with_autodep/pym/portage/env/loaders.py b/portage_with_autodep/pym/portage/env/loaders.py
index b540fbb..372bc12 100644
--- a/portage_with_autodep/pym/portage/env/loaders.py
+++ b/portage_with_autodep/pym/portage/env/loaders.py
@@ -40,7 +40,7 @@ def RecursiveFileLoader(filename):
 
 	@param filename: name of a file/directory to traverse
 	@rtype: list
-	@returns: List of files to process
+	@return: List of files to process
 	"""
 
 	try:
@@ -139,7 +139,7 @@ class FileLoader(DataLoader):
 			load all files in self.fname
 		@type: Boolean
 		@rtype: tuple
-		@returns:
+		@return:
 		Returns (data,errors), both may be empty dicts or populated.
 		"""
 		data = {}

diff --git a/portage_with_autodep/pym/portage/env/loaders.pyo b/portage_with_autodep/pym/portage/env/loaders.pyo
new file mode 100644
index 0000000..2622a9f
Binary files /dev/null and b/portage_with_autodep/pym/portage/env/loaders.pyo differ

diff --git a/portage_with_autodep/pym/portage/env/validators.pyo b/portage_with_autodep/pym/portage/env/validators.pyo
new file mode 100644
index 0000000..cd18adb
Binary files /dev/null and b/portage_with_autodep/pym/portage/env/validators.pyo differ

diff --git a/portage_with_autodep/pym/portage/exception.py b/portage_with_autodep/pym/portage/exception.py
index 7891120..5ccd750 100644
--- a/portage_with_autodep/pym/portage/exception.py
+++ b/portage_with_autodep/pym/portage/exception.py
@@ -78,6 +78,10 @@ class OperationNotPermitted(PortageException):
 	from errno import EPERM as errno
 	"""An operation was not permitted operating system"""
 
+class OperationNotSupported(PortageException):
+	from errno import EOPNOTSUPP as errno
+	"""Operation not supported"""
+
 class PermissionDenied(PortageException):
 	from errno import EACCES as errno
 	"""Permission denied"""

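The `from errno import EOPNOTSUPP as errno` line inside the class body is a compact way of attaching the errno constant as a class attribute; spelled out, the same effect reads:

    import errno

    class OperationNotSupported(Exception):
        errno = errno.EOPNOTSUPP   # class attribute, like the import-as trick

    try:
        raise OperationNotSupported("no hardlinks on this filesystem")
    except OperationNotSupported as e:
        print(e.errno == errno.EOPNOTSUPP)   # True
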
diff --git a/portage_with_autodep/pym/portage/exception.pyo b/portage_with_autodep/pym/portage/exception.pyo
new file mode 100644
index 0000000..3a60e7c
Binary files /dev/null and b/portage_with_autodep/pym/portage/exception.pyo differ

diff --git a/portage_with_autodep/pym/portage/getbinpkg.py b/portage_with_autodep/pym/portage/getbinpkg.py
index a511f51..212f788 100644
--- a/portage_with_autodep/pym/portage/getbinpkg.py
+++ b/portage_with_autodep/pym/portage/getbinpkg.py
@@ -1,5 +1,5 @@
 # getbinpkg.py -- Portage binary-package helper functions
-# Copyright 2003-2011 Gentoo Foundation
+# Copyright 2003-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from portage.output import colorize
@@ -8,7 +8,10 @@ from portage.localization import _
 import portage
 from portage import os
 from portage import _encodings
+from portage import _unicode_decode
 from portage import _unicode_encode
+from portage.package.ebuild.fetch import _hide_url_passwd
+from _emerge.Package import _all_metadata_keys
 
 import sys
 import socket
@@ -65,8 +68,15 @@ def make_metadata_dict(data):
 	myid,myglob = data
 	
 	mydict = {}
-	for x in portage.xpak.getindex_mem(myid):
-		mydict[x] = portage.xpak.getitem(data,x)
+	for k_bytes in portage.xpak.getindex_mem(myid):
+		k = _unicode_decode(k_bytes,
+			encoding=_encodings['repo.content'], errors='replace')
+		if k not in _all_metadata_keys and \
+			k != "CATEGORY":
+			continue
+		v = _unicode_decode(portage.xpak.getitem(data, k_bytes),
+			encoding=_encodings['repo.content'], errors='replace')
+		mydict[k] = v
 
 	return mydict
 
@@ -149,11 +159,16 @@ def create_conn(baseurl,conn=None):
 	http_headers = {}
 	http_params = {}
 	if username and password:
+		try:
+			encodebytes = base64.encodebytes
+		except AttributeError:
+			# Python 2
+			encodebytes = base64.encodestring
 		http_headers = {
-			"Authorization": "Basic %s" %
-			  base64.encodestring("%s:%s" % (username, password)).replace(
-			    "\012",
-			    ""
+			b"Authorization": "Basic %s" % \
+			encodebytes(_unicode_encode("%s:%s" % (username, password))).replace(
+			    b"\012",
+			    b""
 			  ),
 		}
 
@@ -354,7 +369,7 @@ def dir_get_list(baseurl,conn=None):
 		
 		if page:
 			parser = ParseLinks()
-			parser.feed(page)
+			parser.feed(_unicode_decode(page))
 			del page
 			listing = parser.get_anchors()
 		else:
@@ -542,7 +557,7 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
 		out.write(_("Loaded metadata pickle.\n"))
 		out.flush()
 		metadatafile.close()
-	except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError):
+	except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError):
 		metadata = {}
 	if baseurl not in metadata:
 		metadata[baseurl]={}
@@ -564,7 +579,8 @@ def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=
 	try:
 		filelist = dir_get_list(baseurl, conn)
 	except portage.exception.PortageException as e:
-		sys.stderr.write(_("!!! Error connecting to '%s'.\n") % baseurl)
+		sys.stderr.write(_("!!! Error connecting to '%s'.\n") %
+			_hide_url_passwd(baseurl))
 		sys.stderr.write("!!! %s\n" % str(e))
 		del e
 		return metadata[baseurl]["data"]

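The Authorization change keys off base64.encodebytes, which only exists on Python 3, and falls back to encodestring on Python 2. Reduced to a helper free of portage's encoding wrappers:

    import base64

    def basic_auth_header(username, password):
        cred = ("%s:%s" % (username, password)).encode("utf-8")
        try:
            enc = base64.encodebytes(cred)      # Python 3
        except AttributeError:
            enc = base64.encodestring(cred)     # Python 2 fallback
        return b"Basic " + enc.replace(b"\n", b"")

    print(basic_auth_header("user", "secret"))
    # b'Basic dXNlcjpzZWNyZXQ='
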
diff --git a/portage_with_autodep/pym/portage/getbinpkg.pyo b/portage_with_autodep/pym/portage/getbinpkg.pyo
new file mode 100644
index 0000000..53ec2e9
Binary files /dev/null and b/portage_with_autodep/pym/portage/getbinpkg.pyo differ

diff --git a/portage_with_autodep/pym/portage/glsa.py b/portage_with_autodep/pym/portage/glsa.py
index a784d14..1857695 100644
--- a/portage_with_autodep/pym/portage/glsa.py
+++ b/portage_with_autodep/pym/portage/glsa.py
@@ -1,4 +1,4 @@
-# Copyright 2003-2011 Gentoo Foundation
+# Copyright 2003-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import absolute_import
@@ -17,7 +17,7 @@ from portage import os
 from portage import _encodings
 from portage import _unicode_decode
 from portage import _unicode_encode
-from portage.versions import pkgsplit, catpkgsplit, pkgcmp, best
+from portage.versions import pkgsplit, vercmp, best
 from portage.util import grabfile
 from portage.const import CACHE_PATH
 from portage.localization import _
@@ -372,17 +372,14 @@ def getMinUpgrade(vulnerableList, unaffectedList, portdbapi, vardbapi, minimize=
 	for u in unaffectedList:
 		mylist = match(u, portdbapi, match_type="match-all")
 		for c in mylist:
-			c_pv = catpkgsplit(c)
-			i_pv = catpkgsplit(best(v_installed))
-			if pkgcmp(c_pv[1:], i_pv[1:]) > 0 \
+			i = best(v_installed)
+			if vercmp(c.version, i.version) > 0 \
 					and (rValue == None \
 						or not match("="+rValue, portdbapi) \
-						or (minimize ^ (pkgcmp(c_pv[1:], catpkgsplit(rValue)[1:]) > 0)) \
+						or (minimize ^ (vercmp(c.version, rValue.version) > 0)) \
 							and match("="+c, portdbapi)) \
 					and portdbapi.aux_get(c, ["SLOT"]) == vardbapi.aux_get(best(v_installed), ["SLOT"]):
-				rValue = c_pv[0]+"/"+c_pv[1]+"-"+c_pv[2]
-				if c_pv[3] != "r0":		# we don't like -r0 for display
-					rValue += "-"+c_pv[3]
+				rValue = c
 	return rValue
 
 def format_date(datestr):
@@ -488,7 +485,7 @@ class Glsa:
 		@type	myfile: String
 		@param	myfile: Filename to grab the XML data from
 		@rtype:		None
-		@returns:	None
+		@return:	None
 		"""
 		self.DOM = xml.dom.minidom.parse(myfile)
 		if not self.DOM.doctype:
@@ -634,7 +631,7 @@ class Glsa:
 		architectures.
 		
 		@rtype:		Boolean
-		@returns:	True if the system is affected, False if not
+		@return:	True if the system is affected, False if not
 		"""
 		rValue = False
 		for k in self.packages:
@@ -654,7 +651,7 @@ class Glsa:
 		GLSA was already applied.
 		
 		@rtype:		Boolean
-		@returns:	True if the GLSA was applied, False if not
+		@return:	True if the GLSA was applied, False if not
 		"""
 		return (self.nr in get_applied_glsas(self.config))
 
@@ -665,7 +662,7 @@ class Glsa:
 		applied or on explicit user request.
 
 		@rtype:		None
-		@returns:	None
+		@return:	None
 		"""
 		if not self.isApplied():
 			checkfile = io.open(

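The glsa hunk swaps catpkgsplit/pkgcmp for direct vercmp calls on the version strings carried by _pkg_str objects; portage.versions.vercmp compares bare version strings (this snippet assumes a portage install):

    from portage.versions import vercmp

    # vercmp returns > 0, 0, or < 0, like a classic cmp()
    print(vercmp("1.0-r1", "1.0"))      # positive: 1.0-r1 is newer
    print(vercmp("2.1_p2", "2.1_p2"))   # zero: equal
    print(vercmp("1.0_alpha", "1.0"))   # negative: alpha predates the release
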
diff --git a/portage_with_autodep/pym/portage/glsa.pyo b/portage_with_autodep/pym/portage/glsa.pyo
new file mode 100644
index 0000000..65162f1
Binary files /dev/null and b/portage_with_autodep/pym/portage/glsa.pyo differ

diff --git a/portage_with_autodep/pym/portage/localization.pyo b/portage_with_autodep/pym/portage/localization.pyo
new file mode 100644
index 0000000..e992e3a
Binary files /dev/null and b/portage_with_autodep/pym/portage/localization.pyo differ

diff --git a/portage_with_autodep/pym/portage/locks.py b/portage_with_autodep/pym/portage/locks.py
index 9ed1d6a..59fbc6e 100644
--- a/portage_with_autodep/pym/portage/locks.py
+++ b/portage_with_autodep/pym/portage/locks.py
@@ -1,5 +1,5 @@
 # portage: Lock management code
-# Copyright 2004-2010 Gentoo Foundation
+# Copyright 2004-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = ["lockdir", "unlockdir", "lockfile", "unlockfile", \
@@ -8,13 +8,13 @@ __all__ = ["lockdir", "unlockdir", "lockfile", "unlockfile", \
 
 import errno
 import fcntl
-import stat
+import platform
 import sys
 import time
+import warnings
 
 import portage
-from portage import os
-from portage.const import PORTAGE_BIN_PATH
+from portage import os, _encodings, _unicode_decode
 from portage.exception import DirectoryNotFound, FileNotFound, \
 	InvalidData, TryAgain, OperationNotPermitted, PermissionDenied
 from portage.data import portage_gid
@@ -25,12 +25,30 @@ if sys.hexversion >= 0x3000000:
 	basestring = str
 
 HARDLINK_FD = -2
+_HARDLINK_POLL_LATENCY = 3 # seconds
 _default_lock_fn = fcntl.lockf
 
+if platform.python_implementation() == 'PyPy':
+	# workaround for https://bugs.pypy.org/issue747
+	_default_lock_fn = fcntl.flock
+
 # Used by emerge in order to disable the "waiting for lock" message
 # so that it doesn't interfere with the status display.
 _quiet = False
 
+
+_open_fds = set()
+
+def _close_fds():
+	"""
+	This is intended to be called after a fork, in order to close file
+	descriptors for locks held by the parent process. This can be called
+	safely after a fork without exec, unlike the _setup_pipes close_fds
+	behavior.
+	"""
+	while _open_fds:
+		os.close(_open_fds.pop())
+
 def lockdir(mydir, flags=0):
 	return lockfile(mydir, wantnewlockfile=1, flags=flags)
 def unlockdir(mylock):
@@ -46,19 +64,31 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
 	if not mypath:
 		raise InvalidData(_("Empty path given"))
 
+	# Support for file object or integer file descriptor parameters is
+	# deprecated due to ambiguity in whether or not it's safe to close
+	# the file descriptor, making it prone to "Bad file descriptor" errors
+	# or file descriptor leaks.
 	if isinstance(mypath, basestring) and mypath[-1] == '/':
 		mypath = mypath[:-1]
 
+	lockfilename_path = mypath
 	if hasattr(mypath, 'fileno'):
+		warnings.warn("portage.locks.lockfile() support for "
+			"file object parameters is deprecated. Use a file path instead.",
+			DeprecationWarning, stacklevel=2)
+		lockfilename_path = getattr(mypath, 'name', None)
 		mypath = mypath.fileno()
 	if isinstance(mypath, int):
+		warnings.warn("portage.locks.lockfile() support for integer file "
+			"descriptor parameters is deprecated. Use a file path instead.",
+			DeprecationWarning, stacklevel=2)
 		lockfilename    = mypath
 		wantnewlockfile = 0
 		unlinkfile      = 0
 	elif wantnewlockfile:
 		base, tail = os.path.split(mypath)
 		lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
-		del base, tail
+		lockfilename_path = lockfilename
 		unlinkfile   = 1
 	else:
 		lockfilename = mypath
@@ -112,6 +142,8 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
 	# we're waiting on lockfile and use a blocking attempt.
 	locking_method = _default_lock_fn
 	try:
+		if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+			raise IOError(errno.ENOSYS, "Function not implemented")
 		locking_method(myfd, fcntl.LOCK_EX|fcntl.LOCK_NB)
 	except IOError as e:
 		if not hasattr(e, "errno"):
@@ -143,20 +175,22 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
 				raise
 			if out is not None:
 				out.eend(os.EX_OK)
-		elif e.errno == errno.ENOLCK:
+		elif e.errno in (errno.ENOSYS, errno.ENOLCK):
 			# We're not allowed to lock on this FS.
-			os.close(myfd)
-			link_success = False
-			if lockfilename == str(lockfilename):
-				if wantnewlockfile:
-					try:
-						if os.stat(lockfilename)[stat.ST_NLINK] == 1:
-							os.unlink(lockfilename)
-					except OSError:
-						pass
-					link_success = hardlink_lockfile(lockfilename)
+			if not isinstance(lockfilename, int):
+				# If a file object was passed in, it's not safe
+				# to close the file descriptor because it may
+				# still be in use.
+				os.close(myfd)
+			lockfilename_path = _unicode_decode(lockfilename_path,
+				encoding=_encodings['fs'], errors='strict')
+			if not isinstance(lockfilename_path, basestring):
+				raise
+			link_success = hardlink_lockfile(lockfilename_path,
+				waiting_msg=waiting_msg, flags=flags)
 			if not link_success:
 				raise
+			lockfilename = lockfilename_path
 			locking_method = None
 			myfd = HARDLINK_FD
 		else:
@@ -172,6 +206,9 @@ def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
 			mypath, wantnewlockfile=wantnewlockfile, unlinkfile=unlinkfile,
 			waiting_msg=waiting_msg, flags=flags)
 
+	if myfd != HARDLINK_FD:
+		_open_fds.add(myfd)
+
 	writemsg(str((lockfilename,myfd,unlinkfile))+"\n",1)
 	return (lockfilename,myfd,unlinkfile,locking_method)
 
@@ -203,7 +240,7 @@ def unlockfile(mytuple):
 		raise InvalidData
 
 	if(myfd == HARDLINK_FD):
-		unhardlink_lockfile(lockfilename)
+		unhardlink_lockfile(lockfilename, unlinkfile=unlinkfile)
 		return True
 	
 	# myfd may be None here due to myfd = mypath in lockfile()
@@ -212,6 +249,7 @@ def unlockfile(mytuple):
 		writemsg(_("lockfile does not exist '%s'\n") % lockfilename,1)
 		if myfd is not None:
 			os.close(myfd)
+			_open_fds.remove(myfd)
 		return False
 
 	try:
@@ -222,6 +260,7 @@ def unlockfile(mytuple):
 	except OSError:
 		if isinstance(lockfilename, basestring):
 			os.close(myfd)
+			_open_fds.remove(myfd)
 		raise IOError(_("Failed to unlock file '%s'\n") % lockfilename)
 
 	try:
@@ -243,6 +282,7 @@ def unlockfile(mytuple):
 			else:
 				writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
 				os.close(myfd)
+				_open_fds.remove(myfd)
 				return False
 	except SystemExit:
 		raise
@@ -255,6 +295,7 @@ def unlockfile(mytuple):
 	# open fd closed automatically on them.
 	if isinstance(lockfilename, basestring):
 		os.close(myfd)
+		_open_fds.remove(myfd)
 
 	return True
 
@@ -262,65 +303,148 @@ def unlockfile(mytuple):
 
 
 def hardlock_name(path):
-	return path+".hardlock-"+os.uname()[1]+"-"+str(os.getpid())
+	base, tail = os.path.split(path)
+	return os.path.join(base, ".%s.hardlock-%s-%s" %
+		(tail, os.uname()[1], os.getpid()))
 
 def hardlink_is_mine(link,lock):
 	try:
-		return os.stat(link).st_nlink == 2
+		lock_st = os.stat(lock)
+		if lock_st.st_nlink == 2:
+			link_st = os.stat(link)
+			return lock_st.st_ino == link_st.st_ino and \
+				lock_st.st_dev == link_st.st_dev
 	except OSError:
-		return False
+		pass
+	return False
 
-def hardlink_lockfile(lockfilename, max_wait=14400):
+def hardlink_lockfile(lockfilename, max_wait=DeprecationWarning,
+	waiting_msg=None, flags=0):
 	"""Does the NFS, hardlink shuffle to ensure locking on the disk.
-	We create a PRIVATE lockfile, that is just a placeholder on the disk.
-	Then we HARDLINK the real lockfile to that private file.
+	We create a PRIVATE hardlink to the real lockfile, which is just a
+	placeholder on the disk.
 	If our file has 2 references, then we have the lock. :)
 	Otherwise we lather, rinse, and repeat.
-	We default to a 4 hour timeout.
 	"""
 
-	start_time = time.time()
+	if max_wait is not DeprecationWarning:
+		warnings.warn("The 'max_wait' parameter of "
+			"portage.locks.hardlink_lockfile() is now unused. Use "
+			"flags=os.O_NONBLOCK instead.",
+			DeprecationWarning, stacklevel=2)
+
+	global _quiet
+	out = None
+	displayed_waiting_msg = False
+	preexisting = os.path.exists(lockfilename)
 	myhardlock = hardlock_name(lockfilename)
-	reported_waiting = False
-	
-	while(time.time() < (start_time + max_wait)):
-		# We only need it to exist.
-		myfd = os.open(myhardlock, os.O_CREAT|os.O_RDWR,0o660)
-		os.close(myfd)
-	
-		if not os.path.exists(myhardlock):
-			raise FileNotFound(
-				_("Created lockfile is missing: %(filename)s") % \
-				{"filename" : myhardlock})
 
-		try:
-			res = os.link(myhardlock, lockfilename)
-		except OSError:
+	# myhardlock must not exist prior to our link() call, and we can
+	# safely unlink it since its file name is unique to our PID
+	try:
+		os.unlink(myhardlock)
+	except OSError as e:
+		if e.errno in (errno.ENOENT, errno.ESTALE):
 			pass
+		else:
+			func_call = "unlink('%s')" % myhardlock
+			if e.errno == OperationNotPermitted.errno:
+				raise OperationNotPermitted(func_call)
+			elif e.errno == PermissionDenied.errno:
+				raise PermissionDenied(func_call)
+			else:
+				raise
 
-		if hardlink_is_mine(myhardlock, lockfilename):
-			# We have the lock.
-			if reported_waiting:
-				writemsg("\n", noiselevel=-1)
-			return True
-
-		if reported_waiting:
-			writemsg(".", noiselevel=-1)
+	while True:
+		# create lockfilename if it doesn't exist yet
+		try:
+			myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR, 0o660)
+		except OSError as e:
+			func_call = "open('%s')" % lockfilename
+			if e.errno == OperationNotPermitted.errno:
+				raise OperationNotPermitted(func_call)
+			elif e.errno == PermissionDenied.errno:
+				raise PermissionDenied(func_call)
+			else:
+				raise
 		else:
-			reported_waiting = True
-			msg = _("\nWaiting on (hardlink) lockfile: (one '.' per 3 seconds)\n"
-				"%(bin_path)s/clean_locks can fix stuck locks.\n"
-				"Lockfile: %(lockfilename)s\n") % \
-				{"bin_path": PORTAGE_BIN_PATH, "lockfilename": lockfilename}
-			writemsg(msg, noiselevel=-1)
-		time.sleep(3)
-	
-	os.unlink(myhardlock)
-	return False
+			myfd_st = None
+			try:
+				myfd_st = os.fstat(myfd)
+				if not preexisting:
+					# Don't chown the file if it is preexisting, since we
+					# want to preserve existing permissions in that case.
+					if myfd_st.st_gid != portage_gid:
+						os.fchown(myfd, -1, portage_gid)
+			except OSError as e:
+				if e.errno not in (errno.ENOENT, errno.ESTALE):
+					writemsg("%s: fchown('%s', -1, %d)\n" % \
+						(e, lockfilename, portage_gid), noiselevel=-1)
+					writemsg(_("Cannot chown a lockfile: '%s'\n") % \
+						lockfilename, noiselevel=-1)
+					writemsg(_("Group IDs of current user: %s\n") % \
+						" ".join(str(n) for n in os.getgroups()),
+						noiselevel=-1)
+				else:
+					# another process has removed the file, so we'll have
+					# to create it again
+					continue
+			finally:
+				os.close(myfd)
+
+			# If fstat shows more than one hardlink, then it's extremely
+			# unlikely that the following link call will result in a lock,
+			# so optimize away the wasteful link call and sleep or raise
+			# TryAgain.
+			if myfd_st is not None and myfd_st.st_nlink < 2:
+				try:
+					os.link(lockfilename, myhardlock)
+				except OSError as e:
+					func_call = "link('%s', '%s')" % (lockfilename, myhardlock)
+					if e.errno == OperationNotPermitted.errno:
+						raise OperationNotPermitted(func_call)
+					elif e.errno == PermissionDenied.errno:
+						raise PermissionDenied(func_call)
+					elif e.errno in (errno.ESTALE, errno.ENOENT):
+						# another process has removed the file, so we'll have
+						# to create it again
+						continue
+					else:
+						raise
+				else:
+					if hardlink_is_mine(myhardlock, lockfilename):
+						if out is not None:
+							out.eend(os.EX_OK)
+						break
+
+					try:
+						os.unlink(myhardlock)
+					except OSError as e:
+						# This should not happen, since the file name of
+						# myhardlock is unique to our host and PID,
+						# and the above link() call succeeded.
+						if e.errno not in (errno.ENOENT, errno.ESTALE):
+							raise
+						raise FileNotFound(myhardlock)
+
+		if flags & os.O_NONBLOCK:
+			raise TryAgain(lockfilename)
+
+		if out is None and not _quiet:
+			out = portage.output.EOutput()
+		if out is not None and not displayed_waiting_msg:
+			displayed_waiting_msg = True
+			if waiting_msg is None:
+				waiting_msg = _("waiting for lock on %s\n") % lockfilename
+			out.ebegin(waiting_msg)
+
+		time.sleep(_HARDLINK_POLL_LATENCY)
+
+	return True
 
-def unhardlink_lockfile(lockfilename):
+def unhardlink_lockfile(lockfilename, unlinkfile=True):
 	myhardlock = hardlock_name(lockfilename)
-	if hardlink_is_mine(myhardlock, lockfilename):
+	if unlinkfile and hardlink_is_mine(myhardlock, lockfilename):
 		# Make sure not to touch lockfilename unless we really have a lock.
 		try:
 			os.unlink(lockfilename)
@@ -344,7 +468,7 @@ def hardlock_cleanup(path, remove_all_locks=False):
 		if os.path.isfile(path+"/"+x):
 			parts = x.split(".hardlock-")
 			if len(parts) == 2:
-				filename = parts[0]
+				filename = parts[0][1:]
 				hostpid  = parts[1].split("-")
 				host  = "-".join(hostpid[:-1])
 				pid   = hostpid[-1]
@@ -368,7 +492,7 @@ def hardlock_cleanup(path, remove_all_locks=False):
 				 remove_all_locks:
 				for y in mylist[x]:
 					for z in mylist[x][y]:
-						filename = path+"/"+x+".hardlock-"+y+"-"+z
+						filename = path+"/."+x+".hardlock-"+y+"-"+z
 						if filename == mylockname:
 							continue
 						try:

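The rewritten hardlink path above drops the old 4-hour polling timeout in favor of an indefinite wait (or an immediate TryAgain when flags contains os.O_NONBLOCK). A standalone, simplified sketch of the link-count trick it relies on -- stdlib only, without the retry loop or portage's error translation:

    import os

    def try_hardlink_lock(lockfilename, myhardlock):
        """One NFS-safe lock attempt; returns True if we won the lock."""
        fd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
        os.close(fd)
        try:
            os.link(lockfilename, myhardlock)  # atomic even over NFS
        except OSError:
            return False  # lost the race, or the lockfile vanished
        # We hold the lock iff the shared lockfile now has exactly two
        # names: its own and our private per-host-per-PID hardlock.
        return os.stat(myhardlock).st_nlink == 2
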
diff --git a/portage_with_autodep/pym/portage/locks.pyo b/portage_with_autodep/pym/portage/locks.pyo
new file mode 100644
index 0000000..9c90a2f
Binary files /dev/null and b/portage_with_autodep/pym/portage/locks.pyo differ

diff --git a/portage_with_autodep/pym/portage/mail.py b/portage_with_autodep/pym/portage/mail.py
index 17dfcaf..3fcadd2 100644
--- a/portage_with_autodep/pym/portage/mail.py
+++ b/portage_with_autodep/pym/portage/mail.py
@@ -40,8 +40,7 @@ else:
 def TextMessage(_text):
 	from email.mime.text import MIMEText
 	mimetext = MIMEText(_text)
-	if sys.hexversion >= 0x3000000:
-		mimetext.set_charset("UTF-8")
+	mimetext.set_charset("UTF-8")
 	return mimetext
 
 def create_message(sender, recipient, subject, body, attachments=None):

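With the Python-3-only guard removed above, the UTF-8 charset is now set on every MIMEText. A quick stdlib illustration of what set_charset() does to the generated header:

    from email.mime.text import MIMEText

    msg = MIMEText("body text")
    msg.set_charset("UTF-8")
    print(msg["Content-Type"])  # text/plain; charset="utf-8"
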
diff --git a/portage_with_autodep/pym/portage/mail.pyo b/portage_with_autodep/pym/portage/mail.pyo
new file mode 100644
index 0000000..bc3a76d
Binary files /dev/null and b/portage_with_autodep/pym/portage/mail.pyo differ

diff --git a/portage_with_autodep/pym/portage/manifest.py b/portage_with_autodep/pym/portage/manifest.py
index 13efab7..90324ee 100644
--- a/portage_with_autodep/pym/portage/manifest.py
+++ b/portage_with_autodep/pym/portage/manifest.py
@@ -1,8 +1,10 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import errno
 import io
+import re
+import warnings
 
 import portage
 portage.proxy.lazyimport.lazyimport(globals(),
@@ -17,8 +19,13 @@ from portage import _unicode_encode
 from portage.exception import DigestException, FileNotFound, \
 	InvalidDataType, MissingParameter, PermissionDenied, \
 	PortageException, PortagePackageException
+from portage.const import (MANIFEST1_HASH_FUNCTIONS, MANIFEST2_HASH_DEFAULTS,
+	MANIFEST2_HASH_FUNCTIONS, MANIFEST2_IDENTIFIERS, MANIFEST2_REQUIRED_HASH)
 from portage.localization import _
 
+# Characters prohibited by repoman's file.name check.
+_prohibited_filename_chars_re = re.compile(r'[^a-zA-Z0-9._\-+:]')
+
 class FileNotInManifestException(PortageException):
 	pass
 
@@ -30,10 +37,14 @@ def manifest2AuxfileFilter(filename):
 	for x in mysplit:
 		if x[:1] == '.':
 			return False
+		if _prohibited_filename_chars_re.search(x) is not None:
+			return False
 	return not filename[:7] == 'digest-'
 
 def manifest2MiscfileFilter(filename):
 	filename = filename.strip(os.sep)
+	if _prohibited_filename_chars_re.search(filename) is not None:
+		return False
 	return not (filename in ["CVS", ".svn", "files", "Manifest"] or filename.endswith(".ebuild"))
 
 def guessManifestFileType(filename):
@@ -49,9 +60,15 @@ def guessManifestFileType(filename):
 	else:
 		return "DIST"
 
+def guessThinManifestFileType(filename):
+	ftype = guessManifestFileType(filename)
+	if ftype != "DIST":
+		return None
+	return "DIST"
+
 def parseManifest2(mysplit):
 	myentry = None
-	if len(mysplit) > 4 and mysplit[0] in portage.const.MANIFEST2_IDENTIFIERS:
+	if len(mysplit) > 4 and mysplit[0] in MANIFEST2_IDENTIFIERS:
 		mytype = mysplit[0]
 		myname = mysplit[1]
 		try:
@@ -93,25 +110,33 @@ class Manifest2Entry(ManifestEntry):
 class Manifest(object):
 	parsers = (parseManifest2,)
 	def __init__(self, pkgdir, distdir, fetchlist_dict=None,
-		manifest1_compat=False, from_scratch=False):
-		""" create new Manifest instance for package in pkgdir
-		    and add compability entries for old portage versions if manifest1_compat == True.
+		manifest1_compat=DeprecationWarning, from_scratch=False, thin=False,
+			allow_missing=False, allow_create=True, hashes=None):
+		""" Create new Manifest instance for package in pkgdir.
 		    Do not parse Manifest file if from_scratch == True (only for internal use)
 			The fetchlist_dict parameter is required only for generation of
-			a Manifest (not needed for parsing and checking sums)."""
+			a Manifest (not needed for parsing and checking sums).
+			If thin is specified, then the manifest carries only info for
+			distfiles."""
+
+		if manifest1_compat is not DeprecationWarning:
+			warnings.warn("The manifest1_compat parameter of the "
+				"portage.manifest.Manifest constructor is deprecated.",
+				DeprecationWarning, stacklevel=2)
+
 		self.pkgdir = _unicode_decode(pkgdir).rstrip(os.sep) + os.sep
 		self.fhashdict = {}
 		self.hashes = set()
-		self.hashes.update(portage.const.MANIFEST2_HASH_FUNCTIONS)
-		if manifest1_compat:
-			raise NotImplementedError("manifest1 support has been removed")
+
+		if hashes is None:
+			hashes = MANIFEST2_HASH_DEFAULTS
+
+		self.hashes.update(hashes.intersection(MANIFEST2_HASH_FUNCTIONS))
 		self.hashes.difference_update(hashname for hashname in \
 			list(self.hashes) if hashname not in hashfunc_map)
 		self.hashes.add("size")
-		if manifest1_compat:
-			raise NotImplementedError("manifest1 support has been removed")
-		self.hashes.add(portage.const.MANIFEST2_REQUIRED_HASH)
-		for t in portage.const.MANIFEST2_IDENTIFIERS:
+		self.hashes.add(MANIFEST2_REQUIRED_HASH)
+		for t in MANIFEST2_IDENTIFIERS:
 			self.fhashdict[t] = {}
 		if not from_scratch:
 			self._read()
@@ -120,7 +145,13 @@ class Manifest(object):
 		else:
 			self.fetchlist_dict = {}
 		self.distdir = distdir
-		self.guessType = guessManifestFileType
+		self.thin = thin
+		if thin:
+			self.guessType = guessThinManifestFileType
+		else:
+			self.guessType = guessManifestFileType
+		self.allow_missing = allow_missing
+		self.allow_create = allow_create
 
 	def getFullname(self):
 		""" Returns the absolute path to the Manifest file for this instance """
@@ -129,7 +160,7 @@ class Manifest(object):
 	def getDigests(self):
 		""" Compability function for old digest/manifest code, returns dict of filename:{hashfunction:hashvalue} """
 		rval = {}
-		for t in portage.const.MANIFEST2_IDENTIFIERS:
+		for t in MANIFEST2_IDENTIFIERS:
 			rval.update(self.fhashdict[t])
 		return rval
 	
@@ -200,7 +231,7 @@ class Manifest(object):
 		return myhashdict
 
 	def _createManifestEntries(self):
-		valid_hashes = set(portage.const.MANIFEST2_HASH_FUNCTIONS)
+		valid_hashes = set(MANIFEST2_HASH_FUNCTIONS)
 		valid_hashes.add('size')
 		mytypes = list(self.fhashdict)
 		mytypes.sort()
@@ -218,16 +249,19 @@ class Manifest(object):
 	def checkIntegrity(self):
 		for t in self.fhashdict:
 			for f in self.fhashdict[t]:
-				if portage.const.MANIFEST2_REQUIRED_HASH not in self.fhashdict[t][f]:
-					raise MissingParameter(_("Missing %s checksum: %s %s") % (portage.const.MANIFEST2_REQUIRED_HASH, t, f))
+				if MANIFEST2_REQUIRED_HASH not in self.fhashdict[t][f]:
+					raise MissingParameter(_("Missing %s checksum: %s %s") %
+						(MANIFEST2_REQUIRED_HASH, t, f))
 
 	def write(self, sign=False, force=False):
 		""" Write Manifest instance to disk, optionally signing it """
+		if not self.allow_create:
+			return
 		self.checkIntegrity()
 		try:
 			myentries = list(self._createManifestEntries())
 			update_manifest = True
-			if not force:
+			if myentries and not force:
 				try:
 					f = io.open(_unicode_encode(self.getFullname(),
 						encoding=_encodings['fs'], errors='strict'),
@@ -246,9 +280,24 @@ class Manifest(object):
 						pass
 					else:
 						raise
+
 			if update_manifest:
-				write_atomic(self.getFullname(),
-					"".join("%s\n" % str(myentry) for myentry in myentries))
+				if myentries or not (self.thin or self.allow_missing):
+					# If myentries is empty, don't write an empty manifest
+					# when thin or allow_missing is enabled. Except for
+					# thin manifests with no DIST entries, myentries is
+					# non-empty for all currently known use cases.
+					write_atomic(self.getFullname(), "".join("%s\n" %
+						str(myentry) for myentry in myentries))
+				else:
+					# With thin manifest, there's no need to have
+					# a Manifest file if there are no DIST entries.
+					try:
+						os.unlink(self.getFullname())
+					except OSError as e:
+						if e.errno != errno.ENOENT:
+							raise
+
 			if sign:
 				self.sign()
 		except (IOError, OSError) as e:
@@ -270,14 +319,14 @@ class Manifest(object):
 			fname = os.path.join("files", fname)
 		if not os.path.exists(self.pkgdir+fname) and not ignoreMissing:
 			raise FileNotFound(fname)
-		if not ftype in portage.const.MANIFEST2_IDENTIFIERS:
+		if not ftype in MANIFEST2_IDENTIFIERS:
 			raise InvalidDataType(ftype)
 		if ftype == "AUX" and fname.startswith("files"):
 			fname = fname[6:]
 		self.fhashdict[ftype][fname] = {}
 		if hashdict != None:
 			self.fhashdict[ftype][fname].update(hashdict)
-		if not portage.const.MANIFEST2_REQUIRED_HASH in self.fhashdict[ftype][fname]:
+		if not MANIFEST2_REQUIRED_HASH in self.fhashdict[ftype][fname]:
 			self.updateFileHashes(ftype, fname, checkExisting=False, ignoreMissing=ignoreMissing)
 	
 	def removeFile(self, ftype, fname):
@@ -290,7 +339,7 @@ class Manifest(object):
 	
 	def findFile(self, fname):
 		""" Return entrytype of the given file if present in Manifest or None if not present """
-		for t in portage.const.MANIFEST2_IDENTIFIERS:
+		for t in MANIFEST2_IDENTIFIERS:
 			if fname in self.fhashdict[t]:
 				return t
 		return None
@@ -305,6 +354,8 @@ class Manifest(object):
 		distfiles to raise a FileNotFound exception for (if no file or existing
 		checksums are available), and defaults to all distfiles when not
 		specified."""
+		if not self.allow_create:
+			return
 		if checkExisting:
 			self.checkAllHashes()
 		if assumeDistHashesSometimes or assumeDistHashesAlways:
@@ -313,13 +364,88 @@ class Manifest(object):
 			distfilehashes = {}
 		self.__init__(self.pkgdir, self.distdir,
 			fetchlist_dict=self.fetchlist_dict, from_scratch=True,
-			manifest1_compat=False)
-		cpvlist = []
+			thin=self.thin, allow_missing=self.allow_missing,
+			allow_create=self.allow_create, hashes=self.hashes)
 		pn = os.path.basename(self.pkgdir.rstrip(os.path.sep))
 		cat = self._pkgdir_category()
 
 		pkgdir = self.pkgdir
+		if self.thin:
+			cpvlist = self._update_thin_pkgdir(cat, pn, pkgdir)
+		else:
+			cpvlist = self._update_thick_pkgdir(cat, pn, pkgdir)
+
+		distlist = set()
+		for cpv in cpvlist:
+			distlist.update(self._getCpvDistfiles(cpv))
+
+		if requiredDistfiles is None:
+			# This allows us to force removal of stale digests for the
+			# ebuild --force digest option (no distfiles are required).
+			requiredDistfiles = set()
+		elif len(requiredDistfiles) == 0:
+			# repoman passes in an empty list, which implies that all distfiles
+			# are required.
+			requiredDistfiles = distlist.copy()
+		required_hash_types = set()
+		required_hash_types.add("size")
+		required_hash_types.add(MANIFEST2_REQUIRED_HASH)
+		for f in distlist:
+			fname = os.path.join(self.distdir, f)
+			mystat = None
+			try:
+				mystat = os.stat(fname)
+			except OSError:
+				pass
+			if f in distfilehashes and \
+				not required_hash_types.difference(distfilehashes[f]) and \
+				((assumeDistHashesSometimes and mystat is None) or \
+				(assumeDistHashesAlways and mystat is None) or \
+				(assumeDistHashesAlways and mystat is not None and \
+				set(distfilehashes[f]) == set(self.hashes) and \
+				distfilehashes[f]["size"] == mystat.st_size)):
+				self.fhashdict["DIST"][f] = distfilehashes[f]
+			else:
+				try:
+					self.fhashdict["DIST"][f] = perform_multiple_checksums(fname, self.hashes)
+				except FileNotFound:
+					if f in requiredDistfiles:
+						raise
 
+	def _is_cpv(self, cat, pn, filename):
+		if not filename.endswith(".ebuild"):
+			return None
+		pf = filename[:-7]
+		ps = portage.versions._pkgsplit(pf)
+		cpv = "%s/%s" % (cat, pf)
+		if not ps:
+			raise PortagePackageException(
+				_("Invalid package name: '%s'") % cpv)
+		if ps[0] != pn:
+			raise PortagePackageException(
+				_("Package name does not "
+				"match directory name: '%s'") % cpv)
+		return cpv
+
+	def _update_thin_pkgdir(self, cat, pn, pkgdir):
+		for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(pkgdir):
+			break
+		cpvlist = []
+		for f in pkgdir_files:
+			try:
+				f = _unicode_decode(f,
+					encoding=_encodings['fs'], errors='strict')
+			except UnicodeDecodeError:
+				continue
+			if f[:1] == '.':
+				continue
+			pf = self._is_cpv(cat, pn, f)
+			if pf is not None:
+				cpvlist.append(pf)
+		return cpvlist
+
+	def _update_thick_pkgdir(self, cat, pn, pkgdir):
+		cpvlist = []
 		for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(pkgdir):
 			break
 		for f in pkgdir_files:
@@ -330,21 +456,10 @@ class Manifest(object):
 				continue
 			if f[:1] == ".":
 				continue
-			pf = None
-			if f[-7:] == '.ebuild':
-				pf = f[:-7]
+			pf = self._is_cpv(cat, pn, f)
 			if pf is not None:
 				mytype = "EBUILD"
-				ps = portage.versions._pkgsplit(pf)
-				cpv = "%s/%s" % (cat, pf)
-				if not ps:
-					raise PortagePackageException(
-						_("Invalid package name: '%s'") % cpv)
-				if ps[0] != pn:
-					raise PortagePackageException(
-						_("Package name does not "
-						"match directory name: '%s'") % cpv)
-				cpvlist.append(cpv)
+				cpvlist.append(pf)
 			elif manifest2MiscfileFilter(f):
 				mytype = "MISC"
 			else:
@@ -368,41 +483,7 @@ class Manifest(object):
 				continue
 			self.fhashdict["AUX"][f] = perform_multiple_checksums(
 				os.path.join(self.pkgdir, "files", f.lstrip(os.sep)), self.hashes)
-		distlist = set()
-		for cpv in cpvlist:
-			distlist.update(self._getCpvDistfiles(cpv))
-		if requiredDistfiles is None:
-			# This allows us to force removal of stale digests for the
-			# ebuild --force digest option (no distfiles are required).
-			requiredDistfiles = set()
-		elif len(requiredDistfiles) == 0:
-			# repoman passes in an empty list, which implies that all distfiles
-			# are required.
-			requiredDistfiles = distlist.copy()
-		required_hash_types = set()
-		required_hash_types.add("size")
-		required_hash_types.add(portage.const.MANIFEST2_REQUIRED_HASH)
-		for f in distlist:
-			fname = os.path.join(self.distdir, f)
-			mystat = None
-			try:
-				mystat = os.stat(fname)
-			except OSError:
-				pass
-			if f in distfilehashes and \
-				not required_hash_types.difference(distfilehashes[f]) and \
-				((assumeDistHashesSometimes and mystat is None) or \
-				(assumeDistHashesAlways and mystat is None) or \
-				(assumeDistHashesAlways and mystat is not None and \
-				len(distfilehashes[f]) == len(self.hashes) and \
-				distfilehashes[f]["size"] == mystat.st_size)):
-				self.fhashdict["DIST"][f] = distfilehashes[f]
-			else:
-				try:
-					self.fhashdict["DIST"][f] = perform_multiple_checksums(fname, self.hashes)
-				except FileNotFound:
-					if f in requiredDistfiles:
-						raise
+		return cpvlist
 
 	def _pkgdir_category(self):
 		return self.pkgdir.rstrip(os.sep).split(os.sep)[-2]
@@ -417,7 +498,7 @@ class Manifest(object):
 		return absname	
 	
 	def checkAllHashes(self, ignoreMissingFiles=False):
-		for t in portage.const.MANIFEST2_IDENTIFIERS:
+		for t in MANIFEST2_IDENTIFIERS:
 			self.checkTypeHashes(t, ignoreMissingFiles=ignoreMissingFiles)
 	
 	def checkTypeHashes(self, idtype, ignoreMissingFiles=False):
@@ -481,7 +562,7 @@ class Manifest(object):
 	
 	def updateAllHashes(self, checkExisting=False, ignoreMissingFiles=True):
 		""" Regenerate all hashes for all files in this Manifest. """
-		for idtype in portage.const.MANIFEST2_IDENTIFIERS:
+		for idtype in MANIFEST2_IDENTIFIERS:
 			self.updateTypeHashes(idtype, checkExisting=checkExisting,
 				ignoreMissingFiles=ignoreMissingFiles)
 
@@ -526,9 +607,11 @@ class Manifest(object):
 		myfile.close()
 		for l in lines:
 			mysplit = l.split()
-			if len(mysplit) == 4 and mysplit[0] in portage.const.MANIFEST1_HASH_FUNCTIONS and not 1 in rVal:
+			if len(mysplit) == 4 and mysplit[0] in MANIFEST1_HASH_FUNCTIONS \
+				and 1 not in rVal:
 				rVal.append(1)
-			elif len(mysplit) > 4 and mysplit[0] in portage.const.MANIFEST2_IDENTIFIERS and ((len(mysplit) - 3) % 2) == 0 and not 2 in rVal:
+			elif len(mysplit) > 4 and mysplit[0] in MANIFEST2_IDENTIFIERS \
+				and ((len(mysplit) - 3) % 2) == 0 and 2 not in rVal:
 				rVal.append(2)
 		return rVal
 

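Three new constructor knobs drive the behavior above: thin keeps only DIST entries, allow_missing tolerates absent distfiles, and allow_create=False turns write() and create() into no-ops. A hedged usage sketch (the paths here are hypothetical):

    from portage.manifest import Manifest

    m = Manifest("/usr/portage/app-misc/foo", "/usr/portage/distfiles",
        thin=True, allow_missing=True, allow_create=False)
    m.write()  # returns immediately: allow_create is False
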
diff --git a/portage_with_autodep/pym/portage/manifest.pyo b/portage_with_autodep/pym/portage/manifest.pyo
new file mode 100644
index 0000000..d482bbd
Binary files /dev/null and b/portage_with_autodep/pym/portage/manifest.pyo differ

diff --git a/portage_with_autodep/pym/portage/news.py b/portage_with_autodep/pym/portage/news.py
index 866e5b0..bbd9325 100644
--- a/portage_with_autodep/pym/portage/news.py
+++ b/portage_with_autodep/pym/portage/news.py
@@ -2,24 +2,30 @@
 # Copyright 2006-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
+from __future__ import print_function
+
 __all__ = ["NewsManager", "NewsItem", "DisplayRestriction",
 	"DisplayProfileRestriction", "DisplayKeywordRestriction",
-	"DisplayInstalledRestriction"]
+	"DisplayInstalledRestriction",
+	"count_unread_news", "display_news_notifications"]
 
 import io
 import logging
 import os as _os
 import re
+from portage import OrderedDict
 from portage import os
 from portage import _encodings
 from portage import _unicode_decode
 from portage import _unicode_encode
+from portage.const import NEWS_LIB_PATH
 from portage.util import apply_secpass_permissions, ensure_dirs, \
 	grabfile, normalize_path, write_atomic, writemsg_level
 from portage.data import portage_gid
 from portage.dep import isvalidatom
 from portage.localization import _
 from portage.locks import lockfile, unlockfile
+from portage.output import colorize
 from portage.exception import InvalidLocation, OperationNotPermitted, \
 	PermissionDenied
 
@@ -39,7 +45,6 @@ class NewsManager(object):
 	def __init__(self, portdb, vardb, news_path, unread_path, language_id='en'):
 		self.news_path = news_path
 		self.unread_path = unread_path
-		self.target_root = vardb.root
 		self.language_id = language_id
 		self.config = vardb.settings
 		self.vdb = vardb
@@ -114,7 +119,6 @@ class NewsManager(object):
 			except PermissionDenied:
 				return
 
-			updates = []
 			for itemid in news:
 				try:
 					itemid = _unicode_decode(itemid,
@@ -250,10 +254,11 @@ class NewsItem(object):
 		return self._valid
 
 	def parse(self):
-		lines = io.open(_unicode_encode(self.path,
+		f = io.open(_unicode_encode(self.path,
 			encoding=_encodings['fs'], errors='strict'),
-			mode='r', encoding=_encodings['content'], errors='replace'
-			).readlines()
+			mode='r', encoding=_encodings['content'], errors='replace')
+		lines = f.readlines()
+		f.close()
 		self.restrictions = {}
 		invalids = []
 		for i, line in enumerate(lines):
@@ -349,3 +354,67 @@ class DisplayInstalledRestriction(DisplayRestriction):
 		if vdb.match(self.atom):
 			return True
 		return False
+
+def count_unread_news(portdb, vardb, repos=None, update=True):
+	"""
+	Returns a dictionary mapping repos to integer counts of unread news items.
+	By default, this will scan all repos and check for new items that have
+	appeared since the last scan.
+
+	@param portdb: a portage tree database
+	@type portdb: portdbapi
+	@param vardb: an installed package database
+	@type vardb: vardbapi
+	@param repos: names of repos to scan (None means to scan all available repos)
+	@type repos: list or None
+	@param update: check for new items (default is True)
+	@type update: boolean
+	@rtype: dict
+	@return: dictionary mapping repos to integer counts of unread news items
+	"""
+
+	NEWS_PATH = os.path.join("metadata", "news")
+	UNREAD_PATH = os.path.join(vardb.settings['EROOT'], NEWS_LIB_PATH, "news")
+	news_counts = OrderedDict()
+	if repos is None:
+		repos = portdb.getRepositories()
+
+	permission_msgs = set()
+	for repo in repos:
+		try:
+			manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
+			count = manager.getUnreadItems(repo, update=update)
+		except PermissionDenied as e:
+			# NOTE: The NewsManager typically handles permission errors by
+			# returning silently, so PermissionDenied won't necessarily be
+			# raised even if we do trigger a permission error above.
+			msg = _unicode_decode("Permission denied: '%s'\n") % (e,)
+			if msg in permission_msgs:
+				pass
+			else:
+				permission_msgs.add(msg)
+				writemsg_level(msg, level=logging.ERROR, noiselevel=-1)
+			news_counts[repo] = 0
+		else:
+			news_counts[repo] = count
+
+	return news_counts
+
+def display_news_notifications(news_counts):
+	"""
+	Display a notification for unread news items, using a dictionary mapping
+	repos to integer counts, like that returned from count_unread_news().
+	"""
+	newsReaderDisplay = False
+	for repo, count in news_counts.items():
+		if count > 0:
+			if not newsReaderDisplay:
+				newsReaderDisplay = True
+				print()
+			print(colorize("WARN", " * IMPORTANT:"), end=' ')
+			print("%s news items need reading for repository '%s'." % (count, repo))
+
+	if newsReaderDisplay:
+		print(colorize("WARN", " *"), end=' ')
+		print("Use " + colorize("GOOD", "eselect news") + " to read news items.")
+		print()

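count_unread_news() and display_news_notifications() are meant to be used as a pair. A minimal sketch, assuming portdb/vardb handles obtained the usual way (e.g. portage.db[portage.root]["porttree"].dbapi and ["vartree"].dbapi):

    from portage.news import count_unread_news, display_news_notifications

    def notify_unread(portdb, vardb):
        counts = count_unread_news(portdb, vardb)  # {repo: unread count}
        display_news_notifications(counts)
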
diff --git a/portage_with_autodep/pym/portage/news.pyo b/portage_with_autodep/pym/portage/news.pyo
new file mode 100644
index 0000000..bbd247c
Binary files /dev/null and b/portage_with_autodep/pym/portage/news.pyo differ

diff --git a/portage_with_autodep/pym/portage/output.py b/portage_with_autodep/pym/portage/output.py
index 0e8245f..98bec81 100644
--- a/portage_with_autodep/pym/portage/output.py
+++ b/portage_with_autodep/pym/portage/output.py
@@ -162,11 +162,14 @@ def _parse_color_map(config_root='/', onerror=None):
 		if token[0] in quotes and token[0] == token[-1]:
 			token = token[1:-1]
 		return token
+
+	f = None
 	try:
-		lineno=0
-		for line in io.open(_unicode_encode(myfile,
+		f = io.open(_unicode_encode(myfile,
 			encoding=_encodings['fs'], errors='strict'),
-			mode='r', encoding=_encodings['content'], errors='replace'):
+			mode='r', encoding=_encodings['content'], errors='replace')
+		lineno = 0
+		for line in f:
 			lineno += 1
 
 			commenter_pos = line.find("#")
@@ -226,6 +229,9 @@ def _parse_color_map(config_root='/', onerror=None):
 		elif e.errno == errno.EACCES:
 			raise PermissionDenied(myfile)
 		raise
+	finally:
+		if f is not None:
+			f.close()
 
 def nc_len(mystr):
 	tmp = re.sub(esc_seq + "^m]+m", "", mystr);
@@ -319,6 +325,12 @@ def style_to_ansi_code(style):
 		ret += codes.get(attr_name, attr_name)
 	return ret
 
+def colormap():
+	mycolors = []
+	for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET", "NORMAL"):
+		mycolors.append("%s=$'%s'" % (c, style_to_ansi_code(c)))
+	return "\n".join(mycolors)
+
 def colorize(color_key, text):
 	global havecolor
 	if havecolor:
@@ -335,12 +347,12 @@ compat_functions_colors = ["bold","white","teal","turquoise","darkteal",
 	"fuchsia","purple","blue","darkblue","green","darkgreen","yellow",
 	"brown","darkyellow","red","darkred"]
 
-def create_color_func(color_key):
-	def derived_func(*args):
-		newargs = list(args)
-		newargs.insert(0, color_key)
-		return colorize(*newargs)
-	return derived_func
+class create_color_func(object):
+	__slots__ = ("_color_key",)
+	def __init__(self, color_key):
+		self._color_key = color_key
+	def __call__(self, text):
+		return colorize(self._color_key, text)
 
 for c in compat_functions_colors:
 	globals()[c] = create_color_func(c)
@@ -416,12 +428,14 @@ class StyleWriter(formatter.DumbWriter):
 def get_term_size():
 	"""
 	Get the number of lines and columns of the tty that is connected to
-	stdout.  Returns a tuple of (lines, columns) or (-1, -1) if an error
+	stdout.  Returns a tuple of (lines, columns) or (0, 0) if an error
 	occurs. The curses module is used if available, otherwise the output of
-	`stty size` is parsed.
+	`stty size` is parsed. The lines and columns values are guaranteed to be
+	greater than or equal to zero, since a negative COLUMNS variable is
+	known to prevent some commands from working (see bug #394091).
 	"""
 	if not sys.stdout.isatty():
-		return -1, -1
+		return (0, 0)
 	try:
 		import curses
 		try:
@@ -436,10 +450,13 @@ def get_term_size():
 		out = out.split()
 		if len(out) == 2:
 			try:
-				return int(out[0]), int(out[1])
+				val = (int(out[0]), int(out[1]))
 			except ValueError:
 				pass
-	return -1, -1
+			else:
+				if val[0] >= 0 and val[1] >= 0:
+					return val
+	return (0, 0)
 
 def set_term_size(lines, columns, fd):
 	"""

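Since get_term_size() now returns (0, 0) instead of (-1, -1) on failure, callers can treat a zero width as "no usable tty" without clamping negative values (bug #394091). A minimal caller sketch:

    from portage.output import colorize, get_term_size

    lines, columns = get_term_size()
    width = columns or 80  # zero now signals "no usable tty size"
    print(colorize("WARN", "* terminal width: %d" % width))
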
diff --git a/portage_with_autodep/pym/portage/output.pyo b/portage_with_autodep/pym/portage/output.pyo
new file mode 100644
index 0000000..993a2de
Binary files /dev/null and b/portage_with_autodep/pym/portage/output.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/__init__.pyo b/portage_with_autodep/pym/portage/package/__init__.pyo
new file mode 100644
index 0000000..9d8f30c
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/__init__.pyo b/portage_with_autodep/pym/portage/package/ebuild/__init__.pyo
new file mode 100644
index 0000000..927b4bc
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py
index cd22554..0c613ce 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = (
@@ -11,7 +11,7 @@ from portage.dep import ExtendedAtomDict, _repo_separator, _slot_separator
 from portage.localization import _
 from portage.package.ebuild._config.helper import ordered_by_atom_specificity
 from portage.util import grabdict_package, stack_lists, writemsg
-from portage.versions import cpv_getkey
+from portage.versions import cpv_getkey, _pkg_str
 
 class KeywordsManager(object):
 	"""Manager class to handle keywords processing and validation"""
@@ -20,7 +20,8 @@ class KeywordsManager(object):
 				global_accept_keywords=""):
 		self._pkeywords_list = []
 		rawpkeywords = [grabdict_package(
-			os.path.join(x, "package.keywords"), recursive=1,
+			os.path.join(x.location, "package.keywords"),
+			recursive=x.portage1_directories,
 			verify_eapi=True) \
 			for x in profiles]
 		for pkeyworddict in rawpkeywords:
@@ -35,7 +36,8 @@ class KeywordsManager(object):
 
 		self._p_accept_keywords = []
 		raw_p_accept_keywords = [grabdict_package(
-			os.path.join(x, "package.accept_keywords"), recursive=1,
+			os.path.join(x.location, "package.accept_keywords"),
+			recursive=x.portage1_directories,
 			verify_eapi=True) \
 			for x in profiles]
 		for d in raw_p_accept_keywords:
@@ -75,10 +77,11 @@ class KeywordsManager(object):
 
 
 	def getKeywords(self, cpv, slot, keywords, repo):
-		cp = cpv_getkey(cpv)
-		pkg = "".join((cpv, _slot_separator, slot))
-		if repo and repo != Package.UNKNOWN_REPO:
-			pkg = "".join((pkg, _repo_separator, repo))
+		if not hasattr(cpv, 'slot'):
+			pkg = _pkg_str(cpv, slot=slot, repo=repo)
+		else:
+			pkg = cpv
+		cp = pkg.cp
 		keywords = [[x for x in keywords.split() if x != "-*"]]
 		for pkeywords_dict in self._pkeywords_list:
 			cpdict = pkeywords_dict.get(cp)
@@ -206,12 +209,16 @@ class KeywordsManager(object):
 		hasstable = False
 		hastesting = False
 		for gp in mygroups:
-			if gp == "*" or (gp == "-*" and len(mygroups) == 1):
-				writemsg(_("--- WARNING: Package '%(cpv)s' uses"
-					" '%(keyword)s' keyword.\n") % {"cpv": cpv, "keyword": gp},
-					 noiselevel=-1)
-				if gp == "*":
-					match = True
+			if gp == "*":
+				match = True
+				break
+			elif gp == "~*":
+				hastesting = True
+				for x in pgroups:
+					if x[:1] == "~":
+						match = True
+						break
+				if match:
 					break
 			elif gp in pgroups:
 				match = True
@@ -254,18 +261,19 @@ class KeywordsManager(object):
 		"""
 
 		pgroups = global_accept_keywords.split()
+		if not hasattr(cpv, 'slot'):
+			cpv = _pkg_str(cpv, slot=slot, repo=repo)
 		cp = cpv_getkey(cpv)
 
 		unmaskgroups = []
 		if self._p_accept_keywords:
-			cpv_slot = "%s:%s" % (cpv, slot)
 			accept_keywords_defaults = tuple('~' + keyword for keyword in \
 				pgroups if keyword[:1] not in "~-")
 			for d in self._p_accept_keywords:
 				cpdict = d.get(cp)
 				if cpdict:
 					pkg_accept_keywords = \
-						ordered_by_atom_specificity(cpdict, cpv_slot)
+						ordered_by_atom_specificity(cpdict, cpv)
 					if pkg_accept_keywords:
 						for x in pkg_accept_keywords:
 							if not x:
@@ -274,9 +282,8 @@ class KeywordsManager(object):
 
 		pkgdict = self.pkeywordsdict.get(cp)
 		if pkgdict:
-			cpv_slot = "%s:%s" % (cpv, slot)
 			pkg_accept_keywords = \
-				ordered_by_atom_specificity(pkgdict, cpv_slot, repo=repo)
+				ordered_by_atom_specificity(pkgdict, cpv)
 			if pkg_accept_keywords:
 				for x in pkg_accept_keywords:
 					unmaskgroups.extend(x)

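The loop above adds support for a "~*" entry in a package's KEYWORDS: instead of the old bare-wildcard warning, it now matches whenever the accepted groups (pgroups) contain any testing keyword. A standalone sketch of that test:

    def testing_wildcard_matches(accepted_groups):
        # "~*" in KEYWORDS is satisfied as soon as the user accepts
        # any testing (~arch) keyword at all.
        return any(x[:1] == "~" for x in accepted_groups)

    assert testing_wildcard_matches(["amd64", "~amd64"])
    assert not testing_wildcard_matches(["amd64", "x86"])
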
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.pyo
new file mode 100644
index 0000000..15043f0
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_config/KeywordsManager.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py
index effd55b..f76e7e2 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.py
@@ -1,4 +1,4 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = (
@@ -10,7 +10,7 @@ from portage.dep import ExtendedAtomDict, use_reduce
 from portage.exception import InvalidDependString
 from portage.localization import _
 from portage.util import grabdict, grabdict_package, writemsg
-from portage.versions import cpv_getkey
+from portage.versions import cpv_getkey, _pkg_str
 
 from portage.package.ebuild._config.helper import ordered_by_atom_specificity
 
@@ -119,8 +119,9 @@ class LicenseManager(object):
 		cp = cpv_getkey(cpv)
 		cpdict = self._plicensedict.get(cp)
 		if cpdict:
-			cpv_slot = "%s:%s" % (cpv, slot)
-			plicence_list = ordered_by_atom_specificity(cpdict, cpv_slot, repo)
+			if not hasattr(cpv, 'slot'):
+				cpv = _pkg_str(cpv, slot=slot, repo=repo)
+			plicence_list = ordered_by_atom_specificity(cpdict, cpv)
 			if plicence_list:
 				accept_license = list(self._accept_license)
 				for x in plicence_list:

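ordered_by_atom_specificity() is now handed a _pkg_str instead of a hand-built "cpv:slot" string, so slot and repo travel with the cpv during atom matching. A hedged sketch of that internal helper (a str subclass, not public API):

    from portage.versions import _pkg_str

    pkg = _pkg_str("app-misc/foo-1.0", slot="0", repo="gentoo")
    print(pkg, pkg.cp, pkg.slot, pkg.repo)  # matching reads these attrs
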
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.pyo
new file mode 100644
index 0000000..4a38298
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_config/LicenseManager.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py
index c2b115b..f7a1177 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.py
@@ -5,7 +5,11 @@ __all__ = (
 	'LocationsManager',
 )
 
+import collections
 import io
+import warnings
+
+import portage
 from portage import os, eapi_is_supported, _encodings, _unicode_encode
 from portage.const import CUSTOM_PROFILE_PATH, GLOBAL_CONFIG_PATH, \
 	PROFILE_PATH, USER_CONFIG_PATH
@@ -13,7 +17,20 @@ from portage.exception import DirectoryNotFound, ParseError
 from portage.localization import _
 from portage.util import ensure_dirs, grabfile, \
 	normalize_path, shlex_split, writemsg
+from portage.repository.config import parse_layout_conf, \
+	_portage1_profiles_allow_directories
+
+
+_PORTAGE1_DIRECTORIES = frozenset([
+	'package.mask', 'package.provided',
+	'package.use', 'package.use.mask', 'package.use.force',
+	'use.mask', 'use.force'])
+
+_profile_node = collections.namedtuple('_profile_node',
+	'location portage1_directories')
 
+_allow_parent_colon = frozenset(
+	["portage-2"])
 
 class LocationsManager(object):
 
@@ -25,9 +42,9 @@ class LocationsManager(object):
 		self.config_root = config_root
 		self.target_root = target_root
 		self._user_config = local_config
-		
+
 		if self.eprefix is None:
-			self.eprefix = ""
+			self.eprefix = portage.const.EPREFIX
 
 		if self.config_root is None:
 			self.config_root = self.eprefix + os.sep
@@ -37,17 +54,33 @@ class LocationsManager(object):
 
 		self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root)
 		self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH)
+		self.config_profile_path = config_profile_path
+
+	def load_profiles(self, repositories, known_repository_paths):
+		known_repository_paths = set(os.path.realpath(x)
+			for x in known_repository_paths)
 
-		if config_profile_path is None:
-			config_profile_path = \
+		known_repos = []
+		for x in known_repository_paths:
+			try:
+				layout_data = {"profile-formats":
+					repositories.get_repo_for_location(x).profile_formats}
+			except KeyError:
+				layout_data = parse_layout_conf(x)[0]
+			# force a trailing '/' for ease of doing startswith checks
+			known_repos.append((x + '/', layout_data))
+		known_repos = tuple(known_repos)
+
+		if self.config_profile_path is None:
+			self.config_profile_path = \
 				os.path.join(self.config_root, PROFILE_PATH)
-			if os.path.isdir(config_profile_path):
-				self.profile_path = config_profile_path
+			if os.path.isdir(self.config_profile_path):
+				self.profile_path = self.config_profile_path
 			else:
-				config_profile_path = \
+				self.config_profile_path = \
 					os.path.join(self.abs_user_config, 'make.profile')
-				if os.path.isdir(config_profile_path):
-					self.profile_path = config_profile_path
+				if os.path.isdir(self.config_profile_path):
+					self.profile_path = self.config_profile_path
 				else:
 					self.profile_path = None
 		else:
@@ -55,19 +88,22 @@ class LocationsManager(object):
 			# here, in order to create an empty profile
 			# for checking dependencies of packages with
 			# empty KEYWORDS.
-			self.profile_path = config_profile_path
+			self.profile_path = self.config_profile_path
 
 
 		# The symlink might not exist or might not be a symlink.
 		self.profiles = []
+		self.profiles_complex = []
 		if self.profile_path:
 			try:
-				self._addProfile(os.path.realpath(self.profile_path))
+				self._addProfile(os.path.realpath(self.profile_path),
+					repositories, known_repos)
 			except ParseError as e:
 				writemsg(_("!!! Unable to parse profile: '%s'\n") % \
 					self.profile_path, noiselevel=-1)
 				writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
 				self.profiles = []
+				self.profiles_complex = []
 
 		if self._user_config and self.profiles:
 			custom_prof = os.path.join(
@@ -75,9 +111,11 @@ class LocationsManager(object):
 			if os.path.exists(custom_prof):
 				self.user_profile_dir = custom_prof
 				self.profiles.append(custom_prof)
+				self.profiles_complex.append(_profile_node(custom_prof, True))
 			del custom_prof
 
 		self.profiles = tuple(self.profiles)
+		self.profiles_complex = tuple(self.profiles_complex)
 
 	def _check_var_directory(self, varname, var):
 		if not os.path.isdir(var):
@@ -86,14 +124,45 @@ class LocationsManager(object):
 				noiselevel=-1)
 			raise DirectoryNotFound(var)
 
-	def _addProfile(self, currentPath):
+	def _addProfile(self, currentPath, repositories, known_repos):
+		current_abs_path = os.path.abspath(currentPath)
+		allow_directories = True
+		allow_parent_colon = True
+		repo_loc = None
+		compat_mode = False
+		intersecting_repos = [x for x in known_repos if current_abs_path.startswith(x[0])]
+		if intersecting_repos:
+			# protect against nested repositories.  Insane configuration, but the longest
+			# path will be the correct one.
+			repo_loc, layout_data = max(intersecting_repos, key=lambda x:len(x[0]))
+			allow_directories = any(x in _portage1_profiles_allow_directories
+				for x in layout_data['profile-formats'])
+			compat_mode = layout_data['profile-formats'] == ('portage-1-compat',)
+			allow_parent_colon = any(x in _allow_parent_colon
+				for x in layout_data['profile-formats'])
+
+		if compat_mode:
+			offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
+			offenders = sorted(x for x in offenders
+				if os.path.isdir(os.path.join(currentPath, x)))
+			if offenders:
+				warnings.warn(_("Profile '%(profile_path)s' in repository "
+					"'%(repo_name)s' is implicitly using 'portage-1' profile format, but "
+					"the repository profiles are not marked as that format.  This will break "
+					"in the future.  Please either convert the following paths "
+					"to files, or add\nprofile-formats = portage-1\nto the "
+					"repositories layout.conf.  Files: '%(files)s'\n")
+					% dict(profile_path=currentPath, repo_name=repo_loc,
+						files=', '.join(offenders)))
+
 		parentsFile = os.path.join(currentPath, "parent")
 		eapi_file = os.path.join(currentPath, "eapi")
+		f = None
 		try:
-			eapi = io.open(_unicode_encode(eapi_file,
+			f = io.open(_unicode_encode(eapi_file,
 				encoding=_encodings['fs'], errors='strict'),
-				mode='r', encoding=_encodings['content'], errors='replace'
-				).readline().strip()
+				mode='r', encoding=_encodings['content'], errors='replace')
+			eapi = f.readline().strip()
 		except IOError:
 			pass
 		else:
@@ -102,21 +171,69 @@ class LocationsManager(object):
 					"Profile contains unsupported "
 					"EAPI '%s': '%s'") % \
 					(eapi, os.path.realpath(eapi_file),))
+		finally:
+			if f is not None:
+				f.close()
 		if os.path.exists(parentsFile):
 			parents = grabfile(parentsFile)
 			if not parents:
 				raise ParseError(
 					_("Empty parent file: '%s'") % parentsFile)
 			for parentPath in parents:
+				abs_parent = parentPath[:1] == os.sep
+				if not abs_parent and allow_parent_colon:
+					parentPath = self._expand_parent_colon(parentsFile,
+						parentPath, repo_loc, repositories)
+
+				# NOTE: This os.path.join() call is intended to ignore
+				# currentPath if parentPath is already absolute.
 				parentPath = normalize_path(os.path.join(
 					currentPath, parentPath))
+
+				if abs_parent or repo_loc is None or \
+					not parentPath.startswith(repo_loc):
+					# It seems that this parent may point outside
+					# of the current repo, so realpath it.
+					parentPath = os.path.realpath(parentPath)
+
 				if os.path.exists(parentPath):
-					self._addProfile(parentPath)
+					self._addProfile(parentPath, repositories, known_repos)
 				else:
 					raise ParseError(
 						_("Parent '%s' not found: '%s'") %  \
 						(parentPath, parentsFile))
+
 		self.profiles.append(currentPath)
+		self.profiles_complex.append(
+			_profile_node(currentPath, allow_directories))
+
+	def _expand_parent_colon(self, parentsFile, parentPath,
+		repo_loc, repositories):
+		colon = parentPath.find(":")
+		if colon == -1:
+			return parentPath
+
+		if colon == 0:
+			if repo_loc is None:
+				raise ParseError(
+					_("Parent '%s' not found: '%s'") %  \
+					(parentPath, parentsFile))
+			else:
+				parentPath = normalize_path(os.path.join(
+					repo_loc, 'profiles', parentPath[colon+1:]))
+		else:
+			p_repo_name = parentPath[:colon]
+			try:
+				p_repo_loc = repositories.get_location_for_name(p_repo_name)
+			except KeyError:
+				raise ParseError(
+					_("Parent '%s' not found: '%s'") %  \
+					(parentPath, parentsFile))
+			else:
+				parentPath = normalize_path(os.path.join(
+					p_repo_loc, 'profiles', parentPath[colon+1:]))
+
+		return parentPath
 
 	def set_root_override(self, root_overwrite=None):
 		# Allow ROOT setting to come from make.conf if it's not overridden

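_expand_parent_colon() implements the new "portage-2" parent syntax: a leading ":" resolves inside the current repo's profiles/ directory, and "reponame:path" resolves inside the named repo. A standalone sketch (repo locations hypothetical):

    import os

    def expand_parent_colon(parent, repo_loc, repo_locations):
        colon = parent.find(":")
        if colon == -1:
            return parent  # plain relative/absolute parent, unchanged
        # ":path" -> current repo; "name:path" -> repo looked up by name
        base = repo_loc if colon == 0 else repo_locations[parent[:colon]]
        return os.path.normpath(
            os.path.join(base, "profiles", parent[colon + 1:]))

    print(expand_parent_colon("gentoo:default/linux/amd64/10.0",
        "/var/repo", {"gentoo": "/usr/portage"}))
    # -> /usr/portage/profiles/default/linux/amd64/10.0
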
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.pyo
new file mode 100644
index 0000000..c64d313
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_config/LocationsManager.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py
index df93e10..bce1152 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.py
@@ -5,9 +5,12 @@ __all__ = (
 	'MaskManager',
 )
 
+import warnings
+
 from portage import os
 from portage.dep import ExtendedAtomDict, match_from_list, _repo_separator, _slot_separator
-from portage.util import append_repo, grabfile_package, stack_lists
+from portage.localization import _
+from portage.util import append_repo, grabfile_package, stack_lists, writemsg
 from portage.versions import cpv_getkey
 from _emerge.Package import Package
 
@@ -32,30 +35,76 @@ class MaskManager(object):
 		# repo may be often referenced by others as the master.
 		pmask_cache = {}
 
-		def grab_pmask(loc):
+		def grab_pmask(loc, repo_config):
 			if loc not in pmask_cache:
-				pmask_cache[loc] = grabfile_package(
-						os.path.join(loc, "profiles", "package.mask"),
-						recursive=1, remember_source_file=True, verify_eapi=True)
+				path = os.path.join(loc, 'profiles', 'package.mask')
+				pmask_cache[loc] = grabfile_package(path,
+						recursive=repo_config.portage1_profiles,
+						remember_source_file=True, verify_eapi=True)
+				if repo_config.portage1_profiles_compat and os.path.isdir(path):
+					warnings.warn(_("Repository '%(repo_name)s' is implicitly using "
+						"'portage-1' profile format in its profiles/package.mask, but "
+						"the repository profiles are not marked as that format.  This will break "
+						"in the future.  Please either convert the following paths "
+						"to files, or add\nprofile-formats = portage-1\nto the "
+						"repositories layout.conf.\n")
+						% dict(repo_name=repo_config.name))
+
 			return pmask_cache[loc]
 
 		repo_pkgmasklines = []
 		for repo in repositories.repos_with_profiles():
 			lines = []
-			repo_lines = grab_pmask(repo.location)
+			repo_lines = grab_pmask(repo.location, repo)
+			removals = frozenset(line[0][1:] for line in repo_lines
+				if line[0][:1] == "-")
+			matched_removals = set()
 			for master in repo.masters:
-				master_lines = grab_pmask(master.location)
+				master_lines = grab_pmask(master.location, master)
+				for line in master_lines:
+					if line[0] in removals:
+						matched_removals.add(line[0])
+				# Since we don't stack masters recursively, there aren't any
+				# atoms earlier in the stack to be matched by negative atoms in
+				# master_lines. Also, repo_lines may contain negative atoms
+				# that are intended to negate atoms from a different master
+				# than the one with which we are currently stacking. Therefore,
+				# we disable warn_for_unmatched_removal here (see bug #386569).
 				lines.append(stack_lists([master_lines, repo_lines], incremental=1,
-					remember_source_file=True, warn_for_unmatched_removal=True,
-					strict_warn_for_unmatched_removal=strict_umatched_removal))
-			if not repo.masters:
+					remember_source_file=True, warn_for_unmatched_removal=False))
+
+			# It's safe to warn for unmatched removal if masters have not
+			# been overridden by the user, which is guaranteed when
+			# user_config is false (when called by repoman).
+			if repo.masters:
+				unmatched_removals = removals.difference(matched_removals)
+				if unmatched_removals and not user_config:
+					source_file = os.path.join(repo.location,
+						"profiles", "package.mask")
+					unmatched_removals = list(unmatched_removals)
+					if len(unmatched_removals) > 3:
+						writemsg(
+							_("--- Unmatched removal atoms in %s: %s and %s more\n") %
+							(source_file,
+							", ".join("-" + x for x in unmatched_removals[:3]),
+							len(unmatched_removals) - 3), noiselevel=-1)
+					else:
+						writemsg(
+							_("--- Unmatched removal atom(s) in %s: %s\n") %
+							(source_file,
+							", ".join("-" + x for x in unmatched_removals)),
+							noiselevel=-1)
+
+			else:
 				lines.append(stack_lists([repo_lines], incremental=1,
-					remember_source_file=True, warn_for_unmatched_removal=True,
+					remember_source_file=True, warn_for_unmatched_removal=not user_config,
 					strict_warn_for_unmatched_removal=strict_umatched_removal))
 			repo_pkgmasklines.extend(append_repo(stack_lists(lines), repo.name, remember_source_file=True))
 
 		repo_pkgunmasklines = []
 		for repo in repositories.repos_with_profiles():
+			if not repo.portage1_profiles:
+				continue
 			repo_lines = grabfile_package(os.path.join(repo.location, "profiles", "package.unmask"), \
 				recursive=1, remember_source_file=True, verify_eapi=True)
 			lines = stack_lists([repo_lines], incremental=1, \
@@ -69,9 +118,14 @@ class MaskManager(object):
 		profile_pkgunmasklines = []
 		for x in profiles:
 			profile_pkgmasklines.append(grabfile_package(
-				os.path.join(x, "package.mask"), recursive=1, remember_source_file=True, verify_eapi=True))
-			profile_pkgunmasklines.append(grabfile_package(
-				os.path.join(x, "package.unmask"), recursive=1, remember_source_file=True, verify_eapi=True))
+				os.path.join(x.location, "package.mask"),
+				recursive=x.portage1_directories,
+				remember_source_file=True, verify_eapi=True))
+			if x.portage1_directories:
+				profile_pkgunmasklines.append(grabfile_package(
+					os.path.join(x.location, "package.unmask"),
+					recursive=x.portage1_directories,
+					remember_source_file=True, verify_eapi=True))
 		profile_pkgmasklines = stack_lists(profile_pkgmasklines, incremental=1, \
 			remember_source_file=True, warn_for_unmatched_removal=True,
 			strict_warn_for_unmatched_removal=strict_umatched_removal)

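The bookkeeping above tracks "-atom" removal lines from a repo's package.mask against the atoms inherited from each master, and only warns about leftovers when user_config is false (i.e. under repoman). A standalone sketch of the set arithmetic:

    def stack_mask(master_lines, repo_lines):
        removals = set(l[1:] for l in repo_lines if l.startswith("-"))
        kept = [l for l in master_lines if l not in removals]
        kept += [l for l in repo_lines if not l.startswith("-")]
        # Removals that never matched a master atom become the
        # "Unmatched removal atom(s)" reported above.
        return kept, removals.difference(master_lines)

    kept, unmatched = stack_mask(["cat/pkg"], ["-cat/pkg", "-cat/other"])
    # kept == [], unmatched == {"cat/other"}
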
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.pyo
new file mode 100644
index 0000000..f48eb47
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_config/MaskManager.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py b/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py
index d7ef0f6..e1ec7f4 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = (
@@ -7,10 +7,10 @@ __all__ = (
 
 from _emerge.Package import Package
 from portage import os
-from portage.dep import ExtendedAtomDict, remove_slot, _get_useflag_re
+from portage.dep import dep_getrepo, dep_getslot, ExtendedAtomDict, remove_slot, _get_useflag_re
 from portage.localization import _
 from portage.util import grabfile, grabdict_package, read_corresponding_eapi_file, stack_lists, writemsg
-from portage.versions import cpv_getkey
+from portage.versions import cpv_getkey, _pkg_str
 
 from portage.package.ebuild._config.helper import ordered_by_atom_specificity
 
@@ -65,9 +65,9 @@ class UseManager(object):
 
 		self.repositories = repositories
 	
-	def _parse_file_to_tuple(self, file_name):
+	def _parse_file_to_tuple(self, file_name, recursive=True):
 		ret = []
-		lines = grabfile(file_name, recursive=1)
+		lines = grabfile(file_name, recursive=recursive)
 		eapi = read_corresponding_eapi_file(file_name)
 		useflag_re = _get_useflag_re(eapi)
 		for prefixed_useflag in lines:
@@ -82,10 +82,10 @@ class UseManager(object):
 				ret.append(prefixed_useflag)
 		return tuple(ret)
 
-	def _parse_file_to_dict(self, file_name, juststrings=False):
+	def _parse_file_to_dict(self, file_name, juststrings=False, recursive=True):
 		ret = {}
 		location_dict = {}
-		file_dict = grabdict_package(file_name, recursive=1, verify_eapi=True)
+		file_dict = grabdict_package(file_name, recursive=recursive, verify_eapi=True)
 		eapi = read_corresponding_eapi_file(file_name)
 		useflag_re = _get_useflag_re(eapi)
 		for k, v in file_dict.items():
@@ -132,19 +132,29 @@ class UseManager(object):
 		return ret
 
 	def _parse_profile_files_to_tuple_of_tuples(self, file_name, locations):
-		return tuple(self._parse_file_to_tuple(os.path.join(profile, file_name)) for profile in locations)
+		return tuple(self._parse_file_to_tuple(
+			os.path.join(profile.location, file_name),
+			recursive=profile.portage1_directories)
+			for profile in locations)
 
 	def _parse_profile_files_to_tuple_of_dicts(self, file_name, locations, juststrings=False):
-		return tuple(self._parse_file_to_dict(os.path.join(profile, file_name), juststrings) for profile in locations)
+		return tuple(self._parse_file_to_dict(
+			os.path.join(profile.location, file_name), juststrings,
+			recursive=profile.portage1_directories)
+			for profile in locations)
 
 	def getUseMask(self, pkg=None):
 		if pkg is None:
 			return frozenset(stack_lists(
 				self._usemask_list, incremental=True))
 
+		slot = None
 		cp = getattr(pkg, "cp", None)
 		if cp is None:
-			cp = cpv_getkey(remove_slot(pkg))
+			slot = dep_getslot(pkg)
+			repo = dep_getrepo(pkg)
+			pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+			cp = pkg.cp
 		usemask = []
 		if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
 			repos = []

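_parse_file_to_tuple() above gains a recursive flag so portage-1 profile directories can be grabbed recursively while other layouts are read flat. The per-line validation it performs looks roughly like the sketch below; the flag regex here is an assumed stand-in for whatever _get_useflag_re(eapi) actually returns:

import re

_useflag_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9+_@-]*$")

def parse_prefixed_useflags(lines):
	ret = []
	for prefixed_useflag in lines:
		if prefixed_useflag.startswith("-"):
			flag = prefixed_useflag[1:]
		else:
			flag = prefixed_useflag
		if _useflag_re.match(flag) is None:
			raise ValueError("invalid USE flag: %s" % prefixed_useflag)
		ret.append(prefixed_useflag)
	return tuple(ret)

print(parse_prefixed_useflags(["doc", "-static-libs"]))
# ('doc', '-static-libs')
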
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.pyo
new file mode 100644
index 0000000..2c9a609
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_config/UseManager.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.pyo
new file mode 100644
index 0000000..b2ebd21
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_config/VirtualsManager.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/__init__.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/__init__.pyo
new file mode 100644
index 0000000..b03cc29
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_config/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.pyo
new file mode 100644
index 0000000..aeee789
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_config/env_var_validation.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/features_set.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/features_set.pyo
new file mode 100644
index 0000000..9854444
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_config/features_set.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py b/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py
index 4f46781..ee0c090 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/helper.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = (
@@ -24,7 +24,7 @@ def ordered_by_atom_specificity(cpdict, pkg, repo=None):
 	order to achieve desired results (and thus corrupting
 	the ChangeLog like ordering of the file).
 	"""
-	if repo and repo != Package.UNKNOWN_REPO:
+	if not hasattr(pkg, 'repo') and repo and repo != Package.UNKNOWN_REPO:
 		pkg = pkg + _repo_separator + repo
 
 	results = []

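The new hasattr(pkg, 'repo') guard avoids appending a second "::repo" suffix when the incoming package is already a _pkg_str carrying repo metadata. A toy model of that check; PkgStr is a hypothetical stand-in for portage.versions._pkg_str:

_repo_separator = "::"

class PkgStr(str):
	"""Toy stand-in for _pkg_str: a cpv string with repo metadata."""
	def __new__(cls, cpv, repo=None):
		obj = str.__new__(cls, cpv)
		obj.repo = repo
		return obj

def with_repo(pkg, repo=None):
	# Only plain strings get the "::repo" suffix appended.
	if not hasattr(pkg, "repo") and repo:
		pkg = pkg + _repo_separator + repo
	return pkg

print(with_repo("dev-libs/foo-1.0", repo="gentoo"))          # dev-libs/foo-1.0::gentoo
print(with_repo(PkgStr("dev-libs/foo-1.0", repo="gentoo")))  # unchanged
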
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/helper.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/helper.pyo
new file mode 100644
index 0000000..f2b9261
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_config/helper.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py b/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py
index 6d42809..1a75de9 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = (
@@ -15,14 +15,14 @@ env_blacklist = frozenset((
 	"A", "AA", "CATEGORY", "DEPEND", "DESCRIPTION", "EAPI",
 	"EBUILD_FORCE_TEST", "EBUILD_PHASE", "EBUILD_SKIP_MANIFEST",
 	"ED", "EMERGE_FROM", "EPREFIX", "EROOT",
-	"HOMEPAGE", "INHERITED", "IUSE",
+	"GREP_OPTIONS", "HOMEPAGE", "INHERITED", "IUSE",
 	"KEYWORDS", "LICENSE", "MERGE_TYPE",
 	"PDEPEND", "PF", "PKGUSE", "PORTAGE_BACKGROUND",
 	"PORTAGE_BACKGROUND_UNMERGE", "PORTAGE_BUILDIR_LOCKED",
 	"PORTAGE_BUILT_USE", "PORTAGE_CONFIGROOT", "PORTAGE_IUSE",
-	"PORTAGE_NONFATAL", "PORTAGE_REPO_NAME", "PORTAGE_SANDBOX_COMPAT_LEVEL",
-	"PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "RESTRICT",
-	"ROOT", "SLOT", "SRC_URI"
+	"PORTAGE_NONFATAL", "PORTAGE_REPO_NAME",
+	"PORTAGE_USE", "PROPERTIES", "PROVIDE", "RDEPEND", "REPOSITORY",
+	"RESTRICT", "ROOT", "SLOT", "SRC_URI"
 ))
 
 environ_whitelist = []
@@ -36,7 +36,7 @@ environ_whitelist = []
 # environment in order to prevent sandbox from sourcing /etc/profile
 # in its bashrc (causing major leakage).
 environ_whitelist += [
-	"ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "D",
+	"ACCEPT_LICENSE", "BASH_ENV", "BUILD_PREFIX", "COLUMNS", "D",
 	"DISTDIR", "DOC_SYMLINKS_DIR", "EAPI", "EBUILD",
 	"EBUILD_FORCE_TEST",
 	"EBUILD_PHASE", "ECLASSDIR", "ECLASS_DEPTH", "ED",
@@ -50,23 +50,23 @@ environ_whitelist += [
 	"PORTAGE_BINPKG_TMPFILE",
 	"PORTAGE_BIN_PATH",
 	"PORTAGE_BUILDDIR", "PORTAGE_BUNZIP2_COMMAND", "PORTAGE_BZIP2_COMMAND",
-	"PORTAGE_COLORMAP",
+	"PORTAGE_COLORMAP", "PORTAGE_COMPRESS_EXCLUDE_SUFFIXES",
 	"PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
 	"PORTAGE_EBUILD_EXIT_FILE", "PORTAGE_FEATURES",
 	"PORTAGE_GID", "PORTAGE_GRPNAME",
 	"PORTAGE_INST_GID", "PORTAGE_INST_UID",
 	"PORTAGE_IPC_DAEMON", "PORTAGE_IUSE",
-	"PORTAGE_LOG_FILE",
+	"PORTAGE_LOG_FILE", "PORTAGE_OVERRIDE_EPREFIX",
 	"PORTAGE_PYM_PATH", "PORTAGE_PYTHON", "PORTAGE_QUIET",
 	"PORTAGE_REPO_NAME", "PORTAGE_RESTRICT",
-	"PORTAGE_SANDBOX_COMPAT_LEVEL", "PORTAGE_SIGPIPE_STATUS",
+	"PORTAGE_SIGPIPE_STATUS",
 	"PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV", "PORTAGE_USERNAME",
 	"PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE",
 	"PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PROFILE_PATHS",
 	"REPLACING_VERSIONS", "REPLACED_BY_VERSION",
 	"ROOT", "ROOTPATH", "T", "TMP", "TMPDIR",
 	"USE_EXPAND", "USE_ORDER", "WORKDIR",
-	"XARGS",
+	"XARGS", "__PORTAGE_TEST_HARDLINK_LOCKS",
 ]
 
 # user config variables
@@ -134,8 +134,9 @@ environ_filter += [
 # portage config variables and variables set directly by portage
 environ_filter += [
 	"ACCEPT_CHOSTS", "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES", "AUTOCLEAN",
-	"CLEAN_DELAY", "COLLISION_IGNORE", "CONFIG_PROTECT",
-	"CONFIG_PROTECT_MASK", "EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
+	"CLEAN_DELAY", "COLLISION_IGNORE",
+	"CONFIG_PROTECT", "CONFIG_PROTECT_MASK",
+	"EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
 	"EMERGE_LOG_DIR",
 	"EMERGE_WARNING_DELAY",
 	"FETCHCOMMAND", "FETCHCOMMAND_FTP",
@@ -143,7 +144,8 @@ environ_filter += [
 	"FETCHCOMMAND_RSYNC", "FETCHCOMMAND_SFTP",
 	"GENTOO_MIRRORS", "NOCONFMEM", "O",
 	"PORTAGE_BACKGROUND", "PORTAGE_BACKGROUND_UNMERGE",
-	"PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_BUILDIR_LOCKED", "PORTAGE_CALLER",
+	"PORTAGE_BINHOST",
+	"PORTAGE_BINHOST_CHUNKSIZE", "PORTAGE_BUILDIR_LOCKED",
 	"PORTAGE_ELOG_CLASSES",
 	"PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
 	"PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
@@ -156,8 +158,8 @@ environ_filter += [
 	"PORTAGE_RO_DISTDIRS",
 	"PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
 	"PORTAGE_RSYNC_RETRIES", "PORTAGE_SYNC_STALE",
-	"PORTAGE_USE", "PORT_LOGDIR",
-	"QUICKPKG_DEFAULT_OPTS",
+	"PORTAGE_USE", "PORT_LOGDIR", "PORT_LOGDIR_CLEAN",
+	"QUICKPKG_DEFAULT_OPTS", "REPOMAN_DEFAULT_OPTS",
 	"RESUMECOMMAND", "RESUMECOMMAND_FTP",
 	"RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTPS",
 	"RESUMECOMMAND_RSYNC", "RESUMECOMMAND_SFTP",

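These three collections drive how the ebuild environment is assembled: roughly, env_blacklist names variables that must never be taken over from the calling environment (they are package metadata that portage sets itself), environ_whitelist names variables preserved into the ebuild's environment, and environ_filter names config variables excluded from it. A rough sketch of the blacklist/filter side, with illustrative values only:

env_blacklist = frozenset(["EAPI", "SLOT", "GREP_OPTIONS"])
environ_filter = frozenset(["EMERGE_DEFAULT_OPTS", "PORTAGE_BINHOST"])

def scrub_calling_env(environ):
	# Drop variables that portage either owns (blacklist) or
	# deliberately hides from ebuilds (filter).
	return dict((k, v) for k, v in environ.items()
		if k not in env_blacklist and k not in environ_filter)

print(scrub_calling_env({"PATH": "/usr/bin", "GREP_OPTIONS": "--color",
	"PORTAGE_BINHOST": "http://example.org"}))
# {'PATH': '/usr/bin'}
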
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.pyo b/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.pyo
new file mode 100644
index 0000000..06ea37e
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_config/special_env_vars.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_config/unpack_dependencies.py b/portage_with_autodep/pym/portage/package/ebuild/_config/unpack_dependencies.py
new file mode 100644
index 0000000..1375189
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_config/unpack_dependencies.py
@@ -0,0 +1,38 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os, _supported_eapis
+from portage.dep import use_reduce
+from portage.eapi import eapi_has_automatic_unpack_dependencies
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util import grabfile, writemsg
+
+def load_unpack_dependencies_configuration(repositories):
+	repo_dict = {}
+	for repo in repositories.repos_with_profiles():
+		for eapi in _supported_eapis:
+			if eapi_has_automatic_unpack_dependencies(eapi):
+				file_name = os.path.join(repo.location, "profiles", "unpack_dependencies", eapi)
+				lines = grabfile(file_name, recursive=True)
+				for line in lines:
+					elements = line.split()
+					suffix = elements[0].lower()
+					if len(elements) == 1:
+						writemsg(_("--- Missing unpack dependencies for '%s' suffix in '%s'\n") % (suffix, file_name))
+					depend = " ".join(elements[1:])
+					try:
+						use_reduce(depend, eapi=eapi)
+					except InvalidDependString as e:
+						writemsg(_("--- Invalid unpack dependencies for '%s' suffix in '%s': '%s'\n") % (suffix, file_name, e))
+					else:
+						repo_dict.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend
+
+	ret = {}
+	for repo in repositories.repos_with_profiles():
+		for repo_name in [x.name for x in repo.masters] + [repo.name]:
+			for eapi in repo_dict.get(repo_name, {}):
+				for suffix, depend in repo_dict.get(repo_name, {}).get(eapi, {}).items():
+					ret.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend
+
+	return ret

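load_unpack_dependencies_configuration() expects one suffix per line in profiles/unpack_dependencies/<EAPI>, followed by the dependency string for archives with that suffix, and then folds each repo's mapping together with its masters'. A toy parse of one such line, mirroring the split done in the loader; the suffix and package name are made up:

line = "rar app-arch/unrar"
elements = line.split()
suffix = elements[0].lower()
depend = " ".join(elements[1:])
print(suffix, "->", depend)
# rar -> app-arch/unrar
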
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_eapi_invalid.py b/portage_with_autodep/pym/portage/package/ebuild/_eapi_invalid.py
new file mode 100644
index 0000000..d23677d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_eapi_invalid.py
@@ -0,0 +1,54 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import textwrap
+
+import portage
+from portage.dep import _repo_separator
+from portage.elog import elog_process
+from portage.elog.messages import eerror
+
+def eapi_invalid(self, cpv, repo_name, settings,
+	eapi_var, eapi_parsed, eapi_lineno):
+
+	msg = []
+	msg.extend(textwrap.wrap(("EAPI assignment in ebuild '%s%s%s' does not"
+		" conform with PMS section 7.3.1 (see bug #402167):") %
+		(cpv, _repo_separator, repo_name), 70))
+
+	if not eapi_parsed:
+		# None means the assignment was not found, while an
+		# empty string indicates an (invalid) empty assignment.
+		msg.append(
+			"\tvalid EAPI assignment must"
+			" occur on or before line: %s" %
+			eapi_lineno)
+	else:
+		msg.append(("\tbash returned EAPI '%s' which does not match "
+			"assignment on line: %s") %
+			(eapi_var, eapi_lineno))
+
+	if 'parse-eapi-ebuild-head' in settings.features:
+		msg.extend(textwrap.wrap(("NOTE: This error will soon"
+			" become unconditionally fatal in a future version of Portage,"
+			" but at this time, it can be made non-fatal by setting"
+			" FEATURES=-parse-eapi-ebuild-head in"
+			" make.conf."), 70))
+	else:
+		msg.extend(textwrap.wrap(("NOTE: This error will soon"
+			" become unconditionally fatal in a future version of Portage."
+			" At the earliest opportunity, please enable"
+			" FEATURES=parse-eapi-ebuild-head in make.conf in order to"
+			" make this error fatal."), 70))
+
+	if portage.data.secpass >= 2:
+		# TODO: improve elog permission error handling (bug #416231)
+		for line in msg:
+			eerror(line, phase="other", key=cpv)
+		elog_process(cpv, settings,
+			phasefilter=("other",))
+
+	else:
+		out = portage.output.EOutput()
+		for line in msg:
+			out.eerror(line)

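eapi_invalid() fires when the EAPI that bash reports disagrees with the assignment found by scanning the ebuild's head. A hedged sketch of what such a head scan can look like; the exact regex portage uses may differ in detail:

import re

_eapi_re = re.compile(r'^[ \t]*EAPI=(["\']?)([A-Za-z0-9+_.-]*)\1[ \t]*(?:#.*)?$')

def parse_eapi_head(lines):
	"""Return (eapi or None, line number of the assignment)."""
	for lineno, line in enumerate(lines, 1):
		m = _eapi_re.match(line)
		if m is not None:
			return m.group(2), lineno
	return None, len(lines)

print(parse_eapi_head(["# Copyright", 'EAPI="4"', "inherit eutils"]))
# ('4', 2)
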
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_eapi_invalid.pyo b/portage_with_autodep/pym/portage/package/ebuild/_eapi_invalid.pyo
new file mode 100644
index 0000000..0181c03
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_eapi_invalid.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.pyo b/portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.pyo
new file mode 100644
index 0000000..315cb0f
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_ipc/ExitCommand.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.pyo b/portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.pyo
new file mode 100644
index 0000000..9f75518
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_ipc/IpcCommand.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py b/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py
index fb6e61e..7bbb0e8 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import io
@@ -7,6 +7,7 @@ import portage
 from portage import os
 from portage import _unicode_decode
 from portage.dep import Atom
+from portage.eapi import eapi_has_repo_deps
 from portage.elog import messages as elog_messages
 from portage.exception import InvalidAtom
 from portage.package.ebuild._ipc.IpcCommand import IpcCommand
@@ -26,19 +27,21 @@ class QueryCommand(IpcCommand):
 
 	def __call__(self, argv):
 		"""
-		@returns: tuple of (stdout, stderr, returncode)
+		@return: tuple of (stdout, stderr, returncode)
 		"""
 
 		cmd, root, atom_str = argv
 
+		eapi = self.settings.get('EAPI')
+		allow_repo = eapi_has_repo_deps(eapi)
 		try:
-			atom = Atom(atom_str)
+			atom = Atom(atom_str, allow_repo=allow_repo)
 		except InvalidAtom:
 			return ('', 'invalid atom: %s\n' % atom_str, 2)
 
 		warnings = []
 		try:
-			atom = Atom(atom_str, eapi=self.settings.get('EAPI'))
+			atom = Atom(atom_str, allow_repo=allow_repo, eapi=eapi)
 		except InvalidAtom as e:
 			warnings.append(_unicode_decode("QA Notice: %s: %s") % (cmd, e))
 

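The change above derives allow_repo from the EAPI, so "::repo" atoms arriving over the IPC channel are accepted only when the EAPI supports repository dependencies. A stand-alone sketch of that gating; the eapi_has_repo_deps() predicate here is illustrative, not portage's real table:

def eapi_has_repo_deps(eapi):
	# Assumption for illustration: repo deps ride on "-progress"
	# EAPI variants; which EAPIs really qualify is portage's call.
	return eapi is not None and eapi.endswith("-progress")

def parse_atom(atom_str, eapi):
	allow_repo = eapi_has_repo_deps(eapi)
	if "::" in atom_str and not allow_repo:
		raise ValueError("invalid atom: %s" % atom_str)
	return atom_str

print(parse_atom("dev-libs/foo::gentoo", "5-progress"))
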
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.pyo b/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.pyo
new file mode 100644
index 0000000..0e9ee96
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_ipc/QueryCommand.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.pyo b/portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.pyo
new file mode 100644
index 0000000..d9f8d25
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_ipc/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_metadata_invalid.py b/portage_with_autodep/pym/portage/package/ebuild/_metadata_invalid.py
new file mode 100644
index 0000000..bcf1f7f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_metadata_invalid.py
@@ -0,0 +1,41 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import textwrap
+
+import portage
+from portage.dep import _repo_separator
+from portage.elog import elog_process
+from portage.elog.messages import eerror
+
+def eapi_invalid(self, cpv, repo_name, settings,
+	eapi_var, eapi_parsed, eapi_lineno):
+
+	msg = []
+	msg.extend(textwrap.wrap(("EAPI assignment in ebuild '%s%s%s' does not"
+		" conform with PMS section 7.3.1 (see bug #402167):") %
+		(cpv, _repo_separator, repo_name), 70))
+
+	if not eapi_parsed:
+		# None means the assignment was not found, while an
+		# empty string indicates an (invalid) empty assignment.
+		msg.append(
+			"\tvalid EAPI assignment must"
+			" occur on or before line: %s" %
+			eapi_lineno)
+	else:
+		msg.append(("\tbash returned EAPI '%s' which does not match "
+			"assignment on line: %s") %
+			(eapi_var, eapi_lineno))
+
+	if portage.data.secpass >= 2:
+		# TODO: improve elog permission error handling (bug #416231)
+		for line in msg:
+			eerror(line, phase="other", key=cpv)
+		elog_process(cpv, settings,
+			phasefilter=("other",))
+
+	else:
+		out = portage.output.EOutput()
+		for line in msg:
+			out.eerror(line)

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py
new file mode 100644
index 0000000..44e2576
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestProcess.py
@@ -0,0 +1,43 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.exception import (FileNotFound,
+	PermissionDenied, PortagePackageException)
+from portage.localization import _
+from portage.util._async.ForkProcess import ForkProcess
+
+class ManifestProcess(ForkProcess):
+
+	__slots__ = ("cp", "distdir", "fetchlist_dict", "repo_config")
+
+	MODIFIED = 16
+
+	def _run(self):
+		mf = self.repo_config.load_manifest(
+			os.path.join(self.repo_config.location, self.cp),
+			self.distdir, fetchlist_dict=self.fetchlist_dict)
+
+		try:
+			mf.create(assumeDistHashesAlways=True)
+		except FileNotFound as e:
+			portage.writemsg(_("!!! File %s doesn't exist, can't update "
+				"Manifest\n") % e, noiselevel=-1)
+			return 1
+
+		except PortagePackageException as e:
+			portage.writemsg(("!!! %s\n") % (e,), noiselevel=-1)
+			return 1
+
+		try:
+			modified = mf.write(sign=False)
+		except PermissionDenied as e:
+			portage.writemsg("!!! %s: %s\n" % (_("Permission Denied"), e,),
+				noiselevel=-1)
+			return 1
+		else:
+			if modified:
+				return self.MODIFIED
+			else:
+				return os.EX_OK

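ManifestProcess runs in a forked child and reports its outcome through the exit status: os.EX_OK for an unchanged Manifest, MODIFIED (16) for a rewritten one, and 1 on error. A toy consumer of that convention:

import os

MODIFIED = 16  # mirrors ManifestProcess.MODIFIED above

def describe(returncode):
	if returncode == os.EX_OK:
		return "Manifest unchanged"
	if returncode == MODIFIED:
		return "Manifest rewritten"
	return "failed with status %d" % returncode

print(describe(MODIFIED))  # Manifest rewritten
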
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py
new file mode 100644
index 0000000..38ac482
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py
@@ -0,0 +1,93 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.dep import _repo_separator
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util._async.AsyncScheduler import AsyncScheduler
+from .ManifestTask import ManifestTask
+
+class ManifestScheduler(AsyncScheduler):
+
+	def __init__(self, portdb, cp_iter=None,
+		gpg_cmd=None, gpg_vars=None, force_sign_key=None, **kwargs):
+
+		AsyncScheduler.__init__(self, **kwargs)
+
+		self._portdb = portdb
+
+		if cp_iter is None:
+			cp_iter = self._iter_every_cp()
+		self._cp_iter = cp_iter
+		self._gpg_cmd = gpg_cmd
+		self._gpg_vars = gpg_vars
+		self._force_sign_key = force_sign_key
+		self._task_iter = self._iter_tasks()
+
+	def _next_task(self):
+		return next(self._task_iter)
+
+	def _iter_every_cp(self):
+		# List categories individually, in order to start yielding more quickly,
+		# and in order to reduce latency in case of a signal interrupt.
+		cp_all = self._portdb.cp_all
+		for category in sorted(self._portdb.categories):
+			for cp in cp_all(categories=(category,)):
+				yield cp
+
+	def _iter_tasks(self):
+		portdb = self._portdb
+		distdir = portdb.settings["DISTDIR"]
+		disabled_repos = set()
+
+		for cp in self._cp_iter:
+			if self._terminated.is_set():
+				break
+			# We iterate over portdb.porttrees, since it's common to
+			# tweak this attribute in order to adjust repo selection.
+			for mytree in portdb.porttrees:
+				if self._terminated.is_set():
+					break
+				repo_config = portdb.repositories.get_repo_for_location(mytree)
+				if not repo_config.create_manifest:
+					if repo_config.name not in disabled_repos:
+						disabled_repos.add(repo_config.name)
+						portage.writemsg(
+							_(">>> Skipping creating Manifest for %s%s%s; "
+							"repository is configured to not use them\n") %
+							(cp, _repo_separator, repo_config.name),
+							noiselevel=-1)
+					continue
+				cpv_list = portdb.cp_list(cp, mytree=[repo_config.location])
+				if not cpv_list:
+					continue
+				fetchlist_dict = {}
+				try:
+					for cpv in cpv_list:
+						fetchlist_dict[cpv] = \
+							list(portdb.getFetchMap(cpv, mytree=mytree))
+				except InvalidDependString as e:
+					portage.writemsg(
+						_("!!! %s%s%s: SRC_URI: %s\n") %
+						(cp, _repo_separator, repo_config.name, e),
+						noiselevel=-1)
+					self._error_count += 1
+					continue
+
+				yield ManifestTask(cp=cp, distdir=distdir,
+					fetchlist_dict=fetchlist_dict, repo_config=repo_config,
+					gpg_cmd=self._gpg_cmd, gpg_vars=self._gpg_vars,
+					force_sign_key=self._force_sign_key)
+
+	def _task_exit(self, task):
+
+		if task.returncode != os.EX_OK:
+			if not self._terminated_tasks:
+				portage.writemsg(
+					"Error processing %s%s%s, continuing...\n" %
+					(task.cp, _repo_separator, task.repo_config.name),
+					noiselevel=-1)
+
+		AsyncScheduler._task_exit(self, task)

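ManifestScheduler pulls its work from a generator, so category listing and fetchlist construction stay lazy and a termination event can be honored between tasks. A stripped-down model of that control flow, independent of portage's AsyncScheduler:

import threading

class TinyScheduler:
	def __init__(self, task_iter):
		self._task_iter = task_iter
		self._terminated = threading.Event()

	def run(self):
		for task in self._task_iter:
			# Check for termination between tasks, as the real
			# scheduler does inside _iter_tasks().
			if self._terminated.is_set():
				break
			task()

def tasks():
	for cp in ("app-misc/foo", "app-misc/bar"):
		yield (lambda cp=cp: print("manifest for", cp))

TinyScheduler(tasks()).run()
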
diff --git a/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py
new file mode 100644
index 0000000..0ee2b91
--- /dev/null
+++ b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/ManifestTask.py
@@ -0,0 +1,186 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import re
+import subprocess
+
+from portage import os
+from portage import _unicode_encode, _encodings
+from portage.const import MANIFEST2_IDENTIFIERS
+from portage.util import (atomic_ofstream, grablines,
+	shlex_split, varexpand, writemsg)
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from _emerge.CompositeTask import CompositeTask
+from _emerge.PipeReader import PipeReader
+from .ManifestProcess import ManifestProcess
+
+class ManifestTask(CompositeTask):
+
+	__slots__ = ("cp", "distdir", "fetchlist_dict", "gpg_cmd",
+		"gpg_vars", "repo_config", "force_sign_key", "_manifest_path")
+
+	_PGP_HEADER = b"BEGIN PGP SIGNED MESSAGE"
+	_manifest_line_re = re.compile(r'^(%s) ' % "|".join(MANIFEST2_IDENTIFIERS))
+	_gpg_key_id_re = re.compile(r'^[0-9A-F]*$')
+	_gpg_key_id_lengths = (8, 16, 24, 32, 40)
+
+	def _start(self):
+		self._manifest_path = os.path.join(self.repo_config.location,
+			self.cp, "Manifest")
+		manifest_proc = ManifestProcess(cp=self.cp, distdir=self.distdir,
+			fetchlist_dict=self.fetchlist_dict, repo_config=self.repo_config,
+			scheduler=self.scheduler)
+		self._start_task(manifest_proc, self._manifest_proc_exit)
+
+	def _manifest_proc_exit(self, manifest_proc):
+		self._assert_current(manifest_proc)
+		if manifest_proc.returncode not in (os.EX_OK, manifest_proc.MODIFIED):
+			self.returncode = manifest_proc.returncode
+			self._current_task = None
+			self.wait()
+			return
+
+		modified = manifest_proc.returncode == manifest_proc.MODIFIED
+		sign = self.gpg_cmd is not None
+
+		if not modified and sign:
+			sign = self._need_signature()
+			if not sign and self.force_sign_key is not None \
+				and os.path.exists(self._manifest_path):
+				self._check_sig_key()
+				return
+
+		if not sign or not os.path.exists(self._manifest_path):
+			self.returncode = os.EX_OK
+			self._current_task = None
+			self.wait()
+			return
+
+		self._start_gpg_proc()
+
+	def _check_sig_key(self):
+		null_fd = os.open('/dev/null', os.O_RDONLY)
+		popen_proc = PopenProcess(proc=subprocess.Popen(
+			["gpg", "--verify", self._manifest_path],
+			stdin=null_fd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+			pipe_reader=PipeReader())
+		os.close(null_fd)
+		popen_proc.pipe_reader.input_files = {
+			"producer" : popen_proc.proc.stdout}
+		self._start_task(popen_proc, self._check_sig_key_exit)
+
+	@staticmethod
+	def _parse_gpg_key(output):
+		"""
+		Returns the first token which appears to represent a gpg key
+		id, or None if there is no such token.
+		"""
+		regex = ManifestTask._gpg_key_id_re
+		lengths = ManifestTask._gpg_key_id_lengths
+		for token in output.split():
+			m = regex.match(token)
+			if m is not None and len(m.group(0)) in lengths:
+				return m.group(0)
+		return None
+
+	@staticmethod
+	def _normalize_gpg_key(key_str):
+		"""
+		Strips leading "0x" and trailing "!", and converts to uppercase
+		(intended to be the same format as that in gpg --verify output).
+		"""
+		key_str = key_str.upper()
+		if key_str.startswith("0X"):
+			key_str = key_str[2:]
+		key_str = key_str.rstrip("!")
+		return key_str
+
+	def _check_sig_key_exit(self, proc):
+		self._assert_current(proc)
+
+		parsed_key = self._parse_gpg_key(
+			proc.pipe_reader.getvalue().decode('utf_8', 'replace'))
+		if parsed_key is not None and \
+			self._normalize_gpg_key(parsed_key) == \
+			self._normalize_gpg_key(self.force_sign_key):
+			self.returncode = os.EX_OK
+			self._current_task = None
+			self.wait()
+			return
+
+		if self._was_cancelled():
+			self.wait()
+			return
+
+		self._strip_sig(self._manifest_path)
+		self._start_gpg_proc()
+
+	@staticmethod
+	def _strip_sig(manifest_path):
+		"""
+		Strip an existing signature from a Manifest file.
+		"""
+		line_re = ManifestTask._manifest_line_re
+		lines = grablines(manifest_path)
+		f = None
+		try:
+			f = atomic_ofstream(manifest_path)
+			for line in lines:
+				if line_re.match(line) is not None:
+					f.write(line)
+			f.close()
+			f = None
+		finally:
+			if f is not None:
+				f.abort()
+
+	def _start_gpg_proc(self):
+		gpg_vars = self.gpg_vars
+		if gpg_vars is None:
+			gpg_vars = {}
+		else:
+			gpg_vars = gpg_vars.copy()
+		gpg_vars["FILE"] = self._manifest_path
+		gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars)
+		gpg_cmd = shlex_split(gpg_cmd)
+		gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd,
+			stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
+		# PipeLogger echoes output and efficiently monitors for process
+		# exit by listening for the stdout EOF event.
+		gpg_proc.pipe_reader = PipeLogger(background=self.background,
+			input_fd=gpg_proc.proc.stdout, scheduler=self.scheduler)
+		self._start_task(gpg_proc, self._gpg_proc_exit)
+
+	def _gpg_proc_exit(self, gpg_proc):
+		if self._default_exit(gpg_proc) != os.EX_OK:
+			self.wait()
+			return
+
+		rename_args = (self._manifest_path + ".asc", self._manifest_path)
+		try:
+			os.rename(*rename_args)
+		except OSError as e:
+			writemsg("!!! rename('%s', '%s'): %s\n" % (rename_args + (e,)),
+				noiselevel=-1)
+			try:
+				os.unlink(self._manifest_path + ".asc")
+			except OSError:
+				pass
+			self.returncode = 1
+		else:
+			self.returncode = os.EX_OK
+
+		self._current_task = None
+		self.wait()
+
+	def _need_signature(self):
+		try:
+			with open(_unicode_encode(self._manifest_path,
+				encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+				return self._PGP_HEADER not in f.readline()
+		except IOError as e:
+			if e.errno in (errno.ENOENT, errno.ESTALE):
+				return False
+			raise

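ManifestTask re-signs only when the Manifest's first line lacks the PGP header, and _strip_sig() removes an old signature by keeping just the entry lines matched by _manifest_line_re. A small stand-alone model of both checks; the identifier list is hard-coded here from MANIFEST2_IDENTIFIERS to keep the sketch self-contained:

import re

PGP_HEADER = b"BEGIN PGP SIGNED MESSAGE"
manifest_line_re = re.compile(r"^(AUX|DIST|EBUILD|MISC) ")

def need_signature(first_line):
	return PGP_HEADER not in first_line

def strip_sig(lines):
	return [line for line in lines if manifest_line_re.match(line)]

signed = ["-----BEGIN PGP SIGNED MESSAGE-----\n", "Hash: SHA256\n", "\n",
	"DIST foo-1.0.tar.gz 1024 SHA256 abcd\n"]
print(need_signature(b"-----BEGIN PGP SIGNED MESSAGE-----\n"))  # False
print(strip_sig(signed))
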
diff --git a/portage_with_autodep/pym/portage/tests/dbapi/__init__.py b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/__init__.py
similarity index 65%
rename from portage_with_autodep/pym/portage/tests/dbapi/__init__.py
rename to portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/__init__.py
index 532918b..418ad86 100644
--- a/portage_with_autodep/pym/portage/tests/dbapi/__init__.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/_parallel_manifest/__init__.py
@@ -1,2 +1,2 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py b/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py
index befdc89..94f8c79 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.py
@@ -1,10 +1,10 @@
 # Copyright 2010-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-import shutil
 import tempfile
 
 from portage import os
+from portage import shutil
 from portage.const import EBUILD_PHASES
 from portage.elog import elog_process
 from portage.package.ebuild.config import config
@@ -20,7 +20,11 @@ def spawn_nofetch(portdb, ebuild_path, settings=None):
 	to cache metadata. It will be cloned internally, in order to
 	prevent any changes from interfering with the calling code.
 	If settings is None then a suitable config instance will be
-	acquired from the given portdbapi instance.
+	acquired from the given portdbapi instance. Do not use the
+	settings parameter unless setcpv has been called on the given
+	instance, since otherwise it's possible to trigger issues like
+	bug #408817 due to fragile assumptions involving the config
+	state inside doebuild_environment().
 
 	A private PORTAGE_BUILDDIR will be created and cleaned up, in
 	order to avoid any interference with any other processes.

diff --git a/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.pyo b/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.pyo
new file mode 100644
index 0000000..ac449ea
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/_spawn_nofetch.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/config.py b/portage_with_autodep/pym/portage/package/ebuild/config.py
index a8c6ad6..97cbd99 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/config.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/config.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = [
@@ -7,7 +7,10 @@ __all__ = [
 
 import copy
 from itertools import chain
+import grp
 import logging
+import platform
+import pwd
 import re
 import sys
 import warnings
@@ -21,10 +24,9 @@ from portage import bsd_chflags, \
 	load_mod, os, selinux, _unicode_decode
 from portage.const import CACHE_PATH, \
 	DEPCACHE_PATH, INCREMENTALS, MAKE_CONF_FILE, \
-	MODULES_FILE_PATH, PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, \
+	MODULES_FILE_PATH, \
 	PRIVATE_PATH, PROFILE_PATH, USER_CONFIG_PATH, \
 	USER_VIRTUALS_FILE
-from portage.const import _SANDBOX_COMPAT_LEVEL
 from portage.dbapi import dbapi
 from portage.dbapi.porttree import portdbapi
 from portage.dbapi.vartree import vartree
@@ -41,7 +43,7 @@ from portage.util import ensure_dirs, getconfig, grabdict, \
 	grabdict_package, grabfile, grabfile_package, LazyItemsDict, \
 	normalize_path, shlex_split, stack_dictlist, stack_dicts, stack_lists, \
 	writemsg, writemsg_level
-from portage.versions import catpkgsplit, catsplit, cpv_getkey
+from portage.versions import catpkgsplit, catsplit, cpv_getkey, _pkg_str
 
 from portage.package.ebuild._config import special_env_vars
 from portage.package.ebuild._config.env_var_validation import validate_cmd_var
@@ -120,11 +122,19 @@ class config(object):
 	virtuals ...etc you look in here.
 	"""
 
+	_constant_keys = frozenset(['PORTAGE_BIN_PATH', 'PORTAGE_GID',
+		'PORTAGE_PYM_PATH'])
+
 	_setcpv_aux_keys = ('DEFINED_PHASES', 'DEPEND', 'EAPI',
 		'INHERITED', 'IUSE', 'REQUIRED_USE', 'KEYWORDS', 'LICENSE', 'PDEPEND',
 		'PROPERTIES', 'PROVIDE', 'RDEPEND', 'SLOT',
 		'repository', 'RESTRICT', 'LICENSE',)
 
+	_module_aliases = {
+		"cache.metadata_overlay.database" : "portage.cache.flat_hash.database",
+		"portage.cache.metadata_overlay.database" : "portage.cache.flat_hash.database",
+	}
+
 	_case_insensitive_vars = special_env_vars.case_insensitive_vars
 	_default_globals = special_env_vars.default_globals
 	_env_blacklist = special_env_vars.env_blacklist
@@ -135,7 +145,8 @@ class config(object):
 
 	def __init__(self, clone=None, mycpv=None, config_profile_path=None,
 		config_incrementals=None, config_root=None, target_root=None,
-		_eprefix=None, local_config=True, env=None, _unmatched_removal=False):
+		eprefix=None, local_config=True, env=None,
+		_unmatched_removal=False):
 		"""
 		@param clone: If provided, init will use deepcopy to copy by value the instance.
 		@type clone: Instance of config class.
@@ -151,8 +162,8 @@ class config(object):
 		@type config_root: String
 		@param target_root: __init__ override of $ROOT env variable.
 		@type target_root: String
-		@param _eprefix: set the EPREFIX variable (private, used by internal tests)
-		@type _eprefix: String
+		@param eprefix: set the EPREFIX variable (default is portage.const.EPREFIX)
+		@type eprefix: String
 		@param local_config: Enables loading of local config (/etc/portage); used most by repoman to
 		ignore local config (keywording and unmasking)
 		@type local_config: Boolean
@@ -164,10 +175,6 @@ class config(object):
 		@type _unmatched_removal: Boolean
 		"""
 
-		# rename local _eprefix variable for convenience
-		eprefix = _eprefix
-		del _eprefix
-
 		# When initializing the global portage.settings instance, avoid
 		# raising exceptions whenever possible since exceptions thrown
 		# from 'import portage' or 'import portage.exceptions' statements
@@ -208,6 +215,7 @@ class config(object):
 			self.repositories = clone.repositories
 			self._iuse_implicit_match = clone._iuse_implicit_match
 			self._non_user_variables = clone._non_user_variables
+			self._env_d_blacklist = clone._env_d_blacklist
 			self._repo_make_defaults = clone._repo_make_defaults
 			self.usemask = clone.usemask
 			self.useforce = clone.useforce
@@ -272,9 +280,6 @@ class config(object):
 
 			eprefix = locations_manager.eprefix
 			config_root = locations_manager.config_root
-			self.profiles = locations_manager.profiles
-			self.profile_path = locations_manager.profile_path
-			self.user_profile_dir = locations_manager.user_profile_dir
 			abs_user_config = locations_manager.abs_user_config
 
 			make_conf = getconfig(
@@ -293,6 +298,38 @@ class config(object):
 			eroot = locations_manager.eroot
 			self.global_config_path = locations_manager.global_config_path
 
+			# The expand_map is used for variable substitution
+			# in getconfig() calls, and the getconfig() calls
+			# update expand_map with the value of each variable
+			# assignment that occurs. Variable substitution occurs
+			# in the following order, which corresponds to the
+			# order of appearance in self.lookuplist:
+			#
+			#   * env.d
+			#   * make.globals
+			#   * make.defaults
+			#   * make.conf
+			#
+			# Notably absent is "env", since we want to avoid any
+			# interaction with the calling environment that might
+			# lead to unexpected results.
+
+			env_d = getconfig(os.path.join(eroot, "etc", "profile.env"),
+				expand=False) or {}
+			expand_map = env_d.copy()
+			self._expand_map = expand_map
+
+			# Allow make.globals to set default paths relative to ${EPREFIX}.
+			expand_map["EPREFIX"] = eprefix
+
+			make_globals = getconfig(os.path.join(
+				self.global_config_path, 'make.globals'), expand=expand_map)
+			if make_globals is None:
+				make_globals = {}
+
+			for k, v in self._default_globals.items():
+				make_globals.setdefault(k, v)
+
 			if config_incrementals is None:
 				self.incrementals = INCREMENTALS
 			else:
@@ -302,14 +339,20 @@ class config(object):
 
 			self.module_priority    = ("user", "default")
 			self.modules            = {}
-			modules_loader = KeyValuePairFileLoader(
-				os.path.join(config_root, MODULES_FILE_PATH), None, None)
+			modules_file = os.path.join(config_root, MODULES_FILE_PATH)
+			modules_loader = KeyValuePairFileLoader(modules_file, None, None)
 			modules_dict, modules_errors = modules_loader.load()
 			self.modules["user"] = modules_dict
 			if self.modules["user"] is None:
 				self.modules["user"] = {}
+			user_auxdbmodule = \
+				self.modules["user"].get("portdbapi.auxdbmodule")
+			if user_auxdbmodule is not None and \
+				user_auxdbmodule in self._module_aliases:
+				warnings.warn("'%s' is deprecated: %s" %
+				(user_auxdbmodule, modules_file))
+
 			self.modules["default"] = {
-				"portdbapi.metadbmodule": "portage.cache.metadata.database",
 				"portdbapi.auxdbmodule":  "portage.cache.flat_hash.database",
 			}
 
@@ -328,43 +371,9 @@ class config(object):
 			self.configlist.append({})
 			self.configdict["pkginternal"] = self.configlist[-1]
 
-			self.packages_list = [grabfile_package(os.path.join(x, "packages"), verify_eapi=True) for x in self.profiles]
-			self.packages      = tuple(stack_lists(self.packages_list, incremental=1))
-			del self.packages_list
-			#self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
-
-			# revmaskdict
-			self.prevmaskdict={}
-			for x in self.packages:
-				# Negative atoms are filtered by the above stack_lists() call.
-				if not isinstance(x, Atom):
-					x = Atom(x.lstrip('*'))
-				self.prevmaskdict.setdefault(x.cp, []).append(x)
-
-			# The expand_map is used for variable substitution
-			# in getconfig() calls, and the getconfig() calls
-			# update expand_map with the value of each variable
-			# assignment that occurs. Variable substitution occurs
-			# in the following order, which corresponds to the
-			# order of appearance in self.lookuplist:
-			#
-			#   * env.d
-			#   * make.globals
-			#   * make.defaults
-			#   * make.conf
-			#
-			# Notably absent is "env", since we want to avoid any
-			# interaction with the calling environment that might
-			# lead to unexpected results.
-			expand_map = {}
-			self._expand_map = expand_map
-
-			env_d = getconfig(os.path.join(eroot, "etc", "profile.env"),
-				expand=expand_map)
 			# env_d will be None if profile.env doesn't exist.
 			if env_d:
 				self.configdict["env.d"].update(env_d)
-				expand_map.update(env_d)
 
 			# backupenv is used for calculating incremental variables.
 			if env is None:
@@ -390,40 +399,72 @@ class config(object):
 
 			self.configdict["env"] = LazyItemsDict(self.backupenv)
 
-			for x in (self.global_config_path,):
-				self.mygcfg = getconfig(os.path.join(x, "make.globals"),
-					expand=expand_map)
-				if self.mygcfg:
-					break
+			self.configlist.append(make_globals)
+			self.configdict["globals"]=self.configlist[-1]
 
-			if self.mygcfg is None:
-				self.mygcfg = {}
+			self.make_defaults_use = []
 
-			for k, v in self._default_globals.items():
-				self.mygcfg.setdefault(k, v)
+			#Loading Repositories
+			self["PORTAGE_CONFIGROOT"] = config_root
+			self["ROOT"] = target_root
+			self["EPREFIX"] = eprefix
+			self["EROOT"] = eroot
+			known_repos = []
+			portdir = ""
+			portdir_overlay = ""
+			for confs in [make_globals, make_conf, self.configdict["env"]]:
+				v = confs.get("PORTDIR")
+				if v is not None:
+					portdir = v
+					known_repos.append(v)
+				v = confs.get("PORTDIR_OVERLAY")
+				if v is not None:
+					portdir_overlay = v
+					known_repos.extend(shlex_split(v))
+			known_repos = frozenset(known_repos)
+			self["PORTDIR"] = portdir
+			self["PORTDIR_OVERLAY"] = portdir_overlay
+			self.lookuplist = [self.configdict["env"]]
+			self.repositories = load_repository_config(self)
 
-			self.configlist.append(self.mygcfg)
-			self.configdict["globals"]=self.configlist[-1]
+			locations_manager.load_profiles(self.repositories, known_repos)
 
-			self.make_defaults_use = []
-			self.mygcfg = {}
+			profiles_complex = locations_manager.profiles_complex
+			self.profiles = locations_manager.profiles
+			self.profile_path = locations_manager.profile_path
+			self.user_profile_dir = locations_manager.user_profile_dir
+
+			packages_list = [grabfile_package(os.path.join(x, "packages"),
+				verify_eapi=True) for x in self.profiles]
+			self.packages = tuple(stack_lists(packages_list, incremental=1))
+
+			# revmaskdict
+			self.prevmaskdict={}
+			for x in self.packages:
+				# Negative atoms are filtered by the above stack_lists() call.
+				if not isinstance(x, Atom):
+					x = Atom(x.lstrip('*'))
+				self.prevmaskdict.setdefault(x.cp, []).append(x)
+
+
+			mygcfg = {}
 			if self.profiles:
 				mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults"),
 					expand=expand_map) for x in self.profiles]
 				self._make_defaults = mygcfg_dlists
-				self.mygcfg = stack_dicts(mygcfg_dlists,
+				mygcfg = stack_dicts(mygcfg_dlists,
 					incrementals=self.incrementals)
-				if self.mygcfg is None:
-					self.mygcfg = {}
-			self.configlist.append(self.mygcfg)
+				if mygcfg is None:
+					mygcfg = {}
+			self.configlist.append(mygcfg)
 			self.configdict["defaults"]=self.configlist[-1]
 
-			self.mygcfg = getconfig(
+			mygcfg = getconfig(
 				os.path.join(config_root, MAKE_CONF_FILE),
 				tolerant=tolerant, allow_sourcing=True,
 				expand=expand_map) or {}
 
-			self.mygcfg.update(getconfig(
+			mygcfg.update(getconfig(
 				os.path.join(abs_user_config, 'make.conf'),
 				tolerant=tolerant, allow_sourcing=True,
 				expand=expand_map) or {})
@@ -439,10 +480,18 @@ class config(object):
 			non_user_variables = frozenset(non_user_variables)
 			self._non_user_variables = non_user_variables
 
+			self._env_d_blacklist = frozenset(chain(
+				profile_only_variables,
+				self._env_blacklist,
+			))
+			env_d = self.configdict["env.d"]
+			for k in self._env_d_blacklist:
+				env_d.pop(k, None)
+
 			for k in profile_only_variables:
-				self.mygcfg.pop(k, None)
+				mygcfg.pop(k, None)
 
-			self.configlist.append(self.mygcfg)
+			self.configlist.append(mygcfg)
 			self.configdict["conf"]=self.configlist[-1]
 
 			self.configlist.append(LazyItemsDict())
@@ -472,25 +521,23 @@ class config(object):
 			self["ROOT"] = target_root
 			self.backup_changes("ROOT")
 
+			# The PORTAGE_OVERRIDE_EPREFIX variable propagates the EPREFIX
+			# of this config instance to any portage commands or API
+			# consumers running in subprocesses.
 			self["EPREFIX"] = eprefix
 			self.backup_changes("EPREFIX")
+			self["PORTAGE_OVERRIDE_EPREFIX"] = eprefix
+			self.backup_changes("PORTAGE_OVERRIDE_EPREFIX")
 			self["EROOT"] = eroot
 			self.backup_changes("EROOT")
 
-			self["PORTAGE_SANDBOX_COMPAT_LEVEL"] = _SANDBOX_COMPAT_LEVEL
-			self.backup_changes("PORTAGE_SANDBOX_COMPAT_LEVEL")
-
 			self._ppropertiesdict = portage.dep.ExtendedAtomDict(dict)
 			self._penvdict = portage.dep.ExtendedAtomDict(dict)
 
-			#Loading Repositories
-			self.repositories = load_repository_config(self)
-
 			#filling PORTDIR and PORTDIR_OVERLAY variable for compatibility
 			main_repo = self.repositories.mainRepo()
 			if main_repo is not None:
-				main_repo = main_repo.user_location
-				self["PORTDIR"] = main_repo
+				self["PORTDIR"] = main_repo.user_location
 				self.backup_changes("PORTDIR")
 
 			# repoman controls PORTDIR_OVERLAY via the environment, so no
@@ -501,11 +548,11 @@ class config(object):
 
 			new_ov = []
 			if portdir_overlay:
-				whitespace_re = re.compile(r"\s")
+				shell_quote_re = re.compile(r"[\s\\\"'$`]")
 				for ov in portdir_overlay:
 					ov = normalize_path(ov)
 					if os.path.isdir(ov):
-						if whitespace_re.search(ov) is not None:
+						if shell_quote_re.search(ov) is not None:
 							ov = portage._shell_quote(ov)
 						new_ov.append(ov)
 					else:
@@ -528,11 +575,11 @@ class config(object):
 				self._repo_make_defaults[repo.name] = d
 
 			#Read package.keywords and package.accept_keywords.
-			self._keywords_manager = KeywordsManager(self.profiles, abs_user_config, \
+			self._keywords_manager = KeywordsManager(profiles_complex, abs_user_config, \
 				local_config, global_accept_keywords=self.configdict["defaults"].get("ACCEPT_KEYWORDS", ""))
 
 			#Read all USE related files from profiles and optionally from user config.
-			self._use_manager = UseManager(self.repositories, self.profiles, abs_user_config, user_config=local_config)
+			self._use_manager = UseManager(self.repositories, profiles_complex, abs_user_config, user_config=local_config)
 			#Initialize all USE related variables we track ourselves.
 			self.usemask = self._use_manager.getUseMask()
 			self.useforce = self._use_manager.getUseForce()
@@ -549,7 +596,7 @@ class config(object):
 					self.configdict["conf"].get("ACCEPT_LICENSE", ""))
 
 			#Read package.mask and package.unmask from profiles and optionally from user config
-			self._mask_manager = MaskManager(self.repositories, self.profiles,
+			self._mask_manager = MaskManager(self.repositories, profiles_complex,
 				abs_user_config, user_config=local_config,
 				strict_umatched_removal=_unmatched_removal)
 
@@ -597,16 +644,21 @@ class config(object):
 			self.categories = [grabfile(os.path.join(x, "categories")) \
 				for x in locations_manager.profile_and_user_locations]
 			category_re = dbapi._category_re
-			self.categories = tuple(sorted(
+			# categories used to be a tuple, but now we use a frozenset
+			# for hashed category validation in portdbapi.cp_list()
+			self.categories = frozenset(
 				x for x in stack_lists(self.categories, incremental=1)
-				if category_re.match(x) is not None))
+				if category_re.match(x) is not None)
 
 			archlist = [grabfile(os.path.join(x, "arch.list")) \
 				for x in locations_manager.profile_and_user_locations]
 			archlist = stack_lists(archlist, incremental=1)
 			self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
 
-			pkgprovidedlines = [grabfile(os.path.join(x, "package.provided"), recursive=1) for x in self.profiles]
+			pkgprovidedlines = [grabfile(
+				os.path.join(x.location, "package.provided"),
+				recursive=x.portage1_directories)
+				for x in profiles_complex]
 			pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
 			has_invalid_data = False
 			for x in range(len(pkgprovidedlines)-1, -1, -1):
@@ -649,9 +701,6 @@ class config(object):
 			if "USE_ORDER" not in self:
 				self.backupenv["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:repo:env.d"
 
-			self["PORTAGE_GID"] = str(portage_gid)
-			self.backup_changes("PORTAGE_GID")
-
 			self.depcachedir = DEPCACHE_PATH
 			if eprefix:
 				# See comments about make.globals and EPREFIX
@@ -678,19 +727,60 @@ class config(object):
 				self["CBUILD"] = self["CHOST"]
 				self.backup_changes("CBUILD")
 
-			self["PORTAGE_BIN_PATH"] = PORTAGE_BIN_PATH
-			self.backup_changes("PORTAGE_BIN_PATH")
-			self["PORTAGE_PYM_PATH"] = PORTAGE_PYM_PATH
-			self.backup_changes("PORTAGE_PYM_PATH")
+			if "USERLAND" not in self:
+				# Set default USERLAND so that our test cases can assume that
+				# it's always set. This allows isolated-functions.sh to avoid
+				# calling uname -s when sourced.
+				system = platform.system()
+				if system is not None and \
+					(system.endswith("BSD") or system == "DragonFly"):
+					self["USERLAND"] = "BSD"
+				else:
+					self["USERLAND"] = "GNU"
+				self.backup_changes("USERLAND")
 
-			for var in ("PORTAGE_INST_UID", "PORTAGE_INST_GID"):
+			default_inst_ids = {
+				"PORTAGE_INST_GID": "0",
+				"PORTAGE_INST_UID": "0",
+			}
+
+			if eprefix:
+				# For prefix environments, default to the UID and GID of
+				# the top-level EROOT directory.
 				try:
-					self[var] = str(int(self.get(var, "0")))
+					eroot_st = os.stat(eroot)
+				except OSError:
+					pass
+				else:
+					default_inst_ids["PORTAGE_INST_GID"] = str(eroot_st.st_gid)
+					default_inst_ids["PORTAGE_INST_UID"] = str(eroot_st.st_uid)
+
+					if "PORTAGE_USERNAME" not in self:
+						try:
+							pwd_struct = pwd.getpwuid(eroot_st.st_uid)
+						except KeyError:
+							pass
+						else:
+							self["PORTAGE_USERNAME"] = pwd_struct.pw_name
+							self.backup_changes("PORTAGE_USERNAME")
+
+					if "PORTAGE_GRPNAME" not in self:
+						try:
+							grp_struct = grp.getgrgid(eroot_st.st_gid)
+						except KeyError:
+							pass
+						else:
+							self["PORTAGE_GRPNAME"] = grp_struct.gr_name
+							self.backup_changes("PORTAGE_GRPNAME")
+
+			for var, default_val in default_inst_ids.items():
+				try:
+					self[var] = str(int(self.get(var, default_val)))
 				except ValueError:
 					writemsg(_("!!! %s='%s' is not a valid integer.  "
-						"Falling back to '0'.\n") % (var, self[var]),
+						"Falling back to %s.\n") % (var, self[var], default_val),
 						noiselevel=-1)
-					self[var] = "0"
+					self[var] = default_val
 				self.backup_changes(var)
 
 			# initialize self.features
@@ -699,21 +789,33 @@ class config(object):
 			if bsd_chflags:
 				self.features.add('chflags')
 
-			if 'parse-eapi-ebuild-head' in self.features:
-				portage._validate_cache_for_unsupported_eapis = False
-
 			self._iuse_implicit_match = _iuse_implicit_match_cache(self)
 
 			self._validate_commands()
 
-		for k in self._case_insensitive_vars:
-			if k in self:
-				self[k] = self[k].lower()
-				self.backup_changes(k)
+			for k in self._case_insensitive_vars:
+				if k in self:
+					self[k] = self[k].lower()
+					self.backup_changes(k)
+
+			if main_repo is not None and not main_repo.sync:
+				main_repo_sync = self.get("SYNC")
+				if main_repo_sync:
+					main_repo.sync = main_repo_sync
+
+			# The first constructed config object initializes these modules,
+			# and subsequent calls to the _init() functions have no effect.
+			portage.output._init(config_root=self['PORTAGE_CONFIGROOT'])
+			portage.data._init(self)
 
 		if mycpv:
 			self.setcpv(mycpv)
 
+	@property
+	def mygcfg(self):
+		warnings.warn("portage.config.mygcfg is deprecated", stacklevel=3)
+		return {}
+
 	def _validate_commands(self):
 		for k in special_env_vars.validate_commands:
 			v = self.get(k)
@@ -817,11 +919,26 @@ class config(object):
 					writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
 						noiselevel=-1)
 
-		abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
-			PROFILE_PATH)
-		if (not self.profile_path or \
-			not os.path.exists(os.path.join(self.profile_path, "parent"))) and \
-			os.path.exists(os.path.join(self["PORTDIR"], "profiles")):
+		profile_broken = not self.profile_path or \
+			not os.path.exists(os.path.join(self.profile_path, "parent")) and \
+			os.path.exists(os.path.join(self["PORTDIR"], "profiles"))
+
+		if profile_broken:
+			abs_profile_path = None
+			for x in (PROFILE_PATH, 'etc/portage/make.profile'):
+				x = os.path.join(self["PORTAGE_CONFIGROOT"], x)
+				try:
+					os.lstat(x)
+				except OSError:
+					pass
+				else:
+					abs_profile_path = x
+					break
+
+			if abs_profile_path is None:
+				abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
+					PROFILE_PATH)
+
 			writemsg(_("\n\n!!! %s is not a symlink and will probably prevent most merges.\n") % abs_profile_path,
 				noiselevel=-1)
 			writemsg(_("!!! It should point into a profile within %s/profiles/\n") % self["PORTDIR"])
@@ -851,13 +968,32 @@ class config(object):
 			writemsg(_("!!! FEATURES=fakeroot is enabled, but the "
 				"fakeroot binary is not installed.\n"), noiselevel=-1)
 
+		if os.getuid() == 0 and not hasattr(os, "setgroups"):
+			warning_shown = False
+
+			if "userpriv" in self.features:
+				writemsg(_("!!! FEATURES=userpriv is enabled, but "
+					"os.setgroups is not available.\n"), noiselevel=-1)
+				warning_shown = True
+
+			if "userfetch" in self.features:
+				writemsg(_("!!! FEATURES=userfetch is enabled, but "
+					"os.setgroups is not available.\n"), noiselevel=-1)
+				warning_shown = True
+
+			if warning_shown and platform.python_implementation() == 'PyPy':
+				writemsg(_("!!! See https://bugs.pypy.org/issue833 for details.\n"),
+					noiselevel=-1)
+
 	def load_best_module(self,property_string):
 		best_mod = best_from_dict(property_string,self.modules,self.module_priority)
 		mod = None
 		try:
 			mod = load_mod(best_mod)
 		except ImportError:
-			if not best_mod.startswith("cache."):
+			if best_mod in self._module_aliases:
+				mod = load_mod(self._module_aliases[best_mod])
+			elif not best_mod.startswith("cache."):
 				raise
 			else:
 				best_mod = "portage." + best_mod
@@ -1099,8 +1235,11 @@ class config(object):
 				# packages since we want to save it PORTAGE_BUILT_USE for
 				# evaluating conditional USE deps in atoms passed via IPC to
 				# helpers like has_version and best_version.
+				aux_keys = set(aux_keys)
+				if hasattr(mydb, '_aux_cache_keys'):
+					aux_keys = aux_keys.intersection(mydb._aux_cache_keys)
+				aux_keys.add('USE')
 				aux_keys = list(aux_keys)
-				aux_keys.append('USE')
 				for k, v in zip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
 					pkg_configdict[k] = v
 				built_use = frozenset(pkg_configdict.pop('USE').split())
@@ -1115,7 +1254,7 @@ class config(object):
 			slot = pkg_configdict["SLOT"]
 			iuse = pkg_configdict["IUSE"]
 			if pkg is None:
-				cpv_slot = "%s:%s" % (self.mycpv, slot)
+				cpv_slot = _pkg_str(self.mycpv, slot=slot, repo=repository)
 			else:
 				cpv_slot = pkg
 			pkginternaluse = []
@@ -1462,6 +1601,9 @@ class config(object):
 		@return: A matching profile atom string or None if one is not found.
 		"""
 
+		warnings.warn("The config._getProfileMaskAtom() method is deprecated.",
+			DeprecationWarning, stacklevel=2)
+
 		cp = cpv_getkey(cpv)
 		profile_atoms = self.prevmaskdict.get(cp)
 		if profile_atoms:
@@ -1564,11 +1706,13 @@ class config(object):
 		@return: A list of properties that have not been accepted.
 		"""
 		accept_properties = self._accept_properties
+		if not hasattr(cpv, 'slot'):
+			cpv = _pkg_str(cpv, slot=metadata["SLOT"],
+				repo=metadata.get("repository"))
 		cp = cpv_getkey(cpv)
 		cpdict = self._ppropertiesdict.get(cp)
 		if cpdict:
-			cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
-			pproperties_list = ordered_by_atom_specificity(cpdict, cpv_slot, repo=metadata.get('repository'))
+			pproperties_list = ordered_by_atom_specificity(cpdict, cpv)
 			if pproperties_list:
 				accept_properties = list(self._accept_properties)
 				for x in pproperties_list:
@@ -1699,6 +1843,8 @@ class config(object):
 		env_d = getconfig(env_d_filename, expand=False)
 		if env_d:
 			# env_d will be None if profile.env doesn't exist.
+			for k in self._env_d_blacklist:
+				env_d.pop(k, None)
 			self.configdict["env.d"].update(env_d)
 
 	def regenerate(self, useonly=0, use_cache=None):
@@ -2025,25 +2171,43 @@ class config(object):
 				self._virtuals_manager._treeVirtuals = {}
 
 	def __delitem__(self,mykey):
-		self.modifying()
-		for x in self.lookuplist:
-			if x != None:
-				if mykey in x:
-					del x[mykey]
+		self.pop(mykey)
+
+	def __getitem__(self, key):
+		try:
+			return self._getitem(key)
+		except KeyError:
+			return '' # for backward compat, don't raise KeyError
+
+	def _getitem(self, mykey):
+
+		if mykey in self._constant_keys:
+			# These two point to temporary values when
+			# portage plans to update itself.
+			if mykey == "PORTAGE_BIN_PATH":
+				return portage._bin_path
+			elif mykey == "PORTAGE_PYM_PATH":
+				return portage._pym_path
+
+			elif mykey == "PORTAGE_GID":
+				return _unicode_decode(str(portage_gid))
 
-	def __getitem__(self,mykey):
 		for d in self.lookuplist:
-			if mykey in d:
+			try:
 				return d[mykey]
-		return '' # for backward compat, don't raise KeyError
+			except KeyError:
+				pass
+
+		raise KeyError(mykey)
 
 	def get(self, k, x=None):
-		for d in self.lookuplist:
-			if k in d:
-				return d[k]
-		return x
+		try:
+			return self._getitem(k)
+		except KeyError:
+			return x
 
 	def pop(self, key, *args):
+		self.modifying()
 		if len(args) > 1:
 			raise TypeError(
 				"pop expected at most 2 arguments, got " + \
@@ -2059,10 +2223,12 @@ class config(object):
 
 	def __contains__(self, mykey):
 		"""Called to implement membership test operators (in and not in)."""
-		for d in self.lookuplist:
-			if mykey in d:
-				return True
-		return False
+		try:
+			self._getitem(mykey)
+		except KeyError:
+			return False
+		else:
+			return True
 
 	def setdefault(self, k, x=None):
 		v = self.get(k)
@@ -2077,6 +2243,7 @@ class config(object):
 
 	def __iter__(self):
 		keys = set()
+		keys.update(self._constant_keys)
 		for d in self.lookuplist:
 			keys.update(d)
 		return iter(keys)
@@ -2086,7 +2253,7 @@ class config(object):
 
 	def iteritems(self):
 		for k in self:
-			yield (k, self[k])
+			yield (k, self._getitem(k))
 
 	def items(self):
 		return list(self.iteritems())
@@ -2168,8 +2335,14 @@ class config(object):
 		if not eapi_exports_merge_type(eapi):
 			mydict.pop("MERGE_TYPE", None)
 
-		# Prefix variables are supported starting with EAPI 3.
-		if phase == 'depend' or eapi is None or not eapi_supports_prefix(eapi):
+		# Prefix variables are supported beginning with EAPI 3, or when
+		# force-prefix is in FEATURES, since older EAPIs would otherwise be
+		# useless with prefix configurations. This brings compatibility with
+		# the prefix branch of portage, which also supports EPREFIX for all
+		# EAPIs (for obvious reasons).
+		if phase == 'depend' or \
+			('force-prefix' not in self.features and
+			eapi is not None and not eapi_supports_prefix(eapi)):
 			mydict.pop("ED", None)
 			mydict.pop("EPREFIX", None)
 			mydict.pop("EROOT", None)

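The mapping refactor in the config hunks above routes every read through a
single _getitem() over a stack of dicts, then derives __getitem__ (with the
historical ''-on-miss behavior), get() and __contains__ from it. An
illustrative reduction, not the full portage class:

class LayeredConfig(object):
	def __init__(self, *layers):
		self.lookuplist = list(layers)  # highest priority first

	def _getitem(self, key):
		for d in self.lookuplist:
			try:
				return d[key]
			except KeyError:
				pass
		raise KeyError(key)

	def __getitem__(self, key):
		try:
			return self._getitem(key)
		except KeyError:
			return ''  # backward compat: missing keys read as empty

	def get(self, key, default=None):
		try:
			return self._getitem(key)
		except KeyError:
			return default

	def __contains__(self, key):
		try:
			self._getitem(key)
		except KeyError:
			return False
		return True

cfg = LayeredConfig({"USE": "x"}, {"USE": "y", "ARCH": "amd64"})
assert cfg["USE"] == "x" and cfg["MISSING"] == '' and "ARCH" in cfg
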
diff --git a/portage_with_autodep/pym/portage/package/ebuild/config.pyo b/portage_with_autodep/pym/portage/package/ebuild/config.pyo
new file mode 100644
index 0000000..742ee2b
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/config.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.pyo b/portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.pyo
new file mode 100644
index 0000000..2b9362b
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/deprecated_profile_check.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py b/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py
index 1e34b14..8705639 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/digestcheck.py
@@ -8,7 +8,6 @@ import warnings
 from portage import os, _encodings, _unicode_decode
 from portage.exception import DigestException, FileNotFound
 from portage.localization import _
-from portage.manifest import Manifest
 from portage.output import EOutput
 from portage.util import writemsg
 
@@ -16,7 +15,7 @@ def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
 	"""
 	Verifies checksums. Assumes all files have been downloaded.
 	@rtype: int
-	@returns: 1 on success and 0 on failure
+	@return: 1 on success and 0 on failure
 	"""
 
 	if justmanifest is not None:
@@ -28,49 +27,33 @@ def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
 
 	if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
 		return 1
-	allow_missing = "allow-missing-manifests" in mysettings.features
 	pkgdir = mysettings["O"]
-	manifest_path = os.path.join(pkgdir, "Manifest")
-	if not os.path.exists(manifest_path):
-		if allow_missing:
-			return 1
-		writemsg(_("!!! Manifest file not found: '%s'\n") % manifest_path,
-			noiselevel=-1)
-		if strict:
-			return 0
-		else:
-			return 1
 	if mf is None:
-		mf = Manifest(pkgdir, mysettings["DISTDIR"])
-	manifest_empty = True
-	for d in mf.fhashdict.values():
-		if d:
-			manifest_empty = False
-			break
-	if manifest_empty:
-		writemsg(_("!!! Manifest is empty: '%s'\n") % manifest_path,
-			noiselevel=-1)
-		if strict:
-			return 0
-		else:
-			return 1
+		mf = mysettings.repositories.get_repo_for_location(
+			os.path.dirname(os.path.dirname(pkgdir)))
+		mf = mf.load_manifest(pkgdir, mysettings["DISTDIR"])
 	eout = EOutput()
 	eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
 	try:
-		if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
-			eout.ebegin(_("checking ebuild checksums ;-)"))
-			mf.checkTypeHashes("EBUILD")
-			eout.eend(0)
-			eout.ebegin(_("checking auxfile checksums ;-)"))
-			mf.checkTypeHashes("AUX")
-			eout.eend(0)
-			eout.ebegin(_("checking miscfile checksums ;-)"))
-			mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
-			eout.eend(0)
+		if not mf.thin and strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
+			if mf.fhashdict.get("EBUILD"):
+				eout.ebegin(_("checking ebuild checksums ;-)"))
+				mf.checkTypeHashes("EBUILD")
+				eout.eend(0)
+			if mf.fhashdict.get("AUX"):
+				eout.ebegin(_("checking auxfile checksums ;-)"))
+				mf.checkTypeHashes("AUX")
+				eout.eend(0)
+			if mf.fhashdict.get("MISC"):
+				eout.ebegin(_("checking miscfile checksums ;-)"))
+				mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
+				eout.eend(0)
 		for f in myfiles:
 			eout.ebegin(_("checking %s ;-)") % f)
 			ftype = mf.findFile(f)
 			if ftype is None:
+				if mf.allow_missing:
+					continue
 				eout.eend(1)
 				writemsg(_("\n!!! Missing digest for '%s'\n") % (f,),
 					noiselevel=-1)
@@ -90,7 +73,7 @@ def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
 		writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
 		writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
 		return 0
-	if allow_missing:
+	if mf.thin or mf.allow_missing:
 		# In this case we ignore any missing digests that
 		# would otherwise be detected below.
 		return 1

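The digestcheck rework above skips the whole-tree hash passes for thin
manifests and runs a per-type pass only when that type has recorded hashes.
A hedged sketch with a stub Manifest (the thin/fhashdict/checkTypeHashes
interface mirrors the calls in the hunk; the class itself is a stand-in):

class StubManifest(object):
	def __init__(self, thin=False, fhashdict=None):
		self.thin = thin
		self.fhashdict = fhashdict or {}
	def checkTypeHashes(self, ftype, ignoreMissingFiles=False):
		print("verifying %s hashes" % ftype)

def check_manifest_types(mf, strict=True):
	if mf.thin or not strict:
		return 1  # thin manifests carry no EBUILD/AUX/MISC entries
	for ftype in ("EBUILD", "AUX", "MISC"):
		if mf.fhashdict.get(ftype):  # skip types with nothing recorded
			mf.checkTypeHashes(ftype, ignoreMissingFiles=(ftype == "MISC"))
	return 1

check_manifest_types(StubManifest(fhashdict={"EBUILD": {"foo-1.ebuild": {}}}))
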
diff --git a/portage_with_autodep/pym/portage/package/ebuild/digestcheck.pyo b/portage_with_autodep/pym/portage/package/ebuild/digestcheck.pyo
new file mode 100644
index 0000000..66987a2
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/digestcheck.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/digestgen.py b/portage_with_autodep/pym/portage/package/ebuild/digestgen.py
index eb7210e..6ad3397 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/digestgen.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/digestgen.py
@@ -17,7 +17,6 @@ from portage.dep import use_reduce
 from portage.exception import InvalidDependString, FileNotFound, \
 	PermissionDenied, PortagePackageException
 from portage.localization import _
-from portage.manifest import Manifest
 from portage.output import colorize
 from portage.package.ebuild.fetch import fetch
 from portage.util import writemsg, writemsg_stdout
@@ -34,7 +33,7 @@ def digestgen(myarchives=None, mysettings=None, myportdb=None):
 	@param myportdb: a portdbapi instance
 	@type myportdb: portdbapi
 	@rtype: int
-	@returns: 1 on success and 0 on failure
+	@return: 1 on success and 0 on failure
 	"""
 	if mysettings is None or myportdb is None:
 		raise TypeError("portage.digestgen(): 'mysettings' and 'myportdb' parameters are required.")
@@ -52,9 +51,21 @@ def digestgen(myarchives=None, mysettings=None, myportdb=None):
 				del e
 				return 0
 		mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
-		manifest1_compat = False
-		mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
-			fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
+		try:
+			mf = mysettings.repositories.get_repo_for_location(mytree)
+		except KeyError:
+			# backward compatibility
+			mytree = os.path.realpath(mytree)
+			mf = mysettings.repositories.get_repo_for_location(mytree)
+
+		mf = mf.load_manifest(mysettings["O"], mysettings["DISTDIR"],
+			fetchlist_dict=fetchlist_dict)
+
+		if not mf.allow_create:
+			writemsg_stdout(_(">>> Skipping Manifest creation for %s; "
+				"the repository is configured not to use Manifests\n") % mysettings["O"])
+			return 1
+
 		# Don't require all hashes since that can trigger excessive
 		# fetches when sufficient digests already exist.  To ease transition
 		# while Manifest 1 is being removed, only require hashes that will
@@ -102,8 +113,6 @@ def digestgen(myarchives=None, mysettings=None, myportdb=None):
 					continue
 
 		if missing_files:
-				mytree = os.path.realpath(os.path.dirname(
-					os.path.dirname(mysettings["O"])))
 				for myfile in missing_files:
 					uris = set()
 					all_restrict = set()
@@ -139,8 +148,7 @@ def digestgen(myarchives=None, mysettings=None, myportdb=None):
 					if not fetch({myfile : uris}, mysettings):
 						myebuild = os.path.join(mysettings["O"],
 							catsplit(cpv)[1] + ".ebuild")
-						spawn_nofetch(myportdb, myebuild,
-							settings=mysettings)
+						spawn_nofetch(myportdb, myebuild)
 						writemsg(_("!!! Fetch failed for %s, can't update "
 							"Manifest\n") % myfile, noiselevel=-1)
 						if myfile in dist_hashes and \
@@ -183,8 +191,6 @@ def digestgen(myarchives=None, mysettings=None, myportdb=None):
 					os.path.join(mysettings["DISTDIR"], filename)):
 					auto_assumed.append(filename)
 			if auto_assumed:
-				mytree = os.path.realpath(
-					os.path.dirname(os.path.dirname(mysettings["O"])))
 				cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
 				pkgs = myportdb.cp_list(cp, mytree=mytree)
 				pkgs.sort()

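The KeyError fallback above exists because repository locations are keyed
by canonical path; on a miss, the lookup is retried with os.path.realpath().
A minimal sketch (repositories is a stand-in with the same
get_repo_for_location contract as portage's repositories object):

import os

def repo_for_tree(repositories, mytree):
	try:
		return repositories.get_repo_for_location(mytree)
	except KeyError:
		# mytree may contain symlinks; canonicalize and retry once
		return repositories.get_repo_for_location(os.path.realpath(mytree))
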
diff --git a/portage_with_autodep/pym/portage/package/ebuild/digestgen.pyo b/portage_with_autodep/pym/portage/package/ebuild/digestgen.pyo
new file mode 100644
index 0000000..66876ec
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/digestgen.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/doebuild.py b/portage_with_autodep/pym/portage/package/ebuild/doebuild.py
index c76c1ed..610172f 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/doebuild.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/doebuild.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = ['doebuild', 'doebuild_environment', 'spawn', 'spawnebuild']
@@ -10,7 +10,6 @@ from itertools import chain
 import logging
 import os as _os
 import re
-import shutil
 import signal
 import stat
 import sys
@@ -31,8 +30,8 @@ portage.proxy.lazyimport.lazyimport(globals(),
 )
 
 from portage import auxdbkeys, bsd_chflags, \
-	eapi_is_supported, merge, os, selinux, \
-	unmerge, _encodings, _parse_eapi_ebuild_head, _os_merge, \
+	eapi_is_supported, merge, os, selinux, shutil, \
+	unmerge, _encodings, _os_merge, \
 	_shell_quote, _unicode_decode, _unicode_encode
 from portage.const import EBUILD_SH_ENV_FILE, EBUILD_SH_ENV_DIR, \
 	EBUILD_SH_BINARY, INVALID_ENV_FILE, MISC_SH_BINARY
@@ -42,16 +41,16 @@ from portage.dbapi.porttree import _parse_uri_map
 from portage.dep import Atom, check_required_use, \
 	human_readable_required_use, paren_enclose, use_reduce
 from portage.eapi import eapi_exports_KV, eapi_exports_merge_type, \
-	eapi_exports_replace_vars, eapi_has_required_use, \
-	eapi_has_src_prepare_and_src_configure, eapi_has_pkg_pretend
-from portage.elog import elog_process
+	eapi_exports_replace_vars, eapi_exports_REPOSITORY, \
+	eapi_has_required_use, eapi_has_src_prepare_and_src_configure, \
+	eapi_has_pkg_pretend
+from portage.elog import elog_process, _preload_elog_modules
 from portage.elog.messages import eerror, eqawarn
 from portage.exception import DigestException, FileNotFound, \
 	IncorrectParameter, InvalidDependString, PermissionDenied, \
 	UnsupportedAPIException
 from portage.localization import _
-from portage.manifest import Manifest
-from portage.output import style_to_ansi_code
+from portage.output import colormap
 from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
 from portage.util import apply_recursive_permissions, \
 	apply_secpass_permissions, noiselimit, normalize_path, \
@@ -116,6 +115,38 @@ def _spawn_phase(phase, settings, actionmap=None, **kwargs):
 	ebuild_phase.wait()
 	return ebuild_phase.returncode
 
+def _doebuild_path(settings, eapi=None):
+	"""
+	Generate the PATH variable.
+	"""
+
+	# Note: PORTAGE_BIN_PATH may differ from the global constant
+	# when portage is reinstalling itself.
+	portage_bin_path = settings["PORTAGE_BIN_PATH"]
+	eprefix = settings["EPREFIX"]
+	prerootpath = [x for x in settings.get("PREROOTPATH", "").split(":") if x]
+	rootpath = [x for x in settings.get("ROOTPATH", "").split(":") if x]
+
+	prefixes = []
+	if eprefix:
+		prefixes.append(eprefix)
+	prefixes.append("/")
+
+	path = []
+
+	if eapi not in (None, "0", "1", "2", "3"):
+		path.append(os.path.join(portage_bin_path, "ebuild-helpers", "4"))
+
+	path.append(os.path.join(portage_bin_path, "ebuild-helpers"))
+	path.extend(prerootpath)
+
+	for prefix in prefixes:
+		for x in ("usr/local/sbin", "usr/local/bin", "usr/sbin", "usr/bin", "sbin", "bin"):
+			path.append(os.path.join(prefix, x))
+
+	path.extend(rootpath)
+	settings["PATH"] = ":".join(path)
+
 def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
 	debug=False, use_cache=None, db=None):
 	"""
@@ -143,20 +174,32 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
 	ebuild_path = os.path.abspath(myebuild)
 	pkg_dir     = os.path.dirname(ebuild_path)
 	mytree = os.path.dirname(os.path.dirname(pkg_dir))
-
-	if "CATEGORY" in mysettings.configdict["pkg"]:
-		cat = mysettings.configdict["pkg"]["CATEGORY"]
-	else:
-		cat = os.path.basename(normalize_path(os.path.join(pkg_dir, "..")))
-
 	mypv = os.path.basename(ebuild_path)[:-7]
-
-	mycpv = cat+"/"+mypv
-	mysplit = _pkgsplit(mypv)
+	mysplit = _pkgsplit(mypv, eapi=mysettings.configdict["pkg"].get("EAPI"))
 	if mysplit is None:
 		raise IncorrectParameter(
 			_("Invalid ebuild path: '%s'") % myebuild)
 
+	if mysettings.mycpv is not None and \
+		mysettings.configdict["pkg"].get("PF") == mypv and \
+		"CATEGORY" in mysettings.configdict["pkg"]:
+		# Assume that a matching PF implies that we've got
+		# the correct CATEGORY, though this is not really
+		# a solid assumption since it's possible (though
+		# unlikely) that two packages in different
+		# categories have the same PF. Callers should call
+		# setcpv or create a clean clone of a locked config
+		# instance in order to ensure that this assumption
+		# does not fail like in bug #408817.
+		cat = mysettings.configdict["pkg"]["CATEGORY"]
+		mycpv = mysettings.mycpv
+	elif os.path.basename(pkg_dir) in (mysplit[0], mypv):
+		# portdbapi or vardbapi
+		cat = os.path.basename(os.path.dirname(pkg_dir))
+		mycpv = cat + "/" + mypv
+	else:
+		raise AssertionError("unable to determine CATEGORY")
+
 	# Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
 	# so that the caller can override it.
 	tmpdir = mysettings["PORTAGE_TMPDIR"]
@@ -208,11 +251,11 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
 	mysettings["FILESDIR"] = pkg_dir+"/files"
 	mysettings["PF"]       = mypv
 
-	if hasattr(mydbapi, '_repo_info'):
-		repo_info = mydbapi._repo_info[mytree]
-		mysettings['PORTDIR'] = repo_info.portdir
-		mysettings['PORTDIR_OVERLAY'] = repo_info.portdir_overlay
-		mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"] = repo_info.name
+	if hasattr(mydbapi, 'repositories'):
+		repo = mydbapi.repositories.get_repo_for_location(mytree)
+		mysettings['PORTDIR'] = repo.eclass_db.porttrees[0]
+		mysettings['PORTDIR_OVERLAY'] = ' '.join(repo.eclass_db.porttrees[1:])
+		mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"] = repo.name
 
 	mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
 	mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
@@ -235,16 +278,6 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
 	else:
 		mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
 
-	if "PATH" in mysettings:
-		mysplit=mysettings["PATH"].split(":")
-	else:
-		mysplit=[]
-	# Note: PORTAGE_BIN_PATH may differ from the global constant
-	# when portage is reinstalling itself.
-	portage_bin_path = mysettings["PORTAGE_BIN_PATH"]
-	if portage_bin_path not in mysplit:
-		mysettings["PATH"] = portage_bin_path + ":" + mysettings["PATH"]
-
 	# All temporary directories should be subdirectories of
 	# $PORTAGE_TMPDIR/portage, since it's common for /tmp and /var/tmp
 	# to be mounted with the "noexec" option (see bug #346899).
@@ -268,7 +301,9 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
 	mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
 
 	# Prefix forward compatibility
-	mysettings["ED"] = mysettings["D"]
+	eprefix_lstrip = mysettings["EPREFIX"].lstrip(os.sep)
+	mysettings["ED"] = os.path.join(
+		mysettings["D"], eprefix_lstrip).rstrip(os.sep) + os.sep
 
 	mysettings["PORTAGE_BASHRC"] = os.path.join(
 		mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE)
@@ -276,37 +311,41 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
 		mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_DIR)
 
 	# Allow color.map to control colors associated with einfo, ewarn, etc...
-	mycolors = []
-	for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET"):
-		mycolors.append("%s=$'%s'" % \
-			(c, style_to_ansi_code(c)))
-	mysettings["PORTAGE_COLORMAP"] = "\n".join(mycolors)
-
-	# All EAPI dependent code comes last, so that essential variables
-	# like PORTAGE_BUILDDIR are still initialized even in cases when
+	mysettings["PORTAGE_COLORMAP"] = colormap()
+
+	if "COLUMNS" not in mysettings:
+		# Set COLUMNS, in order to prevent unnecessary stty calls
+		# inside the set_colors function of isolated-functions.sh.
+		# We cache the result in os.environ, in order to avoid
+		# multiple stty calls in cases when get_term_size() falls
+		# back to stty due to a missing or broken curses module.
+		columns = os.environ.get("COLUMNS")
+		if columns is None:
+			rows, columns = portage.output.get_term_size()
+			if columns < 1:
+				# Force a sane value for COLUMNS, so that tools
+				# like ls don't complain (see bug #394091).
+				columns = 80
+			columns = str(columns)
+			os.environ["COLUMNS"] = columns
+		mysettings["COLUMNS"] = columns
+
+	# EAPI is always known here, even for the "depend" phase, because
+	# EbuildMetadataPhase gets it from _parse_eapi_ebuild_head().
+	eapi = mysettings.configdict['pkg']['EAPI']
+	_doebuild_path(mysettings, eapi=eapi)
+
+	# All EAPI dependent code comes last, so that essential variables like
+	# PATH and PORTAGE_BUILDDIR are still initialized even in cases when
 	# UnsupportedAPIException needs to be raised, which can be useful
 	# when uninstalling a package that has corrupt EAPI metadata.
-	eapi = None
-	if mydo == 'depend' and 'EAPI' not in mysettings.configdict['pkg']:
-		if eapi is None and 'parse-eapi-ebuild-head' in mysettings.features:
-			eapi = _parse_eapi_ebuild_head(
-				io.open(_unicode_encode(ebuild_path,
-				encoding=_encodings['fs'], errors='strict'),
-				mode='r', encoding=_encodings['content'], errors='replace'))
-
-		if eapi is not None:
-			if not eapi_is_supported(eapi):
-				raise UnsupportedAPIException(mycpv, eapi)
-			mysettings.configdict['pkg']['EAPI'] = eapi
+	if not eapi_is_supported(eapi):
+		raise UnsupportedAPIException(mycpv, eapi)
 
-	if mydo != "depend":
-		# Metadata vars such as EAPI and RESTRICT are
-		# set by the above config.setcpv() call.
-		eapi = mysettings["EAPI"]
-		if not eapi_is_supported(eapi):
-			# can't do anything with this.
-			raise UnsupportedAPIException(mycpv, eapi)
+	if eapi_exports_REPOSITORY(eapi) and "PORTAGE_REPO_NAME" in mysettings.configdict["pkg"]:
+		mysettings.configdict["pkg"]["REPOSITORY"] = mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"]
 
+	if mydo != "depend":
 		if hasattr(mydbapi, "getFetchMap") and \
 			("A" not in mysettings.configdict["pkg"] or \
 			"AA" not in mysettings.configdict["pkg"]):
@@ -331,22 +370,41 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
 			else:
 				mysettings.configdict["pkg"]["AA"] = " ".join(uri_map)
 
-	if not eapi_exports_KV(eapi):
-		# Discard KV for EAPIs that don't support it. Cache KV is restored
-		# from the backupenv whenever config.reset() is called.
-		mysettings.pop('KV', None)
-	elif mydo != 'depend' and 'KV' not in mysettings and \
-		mydo in ('compile', 'config', 'configure', 'info',
-		'install', 'nofetch', 'postinst', 'postrm', 'preinst',
-		'prepare', 'prerm', 'setup', 'test', 'unpack'):
-		mykv, err1 = ExtractKernelVersion(
-			os.path.join(mysettings['EROOT'], "usr/src/linux"))
-		if mykv:
-			# Regular source tree
-			mysettings["KV"] = mykv
-		else:
-			mysettings["KV"] = ""
-		mysettings.backup_changes("KV")
+		ccache = "ccache" in mysettings.features
+		distcc = "distcc" in mysettings.features
+		if ccache or distcc:
+			# Use default ABI libdir in accordance with bug #355283.
+			libdir = None
+			default_abi = mysettings.get("DEFAULT_ABI")
+			if default_abi:
+				libdir = mysettings.get("LIBDIR_" + default_abi)
+			if not libdir:
+				libdir = "lib"
+
+			if distcc:
+				mysettings["PATH"] = os.path.join(os.sep, eprefix_lstrip,
+					 "usr", libdir, "distcc", "bin") + ":" + mysettings["PATH"]
+
+			if ccache:
+				mysettings["PATH"] = os.path.join(os.sep, eprefix_lstrip,
+					 "usr", libdir, "ccache", "bin") + ":" + mysettings["PATH"]
+
+		if not eapi_exports_KV(eapi):
+			# Discard KV for EAPIs that don't support it. Cached KV is restored
+			# from the backupenv whenever config.reset() is called.
+			mysettings.pop('KV', None)
+		elif 'KV' not in mysettings and \
+			mydo in ('compile', 'config', 'configure', 'info',
+			'install', 'nofetch', 'postinst', 'postrm', 'preinst',
+			'prepare', 'prerm', 'setup', 'test', 'unpack'):
+			mykv, err1 = ExtractKernelVersion(
+				os.path.join(mysettings['EROOT'], "usr/src/linux"))
+			if mykv:
+				# Regular source tree
+				mysettings["KV"] = mykv
+			else:
+				mysettings["KV"] = ""
+			mysettings.backup_changes("KV")
 
 _doebuild_manifest_cache = None
 _doebuild_broken_ebuilds = set()
@@ -356,7 +414,7 @@ _doebuild_commands_without_builddir = (
 	'fetch', 'fetchall', 'help', 'manifest'
 )
 
-def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
+def doebuild(myebuild, mydo, _unused=None, settings=None, debug=0, listonly=0,
 	fetchonly=0, cleanup=0, dbkey=None, use_cache=1, fetchall=0, tree=None,
 	mydbapi=None, vartree=None, prev_mtimes=None,
 	fd_pipes=None, returnpid=False):
@@ -368,10 +426,10 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 	@type myebuild: String
 	@param mydo: Phase to run
 	@type mydo: String
-	@param myroot: $ROOT (usually '/', see man make.conf)
-	@type myroot: String
-	@param mysettings: Portage Configuration
-	@type mysettings: instance of portage.config
+	@param _unused: Deprecated (use settings["ROOT"] instead)
+	@type _unused: String
+	@param settings: Portage Configuration
+	@type settings: instance of portage.config
 	@param debug: Turns on various debug information (eg, debug for spawn)
 	@type debug: Boolean
 	@param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
@@ -403,7 +461,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 		caller clean up all returned PIDs.
 	@type returnpid: Boolean
 	@rtype: Boolean
-	@returns:
+	@return:
 	1. 0 for success
 	2. 1 for error
 	
@@ -414,7 +472,18 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 	Other variables may not be strictly required, many have defaults that are set inside of doebuild.
 	
 	"""
-	
+
+	if settings is None:
+		raise TypeError("settings parameter is required")
+	mysettings = settings
+	myroot = settings['EROOT']
+
+	if _unused is not None and _unused != mysettings['EROOT']:
+		warnings.warn("The third parameter of the "
+		warnings.warn("The third parameter of "
+			"portage.doebuild() is now unused. Use "
+			"settings['ROOT'] instead.",
+
 	if not tree:
 		writemsg("Warning: tree not specified to doebuild\n")
 		tree = "porttree"
@@ -432,6 +501,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 	"install":["test"],
 	"rpm":    ["install"],
 	"package":["install"],
+	"merge"  :["install"],
 	}
 	
 	if mydbapi is None:
@@ -480,21 +550,28 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 		return 1
 
 	global _doebuild_manifest_cache
+	pkgdir = os.path.dirname(myebuild)
+	manifest_path = os.path.join(pkgdir, "Manifest")
+	if tree == "porttree":
+		repo_config = mysettings.repositories.get_repo_for_location(
+			os.path.dirname(os.path.dirname(pkgdir)))
+	else:
+		repo_config = None
+
 	mf = None
 	if "strict" in features and \
 		"digest" not in features and \
 		tree == "porttree" and \
+		not repo_config.thin_manifest and \
 		mydo not in ("digest", "manifest", "help") and \
-		not portage._doebuild_manifest_exempt_depend:
+		not portage._doebuild_manifest_exempt_depend and \
+		not (repo_config.allow_missing_manifest and not os.path.exists(manifest_path)):
 		# Always verify the ebuild checksums before executing it.
 		global _doebuild_broken_ebuilds
 
 		if myebuild in _doebuild_broken_ebuilds:
 			return 1
 
-		pkgdir = os.path.dirname(myebuild)
-		manifest_path = os.path.join(pkgdir, "Manifest")
-
 		# Avoid checking the same Manifest several times in a row during a
 		# regen with an empty cache.
 		if _doebuild_manifest_cache is None or \
@@ -505,7 +582,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 				out.eerror(_("Manifest not found for '%s'") % (myebuild,))
 				_doebuild_broken_ebuilds.add(myebuild)
 				return 1
-			mf = Manifest(pkgdir, mysettings["DISTDIR"])
+			mf = repo_config.load_manifest(pkgdir, mysettings["DISTDIR"])
 
 		else:
 			mf = _doebuild_manifest_cache
@@ -513,10 +590,12 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 		try:
 			mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
 		except KeyError:
-			out = portage.output.EOutput()
-			out.eerror(_("Missing digest for '%s'") % (myebuild,))
-			_doebuild_broken_ebuilds.add(myebuild)
-			return 1
+			if not (mf.allow_missing and
+				os.path.basename(myebuild) not in mf.fhashdict["EBUILD"]):
+				out = portage.output.EOutput()
+				out.eerror(_("Missing digest for '%s'") % (myebuild,))
+				_doebuild_broken_ebuilds.add(myebuild)
+				return 1
 		except FileNotFound:
 			out = portage.output.EOutput()
 			out.eerror(_("A file listed in the Manifest "
@@ -536,7 +615,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 		if mf.getFullname() in _doebuild_broken_manifests:
 			return 1
 
-		if mf is not _doebuild_manifest_cache:
+		if mf is not _doebuild_manifest_cache and not mf.allow_missing:
 
 			# Make sure that all of the ebuilds are
 			# actually listed in the Manifest.
@@ -553,8 +632,8 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 					_doebuild_broken_manifests.add(manifest_path)
 					return 1
 
-			# Only cache it if the above stray files test succeeds.
-			_doebuild_manifest_cache = mf
+		# We cache it only after all above checks succeed.
+		_doebuild_manifest_cache = mf
 
 	logfile=None
 	builddir_lock = None
@@ -594,7 +673,6 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 				if builddir_lock is not None:
 					builddir_lock.unlock()
 
-		restrict = set(mysettings.get('PORTAGE_RESTRICT', '').split())
 		# get possible slot information from the deps file
 		if mydo == "depend":
 			writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
@@ -654,6 +732,13 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 			if rval != os.EX_OK:
 				return rval
 
+		else:
+			# FEATURES=noauto only makes sense for porttree, and we don't want
+			# it to trigger redundant sourcing of the ebuild for API consumers
+			# that are using binary packages
+			if "noauto" in mysettings.features:
+				mysettings.features.discard("noauto")
+
 		# The info phase is special because it uses mkdtemp so any
 		# user (not necessarily in the portage group) can run it.
 		if mydo not in ('info',) and \
@@ -666,6 +751,73 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 			return unmerge(mysettings["CATEGORY"],
 				mysettings["PF"], myroot, mysettings, vartree=vartree)
 
+		phases_to_run = set()
+		if "noauto" in mysettings.features or \
+			mydo not in actionmap_deps:
+			phases_to_run.add(mydo)
+		else:
+			phase_stack = [mydo]
+			while phase_stack:
+				x = phase_stack.pop()
+				if x in phases_to_run:
+					continue
+				phases_to_run.add(x)
+				phase_stack.extend(actionmap_deps.get(x, []))
+			del phase_stack
+
+		alist = set(mysettings.configdict["pkg"].get("A", "").split())
+
+		unpacked = False
+		if tree != "porttree":
+			pass
+		elif "unpack" not in phases_to_run:
+			unpacked = os.path.exists(os.path.join(
+				mysettings["PORTAGE_BUILDDIR"], ".unpacked"))
+		else:
+			try:
+				workdir_st = os.stat(mysettings["WORKDIR"])
+			except OSError:
+				pass
+			else:
+				newstuff = False
+				if not os.path.exists(os.path.join(
+					mysettings["PORTAGE_BUILDDIR"], ".unpacked")):
+					writemsg_stdout(_(
+						">>> Not marked as unpacked; recreating WORKDIR...\n"))
+					newstuff = True
+				else:
+					for x in alist:
+						writemsg_stdout(">>> Checking %s's mtime...\n" % x)
+						try:
+							x_st = os.stat(os.path.join(
+								mysettings["DISTDIR"], x))
+						except OSError:
+							# file not fetched yet
+							x_st = None
+
+						if x_st is None or x_st.st_mtime > workdir_st.st_mtime:
+							writemsg_stdout(_(">>> Timestamp of "
+								"%s has changed; recreating WORKDIR...\n") % x)
+							newstuff = True
+							break
+
+				if newstuff:
+					if builddir_lock is None and \
+						'PORTAGE_BUILDIR_LOCKED' not in mysettings:
+						builddir_lock = EbuildBuildDir(
+							scheduler=PollScheduler().sched_iface,
+							settings=mysettings)
+						builddir_lock.lock()
+					try:
+						_spawn_phase("clean", mysettings)
+					finally:
+						if builddir_lock is not None:
+							builddir_lock.unlock()
+							builddir_lock = None
+				else:
+					writemsg_stdout(_(">>> WORKDIR is up-to-date, keeping...\n"))
+					unpacked = True
+
 		# Build directory creation isn't required for any of these.
 		# In the fetch phase, the directory is needed only for RESTRICT=fetch
 		# in order to satisfy the sane $PWD requirement (from bug #239560)
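
The phases_to_run loop above is a transitive closure over the phase
dependency map, computed with an explicit stack. The same logic on a toy
actionmap (the real map is the actionmap_deps dict defined earlier in
doebuild):

actionmap_deps = {
	"setup": [], "unpack": ["setup"], "prepare": ["unpack"],
	"configure": ["prepare"], "compile": ["configure"],
	"test": ["compile"], "install": ["test"],
}

def phases_to_run(target):
	todo, seen = [target], set()
	while todo:
		phase = todo.pop()
		if phase in seen:
			continue
		seen.add(phase)
		todo.extend(actionmap_deps.get(phase, []))
	return seen

assert phases_to_run("compile") == {
	"setup", "unpack", "prepare", "configure", "compile"}
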
@@ -739,10 +891,9 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 		# Only try and fetch the files if we are going to need them ...
 		# otherwise, if user has FEATURES=noauto and they run `ebuild clean
 		# unpack compile install`, we will try and fetch 4 times :/
-		need_distfiles = tree == "porttree" and \
+		need_distfiles = tree == "porttree" and not unpacked and \
 			(mydo in ("fetch", "unpack") or \
 			mydo not in ("digest", "manifest") and "noauto" not in features)
-		alist = set(mysettings.configdict["pkg"].get("A", "").split())
 		if need_distfiles:
 
 			src_uri, = mydbapi.aux_get(mysettings.mycpv,
@@ -783,10 +934,14 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 					return 0
 				return 1
 
-		if mydo == "fetch":
+		if need_distfiles:
 			# Files are already checked inside fetch(),
 			# so do not check them again.
 			checkme = []
+		elif unpacked:
+			# The unpack phase is marked as complete, so it
+			# would be wasteful to check distfiles again.
+			checkme = []
 		else:
 			checkme = alist
 
@@ -845,7 +1000,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 				# this phase. This can raise PermissionDenied if
 				# the current user doesn't have write access to $PKGDIR.
 				if hasattr(portage, 'db'):
-					bintree = portage.db[mysettings["ROOT"]]["bintree"]
+					bintree = portage.db[mysettings['EROOT']]['bintree']
 					mysettings["PORTAGE_BINPKG_TMPFILE"] = \
 						bintree.getname(mysettings.mycpv) + \
 						".%s" % (os.getpid(),)
@@ -866,6 +1021,13 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 				if mydo == "package" and bintree is not None:
 					bintree.inject(mysettings.mycpv,
 						filename=mysettings["PORTAGE_BINPKG_TMPFILE"])
+			else:
+				if "PORTAGE_BINPKG_TMPFILE" in mysettings:
+					try:
+						os.unlink(mysettings["PORTAGE_BINPKG_TMPFILE"])
+					except OSError:
+						pass
+
 		elif mydo=="qmerge":
 			# check to ensure install was run.  this *only* pops up when users
 			# forget it and are using ebuild
@@ -877,6 +1039,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 			# qmerge is a special phase that implies noclean.
 			if "noclean" not in mysettings.features:
 				mysettings.features.add("noclean")
+			_handle_self_update(mysettings, vartree.dbapi)
 			#qmerge is specifically not supposed to do a runtime dep check
 			retval = merge(
 				mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
@@ -893,6 +1056,7 @@ def doebuild(myebuild, mydo, myroot, mysettings, debug=0, listonly=0,
 				# so that it's only called once.
 				elog_process(mysettings.mycpv, mysettings)
 			if retval == os.EX_OK:
+				_handle_self_update(mysettings, vartree.dbapi)
 				retval = merge(mysettings["CATEGORY"], mysettings["PF"],
 					mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
 					"build-info"), myroot, mysettings,
@@ -944,10 +1108,31 @@ def _check_temp_dir(settings):
 	# As some people use a separate PORTAGE_TMPDIR mount,
 	# we prefer that, since the checks below would otherwise
 	# be pointless for those people.
-	if os.path.exists(os.path.join(settings["PORTAGE_TMPDIR"], "portage")):
-		checkdir = os.path.join(settings["PORTAGE_TMPDIR"], "portage")
+	tmpdir = os.path.realpath(settings["PORTAGE_TMPDIR"])
+	if os.path.exists(os.path.join(tmpdir, "portage")):
+		checkdir = os.path.realpath(os.path.join(tmpdir, "portage"))
+		if ("sandbox" in settings.features or
+			"usersandbox" in settings.features) and \
+			not checkdir.startswith(tmpdir + os.sep):
+			msg = _("The 'portage' subdirectory of the directory "
+			"referenced by the PORTAGE_TMPDIR variable appears to be "
+			"a symlink. In order to avoid sandbox violations (see bug "
+			"#378379), you must adjust PORTAGE_TMPDIR instead of using "
+			"the symlink located at '%s'. A suitable PORTAGE_TMPDIR "
+			"setting would be '%s'.") % \
+			(os.path.join(tmpdir, "portage"), checkdir)
+			lines = []
+			lines.append("")
+			lines.append("")
+			lines.extend(wrap(msg, 72))
+			lines.append("")
+			for line in lines:
+				if line:
+					line = "!!! %s" % (line,)
+				writemsg("%s\n" % (line,), noiselevel=-1)
+			return 1
 	else:
-		checkdir = settings["PORTAGE_TMPDIR"]
+		checkdir = tmpdir
 
 	if not os.access(checkdir, os.W_OK):
 		writemsg(_("%s is not writable.\n"
@@ -955,8 +1140,7 @@ def _check_temp_dir(settings):
 			noiselevel=-1)
 		return 1
 
-	else:
-		fd = tempfile.NamedTemporaryFile(prefix="exectest-", dir=checkdir)
+	with tempfile.NamedTemporaryFile(prefix="exectest-", dir=checkdir) as fd:
 		os.chmod(fd.name, 0o755)
 		if not os.access(fd.name, os.X_OK):
 			writemsg(_("Can not execute files in %s\n"
@@ -1085,7 +1269,8 @@ def _validate_deps(mysettings, myroot, mydo, mydbapi):
 	all_keys.add("SRC_URI")
 	all_keys = tuple(all_keys)
 	metadata = dict(zip(all_keys,
-		mydbapi.aux_get(mysettings.mycpv, all_keys)))
+		mydbapi.aux_get(mysettings.mycpv, all_keys,
+		myrepo=mysettings.get("PORTAGE_REPO_NAME"))))
 
 	class FakeTree(object):
 		def __init__(self, mydb):
@@ -1173,7 +1358,7 @@ def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakero
 	@param keywords: Extra options encoded as a dict, to be passed to spawn
 	@type keywords: Dictionary
 	@rtype: Integer
-	@returns:
+	@return:
 	1. The return code of the spawned process.
 	"""
 
@@ -1201,7 +1386,8 @@ def spawn(mystring, mysettings, debug=0, free=0, droppriv=0, sesandbox=0, fakero
 	# fake ownership/permissions will have to be converted to real
 	# permissions in the merge phase.
 	fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
-	if droppriv and not uid and portage_gid and portage_uid:
+	if droppriv and uid == 0 and portage_gid and portage_uid and \
+		hasattr(os, "setgroups"):
 		keywords.update({"uid":portage_uid,"gid":portage_gid,
 			"groups":userpriv_groups,"umask":0o02})
 	if not free:
@@ -1277,6 +1463,17 @@ def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
 	if mydo == "pretend" and not eapi_has_pkg_pretend(eapi):
 		return os.EX_OK
 
+	if not (mydo == "install" and "noauto" in mysettings.features):
+		check_file = os.path.join(
+			mysettings["PORTAGE_BUILDDIR"], ".%sed" % mydo.rstrip('e'))
+		if os.path.exists(check_file):
+			writemsg_stdout(_(">>> It appears that "
+				"'%(action)s' has already executed for '%(pkg)s'; skipping.\n") %
+				{"action":mydo, "pkg":mysettings["PF"]})
+			writemsg_stdout(_(">>> Remove '%(file)s' to force %(action)s.\n") %
+				{"file":check_file, "action":mydo})
+			return os.EX_OK
+
 	return _spawn_phase(mydo, mysettings,
 		actionmap=actionmap, logfile=logfile,
 		fd_pipes=fd_pipes, returnpid=returnpid)
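
The skip check above derives its marker-file name by stripping a trailing
"e" from the phase name before appending "ed", which yields past-tense
stamp files (note that rstrip('e') removes every trailing "e", which is
harmless for these phase names):

for mydo in ("unpack", "compile", "configure", "install"):
	print("%s -> .%sed" % (mydo, mydo.rstrip('e')))
# unpack -> .unpacked, compile -> .compiled,
# configure -> .configured, install -> .installed
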
@@ -1285,13 +1482,14 @@ _post_phase_cmds = {
 
 	"install" : [
 		"install_qa_check",
-		"install_symlink_html_docs"],
+		"install_symlink_html_docs",
+		"install_hooks"],
 
 	"preinst" : [
 		"preinst_sfperms",
 		"preinst_selinux_labels",
 		"preinst_suid_scan",
-		"preinst_mask"]
+		]
 }
 
 def _post_phase_userpriv_perms(mysettings):
@@ -1320,7 +1518,9 @@ def _check_build_log(mysettings, out=None):
 	except EnvironmentError:
 		return
 
+	f_real = None
 	if logfile.endswith('.gz'):
+		f_real = f
 		f =  gzip.GzipFile(filename='', mode='rb', fileobj=f)
 
 	am_maintainer_mode = []
@@ -1425,19 +1625,32 @@ def _check_build_log(mysettings, out=None):
 		msg.extend("\t" + line for line in make_jobserver)
 		_eqawarn(msg)
 
+	f.close()
+	if f_real is not None:
+		f_real.close()
+
 def _post_src_install_chost_fix(settings):
 	"""
 	It's possible that the ebuild has changed the
 	CHOST variable, so revert it to the initial
-	setting.
+	setting. Also, revert IUSE in case it's corrupted
+	due to local environment settings like in bug #386829.
 	"""
-	if settings.get('CATEGORY') == 'virtual':
-		return
 
-	chost = settings.get('CHOST')
-	if chost:
-		write_atomic(os.path.join(settings['PORTAGE_BUILDDIR'],
-			'build-info', 'CHOST'), chost + '\n')
+	build_info_dir = os.path.join(settings['PORTAGE_BUILDDIR'], 'build-info')
+
+	for k in ('IUSE',):
+		v = settings.get(k)
+		if v is not None:
+			write_atomic(os.path.join(build_info_dir, k), v + '\n')
+
+	# The following variables are irrelevant for virtual packages.
+	if settings.get('CATEGORY') != 'virtual':
+
+		for k in ('CHOST',):
+			v = settings.get(k)
+			if v is not None:
+				write_atomic(os.path.join(build_info_dir, k), v + '\n')
 
 _vdb_use_conditional_keys = ('DEPEND', 'LICENSE', 'PDEPEND',
 	'PROPERTIES', 'PROVIDE', 'RDEPEND', 'RESTRICT',)
@@ -1481,6 +1694,7 @@ def _post_src_install_uid_fix(mysettings, out):
 	_preinst_bsdflags(mysettings)
 
 	destdir = mysettings["D"]
+	ed_len = len(mysettings["ED"])
 	unicode_errors = []
 
 	while True:
@@ -1499,12 +1713,12 @@ def _post_src_install_uid_fix(mysettings, out):
 				new_parent = _unicode_decode(parent,
 					encoding=_encodings['merge'], errors='replace')
 				new_parent = _unicode_encode(new_parent,
-					encoding=_encodings['merge'], errors='backslashreplace')
+					encoding='ascii', errors='backslashreplace')
 				new_parent = _unicode_decode(new_parent,
 					encoding=_encodings['merge'], errors='replace')
 				os.rename(parent, new_parent)
 				unicode_error = True
-				unicode_errors.append(new_parent[len(destdir):])
+				unicode_errors.append(new_parent[ed_len:])
 				break
 
 			for fname in chain(dirs, files):
@@ -1517,13 +1731,13 @@ def _post_src_install_uid_fix(mysettings, out):
 					new_fname = _unicode_decode(fname,
 						encoding=_encodings['merge'], errors='replace')
 					new_fname = _unicode_encode(new_fname,
-						encoding=_encodings['merge'], errors='backslashreplace')
+						encoding='ascii', errors='backslashreplace')
 					new_fname = _unicode_decode(new_fname,
 						encoding=_encodings['merge'], errors='replace')
 					new_fpath = os.path.join(parent, new_fname)
 					os.rename(fpath, new_fpath)
 					unicode_error = True
-					unicode_errors.append(new_fpath[len(destdir):])
+					unicode_errors.append(new_fpath[ed_len:])
 					fname = new_fname
 					fpath = new_fpath
 				else:
@@ -1597,20 +1811,24 @@ def _post_src_install_uid_fix(mysettings, out):
 
 	if unicode_errors:
 		for l in _merge_unicode_error(unicode_errors):
-			eerror(l, phase='install', key=mysettings.mycpv, out=out)
+			eqawarn(l, phase='install', key=mysettings.mycpv, out=out)
 
 	build_info_dir = os.path.join(mysettings['PORTAGE_BUILDDIR'],
 		'build-info')
 
-	io.open(_unicode_encode(os.path.join(build_info_dir,
+	f = io.open(_unicode_encode(os.path.join(build_info_dir,
 		'SIZE'), encoding=_encodings['fs'], errors='strict'),
 		mode='w', encoding=_encodings['repo.content'],
-		errors='strict').write(_unicode_decode(str(size) + '\n'))
+		errors='strict')
+	f.write(_unicode_decode(str(size) + '\n'))
+	f.close()
 
-	io.open(_unicode_encode(os.path.join(build_info_dir,
+	f = io.open(_unicode_encode(os.path.join(build_info_dir,
 		'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'),
 		mode='w', encoding=_encodings['repo.content'],
-		errors='strict').write(_unicode_decode("%.0f\n" % (time.time(),)))
+		errors='strict')
+	f.write(_unicode_decode("%.0f\n" % (time.time(),)))
+	f.close()
 
 	use = frozenset(mysettings['PORTAGE_USE'].split())
 	for k in _vdb_use_conditional_keys:
@@ -1636,10 +1854,12 @@ def _post_src_install_uid_fix(mysettings, out):
 			except OSError:
 				pass
 			continue
-		io.open(_unicode_encode(os.path.join(build_info_dir,
+		f = io.open(_unicode_encode(os.path.join(build_info_dir,
 			k), encoding=_encodings['fs'], errors='strict'),
 			mode='w', encoding=_encodings['repo.content'],
-			errors='strict').write(_unicode_decode(v + '\n'))
+			errors='strict')
+		f.write(_unicode_decode(v + '\n'))
+		f.close()
 
 	_reapply_bsdflags_to_image(mysettings)
 
@@ -1664,15 +1884,46 @@ def _post_src_install_soname_symlinks(mysettings, out):
 	needed_filename = os.path.join(mysettings["PORTAGE_BUILDDIR"],
 		"build-info", "NEEDED.ELF.2")
 
+	f = None
 	try:
-		lines = io.open(_unicode_encode(needed_filename,
+		f = io.open(_unicode_encode(needed_filename,
 			encoding=_encodings['fs'], errors='strict'),
 			mode='r', encoding=_encodings['repo.content'],
-			errors='replace').readlines()
+			errors='replace')
+		lines = f.readlines()
 	except IOError as e:
 		if e.errno not in (errno.ENOENT, errno.ESTALE):
 			raise
 		return
+	finally:
+		if f is not None:
+			f.close()
+
+	qa_no_symlink = ""
+	f = None
+	try:
+		f = io.open(_unicode_encode(os.path.join(
+			mysettings["PORTAGE_BUILDDIR"],
+			"build-info", "QA_SONAME_NO_SYMLINK"),
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['repo.content'],
+			errors='replace')
+		qa_no_symlink = f.read()
+	except IOError as e:
+		if e.errno not in (errno.ENOENT, errno.ESTALE):
+			raise
+	finally:
+		if f is not None:
+			f.close()
+
+	qa_no_symlink = qa_no_symlink.split()
+	if qa_no_symlink:
+		if len(qa_no_symlink) > 1:
+			qa_no_symlink = "|".join("(%s)" % x for x in qa_no_symlink)
+			qa_no_symlink = "^(%s)$" % qa_no_symlink
+		else:
+			qa_no_symlink = "^%s$" % qa_no_symlink[0]
+		qa_no_symlink = re.compile(qa_no_symlink)
 
 	libpaths = set(portage.util.getlibpaths(
 		mysettings["ROOT"], env=mysettings))
@@ -1730,6 +1981,8 @@ def _post_src_install_soname_symlinks(mysettings, out):
 			continue
 		if not is_libdir(os.path.dirname(obj)):
 			continue
+		if qa_no_symlink and qa_no_symlink.match(obj.strip(os.sep)) is not None:
+			continue
 
 		obj_file_path = os.path.join(image_dir, obj.lstrip(os.sep))
 		sym_file_path = os.path.join(os.path.dirname(obj_file_path), soname)
@@ -1746,8 +1999,7 @@ def _post_src_install_soname_symlinks(mysettings, out):
 	if not missing_symlinks:
 		return
 
-	qa_msg = ["QA Notice: Missing soname symlink(s) " + \
-		"will be automatically created:"]
+	qa_msg = ["QA Notice: Missing soname symlink(s):"]
 	qa_msg.append("")
 	qa_msg.extend("\t%s -> %s" % (os.path.join(
 		os.path.dirname(obj).lstrip(os.sep), soname),
@@ -1757,20 +2009,11 @@ def _post_src_install_soname_symlinks(mysettings, out):
 	for line in qa_msg:
 		eqawarn(line, key=mysettings.mycpv, out=out)
 
-	_preinst_bsdflags(mysettings)
-	for obj, soname in missing_symlinks:
-		obj_file_path = os.path.join(image_dir, obj.lstrip(os.sep))
-		sym_file_path = os.path.join(os.path.dirname(obj_file_path), soname)
-		os.symlink(os.path.basename(obj_file_path), sym_file_path)
-	_reapply_bsdflags_to_image(mysettings)
-
 def _merge_unicode_error(errors):
 	lines = []
 
-	msg = _("This package installs one or more file names containing "
-		"characters that do not match your current locale "
-		"settings. The current setting for filesystem encoding is '%s'.") \
-		% _encodings['merge']
+	msg = _("QA Notice: This package installs one or more file names "
+		"containing characters that are not encoded with the UTF-8 encoding.")
 	lines.extend(wrap(msg, 72))
 
 	lines.append("")
@@ -1778,14 +2021,55 @@ def _merge_unicode_error(errors):
 	lines.extend("\t" + x for x in errors)
 	lines.append("")
 
-	if _encodings['merge'].lower().replace('_', '').replace('-', '') != 'utf8':
-		msg = _("For best results, UTF-8 encoding is recommended. See "
-			"the Gentoo Linux Localization Guide for instructions "
-			"about how to configure your locale for UTF-8 encoding:")
-		lines.extend(wrap(msg, 72))
-		lines.append("")
-		lines.append("\t" + \
-			"http://www.gentoo.org/doc/en/guide-localization.xml")
-		lines.append("")
-
 	return lines
+
+def _prepare_self_update(settings):
+	"""
+	Call this when portage is updating itself, in order to create
+	temporary copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH, since
+	the new versions may be incompatible. An atexit hook will
+	automatically clean up the temporary copies.
+	"""
+
+	# sanity check: ensure that this routine only runs once
+	if portage._bin_path != portage.const.PORTAGE_BIN_PATH:
+		return
+
+	# Load lazily referenced portage submodules into memory,
+	# so imports won't fail during portage upgrade/downgrade.
+	_preload_elog_modules(settings)
+	portage.proxy.lazyimport._preload_portage_submodules()
+
+	# Make the temp directory inside $PORTAGE_TMPDIR/portage, since
+	# it's common for /tmp and /var/tmp to be mounted with the
+	# "noexec" option (see bug #346899).
+	build_prefix = os.path.join(settings["PORTAGE_TMPDIR"], "portage")
+	portage.util.ensure_dirs(build_prefix)
+	base_path_tmp = tempfile.mkdtemp(
+		"", "._portage_reinstall_.", build_prefix)
+	portage.process.atexit_register(shutil.rmtree, base_path_tmp)
+
+	orig_bin_path = portage._bin_path
+	portage._bin_path = os.path.join(base_path_tmp, "bin")
+	shutil.copytree(orig_bin_path, portage._bin_path, symlinks=True)
+
+	orig_pym_path = portage._pym_path
+	portage._pym_path = os.path.join(base_path_tmp, "pym")
+	shutil.copytree(orig_pym_path, portage._pym_path, symlinks=True)
+
+	for dir_path in (base_path_tmp, portage._bin_path, portage._pym_path):
+		os.chmod(dir_path, 0o755)
+
+def _handle_self_update(settings, vardb):
+	cpv = settings.mycpv
+	if settings["ROOT"] == "/" and \
+		portage.dep.match_from_list(
+		portage.const.PORTAGE_PACKAGE_ATOM, [cpv]):
+		inherited = frozenset(settings.get('INHERITED', '').split())
+		if not vardb.cpv_exists(cpv) or \
+			'9999' in cpv or \
+			'git' in inherited or \
+			'git-2' in inherited:
+			_prepare_self_update(settings)
+			return True
+	return False

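_prepare_self_update() above snapshots the running tool's own code before
the merge overwrites it. A condensed sketch using stdlib atexit in place of
portage.process.atexit_register (the path and the module-global indirection
are simplified stand-ins):

import atexit, os, shutil, tempfile

_bin_path = "/usr/lib/portage/bin"  # illustrative original location

def prepare_self_update(tmp_parent):
	global _bin_path
	base = tempfile.mkdtemp("", "._portage_reinstall_.", tmp_parent)
	atexit.register(shutil.rmtree, base, ignore_errors=True)
	new_bin = os.path.join(base, "bin")
	shutil.copytree(_bin_path, new_bin, symlinks=True)
	for d in (base, new_bin):
		os.chmod(d, 0o755)  # mkdtemp creates 0o700; build phases need access
	_bin_path = new_bin  # later lookups resolve into the temp copy
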
diff --git a/portage_with_autodep/pym/portage/package/ebuild/doebuild.pyo b/portage_with_autodep/pym/portage/package/ebuild/doebuild.pyo
new file mode 100644
index 0000000..a6ebb1d
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/doebuild.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/fetch.py b/portage_with_autodep/pym/portage/package/ebuild/fetch.py
index 5cbbf87..b795b28 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/fetch.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/fetch.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
@@ -10,7 +10,6 @@ import io
 import logging
 import random
 import re
-import shutil
 import stat
 import sys
 import tempfile
@@ -24,7 +23,7 @@ portage.proxy.lazyimport.lazyimport(globals(),
 	'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
 )
 
-from portage import OrderedDict, os, selinux, _encodings, \
+from portage import OrderedDict, os, selinux, shutil, _encodings, \
 	_shell_quote, _unicode_encode
 from portage.checksum import hashfunc_map, perform_md5, verify_all
 from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
@@ -34,7 +33,6 @@ from portage.exception import FileNotFound, OperationNotPermitted, \
 	PortageException, TryAgain
 from portage.localization import _
 from portage.locks import lockfile, unlockfile
-from portage.manifest import Manifest
 from portage.output import colorize, EOutput
 from portage.util import apply_recursive_permissions, \
 	apply_secpass_permissions, ensure_dirs, grabdict, shlex_split, \
@@ -48,6 +46,9 @@ _userpriv_spawn_kwargs = (
 	("umask",  0o02),
 )
 
+def _hide_url_passwd(url):
+	return re.sub(r'//(.+):.+@(.+)', r'//\1:*password*@\2', url)
+
 def _spawn_fetch(settings, args, **kwargs):
 	"""
 	Spawn a process with appropriate settings for fetching, including
@@ -68,7 +69,8 @@ def _spawn_fetch(settings, args, **kwargs):
 		}
 
 	if "userfetch" in settings.features and \
-		os.getuid() == 0 and portage_gid and portage_uid:
+		os.getuid() == 0 and portage_gid and portage_uid and \
+		hasattr(os, "setgroups"):
 		kwargs.update(_userpriv_spawn_kwargs)
 
 	spawn_func = spawn
@@ -356,7 +358,8 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
 		allow_missing_digests = True
 	pkgdir = mysettings.get("O")
 	if digests is None and not (pkgdir is None or skip_manifest):
-		mydigests = Manifest(
+		mydigests = mysettings.repositories.get_repo_for_location(
+			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
 			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
 	elif digests is None or skip_manifest:
 		# no digests because fetch was not called for a specific package
@@ -612,18 +615,6 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
 					elif userfetch:
 						has_space = False
 
-			if not has_space:
-				writemsg(_("!!! Insufficient space to store %s in %s\n") % \
-					(myfile, mysettings["DISTDIR"]), noiselevel=-1)
-
-				if has_space_superuser:
-					writemsg(_("!!! Insufficient privileges to use "
-						"remaining space.\n"), noiselevel=-1)
-					if userfetch:
-						writemsg(_("!!! You may set FEATURES=\"-userfetch\""
-							" in /etc/make.conf in order to fetch with\n"
-							"!!! superuser privileges.\n"), noiselevel=-1)
-
 			if distdir_writable and use_locks:
 
 				lock_kwargs = {}
@@ -646,7 +637,10 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
 				match, mystat = _check_distfile(
 					myfile_path, pruned_digests, eout)
 				if match:
-					if distdir_writable:
+					# Skip permission adjustment for symlinks, since we don't
+					# want to modify anything outside of the primary DISTDIR,
+					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
+					if distdir_writable and not os.path.islink(myfile_path):
 						try:
 							apply_secpass_permissions(myfile_path,
 								gid=portage_gid, mode=0o664, mask=0o2,
@@ -727,6 +721,20 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
 						os.symlink(readonly_file, myfile_path)
 						continue
 
+				# this message is shown only after we know that
+				# the file is not already fetched
+				if not has_space:
+					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
+						(myfile, mysettings["DISTDIR"]), noiselevel=-1)
+
+					if has_space_superuser:
+						writemsg(_("!!! Insufficient privileges to use "
+							"remaining space.\n"), noiselevel=-1)
+						if userfetch:
+							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
+								" in /etc/make.conf in order to fetch with\n"
+								"!!! superuser privileges.\n"), noiselevel=-1)
+
 				if fsmirrors and not os.path.exists(myfile_path) and has_space:
 					for mydir in fsmirrors:
 						mirror_file = os.path.join(mydir, myfile)
@@ -746,14 +754,18 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
 						raise
 					del e
 				else:
-					try:
-						apply_secpass_permissions(
-							myfile_path, gid=portage_gid, mode=0o664, mask=0o2,
-							stat_cached=mystat)
-					except PortageException as e:
-						if not os.access(myfile_path, os.R_OK):
-							writemsg(_("!!! Failed to adjust permissions:"
-								" %s\n") % str(e), noiselevel=-1)
+					# Skip permission adjustment for symlinks, since we don't
+					# want to modify anything outside of the primary DISTDIR,
+					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
+					if not os.path.islink(myfile_path):
+						try:
+							apply_secpass_permissions(myfile_path,
+								gid=portage_gid, mode=0o664, mask=0o2,
+								stat_cached=mystat)
+						except PortageException as e:
+							if not os.access(myfile_path, os.R_OK):
+								writemsg(_("!!! Failed to adjust permissions:"
+									" %s\n") % (e,), noiselevel=-1)
 
 					# If the file is empty then it's obviously invalid. Remove
 					# the empty file and try to download if possible.
@@ -940,7 +952,7 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
 						locfetch=fetchcommand
 						command_var = fetchcommand_var
 					writemsg_stdout(_(">>> Downloading '%s'\n") % \
-						re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
+						_hide_url_passwd(loc))
 					variables = {
 						"DISTDIR": mysettings["DISTDIR"],
 						"URI":     loc,
@@ -1019,18 +1031,19 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
 								# Fetch failed... Try the next one... Kill 404 files though.
 								if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
 									html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
-									if html404.search(io.open(
+									with io.open(
 										_unicode_encode(myfile_path,
 										encoding=_encodings['fs'], errors='strict'),
 										mode='r', encoding=_encodings['content'], errors='replace'
-										).read()):
-										try:
-											os.unlink(mysettings["DISTDIR"]+"/"+myfile)
-											writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
-											fetched = 0
-											continue
-										except (IOError, OSError):
-											pass
+										) as f:
+										if html404.search(f.read()):
+											try:
+												os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
+												fetched = 0
+												continue
+											except (IOError, OSError):
+												pass
 								fetched = 1
 								continue
 							if True:
@@ -1040,7 +1053,6 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
 								# from another mirror...
 								verified_ok,reason = verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
 								if not verified_ok:
-									print(reason)
 									writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
 										noiselevel=-1)
 									writemsg(_("!!! Reason: %s\n") % reason[0],

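The fetch.py hunks above do three things: they skip permission fixups on symlinks (which usually point outside the primary DISTDIR into PORTAGE_RO_DISTDIRS), they factor the inline password-masking regex into a _hide_url_passwd() helper, and they wrap the 404-page sniffing in a with-statement so the handle is closed even on early exit. A minimal standalone sketch of the last two patterns; the function names and the exact regexes here are illustrative approximations, not the portage implementations:

    import io
    import re

    def hide_url_passwd(url):
        # Mask the password in scheme://user:password@host/... URLs so it
        # never shows up in ">>> Downloading ..." output.
        return re.sub(r'//([^:/]+):[^@/]+@', r'//\1:*password*@', url)

    def looks_like_404(path, encoding='utf-8'):
        # Small files that are really HTML error pages are a common fetch
        # failure mode; sniff the <title> for "404" or "not found".
        html404 = re.compile('<title>.*(not found|404).*</title>', re.I | re.M)
        with io.open(path, mode='r', encoding=encoding, errors='replace') as f:
            return html404.search(f.read()) is not None

    print(hide_url_passwd('http://user:secret@mirror.example/foo.tar.gz'))
    # -> http://user:*password*@mirror.example/foo.tar.gz
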
diff --git a/portage_with_autodep/pym/portage/package/ebuild/fetch.pyo b/portage_with_autodep/pym/portage/package/ebuild/fetch.pyo
new file mode 100644
index 0000000..3bd81df
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/fetch.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py b/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py
index f2af638..8a88c2f 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.py
@@ -83,7 +83,13 @@ def getmaskingreason(mycpv, metadata=None, settings=None,
 	pmasklists = []
 	for profile in locations:
 		pmask_filename = os.path.join(profile, "package.mask")
-		pmasklists.append((pmask_filename, grablines(pmask_filename, recursive=1)))
+		node = None
+		for l, recursive_filename in grablines(pmask_filename,
+			recursive=1, remember_source_file=True):
+			if node is None or node[0] != recursive_filename:
+				node = (recursive_filename, [])
+				pmasklists.append(node)
+			node[1].append(l)
 
 	pmaskdict = settings._mask_manager._pmaskdict
 	if mycp in pmaskdict:

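The getmaskingreason.py hunk above stops treating each profile's package.mask as one flat list and instead groups lines by the file they were actually read from, since recursive=1 can pull in a whole directory of mask files. The grouping idiom in isolation -- the pairs below stand in for what portage's grablines(..., remember_source_file=True) yields, with lines from the same file adjacent:

    def group_by_source(pairs):
        # Turn an ordered stream of (line, source_file) pairs into
        # [(source_file, [lines...]), ...], starting a new node whenever
        # the source file changes.
        groups = []
        node = None
        for line, source_file in pairs:
            if node is None or node[0] != source_file:
                node = (source_file, [])
                groups.append(node)
            node[1].append(line)
        return groups

    pairs = [
        ('# masked for testing', 'package.mask/00-base'),
        ('=sys-apps/foo-1.0', 'package.mask/00-base'),
        ('=sys-apps/bar-2.0', 'package.mask/10-extra'),
    ]
    assert group_by_source(pairs) == [
        ('package.mask/00-base', ['# masked for testing', '=sys-apps/foo-1.0']),
        ('package.mask/10-extra', ['=sys-apps/bar-2.0']),
    ]
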
diff --git a/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.pyo b/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.pyo
new file mode 100644
index 0000000..1614244
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/getmaskingreason.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py b/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py
index 4c65fcc..9bf605d 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = ['getmaskingstatus']
@@ -7,11 +7,9 @@ import sys
 
 import portage
 from portage import eapi_is_supported, _eapi_is_deprecated
-from portage.dep import match_from_list, _slot_separator, _repo_separator
 from portage.localization import _
 from portage.package.ebuild.config import config
-from portage.versions import catpkgsplit, cpv_getkey
-from _emerge.Package import Package
+from portage.versions import catpkgsplit, _pkg_str
 
 if sys.hexversion >= 0x3000000:
 	basestring = str
@@ -53,9 +51,6 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
 		metadata = pkg.metadata
 		installed = pkg.installed
 
-	mysplit = catpkgsplit(mycpv)
-	if not mysplit:
-		raise ValueError(_("invalid CPV: %s") % mycpv)
 	if metadata is None:
 		db_keys = list(portdb._aux_cache_keys)
 		try:
@@ -70,11 +65,14 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
 		else:
 			metadata["USE"] = ""
 
-	rValue = []
+	if not hasattr(mycpv, 'slot'):
+		try:
+			mycpv = _pkg_str(mycpv, slot=metadata['SLOT'],
+				repo=metadata.get('repository'))
+		except portage.exception.InvalidData:
+			raise ValueError(_("invalid CPV: %s") % mycpv)
 
-	# profile checking
-	if settings._getProfileMaskAtom(mycpv, metadata):
-		rValue.append(_MaskReason("profile", "profile"))
+	rValue = []
 
 	# package.mask checking
 	if settings._getMaskAtom(mycpv, metadata):
@@ -85,8 +83,6 @@ def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
 	mygroups = settings._getKeywords(mycpv, metadata)
 	licenses = metadata["LICENSE"]
 	properties = metadata["PROPERTIES"]
-	if eapi.startswith("-"):
-		eapi = eapi[1:]
 	if not eapi_is_supported(eapi):
 		return [_MaskReason("EAPI", "EAPI %s" % eapi)]
 	elif _eapi_is_deprecated(eapi) and not installed:

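The getmaskingstatus.py hunk above drops the catpkgsplit() pre-check in favor of _pkg_str, a str subclass that carries slot and repo attributes, so the mask helpers can read metadata straight off the cpv they are matching. A rough sketch of the idea (a simplified stand-in; portage's real _pkg_str also validates the category/version syntax and raises InvalidData):

    class PkgStr(str):
        # Compares and hashes as the plain 'cat/pkg-ver' string, but also
        # exposes the metadata that matching code wants to consult.
        def __new__(cls, cpv, slot=None, repo=None):
            self = str.__new__(cls, cpv)
            self.slot = slot
            self.repo = repo
            return self

    mycpv = 'sys-apps/portage-2.1.10'
    if not hasattr(mycpv, 'slot'):
        mycpv = PkgStr(mycpv, slot='0', repo='gentoo')

    assert mycpv == 'sys-apps/portage-2.1.10'
    assert (mycpv.slot, mycpv.repo) == ('0', 'gentoo')
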
diff --git a/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.pyo b/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.pyo
new file mode 100644
index 0000000..9cf1d9d
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/getmaskingstatus.pyo differ

diff --git a/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py b/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py
index 616dc2e..b8fbdc5 100644
--- a/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py
+++ b/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.py
@@ -5,11 +5,11 @@ __all__ = ['prepare_build_dirs']
 
 import errno
 import gzip
-import shutil
 import stat
 import time
 
-from portage import os, _encodings, _unicode_encode, _unicode_decode
+import portage
+from portage import os, shutil, _encodings, _unicode_encode, _unicode_decode
 from portage.data import portage_gid, portage_uid, secpass
 from portage.exception import DirectoryNotFound, FileNotFound, \
 	OperationNotPermitted, PermissionDenied, PortageException
@@ -118,11 +118,13 @@ def _adjust_perms_msg(settings, msg):
 	background = settings.get("PORTAGE_BACKGROUND") == "1"
 	log_path = settings.get("PORTAGE_LOG_FILE")
 	log_file = None
+	log_file_real = None
 
 	if background and log_path is not None:
 		try:
 			log_file = open(_unicode_encode(log_path,
 				encoding=_encodings['fs'], errors='strict'), mode='ab')
+			log_file_real = log_file
 		except IOError:
 			def write(msg):
 				pass
@@ -139,6 +141,8 @@ def _adjust_perms_msg(settings, msg):
 	finally:
 		if log_file is not None:
 			log_file.close()
+			if log_file_real is not log_file:
+				log_file_real.close()
 
 def _prepare_features_dirs(mysettings):
 
@@ -311,7 +315,7 @@ def _prepare_workdir(mysettings):
 		logdir = normalize_path(mysettings["PORT_LOGDIR"])
 		logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
 		if not os.path.exists(logid_path):
-			open(_unicode_encode(logid_path), 'w')
+			open(_unicode_encode(logid_path), 'w').close()
 		logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S",
 			time.gmtime(os.stat(logid_path).st_mtime)),
 			encoding=_encodings['content'], errors='replace')
@@ -342,13 +346,31 @@ def _prepare_workdir(mysettings):
 				writemsg(_unicode_decode("!!! %s: %s\n") %
 					(_("Permission Denied"), log_subdir), noiselevel=-1)
 
+	tmpdir_log_path = os.path.join(
+		mysettings["T"], "build.log%s" % compress_log_ext)
 	if not logdir_subdir_ok:
 		# NOTE: When sesandbox is enabled, the local SELinux security policies
 		# may not allow output to be piped out of the sesandbox domain. The
 		# current policy will allow it to work when a pty is available, but
 		# not through a normal pipe. See bug #162404.
-		mysettings["PORTAGE_LOG_FILE"] = os.path.join(
-			mysettings["T"], "build.log%s" % compress_log_ext)
+		mysettings["PORTAGE_LOG_FILE"] = tmpdir_log_path
+	else:
+		# Create a symlink from tmpdir_log_path to PORTAGE_LOG_FILE, as
+		# requested in bug #412865.
+		make_new_symlink = False
+		try:
+			target = os.readlink(tmpdir_log_path)
+		except OSError:
+			make_new_symlink = True
+		else:
+			if target != mysettings["PORTAGE_LOG_FILE"]:
+				make_new_symlink = True
+		if make_new_symlink:
+			try:
+				os.unlink(tmpdir_log_path)
+			except OSError:
+				pass
+			os.symlink(mysettings["PORTAGE_LOG_FILE"], tmpdir_log_path)
 
 def _ensure_log_subdirs(logdir, subdir):
 	"""
@@ -358,13 +380,27 @@ def _ensure_log_subdirs(logdir, subdir):
 	and subdir are assumed to be normalized absolute paths.
 	"""
 	st = os.stat(logdir)
+	uid = -1
 	gid = st.st_gid
 	grp_mode = 0o2070 & st.st_mode
 
+	# If logdir is writable by the portage group but its uid
+	# is not portage_uid, then set the uid to portage_uid if
+	# we have privileges to do so, for compatibility with our
+	# default logrotate config (see bug 378451). With the
+	# "su portage portage" directive and logrotate-3.8.0,
+	# logrotate's chown call during the compression phase will
+	# only succeed if the log file's uid is portage_uid.
+	if grp_mode and gid == portage_gid and \
+		portage.data.secpass >= 2:
+		uid = portage_uid
+		if st.st_uid != portage_uid:
+			ensure_dirs(logdir, uid=uid)
+
 	logdir_split_len = len(logdir.split(os.sep))
 	subdir_split = subdir.split(os.sep)[logdir_split_len:]
 	subdir_split.reverse()
 	current = logdir
 	while subdir_split:
 		current = os.path.join(current, subdir_split.pop())
-		ensure_dirs(current, gid=gid, mode=grp_mode, mask=0)
+		ensure_dirs(current, uid=uid, gid=gid, mode=grp_mode, mask=0)

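The _prepare_workdir() hunk above links $T/build.log to the real PORTAGE_LOG_FILE (bug #412865) and is careful to re-create the symlink only when it is missing or points at the wrong target, so repeated builds in the same directory stay idempotent. The same pattern as a standalone helper (a sketch; the tempdir demo paths are made up):

    import os
    import tempfile

    def ensure_symlink(target, link_path):
        # Re-create link_path -> target only when needed; a link that
        # already points at target is left untouched.
        try:
            current = os.readlink(link_path)
        except OSError:
            make_new_symlink = True   # missing, or not a symlink
        else:
            make_new_symlink = current != target
        if make_new_symlink:
            try:
                os.unlink(link_path)
            except OSError:
                pass                  # nothing to remove
            os.symlink(target, link_path)

    tmpdir = tempfile.mkdtemp()
    target = os.path.join(tmpdir, 'cat:pkg-1.0:20120101-000000.log')
    link = os.path.join(tmpdir, 'build.log')
    ensure_symlink(target, link)
    ensure_symlink(target, link)   # second call is a no-op
    assert os.readlink(link) == target
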
diff --git a/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.pyo b/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.pyo
new file mode 100644
index 0000000..2dcfaea
Binary files /dev/null and b/portage_with_autodep/pym/portage/package/ebuild/prepare_build_dirs.pyo differ

diff --git a/portage_with_autodep/pym/portage/process.py b/portage_with_autodep/pym/portage/process.py
index 6866a2f..d7d1037 100644
--- a/portage_with_autodep/pym/portage/process.py
+++ b/portage_with_autodep/pym/portage/process.py
@@ -1,9 +1,11 @@
 # portage.py -- core Portage functionality
-# Copyright 1998-2010 Gentoo Foundation
+# Copyright 1998-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 
 import atexit
+import errno
+import platform
 import signal
 import sys
 import traceback
@@ -32,6 +34,18 @@ if os.path.isdir("/proc/%i/fd" % os.getpid()):
 	def get_open_fds():
 		return (int(fd) for fd in os.listdir("/proc/%i/fd" % os.getpid()) \
 			if fd.isdigit())
+
+	if platform.python_implementation() == 'PyPy':
+		# EAGAIN observed with PyPy 1.8.
+		_get_open_fds = get_open_fds
+		def get_open_fds():
+			try:
+				return _get_open_fds()
+			except OSError as e:
+				if e.errno != errno.EAGAIN:
+					raise
+				return range(max_fd_limit)
+
 else:
 	def get_open_fds():
 		return range(max_fd_limit)
@@ -257,7 +271,7 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
 
 	pid = os.fork()
 
-	if not pid:
+	if pid == 0:
 		try:
 			_exec(binary, mycommand, opt_name, fd_pipes,
 			      env, gid, groups, uid, umask, pre_exec)
@@ -272,6 +286,9 @@ def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
 			sys.stderr.flush()
 			os._exit(1)
 
+	if not isinstance(pid, int):
+		raise AssertionError("fork returned non-integer: %s" % (repr(pid),))
+
 	# Add the pid to our local and the global pid lists.
 	mypids.append(pid)
 	spawned_pids.append(pid)
@@ -350,13 +367,18 @@ def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
 	@param pre_exec: A function to be called with no arguments just prior to the exec call.
 	@type pre_exec: callable
 	@rtype: None
-	@returns: Never returns (calls os.execve)
+	@return: Never returns (calls os.execve)
 	"""
 	
 	# If the process we're creating hasn't been given a name
 	# assign it the name of the executable.
 	if not opt_name:
-		opt_name = os.path.basename(binary)
+		if binary is portage._python_interpreter:
+			# NOTE: PyPy 1.7 will die due to "library path not found" if argv[0]
+			# does not contain the full path of the binary.
+			opt_name = binary
+		else:
+			opt_name = os.path.basename(binary)
 
 	# Set up the command's argument list.
 	myargs = [opt_name]
@@ -391,8 +413,20 @@ def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
 	# And switch to the new process.
 	os.execve(binary, myargs, env)
 
-def _setup_pipes(fd_pipes):
-	"""Setup pipes for a forked process."""
+def _setup_pipes(fd_pipes, close_fds=True):
+	"""Set up pipes for a forked process.
+
+	WARNING: When not followed by exec, the close_fds behavior
+	can trigger interference from destructors that close file
+	descriptors. This interference happens when the garbage
+	collector intermittently executes such destructors after their
+	corresponding file descriptors have been re-used, leading
+	to intermittent "[Errno 9] Bad file descriptor" exceptions in
+	forked processes. This problem has been observed with PyPy 1.8,
+	and also with CPython under some circumstances (as triggered
+	by xmpppy in bug #374335). In order to close a safe subset of
+	file descriptors, see portage.locks._close_fds().
+	"""
 	my_fds = {}
 	# To protect from cases where direct assignment could
 	# clobber needed fds ({1:2, 2:1}) we first dupe the fds
@@ -402,14 +436,16 @@ def _setup_pipes(fd_pipes):
 	# Then assign them to what they should be.
 	for fd in my_fds:
 		os.dup2(my_fds[fd], fd)
-	# Then close _all_ fds that haven't been explicitly
-	# requested to be kept open.
-	for fd in get_open_fds():
-		if fd not in my_fds:
-			try:
-				os.close(fd)
-			except OSError:
-				pass
+
+	if close_fds:
+		# Then close _all_ fds that haven't been explicitly
+		# requested to be kept open.
+		for fd in get_open_fds():
+			if fd not in my_fds:
+				try:
+					os.close(fd)
+				except OSError:
+					pass
 
 def find_binary(binary):
 	"""
@@ -418,7 +454,7 @@ def find_binary(binary):
 	@param binary: Name of the binary to find
 	@type string
 	@rtype: None or string
-	@returns: full path to binary or None if the binary could not be located.
+	@return: full path to binary or None if the binary could not be located.
 	"""
 	for path in os.environ.get("PATH", "").split(":"):
 		filename = "%s/%s" % (path, binary)

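The _setup_pipes() hunk above makes the close-everything sweep optional, but the two-phase fd shuffle it keeps deserves a note: every requested source fd is first dup()ed to a fresh descriptor, and only then dup2()ed onto its target number, so a swap like {1: 2, 2: 1} cannot clobber a source before it has been copied. A simplified stand-in (the real _setup_pipes leaves temporary-descriptor cleanup to the close-fds sweep; here it is done inline):

    import os

    def setup_pipes(fd_pipes):
        # fd_pipes maps target fd -> source fd. Phase 1: duplicate every
        # source to a brand-new descriptor, so no target assignment can
        # overwrite a source we still need.
        my_fds = {}
        for target in fd_pipes:
            my_fds[target] = os.dup(fd_pipes[target])
        # Phase 2: move the duplicates onto their requested numbers.
        for target in my_fds:
            os.dup2(my_fds[target], target)
        # Drop the temporaries, taking care not to close a descriptor
        # that happens to double as one of the targets.
        for temp in my_fds.values():
            if temp not in my_fds:
                os.close(temp)

    r1, w1 = os.pipe()
    r2, w2 = os.pipe()
    setup_pipes({w1: w2, w2: w1})   # swap the two write ends
    os.write(w1, b'to pipe 2')
    assert os.read(r2, 16) == b'to pipe 2'
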
diff --git a/portage_with_autodep/pym/portage/process.pyo b/portage_with_autodep/pym/portage/process.pyo
new file mode 100644
index 0000000..c53af30
Binary files /dev/null and b/portage_with_autodep/pym/portage/process.pyo differ

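The other process.py change worth isolating is the PyPy workaround: get_open_fds() is wrapped so a transient EAGAIN from listing /proc/<pid>/fd degrades to assuming the whole descriptor range may be open. The wrap-and-fall-back pattern on its own (Linux-only as written; max_fd_limit here is a stand-in for portage's module-level constant):

    import errno
    import os

    max_fd_limit = 256  # stand-in; portage derives this from resource limits

    def _get_open_fds():
        # os.listdir() runs eagerly here, so an EAGAIN surfaces at call
        # time rather than during iteration.
        return (int(fd) for fd in os.listdir('/proc/%i/fd' % os.getpid())
            if fd.isdigit())

    def get_open_fds():
        try:
            return _get_open_fds()
        except OSError as e:
            if e.errno != errno.EAGAIN:
                raise
            # Fall back to assuming every descriptor number may be open.
            return range(max_fd_limit)

    print(sorted(get_open_fds())[:5])
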
diff --git a/portage_with_autodep/pym/portage/proxy/__init__.pyo b/portage_with_autodep/pym/portage/proxy/__init__.pyo
new file mode 100644
index 0000000..b3d096b
Binary files /dev/null and b/portage_with_autodep/pym/portage/proxy/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/proxy/lazyimport.pyo b/portage_with_autodep/pym/portage/proxy/lazyimport.pyo
new file mode 100644
index 0000000..9da8089
Binary files /dev/null and b/portage_with_autodep/pym/portage/proxy/lazyimport.pyo differ

diff --git a/portage_with_autodep/pym/portage/proxy/objectproxy.pyo b/portage_with_autodep/pym/portage/proxy/objectproxy.pyo
new file mode 100644
index 0000000..f0919ff
Binary files /dev/null and b/portage_with_autodep/pym/portage/proxy/objectproxy.pyo differ

diff --git a/portage_with_autodep/pym/portage/repository/__init__.pyo b/portage_with_autodep/pym/portage/repository/__init__.pyo
new file mode 100644
index 0000000..ab526c0
Binary files /dev/null and b/portage_with_autodep/pym/portage/repository/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/repository/config.py b/portage_with_autodep/pym/portage/repository/config.py
index 9f0bb99..20f1919 100644
--- a/portage_with_autodep/pym/portage/repository/config.py
+++ b/portage_with_autodep/pym/portage/repository/config.py
@@ -1,21 +1,37 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import io
 import logging
+import warnings
+import sys
 import re
 
 try:
-	from configparser import SafeConfigParser, ParsingError
+	from configparser import ParsingError
+	if sys.hexversion >= 0x3020000:
+		from configparser import ConfigParser as SafeConfigParser
+	else:
+		from configparser import SafeConfigParser
 except ImportError:
 	from ConfigParser import SafeConfigParser, ParsingError
-from portage import os
-from portage.const import USER_CONFIG_PATH, REPO_NAME_LOC
+from portage import eclass_cache, os
+from portage.const import (MANIFEST2_HASH_FUNCTIONS, MANIFEST2_REQUIRED_HASH,
+	REPO_NAME_LOC, USER_CONFIG_PATH)
 from portage.env.loaders import KeyValuePairFileLoader
-from portage.util import normalize_path, writemsg, writemsg_level, shlex_split
+from portage.util import (normalize_path, read_corresponding_eapi_file, shlex_split,
+	stack_lists, writemsg, writemsg_level)
 from portage.localization import _
+from portage import _unicode_decode
 from portage import _unicode_encode
 from portage import _encodings
+from portage import manifest
+
+_valid_profile_formats = frozenset(
+	['pms', 'portage-1', 'portage-2'])
+
+_portage1_profiles_allow_directories = frozenset(
+	["portage-1-compat", "portage-1", 'portage-2'])
 
 _repo_name_sub_re = re.compile(r'[^\w-]')
 
@@ -35,8 +51,13 @@ def _gen_valid_repo(name):
 class RepoConfig(object):
 	"""Stores config of one repository"""
 
-	__slots__ = ['aliases', 'eclass_overrides', 'eclass_locations', 'location', 'user_location', 'masters', 'main_repo',
-		'missing_repo_name', 'name', 'priority', 'sync', 'format']
+	__slots__ = ('aliases', 'allow_missing_manifest', 'allow_provide_virtual',
+		'cache_formats', 'create_manifest', 'disable_manifest', 'eapi',
+		'eclass_db', 'eclass_locations', 'eclass_overrides', 'format', 'location',
+		'main_repo', 'manifest_hashes', 'masters', 'missing_repo_name',
+		'name', 'portage1_profiles', 'portage1_profiles_compat', 'priority',
+		'profile_formats', 'sign_commit', 'sign_manifest', 'sync',
+		'thin_manifest', 'update_changelog', 'user_location')
 
 	def __init__(self, name, repo_opts):
 		"""Build a RepoConfig with options in repo_opts
@@ -51,11 +72,15 @@ class RepoConfig(object):
 		if eclass_overrides is not None:
 			eclass_overrides = tuple(eclass_overrides.split())
 		self.eclass_overrides = eclass_overrides
-		#Locations are computed later.
+		# Eclass databases and locations are computed later.
+		self.eclass_db = None
 		self.eclass_locations = None
 
-		#Masters are only read from layout.conf.
-		self.masters = None
+		# Masters from repos.conf override layout.conf.
+		masters = repo_opts.get('masters')
+		if masters is not None:
+			masters = tuple(masters.split())
+		self.masters = masters
 
 		#The main-repo key makes only sense for the 'DEFAULT' section.
 		self.main_repo = repo_opts.get('main-repo')
@@ -87,59 +112,153 @@ class RepoConfig(object):
 			location = None
 		self.location = location
 
+		eapi = None
 		missing = True
 		if self.location is not None:
-			name, missing = self._read_repo_name(self.location)
-			# We must ensure that the name conforms to PMS 3.1.5
-			# in order to avoid InvalidAtom exceptions when we
-			# use it to generate atoms.
-			name = _gen_valid_repo(name)
-			if not name:
-				# name only contains invalid characters
-				name = "x-" + os.path.basename(self.location)
-				name = _gen_valid_repo(name)
-				# If basename only contains whitespace then the
-				# end result is name = 'x-'.
-
+			eapi = read_corresponding_eapi_file(os.path.join(self.location, REPO_NAME_LOC))
+			name, missing = self._read_valid_repo_name(self.location)
 		elif name == "DEFAULT": 
 			missing = False
+
+		self.eapi = eapi
 		self.name = name
 		self.missing_repo_name = missing
+		# sign_commit is disabled by default, since it requires Git >=1.7.9,
+		# and a key_id configured via `git config user.signingkey key_id`
+		self.sign_commit = False
+		self.sign_manifest = True
+		self.thin_manifest = False
+		self.allow_missing_manifest = False
+		self.allow_provide_virtual = False
+		self.create_manifest = True
+		self.disable_manifest = False
+		self.manifest_hashes = None
+		self.update_changelog = False
+		self.cache_formats = None
+		self.portage1_profiles = True
+		self.portage1_profiles_compat = False
+
+		# Parse layout.conf.
+		if self.location:
+			layout_filename = os.path.join(self.location, "metadata", "layout.conf")
+			layout_data = parse_layout_conf(self.location, self.name)[0]
+
+			# layout.conf masters may be overridden here if we have a masters
+			# setting from the user's repos.conf
+			if self.masters is None:
+				self.masters = layout_data['masters']
+
+			if layout_data['aliases']:
+				aliases = self.aliases
+				if aliases is None:
+					aliases = ()
+				# repos.conf aliases come after layout.conf aliases, giving
+				# them the ability to do incremental overrides
+				self.aliases = layout_data['aliases'] + tuple(aliases)
+
+			for value in ('allow-missing-manifest',
+				'allow-provide-virtual', 'cache-formats',
+				'create-manifest', 'disable-manifest', 'manifest-hashes',
+				'profile-formats',
+				'sign-commit', 'sign-manifest', 'thin-manifest', 'update-changelog'):
+				setattr(self, value.lower().replace("-", "_"), layout_data[value])
+
+			self.portage1_profiles = any(x in _portage1_profiles_allow_directories
+				for x in layout_data['profile-formats'])
+			self.portage1_profiles_compat = layout_data['profile-formats'] == ('portage-1-compat',)
+
+	def iter_pregenerated_caches(self, auxdbkeys, readonly=True, force=False):
+		"""
+		Reads layout.conf cache-formats from left to right and yields cache
+		instances for each supported type that's found. If no cache-formats
+		are specified in layout.conf, 'pms' type is assumed if the
+		metadata/cache directory exists or force is True.
+		"""
+		formats = self.cache_formats
+		if not formats:
+			if not force:
+				return
+			formats = ('pms',)
+
+		for fmt in formats:
+			name = None
+			if fmt == 'pms':
+				from portage.cache.metadata import database
+				name = 'metadata/cache'
+			elif fmt == 'md5-dict':
+				from portage.cache.flat_hash import md5_database as database
+				name = 'metadata/md5-cache'
+
+			if name is not None:
+				yield database(self.location, name,
+					auxdbkeys, readonly=readonly)
+
+	def get_pregenerated_cache(self, auxdbkeys, readonly=True, force=False):
+		"""
+		Returns the first cache instance yielded from
+		iter_pregenerated_caches(), or None if no cache is available or none
+		of the available formats are supported.
+		"""
+		return next(self.iter_pregenerated_caches(
+			auxdbkeys, readonly=readonly, force=force), None)
+
+	def load_manifest(self, *args, **kwds):
+		kwds['thin'] = self.thin_manifest
+		kwds['allow_missing'] = self.allow_missing_manifest
+		kwds['allow_create'] = self.create_manifest
+		kwds['hashes'] = self.manifest_hashes
+		if self.disable_manifest:
+			kwds['from_scratch'] = True
+		return manifest.Manifest(*args, **kwds)
 
 	def update(self, new_repo):
 		"""Update repository with options in another RepoConfig"""
-		if new_repo.aliases is not None:
-			self.aliases = new_repo.aliases
-		if new_repo.eclass_overrides is not None:
-			self.eclass_overrides = new_repo.eclass_overrides
-		if new_repo.masters is not None:
-			self.masters = new_repo.masters
+
+		keys = set(self.__slots__)
+		keys.discard("missing_repo_name")
+		for k in keys:
+			v = getattr(new_repo, k, None)
+			if v is not None:
+				setattr(self, k, v)
+
 		if new_repo.name is not None:
-			self.name = new_repo.name
 			self.missing_repo_name = new_repo.missing_repo_name
-		if new_repo.user_location is not None:
-			self.user_location = new_repo.user_location
-		if new_repo.location is not None:
-			self.location = new_repo.location
-		if new_repo.priority is not None:
-			self.priority = new_repo.priority
-		if new_repo.sync is not None:
-			self.sync = new_repo.sync
-
-	def _read_repo_name(self, repo_path):
+
+	@staticmethod
+	def _read_valid_repo_name(repo_path):
+		name, missing = RepoConfig._read_repo_name(repo_path)
+		# We must ensure that the name conforms to PMS 3.1.5
+		# in order to avoid InvalidAtom exceptions when we
+		# use it to generate atoms.
+		name = _gen_valid_repo(name)
+		if not name:
+			# name only contains invalid characters
+			name = "x-" + os.path.basename(repo_path)
+			name = _gen_valid_repo(name)
+			# If basename only contains whitespace then the
+			# end result is name = 'x-'.
+		return name, missing
+
+	@staticmethod
+	def _read_repo_name(repo_path):
 		"""
 		Read repo_name from repo_path.
 		Returns repo_name, missing.
 		"""
 		repo_name_path = os.path.join(repo_path, REPO_NAME_LOC)
+		f = None
 		try:
-			return io.open(
+			f = io.open(
 				_unicode_encode(repo_name_path,
 				encoding=_encodings['fs'], errors='strict'),
 				mode='r', encoding=_encodings['repo.content'],
-				errors='replace').readline().strip(), False
+				errors='replace')
+			return f.readline().strip(), False
 		except EnvironmentError:
 			return "x-" + os.path.basename(repo_path), True
+		finally:
+			if f is not None:
+				f.close()
 
 	def info_string(self):
 		"""
@@ -167,109 +286,154 @@ class RepoConfig(object):
 		repo_msg.append("")
 		return "\n".join(repo_msg)
 
+	def __repr__(self):
+		return "<portage.repository.config.RepoConfig(name='%s', location='%s')>" % (self.name, _unicode_decode(self.location))
+
+	def __str__(self):
+		d = {}
+		for k in self.__slots__:
+			d[k] = getattr(self, k, None)
+		return _unicode_decode("%s") % (d,)
+
+	if sys.hexversion < 0x3000000:
+
+		__unicode__ = __str__
+
+		def __str__(self):
+			return _unicode_encode(self.__unicode__())
+
 class RepoConfigLoader(object):
 	"""Loads and store config of several repositories, loaded from PORTDIR_OVERLAY or repos.conf"""
-	def __init__(self, paths, settings):
-		"""Load config from files in paths"""
-		def parse(paths, prepos, ignored_map, ignored_location_map):
-			"""Parse files in paths to load config"""
-			parser = SafeConfigParser()
-			try:
-				parser.read(paths)
-			except ParsingError as e:
-				writemsg(_("!!! Error while reading repo config file: %s\n") % e, noiselevel=-1)
-			prepos['DEFAULT'] = RepoConfig("DEFAULT", parser.defaults())
-			for sname in parser.sections():
-				optdict = {}
-				for oname in parser.options(sname):
-					optdict[oname] = parser.get(sname, oname)
-
-				repo = RepoConfig(sname, optdict)
-				if repo.location and not os.path.exists(repo.location):
-					writemsg(_("!!! Invalid repos.conf entry '%s'"
-						" (not a dir): '%s'\n") % (sname, repo.location), noiselevel=-1)
-					continue
 
-				if repo.name in prepos:
-					old_location = prepos[repo.name].location
-					if old_location is not None and repo.location is not None and old_location != repo.location:
-						ignored_map.setdefault(repo.name, []).append(old_location)
-						ignored_location_map[old_location] = repo.name
-					prepos[repo.name].update(repo)
-				else:
+	@staticmethod
+	def _add_repositories(portdir, portdir_overlay, prepos, ignored_map, ignored_location_map):
+		"""Add overlays in PORTDIR_OVERLAY as repositories"""
+		overlays = []
+		if portdir:
+			portdir = normalize_path(portdir)
+			overlays.append(portdir)
+		try:
+			port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
+		except ValueError as e:
+			#File "/usr/lib/python3.2/shlex.py", line 168, in read_token
+			#	raise ValueError("No closing quotation")
+			writemsg(_("!!! Invalid PORTDIR_OVERLAY:"
+				" %s: %s\n") % (e, portdir_overlay), noiselevel=-1)
+			port_ov = []
+		overlays.extend(port_ov)
+		default_repo_opts = {}
+		if prepos['DEFAULT'].aliases is not None:
+			default_repo_opts['aliases'] = \
+				' '.join(prepos['DEFAULT'].aliases)
+		if prepos['DEFAULT'].eclass_overrides is not None:
+			default_repo_opts['eclass-overrides'] = \
+				' '.join(prepos['DEFAULT'].eclass_overrides)
+		if prepos['DEFAULT'].masters is not None:
+			default_repo_opts['masters'] = \
+				' '.join(prepos['DEFAULT'].masters)
+
+		if overlays:
+			# We need a copy of the original repos.conf data, since we're
+			# going to modify the prepos dict and some of the RepoConfig
+			# objects that we put in prepos may have to be discarded if
+			# they get overridden by a repository with the same name but
+			# a different location. This is common with repoman, for example,
+			# when temporarily overriding an rsync repo with another copy
+			# of the same repo from CVS.
+			repos_conf = prepos.copy()
+			# Overlay priority is negative because we want overlays to be consulted before any other repo.
+			base_priority = 0
+			for ov in overlays:
+				if os.path.isdir(ov):
+					repo_opts = default_repo_opts.copy()
+					repo_opts['location'] = ov
+					repo = RepoConfig(None, repo_opts)
+					# repos_conf_opts contains options from repos.conf
+					repos_conf_opts = repos_conf.get(repo.name)
+					if repos_conf_opts is not None:
+						# Selectively copy only the attributes which
+						# repos.conf is allowed to override.
+						for k in ('aliases', 'eclass_overrides', 'masters', 'priority'):
+							v = getattr(repos_conf_opts, k, None)
+							if v is not None:
+								setattr(repo, k, v)
+
+					if repo.name in prepos:
+						old_location = prepos[repo.name].location
+						if old_location is not None and old_location != repo.location:
+							ignored_map.setdefault(repo.name, []).append(old_location)
+							ignored_location_map[old_location] = repo.name
+							if old_location == portdir:
+								portdir = repo.user_location
+
+					if ov == portdir and portdir not in port_ov:
+						repo.priority = -1000
+					elif repo.priority is None:
+						repo.priority = base_priority
+						base_priority += 1
+
 					prepos[repo.name] = repo
+				else:
+					writemsg(_("!!! Invalid PORTDIR_OVERLAY"
+						" (not a dir): '%s'\n") % ov, noiselevel=-1)
 
-		def add_overlays(portdir, portdir_overlay, prepos, ignored_map, ignored_location_map):
-			"""Add overlays in PORTDIR_OVERLAY as repositories"""
-			overlays = []
-			if portdir:
-				portdir = normalize_path(portdir)
-				overlays.append(portdir)
-			port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
-			overlays.extend(port_ov)
-			default_repo_opts = {}
-			if prepos['DEFAULT'].aliases is not None:
-				default_repo_opts['aliases'] = \
-					' '.join(prepos['DEFAULT'].aliases)
-			if prepos['DEFAULT'].eclass_overrides is not None:
-				default_repo_opts['eclass-overrides'] = \
-					' '.join(prepos['DEFAULT'].eclass_overrides)
-			if prepos['DEFAULT'].masters is not None:
-				default_repo_opts['masters'] = \
-					' '.join(prepos['DEFAULT'].masters)
-			if overlays:
-				#overlay priority is negative because we want them to be looked before any other repo
-				base_priority = 0
-				for ov in overlays:
-					if os.path.isdir(ov):
-						repo_opts = default_repo_opts.copy()
-						repo_opts['location'] = ov
-						repo = RepoConfig(None, repo_opts)
-						repo_conf_opts = prepos.get(repo.name)
-						if repo_conf_opts is not None:
-							if repo_conf_opts.aliases is not None:
-								repo_opts['aliases'] = \
-									' '.join(repo_conf_opts.aliases)
-							if repo_conf_opts.eclass_overrides is not None:
-								repo_opts['eclass-overrides'] = \
-									' '.join(repo_conf_opts.eclass_overrides)
-							if repo_conf_opts.masters is not None:
-								repo_opts['masters'] = \
-									' '.join(repo_conf_opts.masters)
-						repo = RepoConfig(repo.name, repo_opts)
-						if repo.name in prepos:
-							old_location = prepos[repo.name].location
-							if old_location is not None and old_location != repo.location:
-								ignored_map.setdefault(repo.name, []).append(old_location)
-								ignored_location_map[old_location] = repo.name
-								if old_location == portdir:
-									portdir = repo.user_location
-							prepos[repo.name].update(repo)
-							repo = prepos[repo.name]
-						else:
-							prepos[repo.name] = repo
-
-						if ov == portdir and portdir not in port_ov:
-							repo.priority = -1000
-						else:
-							repo.priority = base_priority
-							base_priority += 1
+		return portdir
 
-					else:
-						writemsg(_("!!! Invalid PORTDIR_OVERLAY"
-							" (not a dir): '%s'\n") % ov, noiselevel=-1)
+	@staticmethod
+	def _parse(paths, prepos, ignored_map, ignored_location_map):
+		"""Parse files in paths to load config"""
+		parser = SafeConfigParser()
 
-			return portdir
+		# use read_file/readfp in order to control decoding of unicode
+		try:
+			# Python >=3.2
+			read_file = parser.read_file
+		except AttributeError:
+			read_file = parser.readfp
 
-		def repo_priority(r):
-			"""
-			Key funtion for comparing repositories by priority.
-			None is equal priority zero.
-			"""
-			x = prepos[r].priority
-			if x is None:
-				return 0
-			return x
+		for p in paths:
+			f = None
+			try:
+				f = io.open(_unicode_encode(p,
+					encoding=_encodings['fs'], errors='strict'),
+					mode='r', encoding=_encodings['repo.content'],
+					errors='replace')
+			except EnvironmentError:
+				pass
+			else:
+				try:
+					read_file(f)
+				except ParsingError as e:
+					writemsg(_unicode_decode(
+						_("!!! Error while reading repo config file: %s\n")
+						) % e, noiselevel=-1)
+			finally:
+				if f is not None:
+					f.close()
+
+		prepos['DEFAULT'] = RepoConfig("DEFAULT", parser.defaults())
+		for sname in parser.sections():
+			optdict = {}
+			for oname in parser.options(sname):
+				optdict[oname] = parser.get(sname, oname)
+
+			repo = RepoConfig(sname, optdict)
+			if repo.location and not os.path.exists(repo.location):
+				writemsg(_("!!! Invalid repos.conf entry '%s'"
+					" (not a dir): '%s'\n") % (sname, repo.location), noiselevel=-1)
+				continue
+
+			if repo.name in prepos:
+				old_location = prepos[repo.name].location
+				if old_location is not None and repo.location is not None and old_location != repo.location:
+					ignored_map.setdefault(repo.name, []).append(old_location)
+					ignored_location_map[old_location] = repo.name
+				prepos[repo.name].update(repo)
+			else:
+				prepos[repo.name] = repo
+
+	def __init__(self, paths, settings):
+		"""Load config from files in paths"""
 
 		prepos = {}
 		location_map = {}
@@ -279,10 +443,12 @@ class RepoConfigLoader(object):
 
 		portdir = settings.get('PORTDIR', '')
 		portdir_overlay = settings.get('PORTDIR_OVERLAY', '')
-		parse(paths, prepos, ignored_map, ignored_location_map)
+
+		self._parse(paths, prepos, ignored_map, ignored_location_map)
+
 		# If PORTDIR_OVERLAY contains a repo with the same repo_name as
 		# PORTDIR, then PORTDIR is overridden.
-		portdir = add_overlays(portdir, portdir_overlay, prepos,
+		portdir = self._add_repositories(portdir, portdir_overlay, prepos,
 			ignored_map, ignored_location_map)
 		if portdir and portdir.strip():
 			portdir = os.path.realpath(portdir)
@@ -294,38 +460,14 @@ class RepoConfigLoader(object):
 			for repo in prepos.values()
 			if repo.location is not None and repo.missing_repo_name)
 
-		#Parse layout.conf and read masters key.
-		for repo in prepos.values():
-			if not repo.location:
-				continue
-			layout_filename = os.path.join(repo.location, "metadata", "layout.conf")
-			layout_file = KeyValuePairFileLoader(layout_filename, None, None)
-			layout_data, layout_errors = layout_file.load()
-
-			masters = layout_data.get('masters')
-			if masters and masters.strip():
-				masters = masters.split()
-			else:
-				masters = None
-			repo.masters = masters
-
-			aliases = layout_data.get('aliases')
-			if aliases and aliases.strip():
-				aliases = aliases.split()
-			else:
-				aliases = None
-			if aliases:
-				if repo.aliases:
-					aliases.extend(repo.aliases)
-				repo.aliases = tuple(sorted(set(aliases)))
-
 		#Take aliases into account.
 		new_prepos = {}
 		for repo_name, repo in prepos.items():
 			names = set()
 			names.add(repo_name)
 			if repo.aliases:
-				names.update(repo.aliases)
+				aliases = stack_lists([repo.aliases], incremental=True)
+				names.update(aliases)
 
 			for name in names:
 				if name in new_prepos:
@@ -342,16 +484,11 @@ class RepoConfigLoader(object):
 
 		# filter duplicates from aliases, by only including
 		# items where repo.name == key
-		prepos_order = [repo.name for key, repo in prepos.items() \
-			if repo.name == key and repo.location is not None]
-		prepos_order.sort(key=repo_priority)
 
-		if portdir in location_map:
-			portdir_repo = prepos[location_map[portdir]]
-			portdir_sync = settings.get('SYNC', '')
-			#if SYNC variable is set and not overwritten by repos.conf
-			if portdir_sync and not portdir_repo.sync:
-				portdir_repo.sync = portdir_sync
+		prepos_order = sorted(prepos.items(), key=lambda r:r[1].priority or 0)
+
+		prepos_order = [repo.name for (key, repo) in prepos_order
+			if repo.name == key and repo.location is not None]
 
 		if prepos['DEFAULT'].main_repo is None or \
 			prepos['DEFAULT'].main_repo not in prepos:
@@ -406,7 +543,12 @@ class RepoConfigLoader(object):
 
 			eclass_locations = []
 			eclass_locations.extend(master_repo.location for master_repo in repo.masters)
-			eclass_locations.append(repo.location)
+			# Only append the current repo to eclass_locations if it's not
+			# there already. This allows masters to have more control over
+			# eclass override order, which may be useful for scenarios in
+			# which there is a plan to migrate eclasses to a master repo.
+			if repo.location not in eclass_locations:
+				eclass_locations.append(repo.location)
 
 			if repo.eclass_overrides:
 				for other_repo_name in repo.eclass_overrides:
@@ -419,6 +561,23 @@ class RepoConfigLoader(object):
 							level=logging.ERROR, noiselevel=-1)
 			repo.eclass_locations = tuple(eclass_locations)
 
+		eclass_dbs = {}
+		for repo_name, repo in prepos.items():
+			if repo_name == "DEFAULT":
+				continue
+
+			eclass_db = None
+			for eclass_location in repo.eclass_locations:
+				tree_db = eclass_dbs.get(eclass_location)
+				if tree_db is None:
+					tree_db = eclass_cache.cache(eclass_location)
+					eclass_dbs[eclass_location] = tree_db
+				if eclass_db is None:
+					eclass_db = tree_db.copy()
+				else:
+					eclass_db.append(tree_db)
+			repo.eclass_db = eclass_db
+
 		self._prepos_changed = True
 		self._repo_location_list = []
 
@@ -488,6 +647,9 @@ class RepoConfigLoader(object):
 			return None
 		return self.treemap[repo_name]
 
+	def get_repo_for_location(self, location):
+		return self.prepos[self.get_name_for_location(location)]
+
 	def __getitem__(self, repo_name):
 		return self.prepos[repo_name]
 
@@ -502,3 +664,113 @@ def load_repository_config(settings):
 		repoconfigpaths.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
 			USER_CONFIG_PATH, "repos.conf"))
 	return RepoConfigLoader(repoconfigpaths, settings)
+
+def _get_repo_name(repo_location, cached=None):
+	if cached is not None:
+		return cached
+	name, missing = RepoConfig._read_repo_name(repo_location)
+	if missing:
+		return None
+	return name
+
+def parse_layout_conf(repo_location, repo_name=None):
+	eapi = read_corresponding_eapi_file(os.path.join(repo_location, REPO_NAME_LOC))
+
+	layout_filename = os.path.join(repo_location, "metadata", "layout.conf")
+	layout_file = KeyValuePairFileLoader(layout_filename, None, None)
+	layout_data, layout_errors = layout_file.load()
+
+	data = {}
+
+	# None indicates absence of a masters setting, which later code uses
+	# to trigger a backward compatibility fallback that sets an implicit
+	# master. In order to avoid this fallback behavior, layout.conf can
+	# explicitly set masters to an empty value, which will result in an
+	# empty tuple here instead of None.
+	masters = layout_data.get('masters')
+	if masters is not None:
+		masters = tuple(masters.split())
+	data['masters'] = masters
+	data['aliases'] = tuple(layout_data.get('aliases', '').split())
+
+	data['allow-provide-virtual'] = \
+		layout_data.get('allow-provide-virtuals', 'false').lower() == 'true'
+
+	data['sign-commit'] = layout_data.get('sign-commits', 'false').lower() \
+		== 'true'
+
+	data['sign-manifest'] = layout_data.get('sign-manifests', 'true').lower() \
+		== 'true'
+
+	data['thin-manifest'] = layout_data.get('thin-manifests', 'false').lower() \
+		== 'true'
+
+	manifest_policy = layout_data.get('use-manifests', 'strict').lower()
+	data['allow-missing-manifest'] = manifest_policy != 'strict'
+	data['create-manifest'] = manifest_policy != 'false'
+	data['disable-manifest'] = manifest_policy == 'false'
+
+	# For compatibility with PMS, fall back to 'pms', but also check
+	# whether the cache exists.
+	cache_formats = layout_data.get('cache-formats', 'pms').lower().split()
+	if 'pms' in cache_formats and not os.path.isdir(
+		os.path.join(repo_location, 'metadata', 'cache')):
+		cache_formats.remove('pms')
+	data['cache-formats'] = tuple(cache_formats)
+
+	manifest_hashes = layout_data.get('manifest-hashes')
+	if manifest_hashes is not None:
+		manifest_hashes = frozenset(manifest_hashes.upper().split())
+		if MANIFEST2_REQUIRED_HASH not in manifest_hashes:
+			repo_name = _get_repo_name(repo_location, cached=repo_name)
+			warnings.warn((_("Repository named '%(repo_name)s' has a "
+				"'manifest-hashes' setting that does not contain "
+				"the '%(hash)s' hash which is required by this "
+				"portage version. You will have to upgrade portage "
+				"if you want to generate valid manifests for this "
+				"repository: %(layout_filename)s") %
+				{"repo_name": repo_name or 'unspecified',
+				"hash":MANIFEST2_REQUIRED_HASH,
+				"layout_filename":layout_filename}),
+				DeprecationWarning)
+		unsupported_hashes = manifest_hashes.difference(
+			MANIFEST2_HASH_FUNCTIONS)
+		if unsupported_hashes:
+			repo_name = _get_repo_name(repo_location, cached=repo_name)
+			warnings.warn((_("Repository named '%(repo_name)s' has a "
+				"'manifest-hashes' setting that contains one "
+				"or more hash types '%(hashes)s' which are not supported by "
+				"this portage version. You will have to upgrade "
+				"portage if you want to generate valid manifests for "
+				"this repository: %(layout_filename)s") %
+				{"repo_name": repo_name or 'unspecified',
+				"hashes":" ".join(sorted(unsupported_hashes)),
+				"layout_filename":layout_filename}),
+				DeprecationWarning)
+	data['manifest-hashes'] = manifest_hashes
+
+	data['update-changelog'] = layout_data.get('update-changelog', 'false').lower() \
+		== 'true'
+
+	raw_formats = layout_data.get('profile-formats')
+	if raw_formats is None:
+		if eapi in ('4-python',):
+			raw_formats = ('portage-1',)
+		else:
+			raw_formats = ('portage-1-compat',)
+	else:
+		raw_formats = set(raw_formats.split())
+		unknown = raw_formats.difference(_valid_profile_formats)
+		if unknown:
+			repo_name = _get_repo_name(repo_location, cached=repo_name)
+			warnings.warn((_("Repository named '%(repo_name)s' has unsupported "
+				"profiles in use ('profile-formats = %(unknown_fmts)s' setting in "
+				"'%(layout_filename)s'; please upgrade portage.") %
+				dict(repo_name=repo_name or 'unspecified',
+				layout_filename=layout_filename,
+				unknown_fmts=" ".join(unknown))),
+				DeprecationWarning)
+		raw_formats = tuple(raw_formats.intersection(_valid_profile_formats))
+	data['profile-formats'] = raw_formats
+
+	return data, layout_errors

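parse_layout_conf() above boils down to a few recurring idioms: a masters key that is absent stays None (later code uses that to apply the implicit-master fallback) while an explicitly empty value becomes an empty tuple; boolean knobs are string-compared against 'true'; and use-manifests is effectively three-valued. Those idioms in isolation, over a plain dict standing in for what KeyValuePairFileLoader returns:

    def parse_layout(layout_data):
        data = {}
        # Absent: None, so callers can fall back to an implicit master.
        # Present but empty ('masters ='): the empty tuple ().
        masters = layout_data.get('masters')
        if masters is not None:
            masters = tuple(masters.split())
        data['masters'] = masters
        # Boolean knobs arrive as the strings 'true'/'false'.
        data['thin-manifest'] = \
            layout_data.get('thin-manifests', 'false').lower() == 'true'
        # use-manifests is three-valued: 'strict', 'true' or 'false'.
        policy = layout_data.get('use-manifests', 'strict').lower()
        data['allow-missing-manifest'] = policy != 'strict'
        data['create-manifest'] = policy != 'false'
        data['disable-manifest'] = policy == 'false'
        return data

    assert parse_layout({})['masters'] is None
    assert parse_layout({'masters': ''})['masters'] == ()
    assert parse_layout({'masters': 'gentoo'})['masters'] == ('gentoo',)
    assert parse_layout({'use-manifests': 'false'})['disable-manifest']
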
diff --git a/portage_with_autodep/pym/portage/repository/config.pyo b/portage_with_autodep/pym/portage/repository/config.pyo
new file mode 100644
index 0000000..f9ee26d
Binary files /dev/null and b/portage_with_autodep/pym/portage/repository/config.pyo differ

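Another config.py detail worth isolating: RepoConfig.update() replaces the old hand-written field-by-field merge with reflection over __slots__, so any attribute the newer config has set (is not None) wins without the merge code having to be kept in sync with the slot list. The idiom generalizes to any slotted settings object; a hedged sketch, with an illustrative class of its own rather than RepoConfig itself:

    class Settings(object):
        __slots__ = ('location', 'masters', 'priority')

        def __init__(self, **kwargs):
            for k in self.__slots__:
                setattr(self, k, kwargs.get(k))

        def update(self, other):
            # Copy every non-None slot from other, so later config
            # sources override earlier ones field by field.
            for k in self.__slots__:
                v = getattr(other, k, None)
                if v is not None:
                    setattr(self, k, v)

    base = Settings(location='/usr/portage', priority=-1000)
    override = Settings(priority=0)
    base.update(override)
    assert (base.location, base.priority) == ('/usr/portage', 0)
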
diff --git a/portage_with_autodep/pym/portage/tests/__init__.py b/portage_with_autodep/pym/portage/tests/__init__.py
index a647aa2..492ece4 100644
--- a/portage_with_autodep/pym/portage/tests/__init__.py
+++ b/portage_with_autodep/pym/portage/tests/__init__.py
@@ -1,10 +1,13 @@
 # tests/__init__.py -- Portage Unit Test functionality
-# Copyright 2006-2010 Gentoo Foundation
+# Copyright 2006-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
+from __future__ import print_function
+
 import sys
 import time
 import unittest
+from optparse import OptionParser, OptionValueError
 
 try:
 	from unittest.runner import _TextTestResult # new in python-2.7
@@ -16,35 +19,33 @@ from portage import _encodings
 from portage import _unicode_decode
 
 def main():
-
-	TEST_FILE = b'__test__'
-	svn_dirname = b'.svn'
 	suite = unittest.TestSuite()
 	basedir = os.path.dirname(os.path.realpath(__file__))
-	testDirs = []
 
-	if len(sys.argv) > 1:
-		suite.addTests(getTestFromCommandLine(sys.argv[1:], basedir))
-		return TextTestRunner(verbosity=2).run(suite)
+	usage = "usage: %s [options] [tests to run]" % os.path.basename(sys.argv[0])
+	parser = OptionParser(usage=usage)
+	parser.add_option("-l", "--list", help="list all tests",
+		action="store_true", dest="list_tests")
+	(options, args) = parser.parse_args(args=sys.argv)
 
-  # the os.walk help mentions relative paths as being quirky
-	# I was tired of adding dirs to the list, so now we add __test__
-	# to each dir we want tested.
-	for root, dirs, files in os.walk(basedir):
-		if svn_dirname in dirs:
-			dirs.remove(svn_dirname)
-		try:
-			root = _unicode_decode(root,
-				encoding=_encodings['fs'], errors='strict')
-		except UnicodeDecodeError:
-			continue
+	if options.list_tests:
+		testdir = os.path.dirname(sys.argv[0])
+		for mydir in getTestDirs(basedir):
+			testsubdir = os.path.basename(mydir)
+			for name in getTestNames(mydir):
+				print("%s/%s/%s.py" % (testdir, testsubdir, name))
+		return os.EX_OK
 
-		if TEST_FILE in files:
-			testDirs.append(root)
+	if len(args) > 1:
+		suite.addTests(getTestFromCommandLine(args[1:], basedir))
+	else:
+		for mydir in getTestDirs(basedir):
+			suite.addTests(getTests(os.path.join(basedir, mydir), basedir))
 
-	for mydir in testDirs:
-		suite.addTests(getTests(os.path.join(basedir, mydir), basedir) )
-	return TextTestRunner(verbosity=2).run(suite)
+	result = TextTestRunner(verbosity=2).run(suite)
+	if not result.wasSuccessful():
+		return 1
+	return os.EX_OK
 
 def my_import(name):
 	mod = __import__(name)
@@ -54,7 +55,7 @@ def my_import(name):
 	return mod
 
 def getTestFromCommandLine(args, base_path):
-	ret = []
+	result = []
 	for arg in args:
 		realpath = os.path.realpath(arg)
 		path = os.path.dirname(realpath)
@@ -64,28 +65,39 @@ def getTestFromCommandLine(args, base_path):
 			raise Exception("Invalid argument: '%s'" % arg)
 
 		mymodule = f[:-3]
+		result.extend(getTestsFromFiles(path, base_path, [mymodule]))
+	return result
 
-		parent_path = path[len(base_path)+1:]
-		parent_module = ".".join(("portage", "tests", parent_path))
-		parent_module = parent_module.replace('/', '.')
-		result = []
+def getTestDirs(base_path):
+	TEST_FILE = b'__test__'
+	svn_dirname = b'.svn'
+	testDirs = []
 
-		# Make the trailing / a . for module importing
-		modname = ".".join((parent_module, mymodule))
-		mod = my_import(modname)
-		ret.append(unittest.TestLoader().loadTestsFromModule(mod))
-	return ret
+	# the os.walk help mentions relative paths as being quirky
+	# I was tired of adding dirs to the list, so now we add __test__
+	# to each dir we want tested.
+	for root, dirs, files in os.walk(base_path):
+		if svn_dirname in dirs:
+			dirs.remove(svn_dirname)
+		try:
+			root = _unicode_decode(root,
+				encoding=_encodings['fs'], errors='strict')
+		except UnicodeDecodeError:
+			continue
 
-def getTests(path, base_path):
-	"""
+		if TEST_FILE in files:
+			testDirs.append(root)
 
-	path is the path to a given subdir ( 'portage/' for example)
-	This does a simple filter on files in that dir to give us modules
-	to import
+	testDirs.sort()
+	return testDirs
 
-	"""
+def getTestNames(path):
 	files = os.listdir(path)
 	files = [ f[:-3] for f in files if f.startswith("test") and f.endswith(".py") ]
+	files.sort()
+	return files
+
+def getTestsFromFiles(path, base_path, files):
 	parent_path = path[len(base_path)+1:]
 	parent_module = ".".join(("portage", "tests", parent_path))
 	parent_module = parent_module.replace('/', '.')
@@ -97,6 +109,16 @@ def getTests(path, base_path):
 		result.append(unittest.TestLoader().loadTestsFromModule(mod))
 	return result
 
+def getTests(path, base_path):
+	"""
+
+	path is the path to a given subdir ('portage/' for example).
+	This does a simple filter on files in that dir to give us modules
+	to import.
+
+	"""
+	return getTestsFromFiles(path, base_path, getTestNames(path))
+
 class TextTestResult(_TextTestResult):
 	"""
 	We need a subclass of unittest._TextTestResult to handle tests with TODO
@@ -109,6 +131,7 @@ class TextTestResult(_TextTestResult):
 	def __init__(self, stream, descriptions, verbosity):
 		super(TextTestResult, self).__init__(stream, descriptions, verbosity)
 		self.todoed = []
+		self.portage_skipped = []
 
 	def addTodo(self, test, info):
 		self.todoed.append((test,info))
@@ -117,12 +140,20 @@ class TextTestResult(_TextTestResult):
 		elif self.dots:
 			self.stream.write(".")
 
+	def addPortageSkip(self, test, info):
+		self.portage_skipped.append((test,info))
+		if self.showAll:
+			self.stream.writeln("SKIP")
+		elif self.dots:
+			self.stream.write(".")
+
 	def printErrors(self):
 		if self.dots or self.showAll:
 			self.stream.writeln()
 			self.printErrorList('ERROR', self.errors)
 			self.printErrorList('FAIL', self.failures)
 			self.printErrorList('TODO', self.todoed)
+			self.printErrorList('SKIP', self.portage_skipped)
 
 class TestCase(unittest.TestCase):
 	"""
@@ -131,15 +162,12 @@ class TestCase(unittest.TestCase):
 	and then fix the code later.  This may not be a great approach
 	(broken code!!??!11oneone) but it does happen at times.
 	"""
-	
-	def __init__(self, methodName='runTest'):
-		# This method exists because unittest.py in python 2.4 stores
-		# the methodName as __testMethodName while 2.5 uses
-		# _testMethodName.
-		self._testMethodName = methodName
-		unittest.TestCase.__init__(self, methodName)
+
+	def __init__(self, *pargs, **kwargs):
+		unittest.TestCase.__init__(self, *pargs, **kwargs)
 		self.todo = False
-		
+		self.portage_skip = None
+
 	def defaultTestResult(self):
 		return TextTestResult()
 
@@ -162,7 +190,13 @@ class TestCase(unittest.TestCase):
 				testMethod()
 				ok = True
 			except self.failureException:
-				if self.todo:
+				if self.portage_skip is not None:
+					if self.portage_skip is True:
+						result.addPortageSkip(self, "%s: SKIP" % testMethod)
+					else:
+						result.addPortageSkip(self, "%s: SKIP: %s" %
+							(testMethod, self.portage_skip))
+				elif self.todo:
 					result.addTodo(self,"%s: TODO" % testMethod)
 				else:
 					result.addFailure(self, sys.exc_info())
@@ -192,21 +226,21 @@ class TestCase(unittest.TestCase):
 		   unexpected exception.
 		"""
 		try:
-		    callableObj(*args, **kwargs)
+			callableObj(*args, **kwargs)
 		except excClass:
-		    return
+			return
 		else:
-		    if hasattr(excClass,'__name__'): excName = excClass.__name__
-		    else: excName = str(excClass)
-		    raise self.failureException("%s not raised: %s" % (excName, msg))
-			
+			if hasattr(excClass,'__name__'): excName = excClass.__name__
+			else: excName = str(excClass)
+			raise self.failureException("%s not raised: %s" % (excName, msg))
+
 class TextTestRunner(unittest.TextTestRunner):
 	"""
 	We subclass unittest.TextTestRunner to output SKIP for tests that fail but are skippable
 	"""
-	
+
 	def _makeResult(self):
-	        return TextTestResult(self.stream, self.descriptions, self.verbosity)
+		return TextTestResult(self.stream, self.descriptions, self.verbosity)
 
 	def run(self, test):
 		"""
@@ -236,7 +270,7 @@ class TextTestRunner(unittest.TextTestRunner):
 		else:
 			self.stream.writeln("OK")
 		return result
-	
+
 test_cps = ['sys-apps/portage','virtual/portage']
 test_versions = ['1.0', '1.0-r1','2.3_p4','1.0_alpha57']
 test_slots = [ None, '1','gentoo-sources-2.6.17','spankywashere']

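The tests/__init__.py rework above adds a portage_skip escape hatch next to the existing todo flag: when a test raises its failureException, the runner reports SKIP (with an optional reason) instead of FAIL, and printErrors() lists the skipped tests separately. Usage would look roughly like this (a hypothetical test module, not one from the tree); note the flag only changes how a failure is reported, so a passing test still passes normally:

    from portage.tests import TestCase

    class LocaleDependentTest(TestCase):
        def testCollation(self):
            # If the assertion below fails in a minimal environment,
            # report this test as SKIP with the reason instead of FAIL.
            self.portage_skip = "requires a UTF-8 locale"
            self.assertEqual(sorted(['a', 'B']), ['B', 'a'])
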
diff --git a/portage_with_autodep/pym/portage/tests/__init__.pyo b/portage_with_autodep/pym/portage/tests/__init__.pyo
new file mode 100644
index 0000000..0e961b8
Binary files /dev/null and b/portage_with_autodep/pym/portage/tests/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/tests/bin/__init__.py b/portage_with_autodep/pym/portage/tests/bin/__init__.py
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/bin/__test__ b/portage_with_autodep/pym/portage/tests/bin/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/bin/setup_env.py b/portage_with_autodep/pym/portage/tests/bin/setup_env.py
deleted file mode 100644
index e07643d..0000000
--- a/portage_with_autodep/pym/portage/tests/bin/setup_env.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# setup_env.py -- Make sure bin subdir has sane env for testing
-# Copyright 2007-2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import tempfile
-
-from portage import os
-from portage import shutil
-from portage.tests import TestCase
-from portage.process import spawn
-
-basepath = os.path.join(os.path.dirname(os.path.dirname(
-	os.path.abspath(__file__))),
-	"..", "..", "..")
-bindir = os.path.join(basepath, "bin")
-pymdir = os.path.join(basepath, "pym")
-basedir = None
-env = None
-
-def binTestsCleanup():
-	global basedir
-	if basedir is None:
-		return
-	if os.access(basedir, os.W_OK):
-		shutil.rmtree(basedir)
-		basedir = None
-
-def binTestsInit():
-	binTestsCleanup()
-	global basedir, env
-	basedir = tempfile.mkdtemp()
-	env = os.environ.copy()
-	env["D"] = os.path.join(basedir, "image")
-	env["T"] = os.path.join(basedir, "temp")
-	env["S"] = os.path.join(basedir, "workdir")
-	env["PF"] = "portage-tests-0.09-r1"
-	env["PATH"] = bindir + ":" + env["PATH"]
-	env["PORTAGE_BIN_PATH"] = bindir
-	env["PORTAGE_PYM_PATH"] = pymdir
-	os.mkdir(env["D"])
-	os.mkdir(env["T"])
-	os.mkdir(env["S"])
-
-class BinTestCase(TestCase):
-	def init(self):
-		binTestsInit()
-	def cleanup(self):
-		binTestsCleanup()
-
-def _exists_in_D(path):
-	# Note: do not use os.path.join() here, we assume D to end in /
-	return os.access(env["D"] + path, os.W_OK)
-def exists_in_D(path):
-	if not _exists_in_D(path):
-		raise TestCase.failureException
-def xexists_in_D(path):
-	if _exists_in_D(path):
-		raise TestCase.failureException
-
-def portage_func(func, args, exit_status=0):
-	# we don't care about the output of the programs,
-	# just their exit value and the state of $D
-	global env
-	f = open('/dev/null', 'wb')
-	fd_pipes = {0:0,1:f.fileno(),2:f.fileno()}
-	def pre_exec():
-		os.chdir(env["S"])
-	spawn([func] + args.split(), env=env,
-		fd_pipes=fd_pipes, pre_exec=pre_exec)
-	f.close()
-
-def create_portage_wrapper(bin):
-	def derived_func(*args):
-		newargs = list(args)
-		newargs.insert(0, bin)
-		return portage_func(*newargs)
-	return derived_func
-
-for bin in os.listdir(os.path.join(bindir, "ebuild-helpers")):
-	if bin.startswith("do") or \
-	   bin.startswith("new") or \
-	   bin.startswith("prep") or \
-	   bin in ["ecompress","ecompressdir","fowners","fperms"]:
-		globals()[bin] = create_portage_wrapper(
-			os.path.join(bindir, "ebuild-helpers", bin))

diff --git a/portage_with_autodep/pym/portage/tests/bin/test_dobin.py b/portage_with_autodep/pym/portage/tests/bin/test_dobin.py
deleted file mode 100644
index 6f50d7a..0000000
--- a/portage_with_autodep/pym/portage/tests/bin/test_dobin.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# test_dobin.py -- Portage Unit Testing Functionality
-# Copyright 2007-2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests.bin.setup_env import BinTestCase, dobin, xexists_in_D
-
-class DoBin(BinTestCase):
-	def testDoBin(self):
-		self.init()
-		try:
-			dobin("does-not-exist", 1)
-			xexists_in_D("does-not-exist")
-			xexists_in_D("/bin/does-not-exist")
-			xexists_in_D("/usr/bin/does-not-exist")
-		finally:
-			self.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/bin/test_dodir.py b/portage_with_autodep/pym/portage/tests/bin/test_dodir.py
deleted file mode 100644
index f4eb9b2..0000000
--- a/portage_with_autodep/pym/portage/tests/bin/test_dodir.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# test_dodir.py -- Portage Unit Testing Functionality
-# Copyright 2007-2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests.bin.setup_env import BinTestCase, dodir, exists_in_D
-
-class DoDir(BinTestCase):
-	def testDoDir(self):
-		self.init()
-		try:
-			dodir("usr /usr")
-			exists_in_D("/usr")
-			dodir("/var/lib/moocow")
-			exists_in_D("/var/lib/moocow")
-		finally:
-			self.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/dbapi/__test__ b/portage_with_autodep/pym/portage/tests/dbapi/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/dbapi/test_fakedbapi.py b/portage_with_autodep/pym/portage/tests/dbapi/test_fakedbapi.py
deleted file mode 100644
index a2c5f77..0000000
--- a/portage_with_autodep/pym/portage/tests/dbapi/test_fakedbapi.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import shutil
-import tempfile
-
-from portage import os
-from portage.dbapi.virtual import fakedbapi
-from portage.package.ebuild.config import config
-from portage.tests import TestCase
-
-class TestFakedbapi(TestCase):
-
-	def testFakedbapi(self):
-		packages = (
-			("sys-apps/portage-2.1.10", {
-				"EAPI"         : "2",
-				"IUSE"         : "ipc doc",
-				"repository"   : "gentoo",
-				"SLOT"         : "0",
-				"USE"          : "ipc missing-iuse",
-			}),
-			("virtual/package-manager-0", {
-				"EAPI"         : "0",
-				"repository"   : "gentoo",
-				"SLOT"         : "0",
-			}),
-		)
-
-		match_tests = (
-			("sys-apps/portage:0[ipc]",             ["sys-apps/portage-2.1.10"]),
-			("sys-apps/portage:0[-ipc]",            []),
-			("sys-apps/portage:0[doc]",             []),
-			("sys-apps/portage:0[-doc]",            ["sys-apps/portage-2.1.10"]),
-			("sys-apps/portage:0",                  ["sys-apps/portage-2.1.10"]),
-			("sys-apps/portage:0[missing-iuse]",    []),
-			("sys-apps/portage:0[-missing-iuse]",   []),
-			("sys-apps/portage:0::gentoo[ipc]",     ["sys-apps/portage-2.1.10"]),
-			("sys-apps/portage:0::multilib[ipc]",   []),
-			("virtual/package-manager",             ["virtual/package-manager-0"]),
-		)
-
-		tempdir = tempfile.mkdtemp()
-		try:
-			portdir = os.path.join(tempdir, "usr/portage")
-			os.makedirs(portdir)
-			env = {
-				"PORTDIR": portdir,
-			}
-			fakedb = fakedbapi(settings=config(config_profile_path="",
-				env=env, _eprefix=tempdir))
-			for cpv, metadata in packages:
-				fakedb.cpv_inject(cpv, metadata=metadata)
-
-			for atom, expected_result in match_tests:
-				self.assertEqual( fakedb.match(atom), expected_result )
-		finally:
-			shutil.rmtree(tempdir)

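For reference, the fakedbapi API the test above exercises, reduced to its essentials. This is only a sketch: 'settings' is assumed to be a config instance built with the same temporary-PORTDIR plumbing as in the test.

from portage.dbapi.virtual import fakedbapi

fakedb = fakedbapi(settings=settings)  # 'settings' assumed, see the test above
fakedb.cpv_inject("sys-apps/portage-2.1.10", metadata={
	"EAPI": "2", "SLOT": "0", "IUSE": "ipc doc",
	"USE": "ipc", "repository": "gentoo"})
# match() resolves an atom against the injected packages
fakedb.match("sys-apps/portage:0[ipc]")   # ['sys-apps/portage-2.1.10']
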
diff --git a/portage_with_autodep/pym/portage/tests/dep/__init__.py b/portage_with_autodep/pym/portage/tests/dep/__init__.py
deleted file mode 100644
index 9c3f524..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# tests/portage.dep/__init__.py -- Portage Unit Test functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/dep/__test__ b/portage_with_autodep/pym/portage/tests/dep/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/dep/testAtom.py b/portage_with_autodep/pym/portage/tests/dep/testAtom.py
deleted file mode 100644
index 092cacf..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/testAtom.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# Copyright 2006, 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import Atom
-from portage.exception import InvalidAtom
-
-class TestAtom(TestCase):
-
-	def testAtom(self):
-
-		tests = (
-			( "=sys-apps/portage-2.1-r1:0[doc,a=,!b=,c?,!d?,-e]",
-				('=',  'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', None), False, False ),
-			( "=sys-apps/portage-2.1-r1*:0[doc]",
-				('=*',  'sys-apps/portage', '2.1-r1', '0', '[doc]', None), False, False ),
-			( "sys-apps/portage:0[doc]",
-				(None,  'sys-apps/portage', None, '0', '[doc]', None), False, False ),
-			( "*/*",
-				(None,  '*/*', None, None, None, None), True, False ),
-			( "sys-apps/*",
-				(None,  'sys-apps/*', None, None, None, None), True, False ),
-			( "*/portage",
-				(None,  '*/portage', None, None, None, None), True, False ),
-			( "s*s-*/portage:1",
-				(None,  's*s-*/portage', None, '1', None, None), True, False ),
-			( "*/po*ge:2",
-				(None,  '*/po*ge', None, '2', None, None), True, False ),
-			( "!dev-libs/A",
-				(None,  'dev-libs/A', None, None, None, None), True, True ),
-			( "!!dev-libs/A",
-				(None,  'dev-libs/A', None, None, None, None), True, True ),
-			( "dev-libs/A[foo(+)]",
-				(None,  'dev-libs/A', None, None, "[foo(+)]", None), True, True ),
-			( "dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
-				(None,  'dev-libs/A', None, None, "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True ),
-			( "dev-libs/A:2[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
-				(None,  'dev-libs/A', None, "2", "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True ),
-
-			( "=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]",
-				('=',  'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', 'repo_name'), False, True ),
-			( "=sys-apps/portage-2.1-r1*:0::repo_name[doc]",
-				('=*',  'sys-apps/portage', '2.1-r1', '0', '[doc]', 'repo_name'), False, True ),
-			( "sys-apps/portage:0::repo_name[doc]",
-				(None,  'sys-apps/portage', None, '0', '[doc]', 'repo_name'), False, True ),
-
-			( "*/*::repo_name",
-				(None,  '*/*', None, None, None, 'repo_name'), True, True ),
-			( "sys-apps/*::repo_name",
-				(None,  'sys-apps/*', None, None, None, 'repo_name'), True, True ),
-			( "*/portage::repo_name",
-				(None,  '*/portage', None, None, None, 'repo_name'), True, True ),
-			( "s*s-*/portage:1::repo_name",
-				(None,  's*s-*/portage', None, '1', None, 'repo_name'), True, True ),
-		)
-		
-		tests_xfail = (
-			( Atom("sys-apps/portage"), False, False ),
-			( "cat/pkg[a!]", False, False ),
-			( "cat/pkg[!a]", False, False ),
-			( "cat/pkg[!a!]", False, False ),
-			( "cat/pkg[!a-]", False, False ),
-			( "cat/pkg[-a=]", False, False ),
-			( "cat/pkg[-a?]", False, False ),
-			( "cat/pkg[-a!]", False, False ),
-			( "cat/pkg[=a]", False, False ),
-			( "cat/pkg[=a=]", False, False ),
-			( "cat/pkg[=a?]", False, False ),
-			( "cat/pkg[=a!]", False, False ),
-			( "cat/pkg[=a-]", False, False ),
-			( "cat/pkg[?a]", False, False ),
-			( "cat/pkg[?a=]", False, False ),
-			( "cat/pkg[?a?]", False, False ),
-			( "cat/pkg[?a!]", False, False ),
-			( "cat/pkg[?a-]", False, False ),
-			( "sys-apps/portage[doc]:0", False, False ),
-			( "*/*", False, False ),
-			( "sys-apps/*", False, False ),
-			( "*/portage", False, False ),
-			( "*/**", True, False ),
-			( "*/portage[use]", True, False ),
-			( "cat/pkg[a()]", False, False ),
-			( "cat/pkg[a(]", False, False ),
-			( "cat/pkg[a)]", False, False ),
-			( "cat/pkg[a(,b]", False, False ),
-			( "cat/pkg[a),b]", False, False ),
-			( "cat/pkg[a(*)]", False, False ),
-			( "cat/pkg[a(*)]", True, False ),
-			( "cat/pkg[a(+-)]", False, False ),
-			( "cat/pkg[a()]", False, False ),
-			( "cat/pkg[(+)a]", False, False ),
-			( "cat/pkg[a=(+)]", False, False ),
-			( "cat/pkg[!(+)a=]", False, False ),
-			( "cat/pkg[!a=(+)]", False, False ),
-			( "cat/pkg[a?(+)]", False, False ),
-			( "cat/pkg[!a?(+)]", False, False ),
-			( "cat/pkg[!(+)a?]", False, False ),
-			( "cat/pkg[-(+)a]", False, False ),
-			( "cat/pkg[a(+),-a]", False, False ),
-			( "cat/pkg[a(-),-a]", False, False ),
-			( "cat/pkg[-a,a(+)]", False, False ),
-			( "cat/pkg[-a,a(-)]", False, False ),
-			( "cat/pkg[-a(+),a(-)]", False, False ),
-			( "cat/pkg[-a(-),a(+)]", False, False ),
-			( "sys-apps/portage[doc]::repo_name", False, False ),
-			( "sys-apps/portage:0[doc]::repo_name", False, False ),
-			( "sys-apps/portage[doc]:0::repo_name", False, False ),
-			( "=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]", False, False ),
-			( "=sys-apps/portage-2.1-r1*:0::repo_name[doc]", False, False ),
-			( "sys-apps/portage:0::repo_name[doc]", False, False ),
-			( "*/*::repo_name", True, False ),
-		)
-
-		for atom, parts, allow_wildcard, allow_repo in tests:
-			a = Atom(atom, allow_wildcard=allow_wildcard, allow_repo=allow_repo)
-			op, cp, ver, slot, use, repo = parts
-			self.assertEqual( op, a.operator,
-				msg="Atom('%s').operator = %s == '%s'" % ( atom, a.operator, op ) )
-			self.assertEqual( cp, a.cp,
-				msg="Atom('%s').cp = %s == '%s'" % ( atom, a.cp, cp ) )
-			if ver is not None:
-				cpv = "%s-%s" % (cp, ver)
-			else:
-				cpv = cp
-			self.assertEqual( cpv, a.cpv,
-				msg="Atom('%s').cpv = %s == '%s'" % ( atom, a.cpv, cpv ) )
-			self.assertEqual( slot, a.slot,
-				msg="Atom('%s').slot = %s == '%s'" % ( atom, a.slot, slot ) )
-			self.assertEqual( repo, a.repo,
-				msg="Atom('%s').repo = %s == '%s'" % ( atom, a.repo, repo ) )
-
-			if a.use:
-				returned_use = str(a.use)
-			else:
-				returned_use = None
-			self.assertEqual( use, returned_use,
-				msg="Atom('%s').use = %s == '%s'" % ( atom, returned_use, use ) )
-
-		for atom, allow_wildcard, allow_repo in tests_xfail:
-			self.assertRaisesMsg(atom, (InvalidAtom, TypeError), Atom, atom, \
-				allow_wildcard=allow_wildcard, allow_repo=allow_repo)
-
-	def test_intersects(self):
-		test_cases = (
-			("dev-libs/A", "dev-libs/A", True),
-			("dev-libs/A", "dev-libs/B", False),
-			("dev-libs/A", "sci-libs/A", False),
-			("dev-libs/A[foo]", "sci-libs/A[bar]", False),
-			("dev-libs/A[foo(+)]", "sci-libs/A[foo(-)]", False),
-			("=dev-libs/A-1", "=dev-libs/A-1-r1", False),
-			("~dev-libs/A-1", "=dev-libs/A-1", False),
-			("=dev-libs/A-1:1", "=dev-libs/A-1", True),
-			("=dev-libs/A-1:1", "=dev-libs/A-1:1", True),
-			("=dev-libs/A-1:1", "=dev-libs/A-1:2", False),
-		)
-
-		for atom, other, expected_result in test_cases:
-			self.assertEqual(Atom(atom).intersects(Atom(other)), expected_result, \
-				"%s and %s should intersect: %s" % (atom, other, expected_result))
-
-	def test_violated_conditionals(self):
-		test_cases = (
-			("dev-libs/A", ["foo"], ["foo"], None, "dev-libs/A"),
-			("dev-libs/A[foo]", [], ["foo"], None, "dev-libs/A[foo]"),
-			("dev-libs/A[foo]", ["foo"], ["foo"], None, "dev-libs/A"),
-			("dev-libs/A[foo]", [], ["foo"], [], "dev-libs/A[foo]"),
-			("dev-libs/A[foo]", ["foo"], ["foo"], [], "dev-libs/A"),
-
-			("dev-libs/A:0[foo]", ["foo"], ["foo"], [], "dev-libs/A:0"),
-
-			("dev-libs/A[foo,-bar]", [], ["foo", "bar"], None, "dev-libs/A[foo]"),
-			("dev-libs/A[-foo,bar]", [], ["foo", "bar"], None, "dev-libs/A[bar]"),
-
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
-
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[!c=]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,b=,!c=]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,!e?]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,-f]"),
-
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], ["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c=]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], ["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a,!c=]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], ["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a,!c=]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], ["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a,!c=]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], ["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a,!c=]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], ["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a,!c=,-f]"),
-
-			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"], ["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c(+)=]"),
-			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"], ["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a(-),!c(-)=]"),
-			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"], ["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a(+),!c(+)=]"),
-			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], ["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a(-),!c(-)=]"),
-			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"], ["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a(+),!c(+)=]"),
-			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"], ["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
-
-			("dev-libs/A[a(+),b(+)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"], ["a"], ["a"], "dev-libs/A[b(+)=,!e(+)?]"),
-			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"], ["b"], ["b"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
-			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"], ["c"], ["c"], "dev-libs/A[!c(+)=,!e(+)?]"),
-			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], ["d"], ["d"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
-			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"], ["e"], ["e"], "dev-libs/A"),
-			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"], ["f"], ["f"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
-
-			#Some more test cases to trigger all remaining code paths
-			("dev-libs/B[x?]", [], ["x"], ["x"], "dev-libs/B[x?]"),
-			("dev-libs/B[x(+)?]", [], [], ["x"], "dev-libs/B"),
-			("dev-libs/B[x(-)?]", [], [], ["x"], "dev-libs/B[x(-)?]"),
-
-			("dev-libs/C[x=]", [], ["x"], ["x"], "dev-libs/C[x=]"),
-			("dev-libs/C[x(+)=]", [], [], ["x"], "dev-libs/C"),
-			("dev-libs/C[x(-)=]", [], [], ["x"], "dev-libs/C[x(-)=]"),
-
-			("dev-libs/D[!x=]", [], ["x"], ["x"], "dev-libs/D"),
-			("dev-libs/D[!x(+)=]", [], [], ["x"], "dev-libs/D[!x(+)=]"),
-			("dev-libs/D[!x(-)=]", [], [], ["x"], "dev-libs/D"),
-
-			#Missing IUSE test cases
-			("dev-libs/B[x]", [], [], [], "dev-libs/B[x]"),
-			("dev-libs/B[-x]", [], [], [], "dev-libs/B[-x]"),
-			("dev-libs/B[x?]", [], [], [], "dev-libs/B[x?]"),
-			("dev-libs/B[x=]", [], [], [], "dev-libs/B[x=]"),
-			("dev-libs/B[!x=]", [], [], ["x"], "dev-libs/B[!x=]"),
-			("dev-libs/B[!x?]", [], [], ["x"], "dev-libs/B[!x?]"),
-		)
-		
-		test_cases_xfail = (
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c", "d", "e", "f"], None),
-		)
-
-		class use_flag_validator(object):
-			def __init__(self, iuse):
-				self.iuse = iuse
-
-			def is_valid_flag(self, flag):
-				return flag in self.iuse
-
-		for atom, other_use, iuse, parent_use, expected_violated_atom in test_cases:
-			a = Atom(atom)
-			validator = use_flag_validator(iuse)
-			violated_atom = a.violated_conditionals(other_use, validator.is_valid_flag, parent_use)
-			if parent_use is None:
-				fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
-					(atom, " ".join(other_use), " ".join(iuse), "None", str(violated_atom), expected_violated_atom)
-			else:
-				fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
-					(atom, " ".join(other_use), " ".join(iuse), " ".join(parent_use), str(violated_atom), expected_violated_atom)
-			self.assertEqual(str(violated_atom), expected_violated_atom, fail_msg)
-
-		for atom, other_use, iuse, parent_use in test_cases_xfail:
-			a = Atom(atom)
-			validator = use_flag_validator(iuse)
-			self.assertRaisesMsg(atom, InvalidAtom, \
-				a.violated_conditionals, other_use, validator.is_valid_flag, parent_use)
-
-	def test_evaluate_conditionals(self):
-		test_cases = (
-			("dev-libs/A[foo]", [], "dev-libs/A[foo]"),
-			("dev-libs/A[foo]", ["foo"], "dev-libs/A[foo]"),
-
-			("dev-libs/A:0[foo=]", ["foo"], "dev-libs/A:0[foo]"),
-
-			("dev-libs/A[foo,-bar]", [], "dev-libs/A[foo,-bar]"),
-			("dev-libs/A[-foo,bar]", [], "dev-libs/A[-foo,bar]"),
-
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], "dev-libs/A[a,-b,c,-e,-f]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], "dev-libs/A[a,-b,c,-e,-f]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], "dev-libs/A[a,b,c,-e,-f]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], "dev-libs/A[a,-b,-c,-e,-f]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], "dev-libs/A[a,-b,c,d,-e,-f]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], "dev-libs/A[a,-b,c,-f]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], "dev-libs/A[a,-b,c,-e,-f]"),
-			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], "dev-libs/A[a(-),-b(+),c(-),d(+),-e(-),-f(+)]"),
-			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["f"], "dev-libs/A[a(+),-b(-),c(+),-e(+),-f(-)]"),
-		)
-
-		for atom, use, expected_atom in test_cases:
-			a = Atom(atom)
-			b = a.evaluate_conditionals(use)
-			self.assertEqual(str(b), expected_atom)
-			self.assertEqual(str(b.unevaluated_atom), atom)
-
-	def test__eval_qa_conditionals(self):
-		test_cases = (
-			("dev-libs/A[foo]", [], [], "dev-libs/A[foo]"),
-			("dev-libs/A[foo]", ["foo"], [], "dev-libs/A[foo]"),
-			("dev-libs/A[foo]", [], ["foo"], "dev-libs/A[foo]"),
-
-			("dev-libs/A:0[foo]", [], [], "dev-libs/A:0[foo]"),
-			("dev-libs/A:0[foo]", ["foo"], [], "dev-libs/A:0[foo]"),
-			("dev-libs/A:0[foo]", [], ["foo"], "dev-libs/A:0[foo]"),
-			("dev-libs/A:0[foo=]", [], ["foo"], "dev-libs/A:0[foo]"),
-
-			("dev-libs/A[foo,-bar]", ["foo"], ["bar"], "dev-libs/A[foo,-bar]"),
-			("dev-libs/A[-foo,bar]", ["foo", "bar"], [], "dev-libs/A[-foo,bar]"),
-
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a", "b", "c"], [], "dev-libs/A[a,-b,c,d,-e,-f]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c"], "dev-libs/A[a,b,-c,d,-e,-f]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d", "e", "f"], [], "dev-libs/A[a,b,-b,c,-c,-e,-f]"),
-			("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["d", "e", "f"], "dev-libs/A[a,b,-b,c,-c,d,-f]"),
-			
-			("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", \
-				["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a(-),-b(+),c(-),-e(-),-f(+)]"),
-			("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", \
-				[], ["a", "b", "c", "d", "e", "f"], "dev-libs/A[a(+),b(-),-c(+),d(-),-f(-)]"),
-		)
-
-		for atom, use_mask, use_force, expected_atom in test_cases:
-			a = Atom(atom)
-			b = a._eval_qa_conditionals(use_mask, use_force)
-			self.assertEqual(str(b), expected_atom)
-			self.assertEqual(str(b.unevaluated_atom), atom)

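As the first group of cases above documents, parsing an atom exposes its components as attributes. A condensed sketch of that contract:

from portage.dep import Atom

a = Atom("=sys-apps/portage-2.1-r1:0[doc]")
a.operator    # '='
a.cp          # 'sys-apps/portage'
a.cpv         # 'sys-apps/portage-2.1-r1'
a.slot        # '0'
str(a.use)    # '[doc]'
a.repo        # None (a '::repo' suffix requires allow_repo=True)
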
diff --git a/portage_with_autodep/pym/portage/tests/dep/testCheckRequiredUse.py b/portage_with_autodep/pym/portage/tests/dep/testCheckRequiredUse.py
deleted file mode 100644
index 54791e0..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/testCheckRequiredUse.py
+++ /dev/null
@@ -1,219 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import check_required_use
-from portage.exception import InvalidDependString
-
-class TestCheckRequiredUse(TestCase):
-
-	def testCheckRequiredUse(self):
-		test_cases = (
-			( "|| ( a b )", [], ["a", "b"], False),
-			( "|| ( a b )", ["a"], ["a", "b"], True),
-			( "|| ( a b )", ["b"], ["a", "b"], True),
-			( "|| ( a b )", ["a", "b"], ["a", "b"], True),
-
-			( "^^ ( a b )", [], ["a", "b"], False),
-			( "^^ ( a b )", ["a"], ["a", "b"], True),
-			( "^^ ( a b )", ["b"], ["a", "b"], True),
-			( "^^ ( a b )", ["a", "b"], ["a", "b"], False),
-
-			( "^^ ( || ( a b ) c )", [], ["a", "b", "c"], False),
-			( "^^ ( || ( a b ) c )", ["a"], ["a", "b", "c"], True),
-
-			( "^^ ( || ( ( a b ) ) ( c ) )", [], ["a", "b", "c"], False),
-			( "( ^^ ( ( || ( ( a ) ( b ) ) ) ( ( c ) ) ) )", ["a"], ["a", "b", "c"], True),
-
-			( "a || ( b c )", ["a"], ["a", "b", "c"], False),
-			( "|| ( b c ) a", ["a"], ["a", "b", "c"], False),
-
-			( "|| ( a b c )", ["a"], ["a", "b", "c"], True),
-			( "|| ( a b c )", ["b"], ["a", "b", "c"], True),
-			( "|| ( a b c )", ["c"], ["a", "b", "c"], True),
-
-			( "^^ ( a b c )", ["a"], ["a", "b", "c"], True),
-			( "^^ ( a b c )", ["b"], ["a", "b", "c"], True),
-			( "^^ ( a b c )", ["c"], ["a", "b", "c"], True),
-			( "^^ ( a b c )", ["a", "b"], ["a", "b", "c"], False),
-			( "^^ ( a b c )", ["b", "c"], ["a", "b", "c"], False),
-			( "^^ ( a b c )", ["a", "c"], ["a", "b", "c"], False),
-			( "^^ ( a b c )", ["a", "b", "c"], ["a", "b", "c"], False),
-
-			( "a? ( ^^ ( b c ) )", [], ["a", "b", "c"], True),
-			( "a? ( ^^ ( b c ) )", ["a"], ["a", "b", "c"], False),
-			( "a? ( ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
-			( "a? ( ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
-			( "a? ( ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
-			( "a? ( ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
-
-			( "^^ ( a? ( !b ) !c? ( d ) )", [], ["a", "b", "c", "d"], False),
-			( "^^ ( a? ( !b ) !c? ( d ) )", ["a"], ["a", "b", "c", "d"], True),
-			( "^^ ( a? ( !b ) !c? ( d ) )", ["c"], ["a", "b", "c", "d"], True),
-			( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "c"], ["a", "b", "c", "d"], True),
-			( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "c"], ["a", "b", "c", "d"], False),
-			( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
-			( "^^ ( a? ( !b ) !c? ( d ) )", ["a", "d"], ["a", "b", "c", "d"], False),
-
-			( "|| ( ^^ ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
-			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
-			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
-			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
-			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
-			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], True),
-			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
-			( "|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
-
-			( "^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
-			( "^^ ( || ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
-			( "^^ ( || ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], False),
-			( "^^ ( || ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
-			( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], False),
-			( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], False),
-			( "^^ ( || ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
-			( "^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], True),
-
-			( "|| ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], True),
-			( "|| ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
-			( "|| ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
-			( "|| ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
-			( "|| ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
-			( "|| ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
-			( "|| ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
-			( "|| ( ( a b ) c )", [], ["a", "b", "c"], False),
-
-			( "^^ ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], False),
-			( "^^ ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
-			( "^^ ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
-			( "^^ ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
-			( "^^ ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
-			( "^^ ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
-			( "^^ ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
-			( "^^ ( ( a b ) c )", [], ["a", "b", "c"], False),
-		)
-
-		test_cases_xfail = (
-			( "^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b"]),
-			( "^^ ( || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
-			( "^^( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
-			( "^^ || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
-			( "^^ ( ( || ) ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
-			( "^^ ( || ( a b ) ) ^^ ( b c ) )", [], ["a", "b", "c"]),
-		)
-
-		for required_use, use, iuse, expected in test_cases:
-			self.assertEqual(bool(check_required_use(required_use, use, iuse.__contains__)), \
-				expected, required_use + ", USE = " + " ".join(use))
-
-		for required_use, use, iuse in test_cases_xfail:
-			self.assertRaisesMsg(required_use + ", USE = " + " ".join(use), \
-				InvalidDependString, check_required_use, required_use, use, iuse.__contains__)
-
-	def testCheckRequiredUseFilterSatisfied(self):
-		"""
-		Test filtering of satisfied parts of REQUIRED_USE,
-		in order to reduce noise for bug #353234.
-		"""
-		test_cases = (
-			(
-				"bindist? ( !amr !faac !win32codecs ) cdio? ( !cdparanoia !cddb ) dvdnav? ( dvd )",
-				("cdio", "cdparanoia"),
-				"cdio? ( !cdparanoia )"
-			),
-			(
-				"|| ( !amr !faac !win32codecs ) cdio? ( !cdparanoia !cddb ) ^^ ( foo bar )",
-				["cdio", "cdparanoia", "foo"],
-				"cdio? ( !cdparanoia )"
-			),
-			(
-				"^^ ( || ( a b ) c )",
-				("a", "b", "c"),
-				"^^ ( || ( a b ) c )"
-			),
-			(
-				"^^ ( || ( ( a b ) ) ( c ) )",
-				("a", "b", "c"),
-				"^^ ( ( a b ) c )"
-			),
-			(
-				"a? ( ( c e ) ( b d ) )",
-				("a", "c", "e"),
-				"a? ( b d )"
-			),
-			(
-				"a? ( ( c e ) ( b d ) )",
-				("a", "b", "c", "e"),
-				"a? ( d )"
-			),
-			(
-				"a? ( ( c e ) ( c e b c d e c ) )",
-				("a", "c", "e"),
-				"a? ( b d )"
-			),
-			(
-				"^^ ( || ( a b ) ^^ ( b c ) )",
-				("a", "b"),
-				"^^ ( || ( a b ) ^^ ( b c ) )"
-			),
-			(
-				"^^ ( || ( a b ) ^^ ( b c ) )",
-				["a", "c"],
-				"^^ ( || ( a b ) ^^ ( b c ) )"
-			),
-			(
-				"^^ ( || ( a b ) ^^ ( b c ) )",
-				["b", "c"],
-				""
-			),
-			(
-				"^^ ( || ( a b ) ^^ ( b c ) )",
-				["a", "b", "c"],
-				""
-			),
-			(
-				"^^ ( ( a b c ) ( b c d ) )",
-				["a", "b", "c"],
-				""
-			),
-			(
-				"^^ ( ( a b c ) ( b c d ) )",
-				["a", "b", "c", "d"],
-				"^^ ( ( a b c ) ( b c d ) )"
-			),
-			(
-				"^^ ( ( a b c ) ( b c !d ) )",
-				["a", "b", "c"],
-				"^^ ( ( a b c ) ( b c !d ) )"
-			),
-			(
-				"^^ ( ( a b c ) ( b c !d ) )",
-				["a", "b", "c", "d"],
-				""
-			),
-			(
-				"( ( ( a ) ) ( ( ( b c ) ) ) )",
-				[""],
-				"a b c"
-			),
-			(
-				"|| ( ( ( ( a ) ) ( ( ( b c ) ) ) ) )",
-				[""],
-				"a b c"
-			),
-			(
-				"|| ( ( a ( ( ) ( ) ) ( ( ) ) ( b ( ) c ) ) )",
-				[""],
-				"a b c"
-			),
-			(
-				"|| ( ( a b c ) ) || ( ( d e f ) )",
-				[""],
-				"a b c d e f"
-			),
-		)
-		for required_use, use, expected in test_cases:
-			result = check_required_use(required_use, use, lambda k: True).tounicode()
-			self.assertEqual(result, expected,
-				"REQUIRED_USE = '%s', USE = '%s', '%s' != '%s'" % \
-				(required_use, " ".join(use), result, expected))

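Distilled from the cases above: check_required_use() takes the REQUIRED_USE string, the enabled USE flags, and a validity predicate for flag names (here iuse.__contains__); the truth value of its result says whether the constraint holds, and tounicode() renders the unsatisfied remainder.

from portage.dep import check_required_use

iuse = ["a", "b", "c"]
# '^^' means exactly-one-of, '||' means at-least-one-of
bool(check_required_use("^^ ( a b )", ["a"], iuse.__contains__))        # True
bool(check_required_use("^^ ( a b )", ["a", "b"], iuse.__contains__))   # False
check_required_use("|| ( a b )", [], iuse.__contains__).tounicode()     # the violated part, as text
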
diff --git a/portage_with_autodep/pym/portage/tests/dep/testExtendedAtomDict.py b/portage_with_autodep/pym/portage/tests/dep/testExtendedAtomDict.py
deleted file mode 100644
index 69d092e..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/testExtendedAtomDict.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# testExtendedAtomDict.py -- Portage Unit Testing Functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import ExtendedAtomDict
-
-class TestExtendedAtomDict(TestCase):
-
-	def testExtendedAtomDict(self):
-		d = ExtendedAtomDict(dict)
-		d["*/*"] = { "test1": "x" }
-		d["dev-libs/*"] = { "test2": "y" }
-		d.setdefault("sys-apps/portage", {})["test3"] = "z"
-		self.assertEqual(d.get("dev-libs/A"), { "test1": "x", "test2": "y" })
-		self.assertEqual(d.get("sys-apps/portage"), { "test1": "x", "test3": "z" })
-		self.assertEqual(d["dev-libs/*"], { "test2": "y" })
-		self.assertEqual(d["sys-apps/portage"], {'test1': 'x', 'test3': 'z'})

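In short, ExtendedAtomDict keys may be wildcard atoms, and get() merges the values of every stored pattern that matches the queried package, while indexing with a stored wildcard key returns only that entry:

from portage.dep import ExtendedAtomDict

d = ExtendedAtomDict(dict)
d["*/*"] = {"test1": "x"}
d["dev-libs/*"] = {"test2": "y"}
d.get("dev-libs/A")   # {'test1': 'x', 'test2': 'y'} -- both patterns match
d["dev-libs/*"]       # {'test2': 'y'} -- exact lookup of a stored pattern
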
diff --git a/portage_with_autodep/pym/portage/tests/dep/testExtractAffectingUSE.py b/portage_with_autodep/pym/portage/tests/dep/testExtractAffectingUSE.py
deleted file mode 100644
index 026a552..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/testExtractAffectingUSE.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import extract_affecting_use
-from portage.exception import InvalidDependString
-
-class TestExtractAffectingUSE(TestCase):
-
-	def testExtractAffectingUSE(self):
-		test_cases = (
-			("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "A", ("a",)),
-			("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "B", ("b",)),
-			("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "C", ("c",)),
-			("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "D", ("d",)),
-			
-			("a? ( b? ( AB ) )", "AB", ("a", "b")),
-			("a? ( b? ( c? ( ABC ) ) )", "ABC", ("a", "b", "c")),
-
-			("a? ( A b? ( c? ( ABC ) AB ) )", "A", ("a",)),
-			("a? ( A b? ( c? ( ABC ) AB ) )", "AB", ("a", "b")),
-			("a? ( A b? ( c? ( ABC ) AB ) )", "ABC", ("a", "b", "c")),
-			("a? ( A b? ( c? ( ABC ) AB ) ) X", "X", []),
-			("X a? ( A b? ( c? ( ABC ) AB ) )", "X", []),
-
-			("ab? ( || ( A B ) )", "A", ("ab",)),
-			("!ab? ( || ( A B ) )", "B", ("ab",)),
-			("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "A", ("ab",)),
-			("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "B", ("ab", "b")),
-			("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "C", ("ab", "b")),
-
-			("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "A", ("ab",)),
-			("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "B", ("ab", "b")),
-			("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "C", ("ab", "b")),
-
-			("a? ( A )", "B", []),
-
-			("a? ( || ( A B ) )", "B", ["a"]),
-
-			# test USE dep defaults for bug #363073
-			("a? ( >=dev-lang/php-5.2[pcre(+)] )", ">=dev-lang/php-5.2[pcre(+)]", ["a"]),
-		)
-
-		test_cases_xfail = (
-			("? ( A )", "A"),
-			("!? ( A )", "A"),
-			("( A", "A"),
-			("A )", "A"),
-			
-			("||( A B )", "A"),
-			("|| (A B )", "A"),
-			("|| ( A B)", "A"),
-			("|| ( A B", "A"),
-			("|| A B )", "A"),
-			("|| A B", "A"),
-			("|| ( A B ) )", "A"),
-			("|| || B C", "A"),
-			("|| ( A B || )", "A"),
-			("a? A", "A"),
-			("( || ( || || ( A ) foo? ( B ) ) )", "A"),
-			("( || ( || bar? ( A ) foo? ( B ) ) )", "A"),
-		)
-
-		for dep, atom, expected in test_cases:
-			expected = set(expected)
-			result = extract_affecting_use(dep, atom, eapi="0")
-			fail_msg = "dep: " + dep + ", atom: " + atom + ", got: " + \
-				" ".join(sorted(result)) + ", expected: " + " ".join(sorted(expected))
-			self.assertEqual(result, expected, fail_msg)
-
-		for dep, atom in test_cases_xfail:
-			fail_msg = "dep: " + dep + ", atom: " + atom + \
-				", expected: InvalidDependString"
-			self.assertRaisesMsg(fail_msg, \
-				InvalidDependString, extract_affecting_use, dep, atom, eapi="0")

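The function under test answers the question "which USE flags gate this atom inside the dependency string?". A condensed sketch built from the nested case above:

from portage.dep import extract_affecting_use

dep = "a? ( A b? ( c? ( ABC ) AB ) )"
extract_affecting_use(dep, "AB", eapi="0")    # set(['a', 'b'])
extract_affecting_use(dep, "ABC", eapi="0")   # set(['a', 'b', 'c'])
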
diff --git a/portage_with_autodep/pym/portage/tests/dep/testStandalone.py b/portage_with_autodep/pym/portage/tests/dep/testStandalone.py
deleted file mode 100644
index e9f01df..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/testStandalone.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import cpvequal
-from portage.exception import PortageException
-
-class TestStandalone(TestCase):
-	""" Test some small functions in portage.dep
-	"""
-
-	def testCPVequal(self):
-
-		test_cases = (
-			( "sys-apps/portage-2.1","sys-apps/portage-2.1", True ),
-			( "sys-apps/portage-2.1","sys-apps/portage-2.0", False ),
-			( "sys-apps/portage-2.1","sys-apps/portage-2.1-r1", False ),
-			( "sys-apps/portage-2.1-r1","sys-apps/portage-2.1", False ),
-			( "sys-apps/portage-2.1_alpha3","sys-apps/portage-2.1", False ),
-			( "sys-apps/portage-2.1_alpha3_p6","sys-apps/portage-2.1_alpha3", False ),
-			( "sys-apps/portage-2.1_alpha3","sys-apps/portage-2.1", False ),
-			( "sys-apps/portage-2.1","sys-apps/X-2.1", False ),
-			( "sys-apps/portage-2.1","portage-2.1", False ),
-		)
-		
-		test_cases_xfail = (
-			( "sys-apps/portage","sys-apps/portage" ),
-			( "sys-apps/portage-2.1-6","sys-apps/portage-2.1-6" ),
-		)
-
-		for cpv1, cpv2, expected_result in test_cases:
-			self.assertEqual(cpvequal(cpv1, cpv2), expected_result)
-
-		for cpv1, cpv2 in test_cases_xfail:
-			self.assertRaisesMsg("cpvequal("+cpv1+", "+cpv2+")", \
-				PortageException, cpvequal, cpv1, cpv2)

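cpvequal() compares two full category/package-version strings and, as the xfail cases show, raises when either argument lacks a valid version:

from portage.dep import cpvequal

cpvequal("sys-apps/portage-2.1", "sys-apps/portage-2.1")      # True
cpvequal("sys-apps/portage-2.1", "sys-apps/portage-2.1-r1")   # False
cpvequal("sys-apps/portage", "sys-apps/portage")              # raises PortageException
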
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_best_match_to_list.py b/portage_with_autodep/pym/portage/tests/dep/test_best_match_to_list.py
deleted file mode 100644
index d050adc..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/test_best_match_to_list.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# test_best_match_to_list.py -- Portage Unit Testing Functionality
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import Atom, best_match_to_list
-
-class Test_best_match_to_list(TestCase):
-
-	def best_match_to_list_wrapper(self, mypkg, mylist):
-		"""
-		This function uses best_match_to_list to create a sorted
-		list of matching atoms.
-		"""
-		ret = []
-		while mylist:
-			m = best_match_to_list(mypkg, mylist)
-			if m is not None:
-				ret.append(m)
-				mylist.remove(m)
-			else:
-				break
-
-		return ret
-
-	def testBest_match_to_list(self):
-		tests = [
-					("dev-libs/A-1", [Atom("dev-libs/A"), Atom("=dev-libs/A-1")], \
-						[Atom("=dev-libs/A-1"), Atom("dev-libs/A")]),
-					("dev-libs/A-1", [Atom("dev-libs/B"), Atom("=dev-libs/A-1:0")], \
-						[Atom("=dev-libs/A-1:0")]),
-					("dev-libs/A-1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=dev-libs/A-1:0")], \
-						[Atom("=dev-libs/A-1:0"), Atom("dev-libs/*", allow_wildcard=True)]),
-					("dev-libs/A-1:0", [Atom("dev-*/*", allow_wildcard=True), Atom("dev-*/*:0", allow_wildcard=True),\
-						Atom("dev-libs/A"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A:0"), \
-						Atom("=dev-libs/A-1*"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1")], \
-						[Atom("=dev-libs/A-1"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1*"), \
-						Atom("dev-libs/A:0"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A"), \
-						Atom("dev-*/*:0", allow_wildcard=True), Atom("dev-*/*", allow_wildcard=True)])
-				]
-
-		for pkg, atom_list, result in tests:
-			self.assertEqual( self.best_match_to_list_wrapper( pkg, atom_list ), result )

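The wrapper above effectively sorts a list of atoms by specificity, repeatedly pulling out the best match. The underlying call on its own:

from portage.dep import Atom, best_match_to_list

atoms = [Atom("dev-libs/A"), Atom("=dev-libs/A-1")]
# the most specific atom matching the package wins
best_match_to_list("dev-libs/A-1", atoms)   # the '=dev-libs/A-1' atom
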
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_dep_getcpv.py b/portage_with_autodep/pym/portage/tests/dep/test_dep_getcpv.py
deleted file mode 100644
index 8a0a8aa..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/test_dep_getcpv.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# test_dep_getcpv.py -- Portage Unit Testing Functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import dep_getcpv
-
-class DepGetCPV(TestCase):
-	""" A simple testcase for isvalidatom
-	""" A simple testcase for dep_getcpv
-
-	def testDepGetCPV(self):
-		
-		prefix_ops = ["<", ">", "=", "~", "<=", 
-			      ">=", "!=", "!<", "!>", "!~"]
-
-		bad_prefix_ops = [ ">~", "<~", "~>", "~<" ]
-		postfix_ops = [ ("=", "*"), ]
-
-		cpvs = ["sys-apps/portage-2.1", "sys-apps/portage-2.1",
-				"sys-apps/portage-2.1"]
-		slots = [None, ":foo", ":2"]
-		for cpv in cpvs:
-			for slot in slots:
-				for prefix in prefix_ops:
-					mycpv = prefix + cpv
-					if slot:
-						mycpv += slot
-					self.assertEqual( dep_getcpv( mycpv ), cpv )
-
-				for prefix, postfix in postfix_ops:
-					mycpv = prefix + cpv + postfix
-					if slot:
-						mycpv += slot
-					self.assertEqual( dep_getcpv( mycpv ), cpv )

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_dep_getrepo.py b/portage_with_autodep/pym/portage/tests/dep/test_dep_getrepo.py
deleted file mode 100644
index 78ead8c..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/test_dep_getrepo.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import dep_getrepo
-
-class DepGetRepo(TestCase):
-	""" A simple testcase for isvalidatom
-	""" A simple testcase for dep_getrepo
-
-	def testDepGetRepo(self):
-
-		repo_char = "::"
-		repos = ( "a", "repo-name", "repo_name", "repo123", None )
-		cpvs = ["sys-apps/portage"]
-		versions = ["2.1.1","2.1-r1", None]
-		uses = ["[use]", None]
-		for cpv in cpvs:
-			for version in versions:
-				for use in uses:
-					for repo in repos:
-						pkg = cpv
-						if version:
-							pkg = '=' + pkg + '-' + version
-						if repo is not None:
-							pkg = pkg + repo_char + repo
-						if use:
-							pkg = pkg + use
-						self.assertEqual( dep_getrepo( pkg ), repo )

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_dep_getslot.py b/portage_with_autodep/pym/portage/tests/dep/test_dep_getslot.py
deleted file mode 100644
index 206cecc..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/test_dep_getslot.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# test_dep_getslot.py -- Portage Unit Testing Functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import dep_getslot
-
-class DepGetSlot(TestCase):
-	""" A simple testcase for isvalidatom
-	""" A simple testcase for dep_getslot
-
-	def testDepGetSlot(self):
-
-		slot_char = ":"
-		slots = ( "a", "1.2", "1", "IloveVapier", None )
-		cpvs = ["sys-apps/portage"]
-		versions = ["2.1.1","2.1-r1"]
-		for cpv in cpvs:
-			for version in versions:
-				for slot in slots:
-					mycpv = cpv
-					if version:
-						mycpv = '=' + mycpv + '-' + version
-					if slot is not None:
-						self.assertEqual( dep_getslot( 
-							mycpv + slot_char + slot ), slot )
-					else:
-						self.assertEqual( dep_getslot( mycpv ), slot )

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_dep_getusedeps.py b/portage_with_autodep/pym/portage/tests/dep/test_dep_getusedeps.py
deleted file mode 100644
index d2494f7..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/test_dep_getusedeps.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# test_dep_getusedeps.py -- Portage Unit Testing Functionality
-# Copyright 2007-2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import dep_getusedeps
-
-from portage.tests import test_cps, test_slots, test_versions, test_usedeps
-
-class DepGetUseDeps(TestCase):
-	""" A simple testcase for dep_getusedeps
-	"""
-
-	def testDepGetUseDeps(self):
-
-		for mycpv in test_cps:
-			for version in test_versions:
-				for slot in test_slots:
-					for use in test_usedeps:
-						cpv = mycpv[:]
-						if version:
-							cpv += version
-						if slot:
-							cpv += ":" + slot
-						if isinstance(use, tuple):
-							cpv += "[%s]" % (",".join(use),)
-							self.assertEqual( dep_getusedeps(
-								cpv ), use )
-						else:
-							if len(use):
-								self.assertEqual( dep_getusedeps(
-									cpv + "[" + use + "]" ), (use,) )
-							else:
-								self.assertEqual( dep_getusedeps(
-									cpv + "[" + use + "]" ), () )

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_get_operator.py b/portage_with_autodep/pym/portage/tests/dep/test_get_operator.py
deleted file mode 100644
index 4f9848f..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/test_get_operator.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# test_get_operator.py -- Portage Unit Testing Functionality
-# Copyright 2007 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import get_operator
-
-class GetOperator(TestCase):
-
-	def testGetOperator(self):
-
-		# get_operator does not validate operators
-		tests = [ ( "~", "~" ), ( "=", "=" ), ( ">", ">" ),
-			  ( ">=", ">=" ), ( "<=", "<=" ),
-		]
-
-		test_cpvs = ["sys-apps/portage-2.1"]
-		slots = [ None,"1","linux-2.5.6" ]
-		for cpv in test_cpvs:
-			for test in tests:
-				for slot in slots:
-					atom = cpv[:]
-					if slot:
-						atom += ":" + slot
-					result = get_operator( test[0] + atom )
-					self.assertEqual( result, test[1],
-						msg="get_operator(%s) != %s" % (test[0] + atom, test[1]) )
-
-		result = get_operator( "sys-apps/portage" )
-		self.assertEqual( result, None )
-
-		result = get_operator( "=sys-apps/portage-2.1*" )
-		self.assertEqual( result , "=*" )

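A distilled view of the contract the assertions above establish:

from portage.dep import get_operator

get_operator(">=sys-apps/portage-2.1")   # '>='
get_operator("=sys-apps/portage-2.1*")   # '=*' (the glob-match form)
get_operator("sys-apps/portage")         # None (no operator present)
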
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_get_required_use_flags.py b/portage_with_autodep/pym/portage/tests/dep/test_get_required_use_flags.py
deleted file mode 100644
index 06f8110..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/test_get_required_use_flags.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import get_required_use_flags
-from portage.exception import InvalidDependString
-
-class TestGetRequiredUseFlags(TestCase):
-
-	def testGetRequiredUseFlags(self):
-		test_cases = (
-			("a b c", ["a", "b", "c"]),
-
-			("|| ( a b c )", ["a", "b", "c"]),
-			("^^ ( a b c )", ["a", "b", "c"]),
-
-			("|| ( a b ^^ ( d e f ) )", ["a", "b", "d", "e", "f"]),
-			("^^ ( a b || ( d e f ) )", ["a", "b", "d", "e", "f"]),
-
-			("( ^^ ( a ( b ) ( || ( ( d e ) ( f ) ) ) ) )", ["a", "b", "d", "e", "f"]),
-
-			("a? ( ^^ ( b c ) )", ["a", "b", "c"]),
-			("a? ( ^^ ( !b !d? ( c ) ) )", ["a", "b", "c", "d"]),
-		)
-
-		test_cases_xfail = (
-			("^^ ( || ( a b ) ^^ ( b c )"),
-			("^^( || ( a b ) ^^ ( b c ) )"),
-			("^^ || ( a b ) ^^ ( b c )"),
-			("^^ ( ( || ) ( a b ) ^^ ( b c ) )"),
-			("^^ ( || ( a b ) ) ^^ ( b c ) )"),
-		)
-
-		for required_use, expected in test_cases:
-			result = get_required_use_flags(required_use)
-			expected = set(expected)
-			self.assertEqual(result, expected, \
-				"REQUIRED_USE: '%s', expected: '%s', got: '%s'" % (required_use, expected, result))
-
-		for required_use in test_cases_xfail:
-			self.assertRaisesMsg("REQUIRED_USE: '%s'" % (required_use,), \
-				InvalidDependString, get_required_use_flags, required_use)

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_isjustname.py b/portage_with_autodep/pym/portage/tests/dep/test_isjustname.py
deleted file mode 100644
index c16fb54..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/test_isjustname.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# test_isjustname.py -- Portage Unit Testing Functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import isjustname
-
-class IsJustName(TestCase):
-
-	def testIsJustName(self):
-
-		cats = ( "", "sys-apps/", "foo/", "virtual/" )
-		pkgs = ( "portage", "paludis", "pkgcore", "notARealPkg" )
-		vers = ( "", "-2.0-r3", "-1.0_pre2", "-3.1b" )
-
-		for pkg in pkgs:
-			for cat in cats:
-				for ver in vers:
-					if len(ver):
-						self.assertFalse( isjustname( cat + pkg + ver ),
-						msg="isjustname(%s) is True!" % (cat + pkg + ver) )
-					else:
-						self.assertTrue( isjustname( cat + pkg + ver ),
-						msg="isjustname(%s) is False!" % (cat + pkg + ver) )

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_isvalidatom.py b/portage_with_autodep/pym/portage/tests/dep/test_isvalidatom.py
deleted file mode 100644
index 173ab0d..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/test_isvalidatom.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# Copyright 2006-2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import isvalidatom
-
-class IsValidAtomTestCase(object):
-	def __init__(self, atom, expected, allow_wildcard=False, allow_repo=False):
-		self.atom = atom
-		self.expected = expected
-		self.allow_wildcard = allow_wildcard
-		self.allow_repo = allow_repo
-
-class IsValidAtom(TestCase):
-
-	def testIsValidAtom(self):
-
-		test_cases = (
-			IsValidAtomTestCase("sys-apps/portage", True),
-			IsValidAtomTestCase("=sys-apps/portage-2.1", True),
-			IsValidAtomTestCase("=sys-apps/portage-2.1*", True),
-			IsValidAtomTestCase(">=sys-apps/portage-2.1", True),
-			IsValidAtomTestCase("<=sys-apps/portage-2.1", True),
-			IsValidAtomTestCase(">sys-apps/portage-2.1", True),
-			IsValidAtomTestCase("<sys-apps/portage-2.1", True),
-			IsValidAtomTestCase("~sys-apps/portage-2.1", True),
-			IsValidAtomTestCase("sys-apps/portage:foo", True),
-			IsValidAtomTestCase("sys-apps/portage-2.1:foo", False),
-			IsValidAtomTestCase("sys-apps/portage-2.1:", False),
-			IsValidAtomTestCase("sys-apps/portage-2.1:[foo]", False),
-
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar?,!baz?,!doc=,build=]", True),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[doc?]", True),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc?]", True),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[doc=]", True),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc=]", True),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc]", False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc]", False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc=]", False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc?]", False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc?]", False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc=]", False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc!=]", False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc=]", False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar][-baz][doc?][!build?]", False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?]", True),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?,]", False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[,bar,-baz,doc?,!build?]", False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz][doc?,!build?]", False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar][doc,build]", False),
-			IsValidAtomTestCase(">~cate-gory/foo-1.0", False),
-			IsValidAtomTestCase(">~category/foo-1.0", False),
-			IsValidAtomTestCase("<~category/foo-1.0", False),
-			IsValidAtomTestCase("###cat/foo-1.0", False),
-			IsValidAtomTestCase("~sys-apps/portage", False),
-			IsValidAtomTestCase("portage", False),
-			IsValidAtomTestCase("=portage", False),
-			IsValidAtomTestCase(">=portage-2.1", False),
-			IsValidAtomTestCase("~portage-2.1", False),
-			IsValidAtomTestCase("=portage-2.1*", False),
-			IsValidAtomTestCase("null/portage", True),
-			IsValidAtomTestCase("null/portage*:0", False),
-			IsValidAtomTestCase(">=null/portage-2.1", True),
-			IsValidAtomTestCase(">=null/portage", False),
-			IsValidAtomTestCase(">null/portage", False),
-			IsValidAtomTestCase("=null/portage*", False),
-			IsValidAtomTestCase("=null/portage", False),
-			IsValidAtomTestCase("~null/portage", False),
-			IsValidAtomTestCase("<=null/portage", False),
-			IsValidAtomTestCase("<null/portage", False),
-			IsValidAtomTestCase("~null/portage-2.1", True),
-			IsValidAtomTestCase("=null/portage-2.1*", True),
-			IsValidAtomTestCase("null/portage-2.1*", False),
-			IsValidAtomTestCase("app-doc/php-docs-20071125", False),
-			IsValidAtomTestCase("app-doc/php-docs-20071125-r2", False),
-			IsValidAtomTestCase("=foo/bar-1-r1-1-r1", False),
-			IsValidAtomTestCase("foo/-z-1", False),
-
-			# These are invalid because pkg name must not end in hyphen
-			# followed by numbers
-			IsValidAtomTestCase("=foo/bar-1-r1-1-r1", False),
-			IsValidAtomTestCase("=foo/bar-123-1", False),
-			IsValidAtomTestCase("=foo/bar-123-1*", False),
-			IsValidAtomTestCase("foo/bar-123", False),
-			IsValidAtomTestCase("=foo/bar-123-1-r1", False),
-			IsValidAtomTestCase("=foo/bar-123-1-r1*", False),
-			IsValidAtomTestCase("foo/bar-123-r1", False),
-			IsValidAtomTestCase("foo/bar-1", False),
-
-			IsValidAtomTestCase("=foo/bar--baz-1-r1", True),
-			IsValidAtomTestCase("=foo/bar-baz--1-r1", True),
-			IsValidAtomTestCase("=foo/bar-baz---1-r1", True),
-			IsValidAtomTestCase("=foo/bar-baz---1", True),
-			IsValidAtomTestCase("=foo/bar-baz-1--r1", False),
-			IsValidAtomTestCase("games-strategy/ufo2000", True),
-			IsValidAtomTestCase("~games-strategy/ufo2000-0.1", True),
-			IsValidAtomTestCase("=media-libs/x264-20060810", True),
-			IsValidAtomTestCase("foo/b", True),
-			IsValidAtomTestCase("app-text/7plus", True),
-			IsValidAtomTestCase("foo/666", True),
-			IsValidAtomTestCase("=dev-libs/poppler-qt3-0.11*", True),
-
-			 #Testing atoms with repositories
-			IsValidAtomTestCase("sys-apps/portage::repo_123-name", True, allow_repo=True),
-			IsValidAtomTestCase("=sys-apps/portage-2.1::repo", True, allow_repo=True),
-			IsValidAtomTestCase("=sys-apps/portage-2.1*::repo", True, allow_repo=True),
-			IsValidAtomTestCase("sys-apps/portage:foo::repo", True, allow_repo=True),
-			IsValidAtomTestCase("sys-apps/portage-2.1:foo::repo", False, allow_repo=True),
-			IsValidAtomTestCase("sys-apps/portage-2.1:::repo", False, allow_repo=True),
-			IsValidAtomTestCase("sys-apps/portage-2.1:::repo[foo]", False, allow_repo=True),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[bar?,!baz?,!doc=,build=]", True, allow_repo=True),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[doc?]", True, allow_repo=True),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[!doc]", False, allow_repo=True),
-			IsValidAtomTestCase("###cat/foo-1.0::repo", False, allow_repo=True),
-			IsValidAtomTestCase("~sys-apps/portage::repo", False, allow_repo=True),
-			IsValidAtomTestCase("portage::repo", False, allow_repo=True),
-			IsValidAtomTestCase("=portage::repo", False, allow_repo=True),
-			IsValidAtomTestCase("null/portage::repo", True, allow_repo=True),
-			IsValidAtomTestCase("app-doc/php-docs-20071125::repo", False, allow_repo=True),
-			IsValidAtomTestCase("=foo/bar-1-r1-1-r1::repo", False, allow_repo=True),
-
-			IsValidAtomTestCase("sys-apps/portage::repo_123-name", False, allow_repo=False),
-			IsValidAtomTestCase("=sys-apps/portage-2.1::repo", False, allow_repo=False),
-			IsValidAtomTestCase("=sys-apps/portage-2.1*::repo", False, allow_repo=False),
-			IsValidAtomTestCase("sys-apps/portage:foo::repo", False, allow_repo=False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[bar?,!baz?,!doc=,build=]", False, allow_repo=False),
-			IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[doc?]", False, allow_repo=False),
-			IsValidAtomTestCase("null/portage::repo", False, allow_repo=False),
-		)
-
-		for test_case in test_cases:
-			if test_case.expected:
-				atom_type = "valid"
-			else:
-				atom_type = "invalid"
-			self.assertEqual( bool(isvalidatom(test_case.atom, allow_wildcard=test_case.allow_wildcard, \
-				allow_repo=test_case.allow_repo)), test_case.expected,
-				msg="isvalidatom(%s) != %s" % ( test_case.atom, test_case.expected ) )

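The recurring pattern in the cases above: isvalidatom() is strict by default, and repository suffixes are accepted only with allow_repo=True (likewise wildcards with allow_wildcard=True). A minimal sketch:

from portage.dep import isvalidatom

bool(isvalidatom("=sys-apps/portage-2.1"))                               # True
bool(isvalidatom("sys-apps/portage::repo_123-name"))                     # False
bool(isvalidatom("sys-apps/portage::repo_123-name", allow_repo=True))    # True
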
diff --git a/portage_with_autodep/pym/portage/tests/dep/test_match_from_list.py b/portage_with_autodep/pym/portage/tests/dep/test_match_from_list.py
deleted file mode 100644
index afba414..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/test_match_from_list.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2006, 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import sys
-from portage.tests import TestCase
-from portage.dep import Atom, match_from_list, _repo_separator
-from portage.versions import catpkgsplit
-
-if sys.hexversion >= 0x3000000:
-	basestring = str
-
-class Package(object):
-	"""
-	Provides a minimal subset of attributes of _emerge.Package.Package
-	"""
-	def __init__(self, atom):
-		atom = Atom(atom, allow_repo=True)
-		self.cp = atom.cp
-		self.cpv = atom.cpv
-		self.cpv_split = catpkgsplit(self.cpv)
-		self.slot = atom.slot
-		self.repo = atom.repo
-		if atom.use:
-			self.use = self._use_class(atom.use.enabled)
-			self.iuse = self._iuse_class(atom.use.required)
-		else:
-			self.use = self._use_class([])
-			self.iuse = self._iuse_class([])
-
-	class _use_class(object):
-		def __init__(self, use):
-			self.enabled = frozenset(use)
-
-	class _iuse_class(object):
-		def __init__(self, iuse):
-			self.all = frozenset(iuse)
-
-		def is_valid_flag(self, flags):
-			if isinstance(flags, basestring):
-				flags = [flags]
-			for flag in flags:
-				if not flag in self.all:
-					return False
-			return True
-
-class Test_match_from_list(TestCase):
-
-	def testMatch_from_list(self):
-		tests = (
-			("=sys-apps/portage-45*", [], [] ),
-			("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
-			("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
-			("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
-			("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
-			("=sys-apps/portage-045", ["sys-apps/portage-046"], [] ),
-			("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"] ),
-			("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], [] ),
-			("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
-			("<=sys-apps/portage-045", ["sys-apps/portage-046"], [] ),
-			("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
-			("<sys-apps/portage-046", ["sys-apps/portage-046"], [] ),
-			(">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
-			(">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], [] ),
-			(">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"] ),
-			(">sys-apps/portage-047", ["sys-apps/portage-046-r1"], [] ),
-			("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"] ),
-			("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], [] ),
-			("=sys-fs/udev-1*", ["sys-fs/udev-123"], ["sys-fs/udev-123"]),
-			("=sys-fs/udev-4*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
-			("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
-			("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
-			("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"] ),
-			("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"] ),
-			("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"] ),
-			("dev-libs/*", ["sys-apps/portage-2.1.2"], [] ),
-			("*/tar", ["sys-apps/portage-2.1.2"], [] ),
-			("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"] ),
-			("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"] ),
-
-			("dev-libs/A[foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-1"] ),
-			("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-2"] ),
-			("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")], [] ),
-			("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], [] ),
-			("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")], [] ),
-			("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")], ["dev-libs/A-2"] ),
-			("dev-libs/A[foo,bar(+)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"] ),
-			("dev-libs/A[foo,bar(-)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], [] ),
-			("dev-libs/A[foo,-bar(-)]", [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"] ),
-
-			("dev-libs/A::repo1", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo1"] ),
-			("dev-libs/A::repo2", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo2"] ),
-			("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[foo]"), Package("=dev-libs/A-1::repo2[-foo]")], [] ),
-			("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[-foo]"), Package("=dev-libs/A-1::repo2[foo]")], ["dev-libs/A-1::repo2"] ),
-			("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:1::repo1"), Package("=dev-libs/A-1:2::repo2")], [] ),
-			("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:2::repo1"), Package("=dev-libs/A-1:1::repo2[foo]")], ["dev-libs/A-1::repo2"] ),
-		)
-
-		for atom, cpv_list, expected_result in tests:
-			result = []
-			for pkg in match_from_list( atom, cpv_list ):
-				if isinstance(pkg, Package):
-					if pkg.repo:
-						result.append(pkg.cpv + _repo_separator + pkg.repo)
-					else:
-						result.append(pkg.cpv)
-				else:
-					result.append(pkg)
-			self.assertEqual( result, expected_result )
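
The table above pins down the contract of portage.dep.match_from_list(atom, candidates): it returns the subset of candidate cpvs (or Package objects) that the atom matches. A minimal usage sketch, assuming a portage checkout is on PYTHONPATH:

    from portage.dep import match_from_list

    # A '>=' atom matches every candidate at or above the stated version
    # (compare the '>=sys-apps/portage-045' rows in the table above).
    print(match_from_list(">=sys-apps/portage-045",
        ["sys-apps/portage-045", "sys-apps/portage-046-r1"]))
    # -> ['sys-apps/portage-045', 'sys-apps/portage-046-r1']

    # Wildcard atoms work on plain cpv strings as well.
    print(match_from_list("sys-fs/*", ["sys-fs/udev-456"]))
    # -> ['sys-fs/udev-456']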

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_paren_reduce.py b/portage_with_autodep/pym/portage/tests/dep/test_paren_reduce.py
deleted file mode 100644
index 9a147a0..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/test_paren_reduce.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.dep import paren_reduce
-from portage.exception import InvalidDependString
-
-class TestParenReduce(TestCase):
-
-	def testParenReduce(self):
-
-		test_cases = (
-			( "A", ["A"]),
-			( "( A )", ["A"]),
-			( "|| ( A B )", [ "||", ["A", "B"] ]),
-			( "|| ( A || ( B C ) )", [ "||", ["A", "||", ["B", "C"]]]),
-			( "|| ( A || ( B C D ) )", [ "||", ["A", "||", ["B", "C", "D"]] ]),
-			( "|| ( A || ( B || ( C D ) E ) )", [ "||", ["A", "||", ["B", "||", ["C", "D"], "E"]] ]),
-			( "a? ( A )", ["a?", ["A"]]),
-			
-			( "( || ( ( ( A ) B ) ) )", ["A", "B"]),
-			( "( || ( || ( ( A ) B ) ) )", [ "||", ["A", "B"] ]),
-			( "|| ( A )", ["A"]),
-			( "( || ( || ( || ( A ) foo? ( B ) ) ) )", [ "||", ["A", "foo?", ["B"] ]]),
-			( "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )", [ "||", ["bar?", ["A"], "foo?", ["B"] ]]),
-			( "A || ( ) foo? ( ) B", ["A", "B"]),
-
-			( "|| ( A ) || ( B )", ["A", "B"]),
-			( "foo? ( A ) foo? ( B )", ["foo?", ["A"], "foo?", ["B"]]),
-
-			( "|| ( ( A B ) C )", [ "||", [ ["A", "B"], "C"] ]),
-			( "|| ( ( A B ) ( C ) )", [ "||", [ ["A", "B"], "C"] ]),
-			# test USE dep defaults for bug #354003
-			( ">=dev-lang/php-5.2[pcre(+)]", [ ">=dev-lang/php-5.2[pcre(+)]" ]),
-		)
-		
-		test_cases_xfail = (
-			"( A",
-			"A )",
-
-			"||( A B )",
-			"|| (A B )",
-			"|| ( A B)",
-			"|| ( A B",
-			"|| A B )",
-
-			"|| A B",
-			"|| ( A B ) )",
-			"|| || B C",
-			
-			"|| ( A B || )",
-			
-			"a? A",
-			
-			"( || ( || || ( A ) foo? ( B ) ) )",
-			"( || ( || bar? ( A ) foo? ( B ) ) )",
-		)
-
-		for dep_str, expected_result in test_cases:
-			self.assertEqual(paren_reduce(dep_str), expected_result,
-				"input: '%s' result: %s != %s" % (dep_str,
-				paren_reduce(dep_str), expected_result))
-
-		for dep_str in test_cases_xfail:
-			self.assertRaisesMsg(dep_str,
-				InvalidDependString, paren_reduce, dep_str)
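
As the cases above document, paren_reduce() parses a dependency string into a nested list in which "||" and "use?" operator tokens immediately precede their group, and empty groups are dropped. A quick illustration, under the same import assumptions:

    from portage.dep import paren_reduce

    print(paren_reduce("|| ( A B )"))           # ['||', ['A', 'B']]
    print(paren_reduce("a? ( A )"))             # ['a?', ['A']]
    print(paren_reduce("A || ( ) foo? ( ) B"))  # empty groups dropped: ['A', 'B']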

diff --git a/portage_with_autodep/pym/portage/tests/dep/test_use_reduce.py b/portage_with_autodep/pym/portage/tests/dep/test_use_reduce.py
deleted file mode 100644
index 1618430..0000000
--- a/portage_with_autodep/pym/portage/tests/dep/test_use_reduce.py
+++ /dev/null
@@ -1,627 +0,0 @@
-# Copyright 2009-2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.exception import InvalidDependString
-from portage.dep import Atom, use_reduce
-
-class UseReduceTestCase(object):
-	def __init__(self, deparray, uselist=[], masklist=[], \
-		matchall=0, excludeall=[], is_src_uri=False, \
-		eapi="0", opconvert=False, flat=False, expected_result=None, \
-			is_valid_flag=None, token_class=None):
-		self.deparray = deparray
-		self.uselist = uselist
-		self.masklist = masklist
-		self.matchall = matchall
-		self.excludeall = excludeall
-		self.is_src_uri = is_src_uri
-		self.eapi = eapi
-		self.opconvert = opconvert
-		self.flat = flat
-		self.is_valid_flag = is_valid_flag
-		self.token_class = token_class
-		self.expected_result = expected_result
-
-	def run(self):
-		try:
-			return use_reduce(self.deparray, self.uselist, self.masklist, \
-				self.matchall, self.excludeall, self.is_src_uri, self.eapi, \
-				self.opconvert, self.flat, self.is_valid_flag, self.token_class)
-		except InvalidDependString as e:
-			raise InvalidDependString("%s: %s" % (e, self.deparray))
-
-class UseReduce(TestCase):
-
-	def always_true(self, unused_parameter):
-		return True
-
-	def always_false(self, unused_parameter):
-		return False
-
-	def testUseReduce(self):
-
-		EAPI_WITH_SRC_URI_ARROWS = "2"
-		EAPI_WITHOUT_SRC_URI_ARROWS = "0"
-
-		test_cases = (
-			UseReduceTestCase(
-				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
-				uselist = ["a", "b", "c", "d"],
-				expected_result = ["A", "B"]
-				),
-			UseReduceTestCase(
-				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
-				uselist = ["a", "b", "c"],
-				expected_result = ["A", "B", "D"]
-				),
-			UseReduceTestCase(
-				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
-				uselist = ["b", "c"],
-				expected_result = ["B", "D"]
-				),
-
-			UseReduceTestCase(
-				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
-				matchall = True,
-				expected_result = ["A", "B", "C", "D"]
-				),
-			UseReduceTestCase(
-				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
-				masklist = ["a", "c"],
-				expected_result = ["C", "D"]
-				),
-			UseReduceTestCase(
-				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
-				matchall = True,
-				masklist = ["a", "c"],
-				expected_result = ["B", "C", "D"]
-				),
-			UseReduceTestCase(
-				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
-				uselist = ["a", "b"],
-				masklist = ["a", "c"],
-				expected_result = ["B", "C", "D"]
-				),
-			UseReduceTestCase(
-				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
-				excludeall = ["a", "c"],
-				expected_result = ["D"]
-				),
-			UseReduceTestCase(
-				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
-				uselist = ["b"],
-				excludeall = ["a", "c"],
-				expected_result = ["B", "D"]
-				),
-			UseReduceTestCase(
-				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
-				matchall = True,
-				excludeall = ["a", "c"],
-				expected_result = ["A", "B", "D"]
-				),
-			UseReduceTestCase(
-				"a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
-				matchall = True,
-				excludeall = ["a", "c"],
-				masklist = ["b"],
-				expected_result = ["A", "D"]
-				),
-
-			
-			UseReduceTestCase(
-				"a? ( b? ( AB ) )",
-				uselist = ["a", "b"],
-				expected_result = ["AB"]
-				),
-			UseReduceTestCase(
-				"a? ( b? ( AB ) C )",
-				uselist = ["a"],
-				expected_result = ["C"]
-				),
-			UseReduceTestCase(
-				"a? ( b? ( || ( AB CD ) ) )",
-				uselist = ["a", "b"],
-				expected_result = ["||", ["AB", "CD"]]
-				),
-			UseReduceTestCase(
-				"|| ( || ( a? ( A ) b? ( B ) ) )",
-				uselist = ["a", "b"],
-				expected_result = ["||", ["A", "B"]]
-				),
-			UseReduceTestCase(
-				"|| ( || ( a? ( A ) b? ( B ) ) )",
-				uselist = ["a"],
-				expected_result = ["A"]
-				),
-			UseReduceTestCase(
-				"|| ( || ( a? ( A ) b? ( B ) ) )",
-				uselist = [],
-				expected_result = []
-				),
-			UseReduceTestCase(
-				"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
-				uselist = [],
-				expected_result = []
-				),
-			UseReduceTestCase(
-				"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
-				uselist = ["a"],
-				expected_result = ["A"]
-				),
-			UseReduceTestCase(
-				"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
-				uselist = ["b"],
-				expected_result = ["B"]
-				),
-			UseReduceTestCase(
-				"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
-				uselist = ["c"],
-				expected_result = []
-				),
-			UseReduceTestCase(
-				"|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
-				uselist = ["a", "c"],
-				expected_result = ["||", [ "A", "C"]]
-				),
-			
-			#paren_reduce tests
-			UseReduceTestCase(
-				"A",
-				expected_result = ["A"]),
-			UseReduceTestCase(
-				"( A )",
-				expected_result = ["A"]),
-			UseReduceTestCase(
-				"|| ( A B )",
-				expected_result = [ "||", ["A", "B"] ]),
-			UseReduceTestCase(
-				"|| ( ( A B ) C )",
-				expected_result = [ "||", [ ["A", "B"], "C"] ]),
-			UseReduceTestCase(
-				"|| ( ( A B ) ( C ) )",
-				expected_result = [ "||", [ ["A", "B"], "C"] ]),
-			UseReduceTestCase(
-				"|| ( A || ( B C ) )",
-				expected_result = [ "||", ["A", "B", "C"]]),
-			UseReduceTestCase(
-				"|| ( A || ( B C D ) )",
-				expected_result = [ "||", ["A", "B", "C", "D"] ]),
-			UseReduceTestCase(
-				"|| ( A || ( B || ( C D ) E ) )",
-				expected_result = [ "||", ["A", "B", "C", "D", "E"] ]),
-			UseReduceTestCase(
-				"( || ( ( ( A ) B ) ) )",
-				expected_result = ["A", "B"] ),
-			UseReduceTestCase(
-				"( || ( || ( ( A ) B ) ) )",
-				expected_result = [ "||", ["A", "B"] ]),
-			UseReduceTestCase(
-				"( || ( || ( ( A ) B ) ) )",
-				expected_result = [ "||", ["A", "B"] ]),
-			UseReduceTestCase(
-				"|| ( A )",
-				expected_result = ["A"]),
-			UseReduceTestCase(
-				"( || ( || ( || ( A ) foo? ( B ) ) ) )",
-				expected_result = ["A"]),
-			UseReduceTestCase(
-				"( || ( || ( || ( A ) foo? ( B ) ) ) )",
-				uselist = ["foo"],
-				expected_result = [ "||", ["A", "B"] ]),
-			UseReduceTestCase(
-				"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
-				expected_result = []),
-			UseReduceTestCase(
-				"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
-				uselist = ["foo", "bar"],
-				expected_result = [ "||", [ "A", "B" ] ]),
-			UseReduceTestCase(
-				"A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
-				expected_result = ["A", "B"]),
-			UseReduceTestCase(
-				"|| ( A ) || ( B )",
-				expected_result = ["A", "B"]),
-			UseReduceTestCase(
-				"foo? ( A ) foo? ( B )",
-				expected_result = []),
-			UseReduceTestCase(
-				"foo? ( A ) foo? ( B )",
-				uselist = ["foo"],
-				expected_result = ["A", "B"]),
-			UseReduceTestCase(
-				"|| ( A B ) C",
-				expected_result = ['||', ['A', 'B'], 'C']),
-			UseReduceTestCase(
-				"A || ( B C )",
-				expected_result = ['A', '||', ['B', 'C']]),
-
-			#SRC_URI stuff
-			UseReduceTestCase(
-				"http://foo/bar -> blah.tbz2",
-				is_src_uri = True,
-				eapi = EAPI_WITH_SRC_URI_ARROWS,
-				expected_result = ["http://foo/bar", "->", "blah.tbz2"]),
-			UseReduceTestCase(
-				"foo? ( http://foo/bar -> blah.tbz2 )",
-				uselist = [],
-				is_src_uri = True,
-				eapi = EAPI_WITH_SRC_URI_ARROWS,
-				expected_result = []),
-			UseReduceTestCase(
-				"foo? ( http://foo/bar -> blah.tbz2 )",
-				uselist = ["foo"],
-				is_src_uri = True,
-				eapi = EAPI_WITH_SRC_URI_ARROWS,
-				expected_result = ["http://foo/bar", "->", "blah.tbz2"]),
-			UseReduceTestCase(
-				"http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
-				uselist = [],
-				is_src_uri = True,
-				eapi = EAPI_WITH_SRC_URI_ARROWS,
-				expected_result = ["http://foo/bar", "->", "bar.tbz2"]),
-			UseReduceTestCase(
-				"http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
-				uselist = ["foo"],
-				is_src_uri = True,
-				eapi = EAPI_WITH_SRC_URI_ARROWS,
-				expected_result = ["http://foo/bar", "->", "bar.tbz2", "ftp://foo/a"]),
-			UseReduceTestCase(
-				"http://foo.com/foo http://foo/bar -> blah.tbz2",
-				uselist = ["foo"],
-				is_src_uri = True,
-				eapi = EAPI_WITH_SRC_URI_ARROWS,
-				expected_result = ["http://foo.com/foo", "http://foo/bar", "->", "blah.tbz2"]),
-
-			#opconvert tests
-			UseReduceTestCase(
-				"A",
-				opconvert = True,
-				expected_result = ["A"]),
-			UseReduceTestCase(
-				"( A )",
-				opconvert = True,
-				expected_result = ["A"]),
-			UseReduceTestCase(
-				"|| ( A B )",
-				opconvert = True,
-				expected_result = [['||', 'A', 'B']]),
-			UseReduceTestCase(
-				"|| ( ( A B ) C )",
-				opconvert = True,
-				expected_result = [['||', ['A', 'B'], 'C']]),
-			UseReduceTestCase(
-				"|| ( A || ( B C ) )",
-				opconvert = True,
-				expected_result = [['||', 'A', 'B', 'C']]),
-			UseReduceTestCase(
-				"|| ( A || ( B C D ) )",
-				opconvert = True,
-				expected_result = [['||', 'A', 'B', 'C', 'D']]),
-			UseReduceTestCase(
-				"|| ( A || ( B || ( C D ) E ) )",
-				expected_result = [ "||", ["A", "B", "C", "D", "E"] ]),
-			UseReduceTestCase(
-				"( || ( ( ( A ) B ) ) )",
-				opconvert = True,
-				expected_result = [ "A", "B" ] ),
-			UseReduceTestCase(
-				"( || ( || ( ( A ) B ) ) )",
-				opconvert = True,
-				expected_result = [['||', 'A', 'B']]),
-			UseReduceTestCase(
-				"|| ( A B ) C",
-				opconvert = True,
-				expected_result = [['||', 'A', 'B'], 'C']),
-			UseReduceTestCase(
-				"A || ( B C )",
-				opconvert = True,
-				expected_result = ['A', ['||', 'B', 'C']]),
-			UseReduceTestCase(
-				"A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
-				uselist = ["foo", "bar"],
-				opconvert = True,
-				expected_result = ['A', ['||', 'B', 'C', 'D', 'E'], 'G']),
-			UseReduceTestCase(
-				"A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
-				uselist = ["foo", "bar"],
-				opconvert = False,
-				expected_result = ['A', '||', ['B', 'C', 'D', 'E'], 'G']),
-
-			UseReduceTestCase(
-				"|| ( A )",
-				opconvert = True,
-				expected_result = ["A"]),
-			UseReduceTestCase(
-				"( || ( || ( || ( A ) foo? ( B ) ) ) )",
-				expected_result = ["A"]),
-			UseReduceTestCase(
-				"( || ( || ( || ( A ) foo? ( B ) ) ) )",
-				uselist = ["foo"],
-				opconvert = True,
-				expected_result = [['||', 'A', 'B']]),
-			UseReduceTestCase(
-				"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
-				opconvert = True,
-				expected_result = []),
-			UseReduceTestCase(
-				"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
-				uselist = ["foo", "bar"],
-				opconvert = True,
-				expected_result = [['||', 'A', 'B']]),
-			UseReduceTestCase(
-				"A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
-				opconvert = True,
-				expected_result = ["A", "B"]),
-			UseReduceTestCase(
-				"|| ( A ) || ( B )",
-				opconvert = True,
-				expected_result = ["A", "B"]),
-			UseReduceTestCase(
-				"foo? ( A ) foo? ( B )",
-				opconvert = True,
-				expected_result = []),
-			UseReduceTestCase(
-				"foo? ( A ) foo? ( B )",
-				uselist = ["foo"],
-				opconvert = True,
-				expected_result = ["A", "B"]),
-			UseReduceTestCase(
-				"|| ( foo? ( || ( A B ) ) )",
-				uselist = ["foo"],
-				opconvert = True,
-				expected_result = [['||', 'A', 'B']]),
-
-			UseReduceTestCase(
-				"|| ( ( A B ) foo? ( || ( C D ) ) )",
-				uselist = ["foo"],
-				opconvert = True,
-				expected_result = [['||', ['A', 'B'], 'C', 'D']]),
-
-			UseReduceTestCase(
-				"|| ( ( A B ) foo? ( || ( C D ) ) )",
-				uselist = ["foo"],
-				opconvert = False,
-				expected_result = ['||', [['A', 'B'], 'C', 'D']]),
-
-			UseReduceTestCase(
-				"|| ( ( A B ) || ( C D ) )",
-				expected_result = ['||', [['A', 'B'], 'C', 'D']]),
-
-			UseReduceTestCase(
-				"|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
-				expected_result = ['||', [['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
-
-			UseReduceTestCase(
-				"|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
-				opconvert = True,
-				expected_result = [['||', ['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
-
-			UseReduceTestCase(
-				"|| ( foo? ( A B ) )",
-				uselist = ["foo"],
-				expected_result = ['A', 'B']),
-
-			UseReduceTestCase(
-				"|| ( || ( foo? ( A B ) ) )",
-				uselist = ["foo"],
-				expected_result = ['A', 'B']),
-
-			UseReduceTestCase(
-				"|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
-				uselist = ["a", "b", "c", "d", "e", "f"],
-				expected_result = ['A', 'B']),
-
-			UseReduceTestCase(
-				"|| ( || ( ( || ( a? ( ( b? ( c? ( || ( || ( || ( ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) ) ) ) )",
-				uselist = ["a", "b", "c", "d", "e", "f"],
-				expected_result = ['A', 'B']),
-
-			UseReduceTestCase(
-				"|| ( ( A ( || ( B ) ) ) )",
-				expected_result = ['A', 'B']),
-
-			UseReduceTestCase(
-				"|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
-				uselist = ["foo", "bar", "baz"],
-				expected_result = ['||', [['A', 'B'], ['C', 'D', '||', ['E', ['F', 'G'], 'H']]]]),
-
-			UseReduceTestCase(
-				"|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
-				uselist = ["foo", "bar", "baz"],
-				opconvert = True,
-				expected_result = [['||', ['A', 'B'], ['C', 'D', ['||', 'E', ['F', 'G'], 'H']]]]),
-
-			UseReduceTestCase(
-				"|| ( foo? ( A B ) )",
-				uselist = ["foo"],
-				opconvert=True,
-				expected_result = ['A', 'B']),
-
-			UseReduceTestCase(
-				"|| ( || ( foo? ( A B ) ) )",
-				uselist = ["foo"],
-				opconvert=True,
-				expected_result = ['A', 'B']),
-
-			UseReduceTestCase(
-				"|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
-				uselist = ["a", "b", "c", "d", "e", "f"],
-				opconvert=True,
-				expected_result = ['A', 'B']),
-
-			#flat test
-			UseReduceTestCase(
-				"A",
-				flat = True,
-				expected_result = ["A"]),
-			UseReduceTestCase(
-				"( A )",
-				flat = True,
-				expected_result = ["A"]),
-			UseReduceTestCase(
-				"|| ( A B )",
-				flat = True,
-				expected_result = [ "||", "A", "B" ] ),
-			UseReduceTestCase(
-				"|| ( A || ( B C ) )",
-				flat = True,
-				expected_result = [ "||", "A", "||", "B", "C" ]),
-			UseReduceTestCase(
-				"|| ( A || ( B C D ) )",
-				flat = True,
-				expected_result = [ "||", "A", "||", "B", "C", "D" ]),
-			UseReduceTestCase(
-				"|| ( A || ( B || ( C D ) E ) )",
-				flat = True,
-				expected_result = [ "||", "A", "||", "B", "||", "C", "D", "E" ]),
-			UseReduceTestCase(
-				"( || ( ( ( A ) B ) ) )",
-				flat = True,
-				expected_result = [ "||", "A", "B"] ),
-			UseReduceTestCase(
-				"( || ( || ( ( A ) B ) ) )",
-				flat = True,
-				expected_result = [ "||", "||", "A", "B" ]),
-			UseReduceTestCase(
-				"( || ( || ( ( A ) B ) ) )",
-				flat = True,
-				expected_result = [ "||", "||", "A", "B" ]),
-			UseReduceTestCase(
-				"|| ( A )",
-				flat = True,
-				expected_result = ["||", "A"]),
-			UseReduceTestCase(
-				"( || ( || ( || ( A ) foo? ( B ) ) ) )",
-				expected_result = ["A"]),
-			UseReduceTestCase(
-				"( || ( || ( || ( A ) foo? ( B ) ) ) )",
-				uselist = ["foo"],
-				flat = True,
-				expected_result = [ "||", "||","||", "A", "B" ]),
-			UseReduceTestCase(
-				"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
-				flat = True,
-				expected_result = ["||", "||","||"]),
-			UseReduceTestCase(
-				"( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
-				uselist = ["foo", "bar"],
-				flat = True,
-				expected_result = [ "||", "||", "A", "||", "B" ]),
-			UseReduceTestCase(
-				"A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
-				flat = True,
-				expected_result = ["A", "||", "B"]),
-			UseReduceTestCase(
-				"|| ( A ) || ( B )",
-				flat = True,
-				expected_result = ["||", "A", "||", "B"]),
-			UseReduceTestCase(
-				"foo? ( A ) foo? ( B )",
-				flat = True,
-				expected_result = []),
-			UseReduceTestCase(
-				"foo? ( A ) foo? ( B )",
-				uselist = ["foo"],
-				flat = True,
-				expected_result = ["A", "B"]),
-
-			#use flag validation
-			UseReduceTestCase(
-				"foo? ( A )",
-				uselist = ["foo"],
-				is_valid_flag = self.always_true,
-				expected_result = ["A"]),
-			UseReduceTestCase(
-				"foo? ( A )",
-				is_valid_flag = self.always_true,
-				expected_result = []),
-
-			#token_class
-			UseReduceTestCase(
-				"foo? ( dev-libs/A )",
-				uselist = ["foo"],
-				token_class=Atom,
-				expected_result = ["dev-libs/A"]),
-			UseReduceTestCase(
-				"foo? ( dev-libs/A )",
-				token_class=Atom,
-				expected_result = []),
-		)
-		
-		test_cases_xfail = (
-			UseReduceTestCase("? ( A )"),
-			UseReduceTestCase("!? ( A )"),
-			UseReduceTestCase("( A"),
-			UseReduceTestCase("A )"),
-			UseReduceTestCase("||( A B )"),
-			UseReduceTestCase("|| (A B )"),
-			UseReduceTestCase("|| ( A B)"),
-			UseReduceTestCase("|| ( A B"),
-			UseReduceTestCase("|| A B )"),
-			UseReduceTestCase("|| A B"),
-			UseReduceTestCase("|| ( A B ) )"),
-			UseReduceTestCase("|| || B C"),
-			UseReduceTestCase("|| ( A B || )"),
-			UseReduceTestCase("a? A"),
-			UseReduceTestCase("( || ( || || ( A ) foo? ( B ) ) )"),
-			UseReduceTestCase("( || ( || bar? ( A ) foo? ( B ) ) )"),
-			UseReduceTestCase("foo?"),
-			UseReduceTestCase("foo? || ( A )"),
-			UseReduceTestCase("|| ( )"),
-			UseReduceTestCase("foo? ( )"),
-
-			#SRC_URI stuff
-			UseReduceTestCase("http://foo/bar -> blah.tbz2", is_src_uri = True, eapi = EAPI_WITHOUT_SRC_URI_ARROWS),
-			UseReduceTestCase("|| ( http://foo/bar -> blah.tbz2 )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
-			UseReduceTestCase("http://foo/bar -> foo? ( ftp://foo/a )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
-			UseReduceTestCase("http://foo/bar blah.tbz2 ->", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
-			UseReduceTestCase("-> http://foo/bar blah.tbz2 )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
-			UseReduceTestCase("http://foo/bar ->", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
-			UseReduceTestCase("http://foo/bar -> foo? ( http://foo.com/foo )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
-			UseReduceTestCase("foo? ( http://foo/bar -> ) blah.tbz2", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
-			UseReduceTestCase("http://foo/bar -> foo/blah.tbz2", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
-			UseReduceTestCase("http://foo/bar -> -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri = True, eapi = EAPI_WITH_SRC_URI_ARROWS),
-			
-			UseReduceTestCase("http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri = False, eapi = EAPI_WITH_SRC_URI_ARROWS),
-
-			UseReduceTestCase(
-				"A",
-				opconvert = True,
-				flat = True),
-
-			#use flag validation
-			UseReduceTestCase("1.0? ( A )"),
-			UseReduceTestCase("!1.0? ( A )"),
-			UseReduceTestCase("!? ( A )"),
-			UseReduceTestCase("!?? ( A )"),
-			UseReduceTestCase(
-				"foo? ( A )",
-				is_valid_flag = self.always_false,
-				),
-			UseReduceTestCase(
-				"foo? ( A )",
-				uselist = ["foo"],
-				is_valid_flag = self.always_false,
-				),
-
-			#token_class
-			UseReduceTestCase(
-				"foo? ( A )",
-				uselist = ["foo"],
-				token_class=Atom),
-			UseReduceTestCase(
-				"A(B",
-				token_class=Atom),
-		)
-
-		for test_case in test_cases:
-			# If it fails then show the input, since lots of our
-			# test cases have the same output but different input,
-			# making it difficult to deduce which test has failed.
-			self.assertEqual(test_case.run(), test_case.expected_result,
-				"input: '%s' result: %s != %s" % (test_case.deparray,
-				test_case.run(), test_case.expected_result))
-
-		for test_case in test_cases_xfail:
-			self.assertRaisesMsg(test_case.deparray, (InvalidDependString, ValueError), test_case.run)
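
Taken together, the cases above show that use_reduce() both flattens the paren structure and resolves USE conditionals against the given flag list. A hedged usage sketch (the keyword names follow the positional order used by UseReduceTestCase.run() above):

    from portage.dep import use_reduce

    dep = "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )"
    # With only 'a' and 'b' enabled, the negated !c?/!d? branches also apply.
    print(use_reduce(dep, uselist=["a", "b"]))            # ['A', 'B', 'C', 'D']
    print(use_reduce(dep, uselist=["a", "b", "c", "d"]))  # ['A', 'B']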

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/__init__.py b/portage_with_autodep/pym/portage/tests/ebuild/__init__.py
deleted file mode 100644
index e2d487e..0000000
--- a/portage_with_autodep/pym/portage/tests/ebuild/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# Copyright 1998-2007 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/__test__ b/portage_with_autodep/pym/portage/tests/ebuild/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_array_fromfile_eof.py b/portage_with_autodep/pym/portage/tests/ebuild/test_array_fromfile_eof.py
deleted file mode 100644
index d8277f2..0000000
--- a/portage_with_autodep/pym/portage/tests/ebuild/test_array_fromfile_eof.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2009 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import array
-import tempfile
-
-from portage import _unicode_decode
-from portage import _unicode_encode
-from portage.tests import TestCase
-
-class ArrayFromfileEofTestCase(TestCase):
-
-	def testArrayFromfileEof(self):
-		# This tests if the following python issue is fixed
-		# in the currently running version of python:
-		#   http://bugs.python.org/issue5334
-
-		input_data = "an arbitrary string"
-		input_bytes = _unicode_encode(input_data,
-			encoding='utf_8', errors='strict')
-		f = tempfile.TemporaryFile()
-		f.write(input_bytes)
-
-		f.seek(0)
-		data = []
-		eof = False
-		while not eof:
-			a = array.array('B')
-			try:
-				a.fromfile(f, len(input_bytes) + 1)
-			except (EOFError, IOError):
-				# python-3.0 lost data here
-				eof = True
-
-			if not a:
-				eof = True
-			else:
-				data.append(_unicode_decode(a.tostring(),
-					encoding='utf_8', errors='strict'))
-
-		f.close()
-
-		self.assertEqual(input_data, ''.join(data))
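
The loop above is also the portable read pattern the test exists to verify: on an interpreter affected by issue 5334, array.fromfile() raises EOFError on a short read and may discard the bytes it had already consumed. A standalone sketch of the same EOF-safe loop, under the same assumptions:

    import array
    import tempfile

    with tempfile.TemporaryFile() as f:
        f.write(b"an arbitrary string")
        f.seek(0)
        chunks = []
        while True:
            a = array.array('B')
            try:
                a.fromfile(f, 64)  # deliberately ask for more than remains
            except (EOFError, IOError):
                pass               # 'a' may still hold a partial read
            if not a:
                break
            chunks.append(a.tostring())  # tobytes() on modern Python
        print(b"".join(chunks))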

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_config.py b/portage_with_autodep/pym/portage/tests/ebuild/test_config.py
deleted file mode 100644
index 7bec8c6..0000000
--- a/portage_with_autodep/pym/portage/tests/ebuild/test_config.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import portage
-from portage import os
-from portage.package.ebuild.config import config
-from portage.package.ebuild._config.LicenseManager import LicenseManager
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class ConfigTestCase(TestCase):
-
-	def testClone(self):
-		"""
-		Test the clone via constructor.
-		"""
-
-		ebuilds = {
-			"dev-libs/A-1": { },
-		}
-
-		playground = ResolverPlayground(ebuilds=ebuilds)
-		try:
-			settings = config(clone=playground.settings)
-			result = playground.run(["=dev-libs/A-1"])
-			pkg, existing_node = result.depgraph._select_package(
-				playground.root, "=dev-libs/A-1")
-			settings.setcpv(pkg)
-
-			# clone after setcpv tests deepcopy of LazyItemsDict
-			settings2 = config(clone=settings)
-		finally:
-			playground.cleanup()
-
-	def testFeaturesMutation(self):
-		"""
-		Test whether mutation of config.features updates the FEATURES
-		variable and persists through config.regenerate() calls. Also
-		verify that features_set._prune_overrides() works correctly.
-		"""
-		playground = ResolverPlayground()
-		try:
-			settings = config(clone=playground.settings)
-
-			settings.features.add('noclean')
-			self.assertEqual('noclean' in settings['FEATURES'].split(), True)
-			settings.regenerate()
-			self.assertEqual('noclean' in settings['FEATURES'].split(), True)
-
-			settings.features.discard('noclean')
-			self.assertEqual('noclean' in settings['FEATURES'].split(), False)
-			settings.regenerate()
-			self.assertEqual('noclean' in settings['FEATURES'].split(), False)
-
-			settings.features.add('noclean')
-			self.assertEqual('noclean' in settings['FEATURES'].split(), True)
-			settings.regenerate()
-			self.assertEqual('noclean' in settings['FEATURES'].split(), True)
-
-			# before: ['noclean', '-noclean', 'noclean']
-			settings.features._prune_overrides()
-			#  after: ['noclean']
-			self.assertEqual(settings._features_overrides.count('noclean'), 1)
-			self.assertEqual(settings._features_overrides.count('-noclean'), 0)
-
-			settings.features.remove('noclean')
-
-			# before: ['noclean', '-noclean']
-			settings.features._prune_overrides()
-			#  after: ['-noclean']
-			self.assertEqual(settings._features_overrides.count('noclean'), 0)
-			self.assertEqual(settings._features_overrides.count('-noclean'), 1)
-		finally:
-			playground.cleanup()
-
-	def testLicenseManager(self):
-
-		user_config = {
-			"package.license":
-				(
-					"dev-libs/* TEST",
-					"dev-libs/A -TEST2", 
-					"=dev-libs/A-2 TEST3 @TEST",
-					"*/* @EULA TEST2",
-					"=dev-libs/C-1 *",
-					"=dev-libs/C-2 -*",
-				),
-		}
-
-		playground = ResolverPlayground(user_config=user_config)
-		try:
-			portage.util.noiselimit = -2
-
-			license_group_locations = (os.path.join(playground.portdir, "profiles"),)
-			pkg_license = os.path.join(playground.eroot, "etc", "portage")
-
-			lic_man = LicenseManager(license_group_locations, pkg_license)
-
-			self.assertEqual(lic_man._accept_license_str, None)
-			self.assertEqual(lic_man._accept_license, None)
-			self.assertEqual(lic_man._license_groups, {"EULA": frozenset(["TEST"])})
-			self.assertEqual(lic_man._undef_lic_groups, set(["TEST"]))
-
-			self.assertEqual(lic_man.extract_global_changes(), "TEST TEST2")
-			self.assertEqual(lic_man.extract_global_changes(), "")
-
-			lic_man.set_accept_license_str("TEST TEST2")
-			self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/B-1", "0", None), ["TEST", "TEST2", "TEST"])
-			self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/A-1", "0", None), ["TEST", "TEST2", "TEST", "-TEST2"])
-			self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/A-2", "0", None), ["TEST", "TEST2", "TEST", "-TEST2", "TEST3", "@TEST"])
-
-			self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/B-1", [], "TEST", "0", None), "TEST")
-			self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/A-1", [], "-TEST2", "0", None), "")
-			self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/A-2", [], "|| ( TEST TEST2 )", "0", None), "TEST")
-			self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/C-1", [], "TEST5", "0", None), "TEST5")
-			self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/C-2", [], "TEST2", "0", None), "")
-
-			self.assertEqual(lic_man.getMissingLicenses("dev-libs/B-1", [], "TEST", "0", None), [])
-			self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-1", [], "-TEST2", "0", None), ["-TEST2"])
-			self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-2", [], "|| ( TEST TEST2 )", "0", None), [])
-			self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-3", [], "|| ( TEST2 || ( TEST3 TEST4 ) )", "0", None), ["TEST2", "TEST3", "TEST4"])
-			self.assertEqual(lic_man.getMissingLicenses("dev-libs/C-1", [], "TEST5", "0", None), [])
-			self.assertEqual(lic_man.getMissingLicenses("dev-libs/C-2", [], "TEST2", "0", None), ["TEST2"])
-			self.assertEqual(lic_man.getMissingLicenses("dev-libs/D-1", [], "", "0", None), [])
-		finally:
-			portage.util.noiselimit = 0
-			playground.cleanup()
-
-	def testPackageMaskOrder(self):
-
-		ebuilds = {
-			"dev-libs/A-1": { },
-			"dev-libs/B-1": { },
-			"dev-libs/C-1": { },
-			"dev-libs/D-1": { },
-			"dev-libs/E-1": { },
-		}
-
-		repo_configs = {
-			"test_repo": {
-				"package.mask":
-					(
-						"dev-libs/A",
-						"dev-libs/C",
-					),
-			}
-		}
-
-		profile = {
-			"package.mask":
-				(
-					"-dev-libs/A",
-					"dev-libs/B",
-					"-dev-libs/B",
-					"dev-libs/D",
-				),
-		}
-
-		user_config = {
-			"package.mask":
-				(
-					"-dev-libs/C",
-					"-dev-libs/D",
-					"dev-libs/E",
-				),
-		}
-
-		test_cases = (
-				ResolverPlaygroundTestCase(
-					["dev-libs/A"],
-					options = { "--autounmask": 'n' },
-					success = False),
-				ResolverPlaygroundTestCase(
-					["dev-libs/B"],
-					success = True,
-					mergelist = ["dev-libs/B-1"]),
-				ResolverPlaygroundTestCase(
-					["dev-libs/C"],
-					success = True,
-					mergelist = ["dev-libs/C-1"]),
-				ResolverPlaygroundTestCase(
-					["dev-libs/D"],
-					success = True,
-					mergelist = ["dev-libs/D-1"]),
-				ResolverPlaygroundTestCase(
-					["dev-libs/E"],
-					options = { "--autounmask": 'n' },
-					success = False),
-		)
-
-		playground = ResolverPlayground(ebuilds=ebuilds, repo_configs=repo_configs, \
-			profile=profile, user_config=user_config)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
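
testFeaturesMutation above encodes an invariant that holds outside any playground as well: config.features is a live, mutable view onto the FEATURES string. A minimal sketch against a real config object (assuming a working portage install):

    import portage
    from portage.package.ebuild.config import config

    settings = config(clone=portage.settings)  # clone, as in testClone above
    settings.features.add('noclean')
    assert 'noclean' in settings['FEATURES'].split()
    settings.regenerate()                       # mutation must survive regenerate()
    assert 'noclean' in settings['FEATURES'].split()
    settings.features.discard('noclean')
    assert 'noclean' not in settings['FEATURES'].split()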

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_doebuild_spawn.py b/portage_with_autodep/pym/portage/tests/ebuild/test_doebuild_spawn.py
deleted file mode 100644
index ed08b2a..0000000
--- a/portage_with_autodep/pym/portage/tests/ebuild/test_doebuild_spawn.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage import os
-from portage import _python_interpreter
-from portage import _shell_quote
-from portage.const import EBUILD_SH_BINARY
-from portage.package.ebuild.config import config
-from portage.package.ebuild.doebuild import spawn as doebuild_spawn
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground
-from _emerge.EbuildPhase import EbuildPhase
-from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
-from _emerge.Package import Package
-from _emerge.PollScheduler import PollScheduler
-
-class DoebuildSpawnTestCase(TestCase):
-	"""
-	Invoke portage.package.ebuild.doebuild.spawn() with a
-	minimal environment. This gives coverage to some of
-	the ebuild execution internals, like ebuild.sh,
-	AbstractEbuildProcess, and EbuildIpcDaemon.
-	"""
-
-	def testDoebuildSpawn(self):
-		playground = ResolverPlayground()
-		try:
-			settings = config(clone=playground.settings)
-			cpv = 'sys-apps/portage-2.1'
-			metadata = {
-				'EAPI'      : '2',
-				'INHERITED' : 'python eutils',
-				'IUSE'      : 'build doc epydoc python3 selinux',
-				'LICENSE'   : 'GPL-2',
-				'PROVIDE'   : 'virtual/portage',
-				'RDEPEND'   : '>=app-shells/bash-3.2_p17 >=dev-lang/python-2.6',
-				'SLOT'      : '0',
-			}
-			root_config = playground.trees[playground.root]['root_config']
-			pkg = Package(built=False, cpv=cpv, installed=False,
-				metadata=metadata, root_config=root_config,
-				type_name='ebuild')
-			settings.setcpv(pkg)
-			settings['PORTAGE_PYTHON'] = _python_interpreter
-			settings['PORTAGE_BUILDDIR'] = os.path.join(
-				settings['PORTAGE_TMPDIR'], cpv)
-			settings['T'] = os.path.join(
-				settings['PORTAGE_BUILDDIR'], 'temp')
-			for x in ('PORTAGE_BUILDDIR', 'T'):
-				os.makedirs(settings[x])
-			# Create a fake environment, to pretend as if the ebuild
-			# has been sourced already.
-			open(os.path.join(settings['T'], 'environment'), 'wb')
-
-			scheduler = PollScheduler().sched_iface
-			for phase in ('_internal_test',):
-
-				# Test EbuildSpawnProcess by calling doebuild.spawn() with
-				# returnpid=False. This case is no longer used by portage
-				# internals since EbuildPhase is used instead and that passes
-				# returnpid=True to doebuild.spawn().
-				rval = doebuild_spawn("%s %s" % (_shell_quote(
-					os.path.join(settings["PORTAGE_BIN_PATH"],
-					os.path.basename(EBUILD_SH_BINARY))), phase),
-					settings, free=1)
-				self.assertEqual(rval, os.EX_OK)
-
-				ebuild_phase = EbuildPhase(background=False,
-					phase=phase, scheduler=scheduler,
-					settings=settings)
-				ebuild_phase.start()
-				ebuild_phase.wait()
-				self.assertEqual(ebuild_phase.returncode, os.EX_OK)
-
-			ebuild_phase = MiscFunctionsProcess(background=False,
-				commands=['success_hooks'],
-				scheduler=scheduler, settings=settings)
-			ebuild_phase.start()
-			ebuild_phase.wait()
-			self.assertEqual(ebuild_phase.returncode, os.EX_OK)
-		finally:
-			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_ipc_daemon.py b/portage_with_autodep/pym/portage/tests/ebuild/test_ipc_daemon.py
deleted file mode 100644
index b5b4796..0000000
--- a/portage_with_autodep/pym/portage/tests/ebuild/test_ipc_daemon.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import shutil
-import tempfile
-import time
-from portage import os
-from portage import _python_interpreter
-from portage.tests import TestCase
-from portage.const import PORTAGE_BIN_PATH
-from portage.const import PORTAGE_PYM_PATH
-from portage.const import BASH_BINARY
-from portage.package.ebuild._ipc.ExitCommand import ExitCommand
-from portage.util import ensure_dirs
-from _emerge.SpawnProcess import SpawnProcess
-from _emerge.EbuildBuildDir import EbuildBuildDir
-from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
-from _emerge.TaskScheduler import TaskScheduler
-
-class IpcDaemonTestCase(TestCase):
-
-	_SCHEDULE_TIMEOUT = 40000 # 40 seconds
-
-	def testIpcDaemon(self):
-		tmpdir = tempfile.mkdtemp()
-		build_dir = None
-		try:
-			env = {}
-
-			# Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
-			# need to be inherited by ebuild subprocesses.
-			if 'PORTAGE_USERNAME' in os.environ:
-				env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
-			if 'PORTAGE_GRPNAME' in os.environ:
-				env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
-
-			env['PORTAGE_PYTHON'] = _python_interpreter
-			env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
-			env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
-			env['PORTAGE_BUILDDIR'] = os.path.join(tmpdir, 'cat', 'pkg-1')
-
-			task_scheduler = TaskScheduler(max_jobs=2)
-			build_dir = EbuildBuildDir(
-				scheduler=task_scheduler.sched_iface,
-				settings=env)
-			build_dir.lock()
-			ensure_dirs(env['PORTAGE_BUILDDIR'])
-
-			input_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_in')
-			output_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_out')
-			os.mkfifo(input_fifo)
-			os.mkfifo(output_fifo)
-
-			for exitcode in (0, 1, 2):
-				exit_command = ExitCommand()
-				commands = {'exit' : exit_command}
-				daemon = EbuildIpcDaemon(commands=commands,
-					input_fifo=input_fifo,
-					output_fifo=output_fifo,
-					scheduler=task_scheduler.sched_iface)
-				proc = SpawnProcess(
-					args=[BASH_BINARY, "-c",
-					'"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
-					env=env, scheduler=task_scheduler.sched_iface)
-
-				self.received_command = False
-				def exit_command_callback():
-					self.received_command = True
-					proc.cancel()
-					daemon.cancel()
-
-				exit_command.reply_hook = exit_command_callback
-				task_scheduler.add(daemon)
-				task_scheduler.add(proc)
-				start_time = time.time()
-				task_scheduler.run(timeout=self._SCHEDULE_TIMEOUT)
-				task_scheduler.clear()
-
-				self.assertEqual(self.received_command, True,
-					"command not received after %d seconds" % \
-					(time.time() - start_time,))
-				self.assertEqual(proc.isAlive(), False)
-				self.assertEqual(daemon.isAlive(), False)
-				self.assertEqual(exit_command.exitcode, exitcode)
-
-			# Intentionally short timeout test for QueueScheduler.run()
-			sleep_time_s = 10      # 10.000 seconds
-			short_timeout_ms = 10  #  0.010 seconds
-
-			for i in range(3):
-				exit_command = ExitCommand()
-				commands = {'exit' : exit_command}
-				daemon = EbuildIpcDaemon(commands=commands,
-					input_fifo=input_fifo,
-					output_fifo=output_fifo,
-					scheduler=task_scheduler.sched_iface)
-				proc = SpawnProcess(
-					args=[BASH_BINARY, "-c", 'exec sleep %d' % sleep_time_s],
-					env=env, scheduler=task_scheduler.sched_iface)
-
-				self.received_command = False
-				def exit_command_callback():
-					self.received_command = True
-					proc.cancel()
-					daemon.cancel()
-
-				exit_command.reply_hook = exit_command_callback
-				task_scheduler.add(daemon)
-				task_scheduler.add(proc)
-				start_time = time.time()
-				task_scheduler.run(timeout=short_timeout_ms)
-				task_scheduler.clear()
-
-				self.assertEqual(self.received_command, False,
-					"command received after %d seconds" % \
-					(time.time() - start_time,))
-				self.assertEqual(proc.isAlive(), False)
-				self.assertEqual(daemon.isAlive(), False)
-				self.assertEqual(proc.returncode == os.EX_OK, False)
-
-		finally:
-			if build_dir is not None:
-				build_dir.unlock()
-			shutil.rmtree(tmpdir)

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_pty_eof.py b/portage_with_autodep/pym/portage/tests/ebuild/test_pty_eof.py
deleted file mode 100644
index 4b6ff21..0000000
--- a/portage_with_autodep/pym/portage/tests/ebuild/test_pty_eof.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2009-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.util._pty import _can_test_pty_eof, _test_pty_eof
-
-class PtyEofFdopenBufferedTestCase(TestCase):
-
-	def testPtyEofFdopenBuffered(self):
-		# This tests if the following python issue is fixed yet:
-		#   http://bugs.python.org/issue5380
-		# Since it might not be fixed, mark as todo.
-		self.todo = True
-		# The result is only valid if openpty does not raise EnvironmentError.
-		if _can_test_pty_eof():
-			try:
-				self.assertEqual(_test_pty_eof(fdopen_buffered=True), True)
-			except EnvironmentError:
-				pass
-
-class PtyEofFdopenUnBufferedTestCase(TestCase):
-	def testPtyEofFdopenUnBuffered(self):
-		# New development: It appears that array.fromfile() is usable
-		# with python3 as long as fdopen is called with a bufsize
-		# argument of 0.
-
-		# The result is only valid if openpty does not raise EnvironmentError.
-		if _can_test_pty_eof():
-			try:
-				self.assertEqual(_test_pty_eof(), True)
-			except EnvironmentError:
-				pass

diff --git a/portage_with_autodep/pym/portage/tests/ebuild/test_spawn.py b/portage_with_autodep/pym/portage/tests/ebuild/test_spawn.py
deleted file mode 100644
index fea4738..0000000
--- a/portage_with_autodep/pym/portage/tests/ebuild/test_spawn.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 1998-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import errno
-import io
-import sys
-import tempfile
-from portage import os
-from portage import _encodings
-from portage import _unicode_encode
-from portage.const import BASH_BINARY
-from portage.tests import TestCase
-from _emerge.SpawnProcess import SpawnProcess
-from _emerge.PollScheduler import PollScheduler
-
-class SpawnTestCase(TestCase):
-
-	def testLogfile(self):
-		logfile = None
-		try:
-			fd, logfile = tempfile.mkstemp()
-			os.close(fd)
-			null_fd = os.open('/dev/null', os.O_RDWR)
-			test_string = 2 * "blah blah blah\n"
-			scheduler = PollScheduler().sched_iface
-			proc = SpawnProcess(
-				args=[BASH_BINARY, "-c",
-				"echo -n '%s'" % test_string],
-				env={}, fd_pipes={0:sys.stdin.fileno(), 1:null_fd, 2:null_fd},
-				scheduler=scheduler,
-				logfile=logfile)
-			proc.start()
-			os.close(null_fd)
-			self.assertEqual(proc.wait(), os.EX_OK)
-			f = io.open(_unicode_encode(logfile,
-				encoding=_encodings['fs'], errors='strict'),
-				mode='r', encoding=_encodings['content'], errors='strict')
-			log_content = f.read()
-			f.close()
-			# When logging passes through a pty, this comparison will fail
-			# unless the oflag terminal attributes have the termios.OPOST
-			# bit disabled. Otherwise, tranformations such as \n -> \r\n
-			# may occur.
-			self.assertEqual(test_string, log_content)
-		finally:
-			if logfile:
-				try:
-					os.unlink(logfile)
-				except EnvironmentError as e:
-					if e.errno != errno.ENOENT:
-						raise
-					del e
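
Beyond the regression check, testLogfile is a compact recipe for running a command through portage's async spawn machinery with output teed to a log file. A trimmed sketch of the same flow (fd_pipes left at its default; APIs as in the deleted file):

    import tempfile
    from portage import os
    from portage.const import BASH_BINARY
    from _emerge.SpawnProcess import SpawnProcess
    from _emerge.PollScheduler import PollScheduler

    fd, logfile = tempfile.mkstemp()
    os.close(fd)
    proc = SpawnProcess(
        args=[BASH_BINARY, "-c", "echo hello"],
        env={}, scheduler=PollScheduler().sched_iface,
        logfile=logfile)
    proc.start()
    assert proc.wait() == os.EX_OK
    # 'logfile' now holds the command's output; see the OPOST caveat above.
    os.unlink(logfile)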

diff --git a/portage_with_autodep/pym/portage/tests/env/__init__.py b/portage_with_autodep/pym/portage/tests/env/__init__.py
deleted file mode 100644
index cbeabe5..0000000
--- a/portage_with_autodep/pym/portage/tests/env/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# tests/portage/env/__init__.py -- Portage Unit Test functionality
-# Copyright 2007 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-

diff --git a/portage_with_autodep/pym/portage/tests/env/__test__ b/portage_with_autodep/pym/portage/tests/env/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/env/config/__init__.py b/portage_with_autodep/pym/portage/tests/env/config/__init__.py
deleted file mode 100644
index ef5cc43..0000000
--- a/portage_with_autodep/pym/portage/tests/env/config/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# tests/portage/env/config/__init__.py -- Portage Unit Test functionality
-# Copyright 2007 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-

diff --git a/portage_with_autodep/pym/portage/tests/env/config/__test__ b/portage_with_autodep/pym/portage/tests/env/config/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/env/config/test_PackageKeywordsFile.py b/portage_with_autodep/pym/portage/tests/env/config/test_PackageKeywordsFile.py
deleted file mode 100644
index f1e9e98..0000000
--- a/portage_with_autodep/pym/portage/tests/env/config/test_PackageKeywordsFile.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# test_PackageKeywordsFile.py -- Portage Unit Testing Functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage import os
-from portage.tests import TestCase
-from portage.env.config import PackageKeywordsFile
-from tempfile import mkstemp
-
-class PackageKeywordsFileTestCase(TestCase):
-
-	cpv = ['sys-apps/portage']
-	keywords = ['~x86', 'amd64', '-mips']
-	
-	def testPackageKeywordsFile(self):
-		"""
-		A simple test to ensure the load works properly
-		"""
-
-		self.BuildFile()
-		try:
-			f = PackageKeywordsFile(self.fname)
-			f.load()
-			i = 0
-			for cpv, keyword in f.items():
-				self.assertEqual( cpv, self.cpv[i] )
-				for k in keyword:
-					self.assertTrue(k in self.keywords)
-				i = i + 1
-		finally:
-			self.NukeFile()
-	
-	def BuildFile(self):
-		fd, self.fname = mkstemp()
-		f = os.fdopen(fd, 'w')
-		for c in self.cpv:
-			f.write("%s %s\n" % (c,' '.join(self.keywords)))
-		f.close()
-
-	def NukeFile(self):
-		os.unlink(self.fname)
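
The same load-and-iterate pattern works outside the test harness: PackageKeywordsFile parses one "atom keyword..." line per entry and then behaves like a mapping. A sketch (the path is hypothetical; any file in package.keywords format will do):

    from portage.env.config import PackageKeywordsFile

    f = PackageKeywordsFile("/etc/portage/package.keywords")
    f.load()
    for cpv, keywords in f.items():
        print(cpv, keywords)  # e.g. sys-apps/portage ['~x86', 'amd64', '-mips']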

diff --git a/portage_with_autodep/pym/portage/tests/env/config/test_PackageMaskFile.py b/portage_with_autodep/pym/portage/tests/env/config/test_PackageMaskFile.py
deleted file mode 100644
index 0c5b30f..0000000
--- a/portage_with_autodep/pym/portage/tests/env/config/test_PackageMaskFile.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# test_PackageMaskFile.py -- Portage Unit Testing Functionality
-# Copyright 2007 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage import os
-from portage.env.config import PackageMaskFile
-from portage.tests import TestCase, test_cps
-from tempfile import mkstemp
-
-class PackageMaskFileTestCase(TestCase):
-	
-	def testPackageMaskFile(self):
-		self.BuildFile()
-		try:
-			f = PackageMaskFile(self.fname)
-			f.load()
-			for atom in f:
-				self.assertTrue(atom in test_cps)
-		finally:
-			self.NukeFile()
-	
-	def BuildFile(self):
-		fd, self.fname = mkstemp()
-		f = os.fdopen(fd, 'w')
-		f.write("\n".join(test_cps))
-		f.close()
-	
-	def NukeFile(self):
-		os.unlink(self.fname)

diff --git a/portage_with_autodep/pym/portage/tests/env/config/test_PackageUseFile.py b/portage_with_autodep/pym/portage/tests/env/config/test_PackageUseFile.py
deleted file mode 100644
index 7a38067..0000000
--- a/portage_with_autodep/pym/portage/tests/env/config/test_PackageUseFile.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# test_PackageUseFile.py -- Portage Unit Testing Functionality
-# Copyright 2007 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage import os
-from portage.tests import TestCase
-from portage.env.config import PackageUseFile
-from tempfile import mkstemp
-
-
-class PackageUseFileTestCase(TestCase):
-
-	cpv = 'sys-apps/portage'
-	useflags = ['cdrom', 'far', 'boo', 'flag', 'blat']
-	
-	def testPackageUseFile(self):
-		"""
-		A simple test to ensure the load works properly
-		"""
-		self.BuildFile()
-		try:
-			f = PackageUseFile(self.fname)
-			f.load()
-			for cpv, use in f.items():
-				self.assertEqual( cpv, self.cpv )
-				for flag in use:
-					self.assertTrue(flag in self.useflags)
-		finally:
-			self.NukeFile()
-
-	def BuildFile(self):
-		fd, self.fname = mkstemp()
-		f = os.fdopen(fd, 'w')
-		f.write("%s %s" % (self.cpv, ' '.join(self.useflags)))
-		f.close()
-	
-	def NukeFile(self):
-		os.unlink(self.fname)
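
PackageUseFile follows the same protocol, with one "cpv flag..." line per entry. A self-contained round trip mirroring BuildFile()/NukeFile() above:

    from tempfile import mkstemp
    from portage import os
    from portage.env.config import PackageUseFile

    fd, fname = mkstemp()
    with os.fdopen(fd, 'w') as f:
        f.write("sys-apps/portage cdrom far\n")
    pkg_use = PackageUseFile(fname)
    pkg_use.load()
    for cpv, use in pkg_use.items():
        print(cpv, use)  # roughly: sys-apps/portage ['cdrom', 'far']
    os.unlink(fname)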

diff --git a/portage_with_autodep/pym/portage/tests/env/config/test_PortageModulesFile.py b/portage_with_autodep/pym/portage/tests/env/config/test_PortageModulesFile.py
deleted file mode 100644
index 2cd1a8a..0000000
--- a/portage_with_autodep/pym/portage/tests/env/config/test_PortageModulesFile.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2006-2009 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage import os
-from portage.tests import TestCase
-from portage.env.config import PortageModulesFile
-from tempfile import mkstemp
-
-class PortageModulesFileTestCase(TestCase):
-
-	keys = ['foo.bar','baz','bob','extra_key']
-	invalid_keys = ['',""]
-	modules = ['spanky','zmedico','antarus','ricer','5','6']
-
-	def setUp(self):
-		self.items = {}
-		for k, v in zip(self.keys + self.invalid_keys, 
-				self.modules):
-			self.items[k] = v
-
-	def testPortageModulesFile(self):
-		self.BuildFile()
-		f = PortageModulesFile(self.fname)
-		f.load()
-		for k in self.keys:
-			self.assertEqual(f[k], self.items[k])
-		for ik in self.invalid_keys:
-			self.assertEqual(False, ik in f)
-		self.NukeFile()
-
-	def BuildFile(self):
-		fd, self.fname = mkstemp()
-		f = os.fdopen(fd, 'w')
-		for k, v in self.items.items():
-			f.write('%s=%s\n' % (k,v))
-		f.close()
-
-	def NukeFile(self):
-		os.unlink(self.fname)
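
PortageModulesFile, by contrast, parses "key=value" lines into a mapping and, as the invalid_keys check above shows, drops empty keys. A round-trip sketch:

    from tempfile import mkstemp
    from portage import os
    from portage.env.config import PortageModulesFile

    fd, fname = mkstemp()
    with os.fdopen(fd, 'w') as f:
        f.write("foo.bar=spanky\nbaz=zmedico\n")
    modules = PortageModulesFile(fname)
    modules.load()
    print(modules["foo.bar"])  # spanky
    os.unlink(fname)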

diff --git a/portage_with_autodep/pym/portage/tests/lafilefixer/__init__.py b/portage_with_autodep/pym/portage/tests/lafilefixer/__init__.py
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/lafilefixer/__test__ b/portage_with_autodep/pym/portage/tests/lafilefixer/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/lafilefixer/test_lafilefixer.py b/portage_with_autodep/pym/portage/tests/lafilefixer/test_lafilefixer.py
deleted file mode 100644
index 0bcffaa..0000000
--- a/portage_with_autodep/pym/portage/tests/lafilefixer/test_lafilefixer.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# test_lafilefixer.py -- Portage Unit Testing Functionality
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.exception import InvalidData
-
-class test_lafilefixer(TestCase):
-
-	def get_test_cases_clean(self):
-		yield b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' -lm'\n" + \
-			b"current=6\n" + \
-			b"age=0\n" + \
-			b"revision=2\n" + \
-			b"installed=yes\n" + \
-			b"dlopen=''\n" + \
-			b"dlpreopen=''\n" + \
-			b"libdir='/usr/lib64'\n"
-		yield b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' -lm'\n" + \
-			b"current=6\n" + \
-			b"age=0\n" + \
-			b"revision=2\n" + \
-			b"installed=yes\n" + \
-			b"dlopen=''\n" + \
-			b"dlpreopen=''\n" + \
-			b"libdir='/usr/lib64'\n"
-		yield b"dependency_libs=' liba.la /usr/lib64/bar.la -lc'\n"
-
-	def get_test_cases_update(self):
-		#.la -> -l*
-		yield b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc'\n", \
-			b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' -L/usr/lib64 -la -lb -lc'\n"
-		#move stuff into inherited_linker_flags
-		yield b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' /usr/lib64/liba.la -pthread /usr/lib64/libb.la -lc'\n" + \
-			b"inherited_linker_flags=''\n", \
-			b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' -L/usr/lib64 -la -lb -lc'\n" + \
-			b"inherited_linker_flags=' -pthread'\n"
-		#reorder 
-		yield b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' /usr/lib64/liba.la -R/usr/lib64 /usr/lib64/libb.la -lc'\n", \
-			b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' -R/usr/lib64 -L/usr/lib64 -la -lb -lc'\n"
-		#remove duplicates from dependency_libs (the original version didn't do it for inherited_linker_flags)
-		yield b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libc.la -pthread -mt" + \
-			b" -L/usr/lib -R/usr/lib64 -lc /usr/lib64/libb.la -lc'\n" +\
-			b"inherited_linker_flags=' -pthread -pthread'\n", \
-			b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' -R/usr/lib64 -L/usr/lib64 -L/usr/lib -la -lc -lb'\n" +\
-			b"inherited_linker_flags=' -pthread -pthread -mt'\n"
-		#-L rewriting
-		yield b"dependency_libs=' -L/usr/X11R6/lib'\n", \
-			b"dependency_libs=' -L/usr/lib'\n"
-		yield b"dependency_libs=' -L/usr/local/lib'\n", \
-			b"dependency_libs=' -L/usr/lib'\n"
-		yield b"dependency_libs=' -L/usr/lib64/pkgconfig/../..'\n", \
-			b"dependency_libs=' -L/usr'\n"
-		yield b"dependency_libs=' -L/usr/lib/pkgconfig/..'\n", \
-			b"dependency_libs=' -L/usr/lib'\n"
-		yield b"dependency_libs=' -L/usr/lib/pkgconfig/../.. -L/usr/lib/pkgconfig/..'\n", \
-			b"dependency_libs=' -L/usr -L/usr/lib'\n"
-		#we once got a backtrace on this one
-		yield b"dependency_libs=' /usr/lib64/libMagickCore.la -L/usr/lib64 -llcms2 /usr/lib64/libtiff.la " + \
-			b"-ljbig -lc /usr/lib64/libfreetype.la /usr/lib64/libjpeg.la /usr/lib64/libXext.la " + \
-			b"/usr/lib64/libXt.la /usr/lib64/libSM.la -lICE -luuid /usr/lib64/libICE.la /usr/lib64/libX11.la " + \
-			b"/usr/lib64/libxcb.la /usr/lib64/libXau.la /usr/lib64/libXdmcp.la -lbz2 -lz -lm " + \
-			b"/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.4/libgomp.la -lrt -lpthread /usr/lib64/libltdl.la -ldl " + \
-			b"/usr/lib64/libfpx.la -lstdc++'", \
-			b"dependency_libs=' -L/usr/lib64 -L/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.4 -lMagickCore -llcms2 " + \
-			b"-ltiff -ljbig -lc -lfreetype -ljpeg -lXext -lXt -lSM -lICE -luuid -lX11 -lxcb -lXau -lXdmcp " + \
-			b"-lbz2 -lz -lm -lgomp -lrt -lpthread -lltdl -ldl -lfpx -lstdc++'"
-
-
-	def get_test_cases_broken(self):
-		yield b""
-		#no dependency_libs
-		yield b"dlname='libfoo.so.1'\n" + \
-			b"current=6\n" + \
-			b"age=0\n" + \
-			b"revision=2\n"
-		#broken dependency_libs
-		yield b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc' \n"
-		#broken dependency_libs
-		yield b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc\n"
-		#crap in dependency_libs
-		yield b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n"
-		#dependency_libs twice
-		yield b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n" +\
-			b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n"
-		#inherited_linker_flags twice
-		yield b"dlname='libfoo.so.1'\n" + \
-			b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
-			b"old_library='libpdf.a'\n" + \
-			b"inherited_linker_flags=''\n" +\
-			b"inherited_linker_flags=''\n"
-
-	def testlafilefixer(self):
-		from portage.util.lafilefixer import _parse_lafile_contents, rewrite_lafile
-
-		for clean_contents in self.get_test_cases_clean():
-			self.assertEqual(rewrite_lafile(clean_contents), (False, None))
-
-		for original_contents, fixed_contents in self.get_test_cases_update():
-			self.assertEqual(rewrite_lafile(original_contents), (True, fixed_contents))
-
-		for broken_contents in self.get_test_cases_broken():
-			self.assertRaises(InvalidData, rewrite_lafile, broken_contents)
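
The assertions above fix rewrite_lafile()'s contract: given the raw bytes of a .la file, it returns (False, None) when nothing needs fixing, (True, fixed_contents) when it rewrote dependency_libs, and raises InvalidData on malformed input. A sketch:

    from portage.util.lafilefixer import rewrite_lafile

    contents = b"dependency_libs=' /usr/lib64/liba.la -lc'\n"
    modified, fixed = rewrite_lafile(contents)
    if modified:
        # expected, per the update cases above:
        # b"dependency_libs=' -L/usr/lib64 -la -lc'\n"
        print(fixed)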

diff --git a/portage_with_autodep/pym/portage/tests/lazyimport/__init__.py b/portage_with_autodep/pym/portage/tests/lazyimport/__init__.py
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/lazyimport/__test__ b/portage_with_autodep/pym/portage/tests/lazyimport/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py b/portage_with_autodep/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
deleted file mode 100644
index 08ccfa7..0000000
--- a/portage_with_autodep/pym/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import re
-import portage
-from portage import os
-from portage.const import PORTAGE_PYM_PATH
-from portage.tests import TestCase
-
-from _emerge.PollScheduler import PollScheduler
-from _emerge.PipeReader import PipeReader
-from _emerge.SpawnProcess import SpawnProcess
-
-class LazyImportPortageBaselineTestCase(TestCase):
-
-	_module_re = re.compile(r'^(portage|repoman|_emerge)\.')
-
-	_baseline_imports = frozenset([
-		'portage.const', 'portage.localization',
-		'portage.proxy', 'portage.proxy.lazyimport',
-		'portage.proxy.objectproxy',
-		'portage._selinux',
-	])
-
-	_baseline_import_cmd = [portage._python_interpreter, '-c', '''
-import os
-import sys
-sys.path.insert(0, os.environ["PORTAGE_PYM_PATH"])
-import portage
-sys.stdout.write(" ".join(k for k in sys.modules
-	if sys.modules[k] is not None))
-''']
-
-	def testLazyImportPortageBaseline(self):
-		"""
-		Check what modules are imported by a baseline module import.
-		"""
-
-		env = os.environ.copy()
-		pythonpath = env.get('PYTHONPATH')
-		if pythonpath is not None and not pythonpath.strip():
-			pythonpath = None
-		if pythonpath is None:
-			pythonpath = ''
-		else:
-			pythonpath = ':' + pythonpath
-		pythonpath = PORTAGE_PYM_PATH + pythonpath
-		env['PYTHONPATH'] = pythonpath
-
-		# If python is patched to insert the path of the
-		# currently installed portage module into sys.path,
-		# then the above PYTHONPATH override doesn't help.
-		env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
-
-		scheduler = PollScheduler().sched_iface
-		master_fd, slave_fd = os.pipe()
-		master_file = os.fdopen(master_fd, 'rb', 0)
-		slave_file = os.fdopen(slave_fd, 'wb')
-		producer = SpawnProcess(
-			args=self._baseline_import_cmd,
-			env=env, fd_pipes={1:slave_fd},
-			scheduler=scheduler)
-		producer.start()
-		slave_file.close()
-
-		consumer = PipeReader(
-			input_files={"producer" : master_file},
-			scheduler=scheduler)
-
-		consumer.start()
-		consumer.wait()
-		self.assertEqual(producer.wait(), os.EX_OK)
-		self.assertEqual(consumer.wait(), os.EX_OK)
-
-		output = consumer.getvalue().decode('ascii', 'replace').split()
-
-		unexpected_modules = " ".join(sorted(x for x in output \
-			if self._module_re.match(x) is not None and \
-			x not in self._baseline_imports))
-
-		self.assertEqual("", unexpected_modules)

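The same measurement can be reproduced with the stdlib alone; a rough
equivalent of the producer/consumer machinery above (a sketch — the
fallback path below is only a typical location, not guaranteed):

    import os
    import subprocess
    import sys

    env = dict(os.environ)
    # PORTAGE_PYM_PATH comes from the environment in the real test;
    # the fallback here is a guess for illustration only.
    env['PYTHONPATH'] = env.get('PORTAGE_PYM_PATH', '/usr/lib/portage/pym')
    out = subprocess.check_output([sys.executable, '-c',
        'import sys, portage; '
        'print(" ".join(k for k in sys.modules if sys.modules[k] is not None))'],
        env=env)
    loaded = out.decode('ascii', 'replace').split()
    extra = sorted(m for m in loaded if m.startswith(('portage.', '_emerge.')))
    # 'extra' should contain only the whitelisted baseline modules.
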
diff --git a/portage_with_autodep/pym/portage/tests/lazyimport/test_preload_portage_submodules.py b/portage_with_autodep/pym/portage/tests/lazyimport/test_preload_portage_submodules.py
deleted file mode 100644
index 9d20eba..0000000
--- a/portage_with_autodep/pym/portage/tests/lazyimport/test_preload_portage_submodules.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import portage
-from portage.tests import TestCase
-
-class PreloadPortageSubmodulesTestCase(TestCase):
-
-	def testPreloadPortageSubmodules(self):
-		"""
-		Verify that _preload_portage_submodules() doesn't leave any
-		remaining proxies that refer to the portage.* namespace.
-		"""
-		portage.proxy.lazyimport._preload_portage_submodules()
-		for name in portage.proxy.lazyimport._module_proxies:
-			self.assertEqual(name.startswith('portage.'), False)

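The proxies being inspected are essentially placeholders that import the
real module on first attribute access; a minimal stand-in illustrating
the idea (not portage.proxy.lazyimport itself):

    import importlib

    class LazyProxy(object):
        """Defer the import of a module until an attribute is accessed."""
        def __init__(self, name):
            self._name = name
        def __getattr__(self, attr):
            # Only reached when normal lookup fails, i.e. for
            # attributes of the not-yet-imported target module.
            return getattr(importlib.import_module(self._name), attr)
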
diff --git a/portage_with_autodep/pym/portage/tests/lint/__init__.pyo b/portage_with_autodep/pym/portage/tests/lint/__init__.pyo
new file mode 100644
index 0000000..a1241e5
Binary files /dev/null and b/portage_with_autodep/pym/portage/tests/lint/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/tests/lint/test_bash_syntax.pyo b/portage_with_autodep/pym/portage/tests/lint/test_bash_syntax.pyo
new file mode 100644
index 0000000..a7ddc80
Binary files /dev/null and b/portage_with_autodep/pym/portage/tests/lint/test_bash_syntax.pyo differ

diff --git a/portage_with_autodep/pym/portage/tests/lint/test_compile_modules.pyo b/portage_with_autodep/pym/portage/tests/lint/test_compile_modules.pyo
new file mode 100644
index 0000000..7b1460d
Binary files /dev/null and b/portage_with_autodep/pym/portage/tests/lint/test_compile_modules.pyo differ

diff --git a/portage_with_autodep/pym/portage/tests/lint/test_import_modules.pyo b/portage_with_autodep/pym/portage/tests/lint/test_import_modules.pyo
new file mode 100644
index 0000000..b3a1d61
Binary files /dev/null and b/portage_with_autodep/pym/portage/tests/lint/test_import_modules.pyo differ

diff --git a/portage_with_autodep/pym/portage/tests/locks/__test__ b/portage_with_autodep/pym/portage/tests/locks/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/locks/test_asynchronous_lock.py b/portage_with_autodep/pym/portage/tests/locks/test_asynchronous_lock.py
deleted file mode 100644
index 8946caf..0000000
--- a/portage_with_autodep/pym/portage/tests/locks/test_asynchronous_lock.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import shutil
-import signal
-import tempfile
-
-from portage import os
-from portage.tests import TestCase
-from _emerge.AsynchronousLock import AsynchronousLock
-from _emerge.PollScheduler import PollScheduler
-
-class AsynchronousLockTestCase(TestCase):
-
-	def testAsynchronousLock(self):
-		scheduler = PollScheduler().sched_iface
-		tempdir = tempfile.mkdtemp()
-		try:
-			path = os.path.join(tempdir, 'lock_me')
-			for force_async in (True, False):
-				for force_dummy in (True, False):
-					async_lock = AsynchronousLock(path=path,
-						scheduler=scheduler, _force_async=force_async,
-						_force_thread=True,
-						_force_dummy=force_dummy)
-					async_lock.start()
-					self.assertEqual(async_lock.wait(), os.EX_OK)
-					self.assertEqual(async_lock.returncode, os.EX_OK)
-					async_lock.unlock()
-
-				async_lock = AsynchronousLock(path=path,
-					scheduler=scheduler, _force_async=force_async,
-					_force_process=True)
-				async_lock.start()
-				self.assertEqual(async_lock.wait(), os.EX_OK)
-				self.assertEqual(async_lock.returncode, os.EX_OK)
-				async_lock.unlock()
-
-		finally:
-			shutil.rmtree(tempdir)
-
-	def testAsynchronousLockWait(self):
-		scheduler = PollScheduler().sched_iface
-		tempdir = tempfile.mkdtemp()
-		try:
-			path = os.path.join(tempdir, 'lock_me')
-			lock1 = AsynchronousLock(path=path, scheduler=scheduler)
-			lock1.start()
-			self.assertEqual(lock1.wait(), os.EX_OK)
-			self.assertEqual(lock1.returncode, os.EX_OK)
-
-			# lock2 requires _force_async=True since the portage.locks
-			# module is not designed to let the same process lock the
-			# same file more than once concurrently.
-			lock2 = AsynchronousLock(path=path, scheduler=scheduler,
-				_force_async=True, _force_process=True)
-			lock2.start()
-			# lock2 should be waiting for lock1 to release
-			self.assertEqual(lock2.poll(), None)
-			self.assertEqual(lock2.returncode, None)
-
-			lock1.unlock()
-			self.assertEqual(lock2.wait(), os.EX_OK)
-			self.assertEqual(lock2.returncode, os.EX_OK)
-			lock2.unlock()
-		finally:
-			shutil.rmtree(tempdir)
-
-	def testAsynchronousLockWaitCancel(self):
-		scheduler = PollScheduler().sched_iface
-		tempdir = tempfile.mkdtemp()
-		try:
-			path = os.path.join(tempdir, 'lock_me')
-			lock1 = AsynchronousLock(path=path, scheduler=scheduler)
-			lock1.start()
-			self.assertEqual(lock1.wait(), os.EX_OK)
-			self.assertEqual(lock1.returncode, os.EX_OK)
-			lock2 = AsynchronousLock(path=path, scheduler=scheduler,
-				_force_async=True, _force_process=True)
-			lock2.start()
-			# lock2 should be waiting for lock1 to release
-			self.assertEqual(lock2.poll(), None)
-			self.assertEqual(lock2.returncode, None)
-
-			# Cancel lock2 and then check wait() and returncode results.
-			lock2.cancel()
-			self.assertEqual(lock2.wait() == os.EX_OK, False)
-			self.assertEqual(lock2.returncode == os.EX_OK, False)
-			self.assertEqual(lock2.returncode is None, False)
-			lock1.unlock()
-		finally:
-			shutil.rmtree(tempdir)
-
-	def testAsynchronousLockWaitKill(self):
-		scheduler = PollScheduler().sched_iface
-		tempdir = tempfile.mkdtemp()
-		try:
-			path = os.path.join(tempdir, 'lock_me')
-			lock1 = AsynchronousLock(path=path, scheduler=scheduler)
-			lock1.start()
-			self.assertEqual(lock1.wait(), os.EX_OK)
-			self.assertEqual(lock1.returncode, os.EX_OK)
-			lock2 = AsynchronousLock(path=path, scheduler=scheduler,
-				_force_async=True, _force_process=True)
-			lock2.start()
-			# lock2 should be waiting for lock1 to release
-			self.assertEqual(lock2.poll(), None)
-			self.assertEqual(lock2.returncode, None)
-
-			# Kill lock2's process and then check wait() and
-			# returncode results. This is intended to simulate
-			# a SIGINT sent via the controlling tty.
-			self.assertEqual(lock2._imp is not None, True)
-			self.assertEqual(lock2._imp._proc is not None, True)
-			self.assertEqual(lock2._imp._proc.pid is not None, True)
-			lock2._imp._kill_test = True
-			os.kill(lock2._imp._proc.pid, signal.SIGTERM)
-			self.assertEqual(lock2.wait() == os.EX_OK, False)
-			self.assertEqual(lock2.returncode == os.EX_OK, False)
-			self.assertEqual(lock2.returncode is None, False)
-			lock1.unlock()
-		finally:
-			shutil.rmtree(tempdir)

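The lock protocol these tests exercise follows one shape throughout
(sketch; the path is arbitrary and the scheduler is obtained as in the
tests above):

    from _emerge.AsynchronousLock import AsynchronousLock
    from _emerge.PollScheduler import PollScheduler
    from portage import os

    scheduler = PollScheduler().sched_iface
    lock = AsynchronousLock(path='/tmp/lock_me', scheduler=scheduler)
    lock.start()
    if lock.wait() == os.EX_OK:  # blocks until the lock is acquired
        try:
            pass  # critical section
        finally:
            lock.unlock()
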
diff --git a/portage_with_autodep/pym/portage/tests/locks/test_lock_nonblock.py b/portage_with_autodep/pym/portage/tests/locks/test_lock_nonblock.py
deleted file mode 100644
index d5748ad..0000000
--- a/portage_with_autodep/pym/portage/tests/locks/test_lock_nonblock.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import shutil
-import tempfile
-import traceback
-
-import portage
-from portage import os
-from portage.tests import TestCase
-
-class LockNonblockTestCase(TestCase):
-
-	def testLockNonblock(self):
-		tempdir = tempfile.mkdtemp()
-		try:
-			path = os.path.join(tempdir, 'lock_me')
-			lock1 = portage.locks.lockfile(path)
-			pid = os.fork()
-			if pid == 0:
-				portage.process._setup_pipes({0:0, 1:1, 2:2})
-				rval = 2
-				try:
-					try:
-						lock2 = portage.locks.lockfile(path, flags=os.O_NONBLOCK)
-					except portage.exception.TryAgain:
-						rval = os.EX_OK
-					else:
-						rval = 1
-						portage.locks.unlockfile(lock2)
-				except SystemExit:
-					raise
-				except:
-					traceback.print_exc()
-				finally:
-					os._exit(rval)
-
-			self.assertEqual(pid > 0, True)
-			pid, status = os.waitpid(pid, 0)
-			self.assertEqual(os.WIFEXITED(status), True)
-			self.assertEqual(os.WEXITSTATUS(status), os.EX_OK)
-
-			portage.locks.unlockfile(lock1)
-		finally:
-			shutil.rmtree(tempdir)
-

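Roughly what the non-blocking path exercises, expressed with the stdlib
fcntl module instead of portage.locks (illustrative only):

    import errno
    import fcntl

    def try_lock(path):
        f = open(path, 'w')
        try:
            fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError as e:
            f.close()
            if e.errno in (errno.EAGAIN, errno.EACCES):
                return None  # already held elsewhere -> TryAgain
            raise
        return f  # caller releases the lock by closing the file
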
diff --git a/portage_with_autodep/pym/portage/tests/news/__init__.py b/portage_with_autodep/pym/portage/tests/news/__init__.py
deleted file mode 100644
index 28a753f..0000000
--- a/portage_with_autodep/pym/portage/tests/news/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# tests/portage.news/__init__.py -- Portage Unit Test functionality
-# Copyright 2007 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/news/__test__ b/portage_with_autodep/pym/portage/tests/news/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/news/test_NewsItem.py b/portage_with_autodep/pym/portage/tests/news/test_NewsItem.py
deleted file mode 100644
index a4e76f3..0000000
--- a/portage_with_autodep/pym/portage/tests/news/test_NewsItem.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# test_NewsItem.py -- Portage Unit Testing Functionality
-# Copyright 2007 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage import os
-from portage.tests import TestCase
-from portage.news import NewsItem
-from portage.dbapi.virtual import testdbapi
-from tempfile import mkstemp
-# TODO(antarus) Make newsitem use a loader so we can load using a string instead of a tempfile
-
-class NewsItemTestCase(TestCase):
-	"""These tests suck: they use your running config instead of making their own"""
-	fakeItem = """
-Title: YourSQL Upgrades from 4.0 to 4.1
-Author: Ciaran McCreesh <ciaranm@gentoo.org>
-Content-Type: text/plain
-Posted: 01-Nov-2005
-Revision: 1
-#Display-If-Installed:
-#Display-If-Profile:
-#Display-If-Keyword:
-
-YourSQL databases created using YourSQL version 4.0 are incompatible
-with YourSQL version 4.1 or later. There is no reliable way to
-automate the database format conversion, so action from the system
-administrator is required before an upgrade can take place.
-
-Please see the Gentoo YourSQL Upgrade Guide for instructions:
-
-    http://www.gentoo.org/doc/en/yoursql-upgrading.xml
-
-Also see the official YourSQL documentation:
-
-    http://dev.yoursql.com/doc/refman/4.1/en/upgrading-from-4-0.html
-
-After upgrading, you should also recompile any packages which link
-against YourSQL:
-
-    revdep-rebuild --library=libyoursqlclient.so.12
-
-The revdep-rebuild tool is provided by app-portage/gentoolkit.
-"""
-	def setUp(self):
-		self.profile = "/usr/portage/profiles/default-linux/x86/2007.0/"
-		self.keywords = "x86"
-		# Use fake/test dbapi to avoid slow tests
-		self.vardb = testdbapi()
-		# self.vardb.inject_cpv('sys-apps/portage-2.0', { 'SLOT' : 0 })
-		# Consumers only use ARCH, so avoid portage.settings by using a dict
-		self.settings = { 'ARCH' : 'x86' }
-
-	def testDisplayIfProfile(self):
-		tmpItem = self.fakeItem[:].replace("#Display-If-Profile:", "Display-If-Profile: %s" %
-			self.profile)
-
-		item = self._processItem(tmpItem)
-		try:
-			self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
-				msg="Expected %s to be relevant, but it was not!" % tmpItem)
-		finally:
-			os.unlink(item.path)
-
-	def testDisplayIfInstalled(self):
-		tmpItem = self.fakeItem[:].replace("#Display-If-Installed:", "Display-If-Installed: %s" %
-			"sys-apps/portage")
-
-		try:
-			item = self._processItem(tmpItem)
-			self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
-				msg="Expected %s to be relevant, but it was not!" % tmpItem)
-		finally:
-			os.unlink(item.path)
-
-	def testDisplayIfKeyword(self):
-		tmpItem = self.fakeItem[:].replace("#Display-If-Keyword:", "Display-If-Keyword: %s" %
-			self.keywords)
-
-		try:
-			item = self._processItem(tmpItem)
-			self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
-				msg="Expected %s to be relevant, but it was not!" % tmpItem)
-		finally:
-			os.unlink(item.path)
-
-	def _processItem(self, item):
-		fd, filename = mkstemp()
-		f = os.fdopen(fd, 'w')
-		f.write(item)
-		f.close()
-		try:
-			return NewsItem(filename, 0)
-		except TypeError:
-			self.fail("Error while processing news item %s" % filename)

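The relevance logic exercised here reduces to header matching: an item is
irrelevant only if a Display-If-* restriction is present and unmatched. A
toy version covering just Display-If-Keyword (not portage.news itself):

    def is_relevant(item_text, arch):
        for line in item_text.splitlines():
            if line.startswith('Display-If-Keyword:'):
                if arch not in line.split(':', 1)[1].split():
                    return False
        return True  # absent headers impose no restriction

    assert is_relevant('Display-If-Keyword: x86\n', 'x86')
    assert not is_relevant('Display-If-Keyword: amd64\n', 'x86')
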
diff --git a/portage_with_autodep/pym/portage/tests/process/__init__.py b/portage_with_autodep/pym/portage/tests/process/__init__.py
deleted file mode 100644
index d19e353..0000000
--- a/portage_with_autodep/pym/portage/tests/process/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# Copyright 1998-2008 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/process/__test__ b/portage_with_autodep/pym/portage/tests/process/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/process/test_poll.py b/portage_with_autodep/pym/portage/tests/process/test_poll.py
deleted file mode 100644
index ee6ee0c..0000000
--- a/portage_with_autodep/pym/portage/tests/process/test_poll.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 1998-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage import os
-from portage.tests import TestCase
-from _emerge.PollScheduler import PollScheduler
-from _emerge.PipeReader import PipeReader
-from _emerge.SpawnProcess import SpawnProcess
-
-class PipeReaderTestCase(TestCase):
-
-	def testPipeReader(self):
-		"""
-		Use a poll loop to read data from a pipe and assert that
-		the data written to the pipe is identical to the data
-		read from the pipe.
-		"""
-
-		test_string = 2 * "blah blah blah\n"
-
-		scheduler = PollScheduler().sched_iface
-		master_fd, slave_fd = os.pipe()
-		master_file = os.fdopen(master_fd, 'rb', 0)
-		slave_file = os.fdopen(slave_fd, 'wb')
-		producer = SpawnProcess(
-			args=["bash", "-c", "echo -n '%s'" % test_string],
-			env=os.environ, fd_pipes={1:slave_fd},
-			scheduler=scheduler)
-		producer.start()
-		slave_file.close()
-
-		consumer = PipeReader(
-			input_files={"producer" : master_file},
-			scheduler=scheduler)
-
-		consumer.start()
-		consumer.wait()
-		output = consumer.getvalue().decode('ascii', 'replace')
-		self.assertEqual(test_string, output)

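The same producer/consumer round trip can be demonstrated with os.pipe
and select directly, without the PollScheduler machinery (sketch, POSIX
only):

    import os
    import select

    payload = b"blah blah blah\n" * 2
    r, w = os.pipe()
    pid = os.fork()
    if pid == 0:  # child: the producer
        os.close(r)
        os.write(w, payload)
        os._exit(0)
    os.close(w)
    chunks = []
    while True:
        select.select([r], [], [])  # wait until readable
        data = os.read(r, 4096)
        if not data:  # EOF: all writers closed
            break
        chunks.append(data)
    os.close(r)
    os.waitpid(pid, 0)
    assert b"".join(chunks) == payload
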
diff --git a/portage_with_autodep/pym/portage/tests/resolver/ResolverPlayground.py b/portage_with_autodep/pym/portage/tests/resolver/ResolverPlayground.py
deleted file mode 100644
index 6a8e3c1..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/ResolverPlayground.py
+++ /dev/null
@@ -1,690 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from itertools import permutations
-import shutil
-import sys
-import tempfile
-import portage
-from portage import os
-from portage.const import PORTAGE_BASE_PATH
-from portage.dbapi.vartree import vartree
-from portage.dbapi.porttree import portagetree
-from portage.dbapi.bintree import binarytree
-from portage.dep import Atom, _repo_separator
-from portage.package.ebuild.config import config
-from portage.package.ebuild.digestgen import digestgen
-from portage._sets import load_default_config
-from portage._sets.base import InternalPackageSet
-from portage.versions import catsplit
-
-import _emerge
-from _emerge.actions import calc_depclean
-from _emerge.Blocker import Blocker
-from _emerge.create_depgraph_params import create_depgraph_params
-from _emerge.depgraph import backtrack_depgraph
-from _emerge.RootConfig import RootConfig
-
-if sys.hexversion >= 0x3000000:
-	basestring = str
-
-class ResolverPlayground(object):
-	"""
-	This class helps to create the necessary files on disk and
-	the needed settings instances, etc. for the resolver to do
-	its work.
-	"""
-
-	config_files = frozenset(("package.use", "package.mask", "package.keywords", \
-		"package.unmask", "package.properties", "package.license", "use.mask", "use.force"))
-
-	def __init__(self, ebuilds={}, installed={}, profile={}, repo_configs={}, \
-		user_config={}, sets={}, world=[], debug=False):
-		"""
-		ebuilds: cpv -> metadata mapping simulating available ebuilds. 
-		installed: cpv -> metadata mapping simulating installed packages.
-			If a metadata key is missing, it gets a default value.
-		profile: settings defined by the profile.
-		"""
-		self.debug = debug
-		self.root = "/"
-		self.eprefix = tempfile.mkdtemp()
-		self.eroot = self.root + self.eprefix.lstrip(os.sep) + os.sep
-		self.portdir = os.path.join(self.eroot, "usr/portage")
-		self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
-		os.makedirs(self.portdir)
-		os.makedirs(self.vdbdir)
-
-		if not debug:
-			portage.util.noiselimit = -2
-
-		self.repo_dirs = {}
-		#Make sure the main repo is always created
-		self._get_repo_dir("test_repo")
-
-		self._create_ebuilds(ebuilds)
-		self._create_installed(installed)
-		self._create_profile(ebuilds, installed, profile, repo_configs, user_config, sets)
-		self._create_world(world)
-
-		self.settings, self.trees = self._load_config()
-
-		self._create_ebuild_manifests(ebuilds)
-		
-		portage.util.noiselimit = 0
-
-	def _get_repo_dir(self, repo):
-		"""
-		Create the repo directory if needed.
-		"""
-		if repo not in self.repo_dirs:
-			if repo == "test_repo":
-				repo_path = self.portdir
-			else:
-				repo_path = os.path.join(self.eroot, "usr", "local", repo)
-
-			self.repo_dirs[repo] = repo_path
-			profile_path = os.path.join(repo_path, "profiles")
-
-			try:
-				os.makedirs(profile_path)
-			except os.error:
-				pass
-
-			repo_name_file = os.path.join(profile_path, "repo_name")
-			f = open(repo_name_file, "w")
-			f.write("%s\n" % repo)
-			f.close()
-
-		return self.repo_dirs[repo]
-
-	def _create_ebuilds(self, ebuilds):
-		for cpv in ebuilds:
-			a = Atom("=" + cpv, allow_repo=True)
-			repo = a.repo
-			if repo is None:
-				repo = "test_repo"
-
-			metadata = ebuilds[cpv].copy()
-			eapi = metadata.pop("EAPI", 0)
-			lic = metadata.pop("LICENSE", "")
-			properties = metadata.pop("PROPERTIES", "")
-			slot = metadata.pop("SLOT", 0)
-			keywords = metadata.pop("KEYWORDS", "x86")
-			iuse = metadata.pop("IUSE", "")
-			depend = metadata.pop("DEPEND", "")
-			rdepend = metadata.pop("RDEPEND", None)
-			pdepend = metadata.pop("PDEPEND", None)
-			required_use = metadata.pop("REQUIRED_USE", None)
-
-			if metadata:
-				raise ValueError("metadata of ebuild '%s' contains unknown keys: %s" % (cpv, metadata.keys()))
-
-			repo_dir = self._get_repo_dir(repo)
-			ebuild_dir = os.path.join(repo_dir, a.cp)
-			ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
-			try:
-				os.makedirs(ebuild_dir)
-			except os.error:
-				pass
-
-			f = open(ebuild_path, "w")
-			f.write('EAPI="' + str(eapi) + '"\n')
-			f.write('LICENSE="' + str(lic) + '"\n')
-			f.write('PROPERTIES="' + str(properties) + '"\n')
-			f.write('SLOT="' + str(slot) + '"\n')
-			f.write('KEYWORDS="' + str(keywords) + '"\n')
-			f.write('IUSE="' + str(iuse) + '"\n')
-			f.write('DEPEND="' + str(depend) + '"\n')
-			if rdepend is not None:
-				f.write('RDEPEND="' + str(rdepend) + '"\n')
-			if pdepend is not None:
-				f.write('PDEPEND="' + str(pdepend) + '"\n')
-			if required_use is not None:
-				f.write('REQUIRED_USE="' + str(required_use) + '"\n')
-			f.close()
-
-	def _create_ebuild_manifests(self, ebuilds):
-		tmpsettings = config(clone=self.settings)
-		tmpsettings['PORTAGE_QUIET'] = '1'
-		for cpv in ebuilds:
-			a = Atom("=" + cpv, allow_repo=True)
-			repo = a.repo
-			if repo is None:
-				repo = "test_repo"
-
-			repo_dir = self._get_repo_dir(repo)
-			ebuild_dir = os.path.join(repo_dir, a.cp)
-			ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
-
-			portdb = self.trees[self.root]["porttree"].dbapi
-			tmpsettings['O'] = ebuild_dir
-			if not digestgen(mysettings=tmpsettings, myportdb=portdb):
-				raise AssertionError('digest creation failed for %s' % ebuild_path)
-
-	def _create_installed(self, installed):
-		for cpv in installed:
-			a = Atom("=" + cpv, allow_repo=True)
-			repo = a.repo
-			if repo is None:
-				repo = "test_repo"
-
-			vdb_pkg_dir = os.path.join(self.vdbdir, a.cpv)
-			try:
-				os.makedirs(vdb_pkg_dir)
-			except os.error:
-				pass
-
-			metadata = installed[cpv].copy()
-			eapi = metadata.pop("EAPI", 0)
-			lic = metadata.pop("LICENSE", "")
-			properties = metadata.pop("PROPERTIES", "")
-			slot = metadata.pop("SLOT", 0)
-			keywords = metadata.pop("KEYWORDS", "~x86")
-			iuse = metadata.pop("IUSE", "")
-			use = metadata.pop("USE", "")
-			depend = metadata.pop("DEPEND", "")
-			rdepend = metadata.pop("RDEPEND", None)
-			pdepend = metadata.pop("PDEPEND", None)
-			required_use = metadata.pop("REQUIRED_USE", None)
-
-			if metadata:
-				raise ValueError("metadata of installed '%s' contains unknown keys: %s" % (cpv, metadata.keys()))
-
-			def write_key(key, value):
-				f = open(os.path.join(vdb_pkg_dir, key), "w")
-				f.write(str(value) + "\n")
-				f.close()
-			
-			write_key("EAPI", eapi)
-			write_key("LICENSE", lic)
-			write_key("PROPERTIES", properties)
-			write_key("SLOT", slot)
-			write_key("LICENSE", lic)
-			write_key("PROPERTIES", properties)
-			write_key("repository", repo)
-			write_key("KEYWORDS", keywords)
-			write_key("IUSE", iuse)
-			write_key("USE", use)
-			write_key("DEPEND", depend)
-			if rdepend is not None:
-				write_key("RDEPEND", rdepend)
-			if pdepend is not None:
-				write_key("PDEPEND", pdepend)
-			if required_use is not None:
-				write_key("REQUIRED_USE", required_use)
-
-	def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):
-
-		for repo in self.repo_dirs:
-			repo_dir = self._get_repo_dir(repo)
-			profile_dir = os.path.join(self._get_repo_dir(repo), "profiles")
-
-			#Create $REPO/profiles/categories
-			categories = set()
-			for cpv in ebuilds:
-				ebuilds_repo = Atom("="+cpv, allow_repo=True).repo
-				if ebuilds_repo is None:
-					ebuilds_repo = "test_repo"
-				if ebuilds_repo == repo:
-					categories.add(catsplit(cpv)[0])
-
-			categories_file = os.path.join(profile_dir, "categories")
-			f = open(categories_file, "w")
-			for cat in categories:
-				f.write(cat + "\n")
-			f.close()
-			
-			#Create $REPO/profiles/license_groups
-			license_file = os.path.join(profile_dir, "license_groups")
-			f = open(license_file, "w")
-			f.write("EULA TEST\n")
-			f.close()
-
-			repo_config = repo_configs.get(repo) 
-			if repo_config:
-				for config_file, lines in repo_config.items():
-					if config_file not in self.config_files:
-						raise ValueError("Unknown config file: '%s'" % config_file)
-		
-					file_name = os.path.join(profile_dir, config_file)
-					f = open(file_name, "w")
-					for line in lines:
-						f.write("%s\n" % line)
-					f.close()
-
-			#Create $repo_dir/eclass (digesting the ebuilds fails if it is missing)
-			os.makedirs(os.path.join(repo_dir, "eclass"))
-
-			if repo == "test_repo":
-				#Create a minimal profile in /usr/portage
-				sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
-				os.makedirs(sub_profile_dir)
-
-				eapi_file = os.path.join(sub_profile_dir, "eapi")
-				f = open(eapi_file, "w")
-				f.write("0\n")
-				f.close()
-
-				make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
-				f = open(make_defaults_file, "w")
-				f.write("ARCH=\"x86\"\n")
-				f.write("ACCEPT_KEYWORDS=\"x86\"\n")
-				f.close()
-
-				use_force_file = os.path.join(sub_profile_dir, "use.force")
-				f = open(use_force_file, "w")
-				f.write("x86\n")
-				f.close()
-
-				if profile:
-					for config_file, lines in profile.items():
-						if config_file not in self.config_files:
-							raise ValueError("Unknown config file: '%s'" % config_file)
-
-						file_name = os.path.join(sub_profile_dir, config_file)
-						f = open(file_name, "w")
-						for line in lines:
-							f.write("%s\n" % line)
-						f.close()
-
-				#Create profile symlink
-				os.makedirs(os.path.join(self.eroot, "etc"))
-				os.symlink(sub_profile_dir, os.path.join(self.eroot, "etc", "make.profile"))
-
-		user_config_dir = os.path.join(self.eroot, "etc", "portage")
-
-		try:
-			os.makedirs(user_config_dir)
-		except os.error:
-			pass
-
-		repos_conf_file = os.path.join(user_config_dir, "repos.conf")		
-		f = open(repos_conf_file, "w")
-		priority = 0
-		for repo in sorted(self.repo_dirs.keys()):
-			f.write("[%s]\n" % repo)
-			f.write("LOCATION=%s\n" % self.repo_dirs[repo])
-			if repo == "test_repo":
-				f.write("PRIORITY=%s\n" % -1000)
-			else:
-				f.write("PRIORITY=%s\n" % priority)
-				priority += 1
-		f.close()
-
-		for config_file, lines in user_config.items():
-			if config_file not in self.config_files:
-				raise ValueError("Unknown config file: '%s'" % config_file)
-
-			file_name = os.path.join(user_config_dir, config_file)
-			f = open(file_name, "w")
-			for line in lines:
-				f.write("%s\n" % line)
-			f.close()
-
-		#Create /usr/share/portage/config/sets/portage.conf
-		default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")
-		
-		try:
-			os.makedirs(default_sets_conf_dir)
-		except os.error:
-			pass
-
-		provided_sets_portage_conf = \
-			os.path.join(PORTAGE_BASE_PATH, "cnf/sets/portage.conf")
-		os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))
-
-		set_config_dir = os.path.join(user_config_dir, "sets")
-
-		try:
-			os.makedirs(set_config_dir)
-		except os.error:
-			pass
-
-		for sets_file, lines in sets.items():
-			file_name = os.path.join(set_config_dir, sets_file)
-			f = open(file_name, "w")
-			for line in lines:
-				f.write("%s\n" % line)
-			f.close()
-
-	def _create_world(self, world):
-		#Create /var/lib/portage/world
-		var_lib_portage = os.path.join(self.eroot, "var", "lib", "portage")
-		os.makedirs(var_lib_portage)
-
-		world_file = os.path.join(var_lib_portage, "world")
-
-		f = open(world_file, "w")
-		for atom in world:
-			f.write("%s\n" % atom)
-		f.close()
-
-	def _load_config(self):
-		portdir_overlay = []
-		for repo_name in sorted(self.repo_dirs):
-			path = self.repo_dirs[repo_name]
-			if path != self.portdir:
-				portdir_overlay.append(path)
-
-		env = {
-			"ACCEPT_KEYWORDS": "x86",
-			"PORTDIR": self.portdir,
-			"PORTDIR_OVERLAY": " ".join(portdir_overlay),
-			'PORTAGE_TMPDIR'       : os.path.join(self.eroot, 'var/tmp'),
-		}
-
-		# Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
-		# need to be inherited by ebuild subprocesses.
-		if 'PORTAGE_USERNAME' in os.environ:
-			env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
-		if 'PORTAGE_GRPNAME' in os.environ:
-			env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
-
-		settings = config(_eprefix=self.eprefix, env=env)
-		settings.lock()
-
-		trees = {
-			self.root: {
-					"vartree": vartree(settings=settings),
-					"porttree": portagetree(self.root, settings=settings),
-					"bintree": binarytree(self.root,
-						os.path.join(self.eroot, "usr/portage/packages"),
-						settings=settings)
-				}
-			}
-
-		for root, root_trees in trees.items():
-			settings = root_trees["vartree"].settings
-			settings._init_dirs()
-			setconfig = load_default_config(settings, root_trees)
-			root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
-		
-		return settings, trees
-
-	def run(self, atoms, options={}, action=None):
-		options = options.copy()
-		options["--pretend"] = True
-		if self.debug:
-			options["--debug"] = True
-
-		global_noiselimit = portage.util.noiselimit
-		global_emergelog_disable = _emerge.emergelog._disable
-		try:
-
-			if not self.debug:
-				portage.util.noiselimit = -2
-			_emerge.emergelog._disable = True
-
-			if options.get("--depclean"):
-				rval, cleanlist, ordered, req_pkg_count = \
-					calc_depclean(self.settings, self.trees, None,
-					options, "depclean", InternalPackageSet(initial_atoms=atoms, allow_wildcard=True), None)
-				result = ResolverPlaygroundDepcleanResult( \
-					atoms, rval, cleanlist, ordered, req_pkg_count)
-			else:
-				params = create_depgraph_params(options, action)
-				success, depgraph, favorites = backtrack_depgraph(
-					self.settings, self.trees, options, params, action, atoms, None)
-				depgraph._show_merge_list()
-				depgraph.display_problems()
-				result = ResolverPlaygroundResult(atoms, success, depgraph, favorites)
-		finally:
-			portage.util.noiselimit = global_noiselimit
-			_emerge.emergelog._disable = global_emergelog_disable
-
-		return result
-
-	def run_TestCase(self, test_case):
-		if not isinstance(test_case, ResolverPlaygroundTestCase):
-			raise TypeError("ResolverPlayground needs a ResolverPlaygroundTestCase")
-		for atoms in test_case.requests:
-			result = self.run(atoms, test_case.options, test_case.action)
-			if not test_case.compare_with_result(result):
-				return
-
-	def cleanup(self):
-		portdb = self.trees[self.root]["porttree"].dbapi
-		portdb.close_caches()
-		portage.dbapi.porttree.portdbapi.portdbapi_instances.remove(portdb)
-		if self.debug:
-			print("\nEROOT=%s" % self.eroot)
-		else:
-			shutil.rmtree(self.eroot)
-
-class ResolverPlaygroundTestCase(object):
-
-	def __init__(self, request, **kwargs):
-		self.all_permutations = kwargs.pop("all_permutations", False)
-		self.ignore_mergelist_order = kwargs.pop("ignore_mergelist_order", False)
-		self.ambiguous_merge_order = kwargs.pop("ambiguous_merge_order", False)
-		self.check_repo_names = kwargs.pop("check_repo_names", False)
-		self.merge_order_assertions = kwargs.pop("merge_order_assertions", False)
-
-		if self.all_permutations:
-			self.requests = list(permutations(request))
-		else:
-			self.requests = [request]
-
-		self.options = kwargs.pop("options", {})
-		self.action = kwargs.pop("action", None)
-		self.test_success = True
-		self.fail_msg = None
-		self._checks = kwargs.copy()
-
-	def compare_with_result(self, result):
-		checks = dict.fromkeys(result.checks)
-		for key, value in self._checks.items():
-			if key not in checks:
-				raise KeyError("Not an available check: '%s'" % key)
-			checks[key] = value
-
-		fail_msgs = []
-		for key, value in checks.items():
-			got = getattr(result, key)
-			expected = value
-
-			if key in result.optional_checks and expected is None:
-				continue
-
-			if key == "mergelist":
-				if not self.check_repo_names:
-					#Strip repo names if we don't check them
-					if got:
-						new_got = []
-						for cpv in got:
-							if cpv[:1] == "!":
-								new_got.append(cpv)
-								continue
-							a = Atom("="+cpv, allow_repo=True)
-							new_got.append(a.cpv)
-						got = new_got
-					if expected:
-						new_expected = []
-						for obj in expected:
-							if isinstance(obj, basestring):
-								if obj[:1] == "!":
-									new_expected.append(obj)
-									continue
-								a = Atom("="+obj, allow_repo=True)
-								new_expected.append(a.cpv)
-								continue
-							new_expected.append(set())
-							for cpv in obj:
-								if cpv[:1] != "!":
-									cpv = Atom("="+cpv, allow_repo=True).cpv
-								new_expected[-1].add(cpv)
-						expected = new_expected
-				if self.ignore_mergelist_order and got is not None:
-					got = set(got)
-					expected = set(expected)
-
-				if self.ambiguous_merge_order and got:
-					expected_stack = list(reversed(expected))
-					got_stack = list(reversed(got))
-					new_expected = []
-					match = True
-					while got_stack and expected_stack:
-						got_token = got_stack.pop()
-						expected_obj = expected_stack.pop()
-						if isinstance(expected_obj, basestring):
-							new_expected.append(expected_obj)
-							if got_token == expected_obj:
-								continue
-							# result doesn't match, so stop early
-							match = False
-							break
-						expected_obj = set(expected_obj)
-						try:
-							expected_obj.remove(got_token)
-						except KeyError:
-							# result doesn't match, so stop early
-							match = False
-							break
-						new_expected.append(got_token)
-						while got_stack and expected_obj:
-							got_token = got_stack.pop()
-							try:
-								expected_obj.remove(got_token)
-							except KeyError:
-								match = False
-								break
-							new_expected.append(got_token)
-						if not match:
-							# result doesn't match, so stop early
-							break
-						if expected_obj:
-							# result does not match, so stop early
-							match = False
-							new_expected.append(tuple(expected_obj))
-							break
-					if expected_stack:
-						# result does not match, add leftovers to new_expected
-						match = False
-						expected_stack.reverse()
-						new_expected.extend(expected_stack)
-					expected = new_expected
-
-					if match and self.merge_order_assertions:
-						for node1, node2 in self.merge_order_assertions:
-							if not (got.index(node1) < got.index(node2)):
-								fail_msgs.append("atoms: (" + \
-									", ".join(result.atoms) + "), key: " + \
-									("merge_order_assertions, expected: %s" % \
-									str((node1, node2))) + \
-									", got: " + str(got))
-
-			elif key in ("unstable_keywords", "needed_p_mask_changes") and expected is not None:
-				expected = set(expected)
-
-			if got != expected:
-				fail_msgs.append("atoms: (" + ", ".join(result.atoms) + "), key: " + \
-					key + ", expected: " + str(expected) + ", got: " + str(got))
-		if fail_msgs:
-			self.test_success = False
-			self.fail_msg = "\n".join(fail_msgs)
-			return False
-		return True
-
-class ResolverPlaygroundResult(object):
-
-	checks = (
-		"success", "mergelist", "use_changes", "license_changes", "unstable_keywords", "slot_collision_solutions",
-		"circular_dependency_solutions", "needed_p_mask_changes",
-		)
-	optional_checks = (
-		)
-
-	def __init__(self, atoms, success, mydepgraph, favorites):
-		self.atoms = atoms
-		self.success = success
-		self.depgraph = mydepgraph
-		self.favorites = favorites
-		self.mergelist = None
-		self.use_changes = None
-		self.license_changes = None
-		self.unstable_keywords = None
-		self.needed_p_mask_changes = None
-		self.slot_collision_solutions = None
-		self.circular_dependency_solutions = None
-
-		if self.depgraph._dynamic_config._serialized_tasks_cache is not None:
-			self.mergelist = []
-			for x in self.depgraph._dynamic_config._serialized_tasks_cache:
-				if isinstance(x, Blocker):
-					self.mergelist.append(x.atom)
-				else:
-					repo_str = ""
-					if x.metadata["repository"] != "test_repo":
-						repo_str = _repo_separator + x.metadata["repository"]
-					self.mergelist.append(x.cpv + repo_str)
-
-		if self.depgraph._dynamic_config._needed_use_config_changes:
-			self.use_changes = {}
-			for pkg, needed_use_config_changes in \
-				self.depgraph._dynamic_config._needed_use_config_changes.items():
-				new_use, changes = needed_use_config_changes
-				self.use_changes[pkg.cpv] = changes
-
-		if self.depgraph._dynamic_config._needed_unstable_keywords:
-			self.unstable_keywords = set()
-			for pkg in self.depgraph._dynamic_config._needed_unstable_keywords:
-				self.unstable_keywords.add(pkg.cpv)
-
-		if self.depgraph._dynamic_config._needed_p_mask_changes:
-			self.needed_p_mask_changes = set()
-			for pkg in self.depgraph._dynamic_config._needed_p_mask_changes:
-				self.needed_p_mask_changes.add(pkg.cpv)
-
-		if self.depgraph._dynamic_config._needed_license_changes:
-			self.license_changes = {}
-			for pkg, missing_licenses in self.depgraph._dynamic_config._needed_license_changes.items():
-				self.license_changes[pkg.cpv] = missing_licenses
-
-		if self.depgraph._dynamic_config._slot_conflict_handler is not None:
-			self.slot_collision_solutions = []
-			handler = self.depgraph._dynamic_config._slot_conflict_handler
-
-			for change in handler.changes:
-				new_change = {}
-				for pkg in change:
-					new_change[pkg.cpv] = change[pkg]
-				self.slot_collision_solutions.append(new_change)
-
-		if self.depgraph._dynamic_config._circular_dependency_handler is not None:
-			handler = self.depgraph._dynamic_config._circular_dependency_handler
-			sol = handler.solutions
-			self.circular_dependency_solutions = dict(zip([x.cpv for x in sol.keys()], sol.values()))
-
-class ResolverPlaygroundDepcleanResult(object):
-
-	checks = (
-		"success", "cleanlist", "ordered", "req_pkg_count",
-		)
-	optional_checks = (
-		"ordered", "req_pkg_count",
-		)
-
-	def __init__(self, atoms, rval, cleanlist, ordered, req_pkg_count):
-		self.atoms = atoms
-		self.success = rval == 0
-		self.cleanlist = cleanlist
-		self.ordered = ordered
-		self.req_pkg_count = req_pkg_count

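Typical usage of the playground, mirroring the test files that follow:

    playground = ResolverPlayground(ebuilds={
        "dev-libs/A-1": {},
        "dev-libs/B-1": {"DEPEND": "dev-libs/A"},
    })
    try:
        test_case = ResolverPlaygroundTestCase(
            ["dev-libs/B"],
            mergelist=["dev-libs/A-1", "dev-libs/B-1"],
            success=True)
        playground.run_TestCase(test_case)
        assert test_case.test_success, test_case.fail_msg
    finally:
        playground.cleanup()
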
diff --git a/portage_with_autodep/pym/portage/tests/resolver/__test__ b/portage_with_autodep/pym/portage/tests/resolver/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_autounmask.py b/portage_with_autodep/pym/portage/tests/resolver/test_autounmask.py
deleted file mode 100644
index 54c435f..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_autounmask.py
+++ /dev/null
@@ -1,326 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class AutounmaskTestCase(TestCase):
-
-	def testAutounmask(self):
-
-		ebuilds = {
-			#ebuilds to test use changes
-			"dev-libs/A-1": { "SLOT": 1, "DEPEND": "dev-libs/B[foo]", "EAPI": 2}, 
-			"dev-libs/A-2": { "SLOT": 2, "DEPEND": "dev-libs/B[bar]", "EAPI": 2}, 
-			"dev-libs/B-1": { "DEPEND": "foo? ( dev-libs/C ) bar? ( dev-libs/D )", "IUSE": "foo bar"}, 
-			"dev-libs/C-1": {},
-			"dev-libs/D-1": {},
-
-			#ebuilds to test if we allow changing of masked or forced flags
-			"dev-libs/E-1": { "SLOT": 1, "DEPEND": "dev-libs/F[masked-flag]", "EAPI": 2},
-			"dev-libs/E-2": { "SLOT": 2, "DEPEND": "dev-libs/G[-forced-flag]", "EAPI": 2},
-			"dev-libs/F-1": { "IUSE": "masked-flag"},
-			"dev-libs/G-1": { "IUSE": "forced-flag"},
-
-			#ebuilds to test keyword changes
-			"app-misc/Z-1": { "KEYWORDS": "~x86", "DEPEND": "app-misc/Y" },
-			"app-misc/Y-1": { "KEYWORDS": "~x86" },
-			"app-misc/W-1": {},
-			"app-misc/W-2": { "KEYWORDS": "~x86" },
-			"app-misc/V-1": { "KEYWORDS": "~x86", "DEPEND": ">=app-misc/W-2"},
-
-			#ebuilds to test mask and keyword changes
-			"app-text/A-1": {},
-			"app-text/B-1": { "KEYWORDS": "~x86" },
-			"app-text/C-1": { "KEYWORDS": "" },
-			"app-text/D-1": { "KEYWORDS": "~x86" },
-			"app-text/D-2": { "KEYWORDS": "" },
-
-			#ebuilds for mixed test for || dep handling
-			"sci-libs/K-1": { "DEPEND": " || ( sci-libs/L[bar] || ( sci-libs/M sci-libs/P ) )", "EAPI": 2},
-			"sci-libs/K-2": { "DEPEND": " || ( sci-libs/L[bar] || ( sci-libs/P sci-libs/M ) )", "EAPI": 2},
-			"sci-libs/K-3": { "DEPEND": " || ( sci-libs/M || ( sci-libs/L[bar] sci-libs/P ) )", "EAPI": 2},
-			"sci-libs/K-4": { "DEPEND": " || ( sci-libs/M || ( sci-libs/P sci-libs/L[bar] ) )", "EAPI": 2},
-			"sci-libs/K-5": { "DEPEND": " || ( sci-libs/P || ( sci-libs/L[bar] sci-libs/M ) )", "EAPI": 2},
-			"sci-libs/K-6": { "DEPEND": " || ( sci-libs/P || ( sci-libs/M sci-libs/L[bar] ) )", "EAPI": 2},
-			"sci-libs/K-7": { "DEPEND": " || ( sci-libs/M sci-libs/L[bar] )", "EAPI": 2},
-			"sci-libs/K-8": { "DEPEND": " || ( sci-libs/L[bar] sci-libs/M )", "EAPI": 2},
-
-			"sci-libs/L-1": { "IUSE": "bar" },
-			"sci-libs/M-1": { "KEYWORDS": "~x86" },
-			"sci-libs/P-1": { },
-
-			#ebuilds to test these nice "required by cat/pkg[foo]" messages
-			"dev-util/Q-1": { "DEPEND": "foo? ( dev-util/R[bar] )", "IUSE": "+foo", "EAPI": 2 },
-			"dev-util/Q-2": { "RDEPEND": "!foo? ( dev-util/R[bar] )", "IUSE": "foo", "EAPI": 2 },
-			"dev-util/R-1": { "IUSE": "bar" },
-
-			#ebuilds to test interaction with REQUIRED_USE
-			"app-portage/A-1": { "DEPEND": "app-portage/B[foo]", "EAPI": 2 }, 
-			"app-portage/A-2": { "DEPEND": "app-portage/B[foo=]", "IUSE": "+foo", "REQUIRED_USE": "foo", "EAPI": "4" }, 
-
-			"app-portage/B-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" }, 
-			"app-portage/C-1": { "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
-			}
-
-		test_cases = (
-				#Test USE changes.
-				#The simple case.
-
-				ResolverPlaygroundTestCase(
-					["dev-libs/A:1"],
-					options = {"--autounmask": "n"},
-					success = False),
-				ResolverPlaygroundTestCase(
-					["dev-libs/A:1"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
-					use_changes = { "dev-libs/B-1": {"foo": True} } ),
-
-				#Make sure we restart if needed.
-				ResolverPlaygroundTestCase(
-					["dev-libs/A:1", "dev-libs/B"],
-					options = {"--autounmask": True},
-					all_permutations = True,
-					success = False,
-					mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
-					use_changes = { "dev-libs/B-1": {"foo": True} } ),
-				ResolverPlaygroundTestCase(
-					["dev-libs/A:1", "dev-libs/A:2", "dev-libs/B"],
-					options = {"--autounmask": True},
-					all_permutations = True,
-					success = False,
-					mergelist = ["dev-libs/D-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", "dev-libs/A-2"],
-					ignore_mergelist_order = True,
-					use_changes = { "dev-libs/B-1": {"foo": True, "bar": True} } ),
-
-				#Test keywording.
-				#The simple case.
-
-				ResolverPlaygroundTestCase(
-					["app-misc/Z"],
-					options = {"--autounmask": "n"},
-					success = False),
-				ResolverPlaygroundTestCase(
-					["app-misc/Z"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["app-misc/Y-1", "app-misc/Z-1"],
-					unstable_keywords = ["app-misc/Y-1", "app-misc/Z-1"]),
-
-				#Make sure that the backtracking for slot conflicts handles our mess.
-
-				ResolverPlaygroundTestCase(
-					["=app-misc/V-1", "app-misc/W"],
-					options = {"--autounmask": True},
-					all_permutations = True,
-					success = False,
-					mergelist = ["app-misc/W-2", "app-misc/V-1"],
-					unstable_keywords = ["app-misc/W-2", "app-misc/V-1"]),
-
-				#Mixed testing
-				#Make sure we don't change use for something in a || dep if there is another choice
-				#that needs no change.
-				
-				ResolverPlaygroundTestCase(
-					["=sci-libs/K-1"],
-					options = {"--autounmask": True},
-					success = True,
-					mergelist = ["sci-libs/P-1", "sci-libs/K-1"]),
-				ResolverPlaygroundTestCase(
-					["=sci-libs/K-2"],
-					options = {"--autounmask": True},
-					success = True,
-					mergelist = ["sci-libs/P-1", "sci-libs/K-2"]),
-				ResolverPlaygroundTestCase(
-					["=sci-libs/K-3"],
-					options = {"--autounmask": True},
-					success = True,
-					mergelist = ["sci-libs/P-1", "sci-libs/K-3"]),
-				ResolverPlaygroundTestCase(
-					["=sci-libs/K-4"],
-					options = {"--autounmask": True},
-					success = True,
-					mergelist = ["sci-libs/P-1", "sci-libs/K-4"]),
-				ResolverPlaygroundTestCase(
-					["=sci-libs/K-5"],
-					options = {"--autounmask": True},
-					success = True,
-					mergelist = ["sci-libs/P-1", "sci-libs/K-5"]),
-				ResolverPlaygroundTestCase(
-					["=sci-libs/K-6"],
-					options = {"--autounmask": True},
-					success = True,
-					mergelist = ["sci-libs/P-1", "sci-libs/K-6"]),
-
-				#Make sure we prefer use changes over keyword changes.
-				ResolverPlaygroundTestCase(
-					["=sci-libs/K-7"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["sci-libs/L-1", "sci-libs/K-7"],
-					use_changes = { "sci-libs/L-1": { "bar": True } }),
-				ResolverPlaygroundTestCase(
-					["=sci-libs/K-8"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["sci-libs/L-1", "sci-libs/K-8"],
-					use_changes = { "sci-libs/L-1": { "bar": True } }),
-
-				#Test these nice "required by cat/pkg[foo]" messages.
-				ResolverPlaygroundTestCase(
-					["=dev-util/Q-1"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["dev-util/R-1", "dev-util/Q-1"],
-					use_changes = { "dev-util/R-1": { "bar": True } }),
-				ResolverPlaygroundTestCase(
-					["=dev-util/Q-2"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["dev-util/R-1", "dev-util/Q-2"],
-					use_changes = { "dev-util/R-1": { "bar": True } }),
-
-				#Test interaction with REQUIRED_USE.
-				ResolverPlaygroundTestCase(
-					["=app-portage/A-1"],
-					options = { "--autounmask": True },
-					use_changes = None,
-					success = False),
-				ResolverPlaygroundTestCase(
-					["=app-portage/A-2"],
-					options = { "--autounmask": True },
-					use_changes = None,
-					success = False),
-				ResolverPlaygroundTestCase(
-					["=app-portage/C-1"],
-					options = { "--autounmask": True },
-					use_changes = None,
-					success = False),
-
-				#Make sure we don't change masked/forced flags.
-				ResolverPlaygroundTestCase(
-					["dev-libs/E:1"],
-					options = {"--autounmask": True},
-					use_changes = None,
-					success = False),
-				ResolverPlaygroundTestCase(
-					["dev-libs/E:2"],
-					options = {"--autounmask": True},
-					use_changes = None,
-					success = False),
-
-				#Test mask and keyword changes.
-				ResolverPlaygroundTestCase(
-					["app-text/A"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["app-text/A-1"],
-					needed_p_mask_changes = ["app-text/A-1"]),
-				ResolverPlaygroundTestCase(
-					["app-text/B"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["app-text/B-1"],
-					unstable_keywords = ["app-text/B-1"],
-					needed_p_mask_changes = ["app-text/B-1"]),
-				ResolverPlaygroundTestCase(
-					["app-text/C"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["app-text/C-1"],
-					unstable_keywords = ["app-text/C-1"],
-					needed_p_mask_changes = ["app-text/C-1"]),
-				#Make sure unstable keyword is preferred over missing keyword
-				ResolverPlaygroundTestCase(
-					["app-text/D"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["app-text/D-1"],
-					unstable_keywords = ["app-text/D-1"]),
-				#Test missing keyword
-				ResolverPlaygroundTestCase(
-					["=app-text/D-2"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["app-text/D-2"],
-					unstable_keywords = ["app-text/D-2"])
-			)
-
-		profile = {
-			"use.mask":
-				(
-					"masked-flag",
-				),
-			"use.force":
-				(
-					"forced-flag",
-				),
-			"package.mask":
-				(
-					"app-text/A",
-					"app-text/B",
-					"app-text/C",
-				),
-		}
-
-		playground = ResolverPlayground(ebuilds=ebuilds, profile=profile)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
-
-	def testAutounmaskForLicenses(self):
-
-		ebuilds = {
-			"dev-libs/A-1": { "LICENSE": "TEST" },
-			"dev-libs/B-1": { "LICENSE": "TEST", "IUSE": "foo", "KEYWORDS": "~x86"},
-			"dev-libs/C-1": { "DEPEND": "dev-libs/B[foo]", "EAPI": 2 },
-			
-			"dev-libs/D-1": { "DEPEND": "dev-libs/E dev-libs/F", "LICENSE": "TEST" },
-			"dev-libs/E-1": { "LICENSE": "TEST" },
-			"dev-libs/E-2": { "LICENSE": "TEST" },
-			"dev-libs/F-1": { "DEPEND": "=dev-libs/E-1", "LICENSE": "TEST" },
-			}
-
-		test_cases = (
-				ResolverPlaygroundTestCase(
-					["=dev-libs/A-1"],
-					options = {"--autounmask": 'n'},
-					success = False),
-				ResolverPlaygroundTestCase(
-					["=dev-libs/A-1"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["dev-libs/A-1"],
-					license_changes = { "dev-libs/A-1": set(["TEST"]) }),
-
-				#Test license+keyword+use change at once.
-				ResolverPlaygroundTestCase(
-					["=dev-libs/C-1"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["dev-libs/B-1", "dev-libs/C-1"],
-					license_changes = { "dev-libs/B-1": set(["TEST"]) },
-					unstable_keywords = ["dev-libs/B-1"],
-					use_changes = { "dev-libs/B-1": { "foo": True } }),
-
-				#Test license with backtracking.
-				ResolverPlaygroundTestCase(
-					["=dev-libs/D-1"],
-					options = {"--autounmask": True},
-					success = False,
-					mergelist = ["dev-libs/E-1", "dev-libs/F-1", "dev-libs/D-1"],
-					license_changes = { "dev-libs/D-1": set(["TEST"]), "dev-libs/E-1": set(["TEST"]), "dev-libs/E-2": set(["TEST"]), "dev-libs/F-1": set(["TEST"]) }),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_backtracking.py b/portage_with_autodep/pym/portage/tests/resolver/test_backtracking.py
deleted file mode 100644
index fc49306..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_backtracking.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class BacktrackingTestCase(TestCase):
-
-	def testBacktracking(self):
-		ebuilds = {
-			"dev-libs/A-1": {},
-			"dev-libs/A-2": {},
-			"dev-libs/B-1": { "DEPEND": "dev-libs/A" },
-			}
-
-		test_cases = (
-				ResolverPlaygroundTestCase(
-					["=dev-libs/A-1", "dev-libs/B"],
-					all_permutations = True,
-					mergelist = ["dev-libs/A-1", "dev-libs/B-1"],
-					success = True),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds)
-
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
-
-
-	def testHittingTheBacktrackLimit(self):
-		ebuilds = {
-			"dev-libs/A-1": {},
-			"dev-libs/A-2": {},
-			"dev-libs/B-1": {},
-			"dev-libs/B-2": {},
-			"dev-libs/C-1": { "DEPEND": "dev-libs/A dev-libs/B" },
-			"dev-libs/D-1": { "DEPEND": "=dev-libs/A-1 =dev-libs/B-1" },
-			}
-
-		test_cases = (
-				ResolverPlaygroundTestCase(
-					["dev-libs/C", "dev-libs/D"],
-					all_permutations = True,
-					mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
-					ignore_mergelist_order = True,
-					success = True),
-				#This one hits the backtrack limit. Be aware that this depends on the argument order.
-				ResolverPlaygroundTestCase(
-					["dev-libs/D", "dev-libs/C"],
-					options = { "--backtrack": 1 },
-					mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/A-2", "dev-libs/B-2", "dev-libs/C-1", "dev-libs/D-1"],
-					ignore_mergelist_order = True,
-					slot_collision_solutions = [],
-					success = False),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds)
-
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
-
-
-	def testBacktrackingGoodVersionFirst(self):
-		"""
-		When backtracking due to slot conflicts, we masked the version that has been pulled
-		in first. This is not always a good idea. Mask the highest version instead.
-		"""
-
-		ebuilds = {
-			"dev-libs/A-1": { "DEPEND": "=dev-libs/C-1 dev-libs/B" },
-			"dev-libs/B-1": { "DEPEND": "=dev-libs/C-1" },
-			"dev-libs/B-2": { "DEPEND": "=dev-libs/C-2" },
-			"dev-libs/C-1": { },
-			"dev-libs/C-2": { },
-			}
-
-		test_cases = (
-				ResolverPlaygroundTestCase(
-					["dev-libs/A"],
-					mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", ],
-					success = True),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds)
-
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
-
-	def testBacktrackWithoutUpdates(self):
-		"""
-		If --update is not given we might have to mask the old installed version later.
-		"""
-
-		ebuilds = {
-			"dev-libs/A-1": { "DEPEND": "dev-libs/Z" },
-			"dev-libs/B-1": { "DEPEND": ">=dev-libs/Z-2" },
-			"dev-libs/Z-1": { },
-			"dev-libs/Z-2": { },
-			}
-
-		installed = {
-			"dev-libs/Z-1": { "USE": "" },
-			}
-
-		test_cases = (
-				ResolverPlaygroundTestCase(
-					["dev-libs/B", "dev-libs/A"],
-					all_permutations = True,
-					mergelist = ["dev-libs/Z-2", "dev-libs/B-1", "dev-libs/A-1", ],
-					ignore_mergelist_order = True,
-					success = True),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
-
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
-
-	def testBacktrackMissedUpdates(self):
-		"""
-		An update is missed due to a dependency on an older version.
-		"""
-
-		ebuilds = {
-			"dev-libs/A-1": { },
-			"dev-libs/A-2": { },
-			"dev-libs/B-1": { "RDEPEND": "<=dev-libs/A-1" },
-			}
-
-		installed = {
-			"dev-libs/A-1": { "USE": "" },
-			"dev-libs/B-1": { "USE": "", "RDEPEND": "<=dev-libs/A-1" },
-			}
-
-		options = {'--update' : True, '--deep' : True, '--selective' : True}
-
-		test_cases = (
-				ResolverPlaygroundTestCase(
-					["dev-libs/A", "dev-libs/B"],
-					options = options,
-					all_permutations = True,
-					mergelist = [],
-					success = True),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
-
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

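For reference, the backtracking behaviour these deleted tests exercised can be
reproduced with a short standalone script. This is a minimal sketch, assuming
a portage source checkout that provides the ResolverPlayground helpers used
above; the expected mergelist simply mirrors the pattern of the deleted cases.

    from portage.tests.resolver.ResolverPlayground import (
        ResolverPlayground, ResolverPlaygroundTestCase)

    # dev-libs/C accepts any dev-libs/A, dev-libs/D pins =dev-libs/A-1; if C is
    # resolved first the resolver picks A-2, hits a slot conflict on D, and
    # must backtrack to A-1.
    ebuilds = {
        "dev-libs/A-1": {},
        "dev-libs/A-2": {},
        "dev-libs/C-1": {"DEPEND": "dev-libs/A"},
        "dev-libs/D-1": {"DEPEND": "=dev-libs/A-1"},
    }

    case = ResolverPlaygroundTestCase(
        ["dev-libs/C", "dev-libs/D"],
        all_permutations=True,        # try both argument orders
        ignore_mergelist_order=True,  # only the set of merged packages matters
        mergelist=["dev-libs/A-1", "dev-libs/C-1", "dev-libs/D-1"],
        success=True)

    playground = ResolverPlayground(ebuilds=ebuilds)
    try:
        playground.run_TestCase(case)
        assert case.test_success, case.fail_msg
    finally:
        playground.cleanup()
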
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_circular_dependencies.py b/portage_with_autodep/pym/portage/tests/resolver/test_circular_dependencies.py
deleted file mode 100644
index f8331ac..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_circular_dependencies.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class CircularDependencyTestCase(TestCase):
-
-	#TODO:
-	#	use config change by autounmask
-	#	conflict on parent's parent
-	#	difference in RDEPEND and DEPEND
-	#	is there any priority other than buildtime and runtime?
-	#	play with use.{mask,force}
-	#	play with REQUIRED_USE
-	
-
-	def testCircularDependency(self):
-
-		ebuilds = {
-			"dev-libs/Z-1": { "DEPEND": "foo? ( !bar? ( dev-libs/Y ) )", "IUSE": "+foo bar", "EAPI": 1 }, 
-			"dev-libs/Z-2": { "DEPEND": "foo? ( dev-libs/Y ) !bar? ( dev-libs/Y )", "IUSE": "+foo bar", "EAPI": 1 }, 
-			"dev-libs/Z-3": { "DEPEND": "foo? ( !bar? ( dev-libs/Y ) ) foo? ( dev-libs/Y ) !bar? ( dev-libs/Y )", "IUSE": "+foo bar", "EAPI": 1 }, 
-			"dev-libs/Y-1": { "DEPEND": "dev-libs/Z" },
-			"dev-libs/W-1": { "DEPEND": "dev-libs/Z[foo] dev-libs/Y", "EAPI": 2 },
-			"dev-libs/W-2": { "DEPEND": "dev-libs/Z[foo=] dev-libs/Y", "IUSE": "+foo", "EAPI": 2 },
-			"dev-libs/W-3": { "DEPEND": "dev-libs/Z[bar] dev-libs/Y", "EAPI": 2 },
-
-			"app-misc/A-1": { "DEPEND": "foo? ( =app-misc/B-1 )", "IUSE": "+foo bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
-			"app-misc/A-2": { "DEPEND": "foo? ( =app-misc/B-2 ) bar? ( =app-misc/B-2 )", "IUSE": "+foo bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
-			"app-misc/B-1": { "DEPEND": "=app-misc/A-1" },
-			"app-misc/B-2": { "DEPEND": "=app-misc/A-2" },
-			}
-
-		test_cases = (
-			#Simple tests
-			ResolverPlaygroundTestCase(
-				["=dev-libs/Z-1"],
-				circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False)]), frozenset([("bar", True)])])},
-				success = False),
-			ResolverPlaygroundTestCase(
-				["=dev-libs/Z-2"],
-				circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
-				success = False),
-			ResolverPlaygroundTestCase(
-				["=dev-libs/Z-3"],
-				circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
-				success = False),
-
-			#Conflict on parent
-			ResolverPlaygroundTestCase(
-				["=dev-libs/W-1"],
-				circular_dependency_solutions = {},
-				success = False),
-			ResolverPlaygroundTestCase(
-				["=dev-libs/W-2"],
-				circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
-				success = False),
-
-			#Conflict with autounmask
-			ResolverPlaygroundTestCase(
-				["=dev-libs/W-3"],
-				circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False)])])},
-				use_changes = { "dev-libs/Z-3": {"bar": True}},
-				success = False),
-
-			#Conflict with REQUIRED_USE
-			ResolverPlaygroundTestCase(
-				["=app-misc/B-1"],
-				circular_dependency_solutions = { "app-misc/B-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
-				success = False),
-			ResolverPlaygroundTestCase(
-				["=app-misc/B-2"],
-				circular_dependency_solutions = {},
-				success = False),
-		)
-
-		playground = ResolverPlayground(ebuilds=ebuilds)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

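The circular_dependency_solutions expectations above map the package that
closes a cycle to a set of alternative USE assignments that would break it. A
minimal sketch of that shape, under the same assumption as above (a portage
checkout providing ResolverPlayground); the expected solution follows the
pattern of the deleted dev-libs/Z cases:

    from portage.tests.resolver.ResolverPlayground import (
        ResolverPlayground, ResolverPlaygroundTestCase)

    # Z needs Y only with USE=foo, while Y always needs Z, so USE="-foo"
    # is the one suggested way out of the cycle.
    ebuilds = {
        "dev-libs/Z-1": {"DEPEND": "foo? ( dev-libs/Y )", "IUSE": "+foo",
            "EAPI": 1},
        "dev-libs/Y-1": {"DEPEND": "dev-libs/Z"},
    }

    case = ResolverPlaygroundTestCase(
        ["=dev-libs/Z-1"],
        circular_dependency_solutions={
            "dev-libs/Y-1": frozenset([frozenset([("foo", False)])])},
        success=False)

    playground = ResolverPlayground(ebuilds=ebuilds)
    try:
        playground.run_TestCase(case)
        assert case.test_success, case.fail_msg
    finally:
        playground.cleanup()
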
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_depclean.py b/portage_with_autodep/pym/portage/tests/resolver/test_depclean.py
deleted file mode 100644
index ba70144..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_depclean.py
+++ /dev/null
@@ -1,285 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class SimpleDepcleanTestCase(TestCase):
-
-	def testSimpleDepclean(self):
-		ebuilds = {
-			"dev-libs/A-1": {},
-			"dev-libs/B-1": {},
-			}
-		installed = {
-			"dev-libs/A-1": {},
-			"dev-libs/B-1": {},
-			}
-
-		world = (
-			"dev-libs/A",
-			)
-
-		test_cases = (
-			ResolverPlaygroundTestCase(
-				[],
-				options = {"--depclean": True},
-				success = True,
-				cleanlist = ["dev-libs/B-1"]),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
-
-class DepcleanWithDepsTestCase(TestCase):
-
-	def testDepcleanWithDeps(self):
-		ebuilds = {
-			"dev-libs/A-1": { "RDEPEND": "dev-libs/C" },
-			"dev-libs/B-1": { "RDEPEND": "dev-libs/D" },
-			"dev-libs/C-1": {},
-			"dev-libs/D-1": { "RDEPEND": "dev-libs/E" },
-			"dev-libs/E-1": { "RDEPEND": "dev-libs/F" },
-			"dev-libs/F-1": {},
-			}
-		installed = {
-			"dev-libs/A-1": { "RDEPEND": "dev-libs/C" },
-			"dev-libs/B-1": { "RDEPEND": "dev-libs/D" },
-			"dev-libs/C-1": {},
-			"dev-libs/D-1": { "RDEPEND": "dev-libs/E" },
-			"dev-libs/E-1": { "RDEPEND": "dev-libs/F" },
-			"dev-libs/F-1": {},
-			}
-
-		world = (
-			"dev-libs/A",
-			)
-
-		test_cases = (
-			ResolverPlaygroundTestCase(
-				[],
-				options = {"--depclean": True},
-				success = True,
-				cleanlist = ["dev-libs/B-1", "dev-libs/D-1",
-					"dev-libs/E-1", "dev-libs/F-1"]),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
-
-
-class DepcleanWithInstalledMaskedTestCase(TestCase):
-
-	def testDepcleanWithInstalledMasked(self):
-		"""
-		Test case for bug 332719.
-		emerge --depclean ignores that B is masked by license and removes C.
-		The next emerge -uDN world doesn't take B and installs C again.
-		"""
-		ebuilds = {
-			"dev-libs/A-1": { "RDEPEND": "|| ( dev-libs/B dev-libs/C )" },
-			"dev-libs/B-1": { "LICENSE": "TEST", "KEYWORDS": "x86" },
-			"dev-libs/C-1": { "KEYWORDS": "x86" },
-			}
-		installed = {
-			"dev-libs/A-1": { "RDEPEND": "|| ( dev-libs/B dev-libs/C )" },
-			"dev-libs/B-1": { "LICENSE": "TEST", "KEYWORDS": "x86" },
-			"dev-libs/C-1": { "KEYWORDS": "x86" },
-			}
-
-		world = (
-			"dev-libs/A",
-			)
-
-		test_cases = (
-			ResolverPlaygroundTestCase(
-				[],
-				options = {"--depclean": True},
-				success = True,
-				#cleanlist = ["dev-libs/C-1"]),
-				cleanlist = ["dev-libs/B-1"]),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
-
-class DepcleanInstalledKeywordMaskedSlotTestCase(TestCase):
-
-	def testDepcleanInstalledKeywordMaskedSlot(self):
-		"""
-		Verify that depclean removes a newer slot that is
-		masked by KEYWORDS (see bug #350285).
-		"""
-		ebuilds = {
-			"dev-libs/A-1": { "RDEPEND": "|| ( =dev-libs/B-2.7* =dev-libs/B-2.6* )" },
-			"dev-libs/B-2.6": { "SLOT":"2.6", "KEYWORDS": "x86" },
-			"dev-libs/B-2.7": { "SLOT":"2.7", "KEYWORDS": "~x86" },
-			}
-		installed = {
-			"dev-libs/A-1": { "EAPI" : "3", "RDEPEND": "|| ( dev-libs/B:2.7 dev-libs/B:2.6 )" },
-			"dev-libs/B-2.6": { "SLOT":"2.6", "KEYWORDS": "x86" },
-			"dev-libs/B-2.7": { "SLOT":"2.7", "KEYWORDS": "~x86" },
-			}
-
-		world = (
-			"dev-libs/A",
-			)
-
-		test_cases = (
-			ResolverPlaygroundTestCase(
-				[],
-				options = {"--depclean": True},
-				success = True,
-				cleanlist = ["dev-libs/B-2.7"]),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
-
-class DepcleanWithExcludeTestCase(TestCase):
-
-	def testDepcleanWithExclude(self):
-
-		installed = {
-			"dev-libs/A-1": {},
-			"dev-libs/B-1": { "RDEPEND": "dev-libs/A" },
-			}
-
-		test_cases = (
-			#Without --exclude.
-			ResolverPlaygroundTestCase(
-				[],
-				options = {"--depclean": True},
-				success = True,
-				cleanlist = ["dev-libs/B-1", "dev-libs/A-1"]),
-			ResolverPlaygroundTestCase(
-				["dev-libs/A"],
-				options = {"--depclean": True},
-				success = True,
-				cleanlist = []),
-			ResolverPlaygroundTestCase(
-				["dev-libs/B"],
-				options = {"--depclean": True},
-				success = True,
-				cleanlist = ["dev-libs/B-1"]),
-
-			#With --exclude
-			ResolverPlaygroundTestCase(
-				[],
-				options = {"--depclean": True, "--exclude": ["dev-libs/A"]},
-				success = True,
-				cleanlist = ["dev-libs/B-1"]),
-			ResolverPlaygroundTestCase(
-				["dev-libs/B"],
-				options = {"--depclean": True, "--exclude": ["dev-libs/B"]},
-				success = True,
-				cleanlist = []),
-			)
-
-		playground = ResolverPlayground(installed=installed)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
-
-class DepcleanWithExcludeAndSlotsTestCase(TestCase):
-
-	def testDepcleanWithExcludeAndSlots(self):
-
-		installed = {
-			"dev-libs/Z-1": { "SLOT": 1},
-			"dev-libs/Z-2": { "SLOT": 2},
-			"dev-libs/Y-1": { "RDEPEND": "=dev-libs/Z-1", "SLOT": 1 },
-			"dev-libs/Y-2": { "RDEPEND": "=dev-libs/Z-2", "SLOT": 2 },
-			}
-
-		world = [ "dev-libs/Y" ]
-
-		test_cases = (
-			#Without --exclude.
-			ResolverPlaygroundTestCase(
-				[],
-				options = {"--depclean": True},
-				success = True,
-				cleanlist = ["dev-libs/Y-1", "dev-libs/Z-1"]),
-			ResolverPlaygroundTestCase(
-				[],
-				options = {"--depclean": True, "--exclude": ["dev-libs/Z"]},
-				success = True,
-				cleanlist = ["dev-libs/Y-1"]),
-			ResolverPlaygroundTestCase(
-				[],
-				options = {"--depclean": True, "--exclude": ["dev-libs/Y"]},
-				success = True,
-				cleanlist = []),
-			)
-
-		playground = ResolverPlayground(installed=installed, world=world)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
-
-class DepcleanAndWildcardsTestCase(TestCase):
-
-	def testDepcleanAndWildcards(self):
-
-		installed = {
-			"dev-libs/A-1": { "RDEPEND": "dev-libs/B" },
-			"dev-libs/B-1": {},
-			}
-
-		test_cases = (
-			ResolverPlaygroundTestCase(
-				["*/*"],
-				options = {"--depclean": True},
-				success = True,
-				cleanlist = ["dev-libs/A-1", "dev-libs/B-1"]),
-			ResolverPlaygroundTestCase(
-				["dev-libs/*"],
-				options = {"--depclean": True},
-				success = True,
-				cleanlist = ["dev-libs/A-1", "dev-libs/B-1"]),
-			ResolverPlaygroundTestCase(
-				["*/A"],
-				options = {"--depclean": True},
-				success = True,
-				cleanlist = ["dev-libs/A-1"]),
-			ResolverPlaygroundTestCase(
-				["*/B"],
-				options = {"--depclean": True},
-				success = True,
-				cleanlist = []),
-			)
-
-		playground = ResolverPlayground(installed=installed)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

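Every depclean case above follows the same recipe: the world set pins what
must stay, and cleanlist names exactly what --depclean is expected to remove.
Stripped down to the essentials, as a sketch under the same ResolverPlayground
assumption as above:

    from portage.tests.resolver.ResolverPlayground import (
        ResolverPlayground, ResolverPlaygroundTestCase)

    installed = {
        "dev-libs/A-1": {},  # kept: listed in world
        "dev-libs/B-1": {},  # removed: nothing depends on it
    }

    case = ResolverPlaygroundTestCase(
        [],                            # empty atom list: clean the whole system
        options={"--depclean": True},
        success=True,
        cleanlist=["dev-libs/B-1"])

    playground = ResolverPlayground(installed=installed, world=("dev-libs/A",))
    try:
        playground.run_TestCase(case)
        assert case.test_success, case.fail_msg
    finally:
        playground.cleanup()
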
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_depth.py b/portage_with_autodep/pym/portage/tests/resolver/test_depth.py
deleted file mode 100644
index cb1e2dd..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_depth.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# Copyright 2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
-	ResolverPlaygroundTestCase)
-
-class ResolverDepthTestCase(TestCase):
-
-	def testResolverDepth(self):
-
-		ebuilds = {
-			"dev-libs/A-1": {"RDEPEND" : "dev-libs/B"},
-			"dev-libs/A-2": {"RDEPEND" : "dev-libs/B"},
-			"dev-libs/B-1": {"RDEPEND" : "dev-libs/C"},
-			"dev-libs/B-2": {"RDEPEND" : "dev-libs/C"},
-			"dev-libs/C-1": {},
-			"dev-libs/C-2": {},
-
-			"virtual/libusb-0"         : {"EAPI" :"2", "SLOT" : "0", "RDEPEND" : "|| ( >=dev-libs/libusb-0.1.12-r1:0 dev-libs/libusb-compat >=sys-freebsd/freebsd-lib-8.0[usb] )"},
-			"virtual/libusb-1"         : {"EAPI" :"2", "SLOT" : "1", "RDEPEND" : ">=dev-libs/libusb-1.0.4:1"},
-			"dev-libs/libusb-0.1.13"   : {},
-			"dev-libs/libusb-1.0.5"    : {"SLOT":"1"},
-			"dev-libs/libusb-compat-1" : {},
-			"sys-freebsd/freebsd-lib-8": {"IUSE" : "+usb"},
-
-			"sys-fs/udev-164"          : {"EAPI" : "1", "RDEPEND" : "virtual/libusb:0"},
-
-			"virtual/jre-1.5.0"        : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.5.0* =virtual/jdk-1.5.0* )"},
-			"virtual/jre-1.5.0-r1"     : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.5.0* =virtual/jdk-1.5.0* )"},
-			"virtual/jre-1.6.0"        : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.6.0* =virtual/jdk-1.6.0* )"},
-			"virtual/jre-1.6.0-r1"     : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.6.0* =virtual/jdk-1.6.0* )"},
-			"virtual/jdk-1.5.0"        : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
-			"virtual/jdk-1.5.0-r1"     : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
-			"virtual/jdk-1.6.0"        : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
-			"virtual/jdk-1.6.0-r1"     : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
-			"dev-java/gcj-jdk-4.5"     : {},
-			"dev-java/gcj-jdk-4.5-r1"  : {},
-			"dev-java/icedtea-6.1"     : {},
-			"dev-java/icedtea-6.1-r1"  : {},
-			"dev-java/sun-jdk-1.5"     : {"SLOT" : "1.5"},
-			"dev-java/sun-jdk-1.6"     : {"SLOT" : "1.6"},
-			"dev-java/sun-jre-bin-1.5" : {"SLOT" : "1.5"},
-			"dev-java/sun-jre-bin-1.6" : {"SLOT" : "1.6"},
-
-			"dev-java/ant-core-1.8"   : {"DEPEND"  : ">=virtual/jdk-1.4"},
-			"dev-db/hsqldb-1.8"       : {"RDEPEND" : ">=virtual/jre-1.6"},
-			}
-
-		installed = {
-			"dev-libs/A-1": {"RDEPEND" : "dev-libs/B"},
-			"dev-libs/B-1": {"RDEPEND" : "dev-libs/C"},
-			"dev-libs/C-1": {},
-
-			"virtual/jre-1.5.0"       : {"SLOT" : "1.5", "RDEPEND" : "|| ( =virtual/jdk-1.5.0* =dev-java/sun-jre-bin-1.5.0* )"},
-			"virtual/jre-1.6.0"       : {"SLOT" : "1.6", "RDEPEND" : "|| ( =virtual/jdk-1.6.0* =dev-java/sun-jre-bin-1.6.0* )"},
-			"virtual/jdk-1.5.0"       : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
-			"virtual/jdk-1.6.0"       : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
-			"dev-java/gcj-jdk-4.5"    : {},
-			"dev-java/icedtea-6.1"    : {},
-
-			"virtual/libusb-0"         : {"EAPI" :"2", "SLOT" : "0", "RDEPEND" : "|| ( >=dev-libs/libusb-0.1.12-r1:0 dev-libs/libusb-compat >=sys-freebsd/freebsd-lib-8.0[usb] )"},
-			}
-
-		world = ["dev-libs/A"]
-
-		test_cases = (
-			ResolverPlaygroundTestCase(
-				["dev-libs/A"],
-				options = {"--update": True, "--deep": 0},
-				success = True,
-				mergelist = ["dev-libs/A-2"]),
-
-			ResolverPlaygroundTestCase(
-				["dev-libs/A"],
-				options = {"--update": True, "--deep": 1},
-				success = True,
-				mergelist = ["dev-libs/B-2", "dev-libs/A-2"]),
-
-			ResolverPlaygroundTestCase(
-				["dev-libs/A"],
-				options = {"--update": True, "--deep": 2},
-				success = True,
-				mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
-
-			ResolverPlaygroundTestCase(
-				["@world"],
-				options = {"--update": True, "--deep": True},
-				success = True,
-				mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
-
-			ResolverPlaygroundTestCase(
-				["@world"],
-				options = {"--emptytree": True},
-				success = True,
-				mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
-
-			ResolverPlaygroundTestCase(
-				["@world"],
-				options = {"--selective": True, "--deep": True},
-				success = True,
-				mergelist = []),
-
-			ResolverPlaygroundTestCase(
-				["dev-libs/A"],
-				options = {"--deep": 2},
-				success = True,
-				mergelist = ["dev-libs/A-2"]),
-
-			ResolverPlaygroundTestCase(
-				["virtual/jre"],
-				options = {},
-				success = True,
-				mergelist = ['virtual/jre-1.6.0-r1']),
-
-			ResolverPlaygroundTestCase(
-				["virtual/jre"],
-				options = {"--deep" : True},
-				success = True,
-				mergelist = ['virtual/jre-1.6.0-r1']),
-
-			# Test bug #141118, where we avoid pulling in
-			# redundant deps, satisfying nested virtuals
-			# as efficiently as possible.
-			ResolverPlaygroundTestCase(
-				["virtual/jre"],
-				options = {"--selective" : True, "--deep" : True},
-				success = True,
-				mergelist = []),
-
-			# Test bug #150361, where depgraph._greedy_slots()
-			# is triggered by --update with AtomArg.
-			ResolverPlaygroundTestCase(
-				["virtual/jre"],
-				options = {"--update" : True},
-				success = True,
-				ambiguous_merge_order = True,
-				mergelist = [('virtual/jre-1.6.0-r1', 'virtual/jre-1.5.0-r1')]),
-
-			# Recursively traversed virtual dependencies, and their
-			# direct dependencies, are considered to have the same
-			# depth as direct dependencies.
-			ResolverPlaygroundTestCase(
-				["virtual/jre"],
-				options = {"--update" : True, "--deep" : 1},
-				success = True,
-				ambiguous_merge_order = True,
-				merge_order_assertions=(('dev-java/icedtea-6.1-r1', 'virtual/jdk-1.6.0-r1'), ('virtual/jdk-1.6.0-r1', 'virtual/jre-1.6.0-r1'),
-					('dev-java/gcj-jdk-4.5-r1', 'virtual/jdk-1.5.0-r1'), ('virtual/jdk-1.5.0-r1', 'virtual/jre-1.5.0-r1')),
-				mergelist = [('dev-java/icedtea-6.1-r1', 'dev-java/gcj-jdk-4.5-r1', 'virtual/jdk-1.6.0-r1', 'virtual/jdk-1.5.0-r1', 'virtual/jre-1.6.0-r1', 'virtual/jre-1.5.0-r1')]),
-
-			ResolverPlaygroundTestCase(
-				["virtual/jre:1.5"],
-				options = {"--update" : True},
-				success = True,
-				mergelist = ['virtual/jre-1.5.0-r1']),
-
-			ResolverPlaygroundTestCase(
-				["virtual/jre:1.6"],
-				options = {"--update" : True},
-				success = True,
-				mergelist = ['virtual/jre-1.6.0-r1']),
-
-			# Test that we don't pull in any unnecessary updates
-			# when --update is not specified, even though we
-			# specified --deep.
-			ResolverPlaygroundTestCase(
-				["dev-java/ant-core"],
-				options = {"--deep" : True},
-				success = True,
-				mergelist = ["dev-java/ant-core-1.8"]),
-
-			ResolverPlaygroundTestCase(
-				["dev-java/ant-core"],
-				options = {"--update" : True},
-				success = True,
-				mergelist = ["dev-java/ant-core-1.8"]),
-
-			# Recursively traversed virtual dependencies, and their
-			# direct dependencies, are considered to have the same
-			# depth as direct dependencies.
-			ResolverPlaygroundTestCase(
-				["dev-java/ant-core"],
-				options = {"--update" : True, "--deep" : 1},
-				success = True,
-				mergelist = ['dev-java/icedtea-6.1-r1', 'virtual/jdk-1.6.0-r1', 'dev-java/ant-core-1.8']),
-
-			ResolverPlaygroundTestCase(
-				["dev-db/hsqldb"],
-				options = {"--deep" : True},
-				success = True,
-				mergelist = ["dev-db/hsqldb-1.8"]),
-
-			# Don't traverse deps of an installed package with --deep=0,
-			# even if it's a virtual.
-			ResolverPlaygroundTestCase(
-				["virtual/libusb:0"],
-				options = {"--selective" : True, "--deep" : 0},
-				success = True,
-				mergelist = []),
-
-			# Satisfy unsatisfied dep of installed package with --deep=1.
-			ResolverPlaygroundTestCase(
-				["virtual/libusb:0"],
-				options = {"--selective" : True, "--deep" : 1},
-				success = True,
-				mergelist = ['dev-libs/libusb-0.1.13']),
-
-			# Pull in direct dep of virtual, even with --deep=0.
-			ResolverPlaygroundTestCase(
-				["sys-fs/udev"],
-				options = {"--deep" : 0},
-				success = True,
-				mergelist = ['dev-libs/libusb-0.1.13', 'sys-fs/udev-164']),
-
-			# Test --nodeps with direct virtual deps.
-			ResolverPlaygroundTestCase(
-				["sys-fs/udev"],
-				options = {"--nodeps" : True},
-				success = True,
-				mergelist = ["sys-fs/udev-164"]),
-
-			# Test that --nodeps overrides --deep.
-			ResolverPlaygroundTestCase(
-				["sys-fs/udev"],
-				options = {"--nodeps" : True, "--deep" : True},
-				success = True,
-				mergelist = ["sys-fs/udev-164"]),
-
-			# Test that --nodeps overrides --emptytree.
-			ResolverPlaygroundTestCase(
-				["sys-fs/udev"],
-				options = {"--nodeps" : True, "--emptytree" : True},
-				success = True,
-				mergelist = ["sys-fs/udev-164"]),
-
-			# Test --emptytree with virtuals.
-			ResolverPlaygroundTestCase(
-				["sys-fs/udev"],
-				options = {"--emptytree" : True},
-				success = True,
-				mergelist = ['dev-libs/libusb-0.1.13', 'virtual/libusb-0', 'sys-fs/udev-164']),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
-			world=world)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

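The depth cases hinge on --deep accepting either an integer (traverse that
many dependency levels below the arguments) or True (unbounded). A compact
sketch of just that distinction, mirroring the first cases of the deleted file
(same ResolverPlayground assumption):

    from portage.tests.resolver.ResolverPlayground import (
        ResolverPlayground, ResolverPlaygroundTestCase)

    ebuilds = {
        "dev-libs/A-2": {"RDEPEND": "dev-libs/B"},
        "dev-libs/B-2": {},
    }
    installed = {
        "dev-libs/A-1": {"RDEPEND": "dev-libs/B"},
        "dev-libs/B-1": {},
    }

    cases = (
        # --deep=0: only the argument itself is updated
        ResolverPlaygroundTestCase(
            ["dev-libs/A"],
            options={"--update": True, "--deep": 0},
            success=True,
            mergelist=["dev-libs/A-2"]),
        # --deep=1: direct dependencies are updated as well
        ResolverPlaygroundTestCase(
            ["dev-libs/A"],
            options={"--update": True, "--deep": 1},
            success=True,
            mergelist=["dev-libs/B-2", "dev-libs/A-2"]),
    )

    playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
    try:
        for case in cases:
            playground.run_TestCase(case)
            assert case.test_success, case.fail_msg
    finally:
        playground.cleanup()
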
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_eapi.py b/portage_with_autodep/pym/portage/tests/resolver/test_eapi.py
deleted file mode 100644
index 525b585..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_eapi.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class EAPITestCase(TestCase):
-
-	def testEAPI(self):
-
-		ebuilds = {
-			#EAPI-1: IUSE-defaults
-			"dev-libs/A-1.0": { "EAPI": 0, "IUSE": "+foo" }, 
-			"dev-libs/A-1.1": { "EAPI": 1, "IUSE": "+foo" }, 
-			"dev-libs/A-1.2": { "EAPI": 2, "IUSE": "+foo" }, 
-			"dev-libs/A-1.3": { "EAPI": 3, "IUSE": "+foo" }, 
-			"dev-libs/A-1.4": { "EAPI": "4", "IUSE": "+foo" }, 
-
-			#EAPI-1: slot deps
-			"dev-libs/A-2.0": { "EAPI": 0, "DEPEND": "dev-libs/B:0" }, 
-			"dev-libs/A-2.1": { "EAPI": 1, "DEPEND": "dev-libs/B:0" }, 
-			"dev-libs/A-2.2": { "EAPI": 2, "DEPEND": "dev-libs/B:0" }, 
-			"dev-libs/A-2.3": { "EAPI": 3, "DEPEND": "dev-libs/B:0" }, 
-			"dev-libs/A-2.4": { "EAPI": "4", "DEPEND": "dev-libs/B:0" }, 
-
-			#EAPI-2: use deps
-			"dev-libs/A-3.0": { "EAPI": 0, "DEPEND": "dev-libs/B[foo]" }, 
-			"dev-libs/A-3.1": { "EAPI": 1, "DEPEND": "dev-libs/B[foo]" }, 
-			"dev-libs/A-3.2": { "EAPI": 2, "DEPEND": "dev-libs/B[foo]" }, 
-			"dev-libs/A-3.3": { "EAPI": 3, "DEPEND": "dev-libs/B[foo]" }, 
-			"dev-libs/A-3.4": { "EAPI": "4", "DEPEND": "dev-libs/B[foo]" }, 
-
-			#EAPI-2: strong blocks
-			"dev-libs/A-4.0": { "EAPI": 0, "DEPEND": "!!dev-libs/B" }, 
-			"dev-libs/A-4.1": { "EAPI": 1, "DEPEND": "!!dev-libs/B" }, 
-			"dev-libs/A-4.2": { "EAPI": 2, "DEPEND": "!!dev-libs/B" }, 
-			"dev-libs/A-4.3": { "EAPI": 3, "DEPEND": "!!dev-libs/B" }, 
-			"dev-libs/A-4.4": { "EAPI": "4", "DEPEND": "!!dev-libs/B" }, 
-
-			#EAPI-4: slot operator deps
-			#~ "dev-libs/A-5.0": { "EAPI": 0, "DEPEND": "dev-libs/B:*" }, 
-			#~ "dev-libs/A-5.1": { "EAPI": 1, "DEPEND": "dev-libs/B:*" }, 
-			#~ "dev-libs/A-5.2": { "EAPI": 2, "DEPEND": "dev-libs/B:*" }, 
-			#~ "dev-libs/A-5.3": { "EAPI": 3, "DEPEND": "dev-libs/B:*" }, 
-			#~ "dev-libs/A-5.4": { "EAPI": "4", "DEPEND": "dev-libs/B:*" }, 
-
-			#EAPI-4: use dep defaults
-			"dev-libs/A-6.0": { "EAPI": 0, "DEPEND": "dev-libs/B[bar(+)]" }, 
-			"dev-libs/A-6.1": { "EAPI": 1, "DEPEND": "dev-libs/B[bar(+)]" }, 
-			"dev-libs/A-6.2": { "EAPI": 2, "DEPEND": "dev-libs/B[bar(+)]" }, 
-			"dev-libs/A-6.3": { "EAPI": 3, "DEPEND": "dev-libs/B[bar(+)]" }, 
-			"dev-libs/A-6.4": { "EAPI": "4", "DEPEND": "dev-libs/B[bar(+)]" }, 
-			
-			#EAPI-4: REQUIRED_USE
-			"dev-libs/A-7.0": { "EAPI": 0, "IUSE": "foo bar", "REQUIRED_USE": "|| ( foo bar )" }, 
-			"dev-libs/A-7.1": { "EAPI": 1, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" }, 
-			"dev-libs/A-7.2": { "EAPI": 2, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" }, 
-			"dev-libs/A-7.3": { "EAPI": 3, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" }, 
-			"dev-libs/A-7.4": { "EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" }, 
-
-			"dev-libs/B-1": {"EAPI": 1, "IUSE": "+foo"}, 
-			}
-
-		test_cases = (
-			ResolverPlaygroundTestCase(["=dev-libs/A-1.0"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-1.1"], success = True, mergelist = ["dev-libs/A-1.1"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-1.2"], success = True, mergelist = ["dev-libs/A-1.2"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-1.3"], success = True, mergelist = ["dev-libs/A-1.3"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-1.4"], success = True, mergelist = ["dev-libs/A-1.4"]),
-
-			ResolverPlaygroundTestCase(["=dev-libs/A-2.0"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-2.1"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.1"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-2.2"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.2"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-2.3"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.3"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-2.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.4"]),
-
-			ResolverPlaygroundTestCase(["=dev-libs/A-3.0"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-3.1"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-3.2"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.2"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-3.3"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.3"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-3.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.4"]),
-
-			ResolverPlaygroundTestCase(["=dev-libs/A-4.0"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-4.1"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-4.2"], success = True, mergelist = ["dev-libs/A-4.2"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-4.3"], success = True, mergelist = ["dev-libs/A-4.3"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-4.4"], success = True, mergelist = ["dev-libs/A-4.4"]),
-
-			ResolverPlaygroundTestCase(["=dev-libs/A-5.0"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-5.1"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-5.2"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-5.3"], success = False),
-			# not implemented: EAPI-4: slot operator deps
-			#~ ResolverPlaygroundTestCase(["=dev-libs/A-5.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-5.4"]),
-
-			ResolverPlaygroundTestCase(["=dev-libs/A-6.0"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-6.1"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-6.2"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-6.3"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-6.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-6.4"]),
-			
-			ResolverPlaygroundTestCase(["=dev-libs/A-7.0"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-7.1"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-7.2"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-7.3"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-7.4"], success = True, mergelist = ["dev-libs/A-7.4"]),
-		)
-
-		playground = ResolverPlayground(ebuilds=ebuilds)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

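The EAPI matrix above is uniform: the same dependency syntax is declared under
every EAPI, and resolution must fail for any EAPI that predates the feature.
The slot-dep row reduces to this sketch (same ResolverPlayground assumption):

    from portage.tests.resolver.ResolverPlayground import (
        ResolverPlayground, ResolverPlaygroundTestCase)

    ebuilds = {
        # slot deps arrived in EAPI 1, so the EAPI-0 ebuild is rejected
        "dev-libs/A-1.0": {"EAPI": 0, "DEPEND": "dev-libs/B:0"},
        "dev-libs/A-1.1": {"EAPI": 1, "DEPEND": "dev-libs/B:0"},
        "dev-libs/B-1": {},
    }

    cases = (
        ResolverPlaygroundTestCase(["=dev-libs/A-1.0"], success=False),
        ResolverPlaygroundTestCase(["=dev-libs/A-1.1"], success=True,
            mergelist=["dev-libs/B-1", "dev-libs/A-1.1"]),
    )

    playground = ResolverPlayground(ebuilds=ebuilds)
    try:
        for case in cases:
            playground.run_TestCase(case)
            assert case.test_success, case.fail_msg
    finally:
        playground.cleanup()
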
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_merge_order.py b/portage_with_autodep/pym/portage/tests/resolver/test_merge_order.py
deleted file mode 100644
index 0a52c81..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_merge_order.py
+++ /dev/null
@@ -1,453 +0,0 @@
-# Copyright 2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import portage
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
-	ResolverPlaygroundTestCase)
-
-class MergeOrderTestCase(TestCase):
-
-	def testMergeOrder(self):
-		ebuilds = {
-			"app-misc/blocker-buildtime-a-1" : {},
-			"app-misc/blocker-buildtime-unbuilt-a-1" : {
-				"DEPEND" : "!app-misc/installed-blocker-a",
-			},
-			"app-misc/blocker-buildtime-unbuilt-hard-a-1" : {
-				"EAPI"   : "2",
-				"DEPEND" : "!!app-misc/installed-blocker-a",
-			},
-			"app-misc/blocker-update-order-a-1" : {},
-			"app-misc/blocker-update-order-hard-a-1" : {},
-			"app-misc/blocker-update-order-hard-unsolvable-a-1" : {},
-			"app-misc/blocker-runtime-a-1" : {},
-			"app-misc/blocker-runtime-b-1" : {},
-			"app-misc/blocker-runtime-hard-a-1" : {},
-			"app-misc/circ-buildtime-a-0": {},
-			"app-misc/circ-buildtime-a-1": {
-				"RDEPEND": "app-misc/circ-buildtime-b",
-			},
-			"app-misc/circ-buildtime-b-1": {
-				"RDEPEND": "app-misc/circ-buildtime-c",
-			},
-			"app-misc/circ-buildtime-c-1": {
-				"DEPEND": "app-misc/circ-buildtime-a",
-			},
-			"app-misc/circ-buildtime-unsolvable-a-1": {
-				"RDEPEND": "app-misc/circ-buildtime-unsolvable-b",
-			},
-			"app-misc/circ-buildtime-unsolvable-b-1": {
-				"RDEPEND": "app-misc/circ-buildtime-unsolvable-c",
-			},
-			"app-misc/circ-buildtime-unsolvable-c-1": {
-				"DEPEND": "app-misc/circ-buildtime-unsolvable-a",
-			},
-			"app-misc/circ-post-runtime-a-1": {
-				"PDEPEND": "app-misc/circ-post-runtime-b",
-			},
-			"app-misc/circ-post-runtime-b-1": {
-				"RDEPEND": "app-misc/circ-post-runtime-c",
-			},
-			"app-misc/circ-post-runtime-c-1": {
-				"RDEPEND": "app-misc/circ-post-runtime-a",
-			},
-			"app-misc/circ-runtime-a-1": {
-				"RDEPEND": "app-misc/circ-runtime-b",
-			},
-			"app-misc/circ-runtime-b-1": {
-				"RDEPEND": "app-misc/circ-runtime-c",
-			},
-			"app-misc/circ-runtime-c-1": {
-				"RDEPEND": "app-misc/circ-runtime-a",
-			},
-			"app-misc/circ-satisfied-a-0": {
-				"RDEPEND": "app-misc/circ-satisfied-b",
-			},
-			"app-misc/circ-satisfied-a-1": {
-				"RDEPEND": "app-misc/circ-satisfied-b",
-			},
-			"app-misc/circ-satisfied-b-0": {
-				"RDEPEND": "app-misc/circ-satisfied-c",
-			},
-			"app-misc/circ-satisfied-b-1": {
-				"RDEPEND": "app-misc/circ-satisfied-c",
-			},
-			"app-misc/circ-satisfied-c-0": {
-				"DEPEND": "app-misc/circ-satisfied-a",
-				"RDEPEND": "app-misc/circ-satisfied-a",
-			},
-			"app-misc/circ-satisfied-c-1": {
-				"DEPEND": "app-misc/circ-satisfied-a",
-				"RDEPEND": "app-misc/circ-satisfied-a",
-			},
-			"app-misc/circ-smallest-a-1": {
-				"RDEPEND": "app-misc/circ-smallest-b",
-			},
-			"app-misc/circ-smallest-b-1": {
-				"RDEPEND": "app-misc/circ-smallest-a",
-			},
-			"app-misc/circ-smallest-c-1": {
-				"RDEPEND": "app-misc/circ-smallest-d",
-			},
-			"app-misc/circ-smallest-d-1": {
-				"RDEPEND": "app-misc/circ-smallest-e",
-			},
-			"app-misc/circ-smallest-e-1": {
-				"RDEPEND": "app-misc/circ-smallest-c",
-			},
-			"app-misc/circ-smallest-f-1": {
-				"RDEPEND": "app-misc/circ-smallest-g app-misc/circ-smallest-a app-misc/circ-smallest-c",
-			},
-			"app-misc/circ-smallest-g-1": {
-				"RDEPEND": "app-misc/circ-smallest-f",
-			},
-			"app-misc/installed-blocker-a-1" : {
-				"EAPI"   : "2",
-				"DEPEND" : "!app-misc/blocker-buildtime-a",
-				"RDEPEND" : "!app-misc/blocker-runtime-a !app-misc/blocker-runtime-b !!app-misc/blocker-runtime-hard-a",
-			},
-			"app-misc/installed-old-version-blocks-a-1" : {
-				"RDEPEND" : "!app-misc/blocker-update-order-a",
-			},
-			"app-misc/installed-old-version-blocks-a-2" : {},
-			"app-misc/installed-old-version-blocks-hard-a-1" : {
-				"EAPI"    : "2",
-				"RDEPEND" : "!!app-misc/blocker-update-order-hard-a",
-			},
-			"app-misc/installed-old-version-blocks-hard-a-2" : {},
-			"app-misc/installed-old-version-blocks-hard-unsolvable-a-1" : {
-				"EAPI"    : "2",
-				"RDEPEND" : "!!app-misc/blocker-update-order-hard-unsolvable-a",
-			},
-			"app-misc/installed-old-version-blocks-hard-unsolvable-a-2" : {
-				"DEPEND"  : "app-misc/blocker-update-order-hard-unsolvable-a",
-				"RDEPEND" : "",
-			},
-			"app-misc/some-app-a-1": {
-				"RDEPEND": "app-misc/circ-runtime-a app-misc/circ-runtime-b",
-			},
-			"app-misc/some-app-b-1": {
-				"RDEPEND": "app-misc/circ-post-runtime-a app-misc/circ-post-runtime-b",
-			},
-			"app-misc/some-app-c-1": {
-				"RDEPEND": "app-misc/circ-buildtime-a app-misc/circ-buildtime-b",
-			},
-			"app-admin/eselect-python-20100321" : {},
-			"sys-apps/portage-2.1.9.42" : {
-				"DEPEND"  : "dev-lang/python",
-				"RDEPEND" : "dev-lang/python",
-			},
-			"sys-apps/portage-2.1.9.49" : {
-				"DEPEND"  : "dev-lang/python >=app-admin/eselect-python-20091230",
-				"RDEPEND" : "dev-lang/python",
-			},
-			"dev-lang/python-3.1" : {},
-			"dev-lang/python-3.2" : {},
-			"virtual/libc-0" : {
-				"RDEPEND" : "sys-libs/glibc",
-			},
-			"sys-devel/gcc-4.5.2" : {},
-			"sys-devel/binutils-2.18" : {},
-			"sys-devel/binutils-2.20.1" : {},
-			"sys-libs/glibc-2.11" : {
-				"DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
-				"RDEPEND": "",
-			},
-			"sys-libs/glibc-2.13" : {
-				"DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
-				"RDEPEND": "",
-			},
-			"virtual/os-headers-0" : {
-				"RDEPEND" : "sys-kernel/linux-headers",
-			},
-			"sys-kernel/linux-headers-2.6.38": {
-				"DEPEND" : "app-arch/xz-utils",
-				"RDEPEND": "",
-			},
-			"sys-kernel/linux-headers-2.6.39": {
-				"DEPEND" : "app-arch/xz-utils",
-				"RDEPEND": "",
-			},
-			"app-arch/xz-utils-5.0.1" : {},
-			"app-arch/xz-utils-5.0.2" : {},
-			"dev-util/pkgconfig-0.25-r2" : {},
-			"kde-base/kdelibs-3.5.7" : {
-				"PDEPEND" : "kde-misc/kdnssd-avahi",
-			},
-			"kde-misc/kdnssd-avahi-0.1.2" : {
-				"DEPEND"  : "kde-base/kdelibs app-arch/xz-utils dev-util/pkgconfig",
-				"RDEPEND" : "kde-base/kdelibs",
-			},
-			"kde-base/kdnssd-3.5.7" : {
-				"DEPEND"  : "kde-base/kdelibs",
-				"RDEPEND" : "kde-base/kdelibs",
-			},
-			"kde-base/libkdegames-3.5.7" : {
-				"DEPEND"  : "kde-base/kdelibs",
-				"RDEPEND" : "kde-base/kdelibs",
-			},
-			"kde-base/kmines-3.5.7" : {
-				"DEPEND"  : "kde-base/libkdegames",
-				"RDEPEND" : "kde-base/libkdegames",
-			},
-			"media-video/libav-0.7_pre20110327" : {
-				"EAPI" : "2",
-				"IUSE" : "X +encode",
-				"RDEPEND" : "!media-video/ffmpeg",
-			},
-			"media-video/ffmpeg-0.7_rc1" : {
-				"EAPI" : "2",
-				"IUSE" : "X +encode",
-			},
-			"virtual/ffmpeg-0.6.90" : {
-				"EAPI" : "2",
-				"IUSE" : "X +encode",
-				"RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
-			},
-		}
-
-		installed = {
-			"app-misc/circ-buildtime-a-0": {},
-			"app-misc/circ-satisfied-a-0": {
-				"RDEPEND": "app-misc/circ-satisfied-b",
-			},
-			"app-misc/circ-satisfied-b-0": {
-				"RDEPEND": "app-misc/circ-satisfied-c",
-			},
-			"app-misc/circ-satisfied-c-0": {
-				"DEPEND": "app-misc/circ-satisfied-a",
-				"RDEPEND": "app-misc/circ-satisfied-a",
-			},
-			"app-misc/installed-blocker-a-1" : {
-				"EAPI"   : "2",
-				"DEPEND" : "!app-misc/blocker-buildtime-a",
-				"RDEPEND" : "!app-misc/blocker-runtime-a !app-misc/blocker-runtime-b !!app-misc/blocker-runtime-hard-a",
-			},
-			"app-misc/installed-old-version-blocks-a-1" : {
-				"RDEPEND" : "!app-misc/blocker-update-order-a",
-			},
-			"app-misc/installed-old-version-blocks-hard-a-1" : {
-				"EAPI"    : "2",
-				"RDEPEND" : "!!app-misc/blocker-update-order-hard-a",
-			},
-			"app-misc/installed-old-version-blocks-hard-unsolvable-a-1" : {
-				"EAPI"    : "2",
-				"RDEPEND" : "!!app-misc/blocker-update-order-hard-unsolvable-a",
-			},
-			"sys-apps/portage-2.1.9.42" : {
-				"DEPEND"  : "dev-lang/python",
-				"RDEPEND" : "dev-lang/python",
-			},
-			"dev-lang/python-3.1" : {},
-			"virtual/libc-0" : {
-				"RDEPEND" : "sys-libs/glibc",
-			},
-			"sys-devel/binutils-2.18" : {},
-			"sys-libs/glibc-2.11" : {
-				"DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
-				"RDEPEND": "",
-			},
-			"virtual/os-headers-0" : {
-				"RDEPEND" : "sys-kernel/linux-headers",
-			},
-			"sys-kernel/linux-headers-2.6.38": {
-				"DEPEND" : "app-arch/xz-utils",
-				"RDEPEND": "",
-			},
-			"app-arch/xz-utils-5.0.1" : {},
-			"media-video/ffmpeg-0.7_rc1" : {
-				"EAPI" : "2",
-				"IUSE" : "X +encode",
-				"USE" : "encode",
-			},
-			"virtual/ffmpeg-0.6.90" : {
-				"EAPI" : "2",
-				"IUSE" : "X +encode",
-				"USE" : "encode",
-				"RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
-			},
-		}
-
-		test_cases = (
-			ResolverPlaygroundTestCase(
-				["app-misc/some-app-a"],
-				success = True,
-				ambiguous_merge_order = True,
-				mergelist = [("app-misc/circ-runtime-a-1", "app-misc/circ-runtime-b-1", "app-misc/circ-runtime-c-1"), "app-misc/some-app-a-1"]),
-			ResolverPlaygroundTestCase(
-				["app-misc/some-app-a"],
-				success = True,
-				ambiguous_merge_order = True,
-				mergelist = [("app-misc/circ-runtime-c-1", "app-misc/circ-runtime-b-1", "app-misc/circ-runtime-a-1"), "app-misc/some-app-a-1"]),
-			# Test unsolvable circular dep that is RDEPEND in one
-			# direction and DEPEND in the other.
-			ResolverPlaygroundTestCase(
-				["app-misc/circ-buildtime-unsolvable-a"],
-				success = False,
-				circular_dependency_solutions = {}),
-			# Test optimal merge order for a circular dep that is
-			# RDEPEND in one direction and DEPEND in the other.
-			# This requires an installed instance of the DEPEND
-			# package in order to be solvable.
-			ResolverPlaygroundTestCase(
-				["app-misc/some-app-c", "app-misc/circ-buildtime-a"],
-				success = True,
-				ambiguous_merge_order = True,
-				mergelist = [("app-misc/circ-buildtime-b-1", "app-misc/circ-buildtime-c-1"), "app-misc/circ-buildtime-a-1", "app-misc/some-app-c-1"]),
-			# Test optimal merge order for a circular dep that is
-			# RDEPEND in one direction and PDEPEND in the other.
-			ResolverPlaygroundTestCase(
-				["app-misc/some-app-b"],
-				success = True,
-				ambiguous_merge_order = True,
-				mergelist = ["app-misc/circ-post-runtime-a-1", ("app-misc/circ-post-runtime-b-1", "app-misc/circ-post-runtime-c-1"), "app-misc/some-app-b-1"]),
-			# Test optimal merge order for a circular dep that is
-			# RDEPEND in one direction and DEPEND in the other,
-			# with all dependencies initially satisfied. Optimally,
-			# the DEPEND/buildtime dep should be updated before the
-			# package that depends on it, even though it's feasible
-			# to update it later since it is already satisfied.
-			ResolverPlaygroundTestCase(
-				["app-misc/circ-satisfied-a", "app-misc/circ-satisfied-b", "app-misc/circ-satisfied-c"],
-				success = True,
-				all_permutations = True,
-				ambiguous_merge_order = True,
-				merge_order_assertions = (("app-misc/circ-satisfied-a-1", "app-misc/circ-satisfied-c-1"),),
-				mergelist = [("app-misc/circ-satisfied-a-1", "app-misc/circ-satisfied-b-1", "app-misc/circ-satisfied-c-1")]),
-			# In the case of multiple runtime cycles, where some cycles
-			# may depend on smaller independent cycles, it's optimal
-			# to merge smaller independent cycles before other cycles
-			# that depend on them.
-			ResolverPlaygroundTestCase(
-				["app-misc/circ-smallest-a", "app-misc/circ-smallest-c", "app-misc/circ-smallest-f"],
-				success = True,
-				ambiguous_merge_order = True,
-				all_permutations = True,
-				mergelist = [('app-misc/circ-smallest-a-1', 'app-misc/circ-smallest-b-1'),
-				('app-misc/circ-smallest-c-1', 'app-misc/circ-smallest-d-1', 'app-misc/circ-smallest-e-1'),
-				('app-misc/circ-smallest-f-1', 'app-misc/circ-smallest-g-1')]),
-			# installed package has buildtime-only blocker
-			# that should be ignored
-			ResolverPlaygroundTestCase(
-				["app-misc/blocker-buildtime-a"],
-				success = True,
-				mergelist = ["app-misc/blocker-buildtime-a-1"]),
-			# We're installing a package that an old version of
-			# an installed package blocks. However, an update is
-			# available to the old package. The old package should
-			# be updated first, in order to solve the blocker without
-			# any need for blocking packages to temporarily overlap.
-			ResolverPlaygroundTestCase(
-				["app-misc/blocker-update-order-a", "app-misc/installed-old-version-blocks-a"],
-				success = True,
-				all_permutations = True,
-				mergelist = ["app-misc/installed-old-version-blocks-a-2", "app-misc/blocker-update-order-a-1"]),
-			# This is the same as above but with a hard blocker. The hard
-			# blocker is solved automatically since the update makes it
-			# irrelevant.
-			ResolverPlaygroundTestCase(
-				["app-misc/blocker-update-order-hard-a", "app-misc/installed-old-version-blocks-hard-a"],
-				success = True,
-				all_permutations = True,
-				mergelist = ["app-misc/installed-old-version-blocks-hard-a-2", "app-misc/blocker-update-order-hard-a-1"]),
-			# This is similar to the above case except that it's unsolvable
-			# due to merge order, unless bug 250286 is implemented so that
-			# the installed blocker will be unmerged before installation
-			# of the package it blocks (rather than after, the way a soft
-			# blocker would be handled). The "unmerge before" behavior requested
-			# in bug 250286 must be optional since essential programs or
-			# libraries may be temporarily unavailable during a
-			# non-overlapping update like this.
-			ResolverPlaygroundTestCase(
-				["app-misc/blocker-update-order-hard-unsolvable-a", "app-misc/installed-old-version-blocks-hard-unsolvable-a"],
-				success = False,
-				all_permutations = True,
-				ambiguous_merge_order = True,
-				merge_order_assertions = (('app-misc/blocker-update-order-hard-unsolvable-a-1', 'app-misc/installed-old-version-blocks-hard-unsolvable-a-2'),),
-				mergelist = [('app-misc/blocker-update-order-hard-unsolvable-a-1', 'app-misc/installed-old-version-blocks-hard-unsolvable-a-2', '!!app-misc/blocker-update-order-hard-unsolvable-a')]),
-			# The installed package has runtime blockers that
-			# should cause it to be uninstalled. The uninstall
-			# task is executed only after blocking packages have
-			# been merged.
-			# TODO: distinguish between install/uninstall tasks in mergelist
-			ResolverPlaygroundTestCase(
-				["app-misc/blocker-runtime-a", "app-misc/blocker-runtime-b"],
-				success = True,
-				all_permutations = True,
-				ambiguous_merge_order = True,
-				mergelist = [("app-misc/blocker-runtime-a-1", "app-misc/blocker-runtime-b-1"), "app-misc/installed-blocker-a-1", ("!app-misc/blocker-runtime-a", "!app-misc/blocker-runtime-b")]),
-			# We have a soft buildtime blocker against an installed
-			# package that should cause it to be uninstalled. Note that with
-			# soft blockers, the blocking packages are allowed to temporarily
-			# overlap. This allows any essential programs/libraries provided
-			# by both packages to be available at all times.
-			# TODO: distinguish between install/uninstall tasks in mergelist
-			ResolverPlaygroundTestCase(
-				["app-misc/blocker-buildtime-unbuilt-a"],
-				success = True,
-				mergelist = ["app-misc/blocker-buildtime-unbuilt-a-1", "app-misc/installed-blocker-a-1", "!app-misc/installed-blocker-a"]),
-			# We have a hard buildtime blocker against an installed
-			# package that will not resolve automatically (unless
-			# the option requested in bug 250286 is implemented).
-			ResolverPlaygroundTestCase(
-				["app-misc/blocker-buildtime-unbuilt-hard-a"],
-				success = False,
-				mergelist = ['app-misc/blocker-buildtime-unbuilt-hard-a-1', '!!app-misc/installed-blocker-a']),
-			# An installed package has a hard runtime blocker that
-			# will not resolve automatically (unless the option
-			# requested in bug 250286 is implemented).
-			ResolverPlaygroundTestCase(
-				["app-misc/blocker-runtime-hard-a"],
-				success = False,
-				mergelist = ['app-misc/blocker-runtime-hard-a-1', '!!app-misc/blocker-runtime-hard-a']),
-			# Test swapping of providers for a new-style virtual package,
-			# which relies on delayed evaluation of disjunctive (virtual
-			# and ||) deps as required to solve bug #264434. Note that
-			# this behavior is not supported for old-style PROVIDE virtuals,
-			# as reported in bug #339164.
-			ResolverPlaygroundTestCase(
-				["media-video/libav"],
-				success=True,
-				mergelist = ['media-video/libav-0.7_pre20110327', 'media-video/ffmpeg-0.7_rc1', '!media-video/ffmpeg']),
-			# Test that PORTAGE_PACKAGE_ATOM is merged asap. Optimally,
-			# satisfied deps are always merged after the asap nodes that
-			# depend on them.
-			ResolverPlaygroundTestCase(
-				["dev-lang/python", portage.const.PORTAGE_PACKAGE_ATOM],
-				success = True,
-				all_permutations = True,
-				mergelist = ['app-admin/eselect-python-20100321', 'sys-apps/portage-2.1.9.49', 'dev-lang/python-3.2']),
-			# Test that OS_HEADERS_PACKAGE_ATOM and LIBC_PACKAGE_ATOM
-			# are merged asap, in order to account for implicit
-			# dependencies. See bug #303567. Optimally, satisfied deps
-			# are always merged after the asap nodes that depend on them.
-			ResolverPlaygroundTestCase(
-				["app-arch/xz-utils", "sys-kernel/linux-headers", "sys-devel/binutils", "sys-libs/glibc"],
-				options = {"--complete-graph" : True},
-				success = True,
-				all_permutations = True,
-				ambiguous_merge_order = True,
-				mergelist = ['sys-kernel/linux-headers-2.6.39', 'sys-devel/gcc-4.5.2', 'sys-libs/glibc-2.13', ('app-arch/xz-utils-5.0.2', 'sys-devel/binutils-2.20.1')]),
-			# Test asap install of PDEPEND for bug #180045.
-			ResolverPlaygroundTestCase(
-				["kde-base/kmines", "kde-base/kdnssd", "kde-base/kdelibs", "app-arch/xz-utils"],
-				success = True,
-				all_permutations = True,
-				ambiguous_merge_order = True,
-				merge_order_assertions = (
-					('dev-util/pkgconfig-0.25-r2', 'kde-misc/kdnssd-avahi-0.1.2'),
-					('kde-misc/kdnssd-avahi-0.1.2', 'kde-base/libkdegames-3.5.7'),
-					('kde-misc/kdnssd-avahi-0.1.2', 'kde-base/kdnssd-3.5.7'),
-					('kde-base/libkdegames-3.5.7', 'kde-base/kmines-3.5.7'),
-				),
-				mergelist = [('kde-base/kdelibs-3.5.7', 'dev-util/pkgconfig-0.25-r2', 'kde-misc/kdnssd-avahi-0.1.2', 'app-arch/xz-utils-5.0.2', 'kde-base/libkdegames-3.5.7', 'kde-base/kdnssd-3.5.7', 'kde-base/kmines-3.5.7')]),
-		)
-
-		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

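In the merge-order expectations above, a tuple inside mergelist marks a group
whose internal order is deliberately left unasserted, while
merge_order_assertions pins individual before/after pairs. A minimal sketch of
that notation for a runtime cycle (same ResolverPlayground assumption; the
app-misc/top package is hypothetical, added only for illustration):

    from portage.tests.resolver.ResolverPlayground import (
        ResolverPlayground, ResolverPlaygroundTestCase)

    ebuilds = {
        "app-misc/circ-a-1": {"RDEPEND": "app-misc/circ-b"},
        "app-misc/circ-b-1": {"RDEPEND": "app-misc/circ-a"},
        "app-misc/top-1": {"RDEPEND": "app-misc/circ-a"},
    }

    case = ResolverPlaygroundTestCase(
        ["app-misc/top"],
        success=True,
        ambiguous_merge_order=True,
        # cycle members may merge in either order, but both before app-misc/top
        mergelist=[("app-misc/circ-a-1", "app-misc/circ-b-1"),
            "app-misc/top-1"])

    playground = ResolverPlayground(ebuilds=ebuilds)
    try:
        playground.run_TestCase(case)
        assert case.test_success, case.fail_msg
    finally:
        playground.cleanup()
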
diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py b/portage_with_autodep/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py
deleted file mode 100644
index a860e7b..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class MissingIUSEandEvaluatedAtomsTestCase(TestCase):
-
-	def testMissingIUSEandEvaluatedAtoms(self):
-		ebuilds = {
-			"dev-libs/A-1": { "DEPEND": "dev-libs/B[foo?]", "IUSE": "foo bar", "EAPI": 2 }, 
-			"dev-libs/A-2": { "DEPEND": "dev-libs/B[foo?,bar]", "IUSE": "foo bar", "EAPI": 2 }, 
-			"dev-libs/B-1": { "IUSE": "bar" }, 
-			}
-
-		test_cases = (
-			ResolverPlaygroundTestCase(
-				["=dev-libs/A-1"],
-				success = False),
-			ResolverPlaygroundTestCase(
-				["=dev-libs/A-2"],
-				success = False),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_multirepo.py b/portage_with_autodep/pym/portage/tests/resolver/test_multirepo.py
deleted file mode 100644
index 34c6d45..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_multirepo.py
+++ /dev/null
@@ -1,318 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class MultirepoTestCase(TestCase):
-
-	def testMultirepo(self):
-		ebuilds = {
-			#Simple repo selection
-			"dev-libs/A-1": { },
-			"dev-libs/A-1::repo1": { },
-			"dev-libs/A-2::repo1": { },
-			"dev-libs/A-1::repo2": { },
-
-			#Packages in exactly one repo
-			"dev-libs/B-1": { },
-			"dev-libs/C-1::repo1": { },
-
-			#Package in repository 1 and 2, but 2 must be used
-			"dev-libs/D-1::repo1": { },
-			"dev-libs/D-1::repo2": { },
-
-			"dev-libs/E-1": { },
-			"dev-libs/E-1::repo1": { },
-			"dev-libs/E-1::repo2": { "SLOT": "1" },
-
-			"dev-libs/F-1::repo1": { "SLOT": "1" },
-			"dev-libs/F-1::repo2": { "SLOT": "1" },
-
-			"dev-libs/G-1::repo1": { "EAPI" : "4", "IUSE":"+x +y", "REQUIRED_USE" : "" },
-			"dev-libs/G-1::repo2": { "EAPI" : "4", "IUSE":"+x +y", "REQUIRED_USE" : "^^ ( x y )" },
-
-			"dev-libs/H-1": {	"KEYWORDS": "x86", "EAPI" : "3",
-								"RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )" },
-
-			"dev-libs/I-1::repo2": { "SLOT" : "1"},
-			"dev-libs/I-2::repo2": { "SLOT" : "2"},
-			}
-
-		installed = {
-			"dev-libs/H-1": { "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )"},
-			"dev-libs/I-2::repo1": {"SLOT" : "2"},
-			}
-
-		sets = {
-			"multirepotest": 
-				( "dev-libs/A::test_repo", )
-		}
-
-		test_cases = (
-			#Simple repo selection
-			ResolverPlaygroundTestCase(
-				["dev-libs/A"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/A-2::repo1"]),
-			ResolverPlaygroundTestCase(
-				["dev-libs/A::test_repo"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/A-1"]),
-			ResolverPlaygroundTestCase(
-				["dev-libs/A::repo2"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/A-1::repo2"]),
-			ResolverPlaygroundTestCase(
-				["=dev-libs/A-1::repo1"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/A-1::repo1"]),
-			ResolverPlaygroundTestCase(
-				["@multirepotest"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/A-1"]),
-
-			#Packages in exactly one repo
-			ResolverPlaygroundTestCase(
-				["dev-libs/B"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/B-1"]),
-			ResolverPlaygroundTestCase(
-				["dev-libs/C"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/C-1::repo1"]),
-
-			#Package in repository 1 and 2, but 2 must be used
-			ResolverPlaygroundTestCase(
-				["dev-libs/D"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/D-1::repo2"]),
-
-			#Atoms with slots
-			ResolverPlaygroundTestCase(
-				["dev-libs/E"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/E-1::repo2"]),
-			ResolverPlaygroundTestCase(
-				["dev-libs/E:1::repo2"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/E-1::repo2"]),
-			ResolverPlaygroundTestCase(
-				["dev-libs/E:1"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/E-1::repo2"]),
-			ResolverPlaygroundTestCase(
-				["dev-libs/F:1"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/F-1::repo2"]),
-			ResolverPlaygroundTestCase(
-				["=dev-libs/F-1:1"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/F-1::repo2"]),
-			ResolverPlaygroundTestCase(
-				["=dev-libs/F-1:1::repo1"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/F-1::repo1"]),
-
-			# A dependency satisfied by the installed dev-libs/I-2 ebuild, whose
-			# ebuild is no longer available from the same repo, should not
-			# unnecessarily reinstall the same version from a different repo.
-			ResolverPlaygroundTestCase(
-				["dev-libs/H"],
-				options = {"--update": True, "--deep": True},
-				success = True,
-				mergelist = []),
-
-			# Check interaction between repo priority and unsatisfied
-			# REQUIRED_USE, for bug #350254.
-			ResolverPlaygroundTestCase(
-				["=dev-libs/G-1"],
-				check_repo_names = True,
-				success = False),
-
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds,
-			installed=installed, sets=sets)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
-
-
-	def testMultirepoUserConfig(self):
-		ebuilds = {
-			#package.use test
-			"dev-libs/A-1": { "IUSE": "foo" },
-			"dev-libs/A-2::repo1": { "IUSE": "foo" },
-			"dev-libs/A-3::repo2": { },
-			"dev-libs/B-1": { "DEPEND": "dev-libs/A", "EAPI": 2 },
-			"dev-libs/B-2": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
-			"dev-libs/B-3": { "DEPEND": "dev-libs/A[-foo]", "EAPI": 2 },
-
-			#package.keywords test
-			"dev-libs/C-1": { "KEYWORDS": "~x86" },
-			"dev-libs/C-1::repo1": { "KEYWORDS": "~x86" },
-
-			#package.license
-			"dev-libs/D-1": { "LICENSE": "TEST" },
-			"dev-libs/D-1::repo1": { "LICENSE": "TEST" },
-
-			#package.mask
-			"dev-libs/E-1": { },
-			"dev-libs/E-1::repo1": { },
-			"dev-libs/H-1": { },
-			"dev-libs/H-1::repo1": { },
-			"dev-libs/I-1::repo2": { "SLOT" : "1"},
-			"dev-libs/I-2::repo2": { "SLOT" : "2"},
-			"dev-libs/J-1": {	"KEYWORDS": "x86", "EAPI" : "3",
-								"RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )" },
-
-			#package.properties
-			"dev-libs/F-1": { "PROPERTIES": "bar"},
-			"dev-libs/F-1::repo1": { "PROPERTIES": "bar"},
-
-			#package.unmask
-			"dev-libs/G-1": { },
-			"dev-libs/G-1::repo1": { },
-
-			#package.mask with wildcards
-			"dev-libs/Z-1::repo3": { },
-			}
-
-		installed = {
-			"dev-libs/J-1": { "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )"},
-			"dev-libs/I-2::repo1": {"SLOT" : "2"},
-			}
-
-		user_config = {
-			"package.use":
-				(
-					"dev-libs/A::repo1 foo",
-				),
-			"package.keywords":
-				(
-					"=dev-libs/C-1::test_repo",
-				),
-			"package.license":
-				(
-					"=dev-libs/D-1::test_repo TEST",
-				),
-			"package.mask":
-				(
-					"dev-libs/E::repo1",
-					"dev-libs/H",
-					"dev-libs/I::repo1",
-					#needed for package.unmask test
-					"dev-libs/G",
-					#wildcard test
-					"*/*::repo3",
-				),
-			"package.properties":
-				(
-					"dev-libs/F::repo1 -bar",
-				),
-			"package.unmask":
-				(
-					"dev-libs/G::test_repo",
-				),
-			}
-
-		test_cases = (
-			#package.use test
-			ResolverPlaygroundTestCase(
-				["=dev-libs/B-1"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/A-3::repo2", "dev-libs/B-1"]),
-			ResolverPlaygroundTestCase(
-				["=dev-libs/B-2"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/A-2::repo1", "dev-libs/B-2"]),
-			ResolverPlaygroundTestCase(
-				["=dev-libs/B-3"],
-				options = { "--autounmask": 'n' },
-				success = False,
-				check_repo_names = True),
-
-			#package.keywords test
-			ResolverPlaygroundTestCase(
-				["dev-libs/C"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/C-1"]),
-
-			#package.license test
-			ResolverPlaygroundTestCase(
-				["dev-libs/D"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/D-1"]),
-
-			#package.mask test
-			ResolverPlaygroundTestCase(
-				["dev-libs/E"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/E-1"]),
-
-			# A dependency on installed dev-libs/I-2, whose ebuild is masked
-			# in the same repo, should not unnecessarily pull in a different
-			# slot. It should just pull in the same slot from a different
-			# repo (bug #351828).
-			ResolverPlaygroundTestCase(
-				["dev-libs/J"],
-				options = {"--update": True, "--deep": True},
-				success = True,
-				mergelist = ["dev-libs/I-2"]),
-
-			#package.properties test
-			ResolverPlaygroundTestCase(
-				["dev-libs/F"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/F-1"]),
-
-			#package.mask test
-			ResolverPlaygroundTestCase(
-				["dev-libs/G"],
-				success = True,
-				check_repo_names = True,
-				mergelist = ["dev-libs/G-1"]),
-			ResolverPlaygroundTestCase(
-				["dev-libs/H"],
-				options = { "--autounmask": 'n' },
-				success = False),
-
-			#package.mask with wildcards
-			ResolverPlaygroundTestCase(
-				["dev-libs/Z"],
-				options = { "--autounmask": 'n' },
-				success = False),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds,
-			installed=installed, user_config=user_config)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_multislot.py b/portage_with_autodep/pym/portage/tests/resolver/test_multislot.py
deleted file mode 100644
index 8615419..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_multislot.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class MultiSlotTestCase(TestCase):
-
-	def testMultiSlotSelective(self):
-		"""
-		Test that a package isn't reinstalled due to SLOT dependency
-		interaction with USE=multislot (bug #220341).
-		"""
-
-		ebuilds = {
-			"sys-devel/gcc-4.4.4": { "SLOT": "4.4" },
-			}
-
-		installed = {
-			"sys-devel/gcc-4.4.4": { "SLOT": "i686-pc-linux-gnu-4.4.4" },
-			}
-
-		options = {'--update' : True, '--deep' : True, '--selective' : True}
-
-		test_cases = (
-				ResolverPlaygroundTestCase(
-					["sys-devel/gcc:4.4"],
-					options = options,
-					mergelist = [],
-					success = True),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
-
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_old_dep_chain_display.py b/portage_with_autodep/pym/portage/tests/resolver/test_old_dep_chain_display.py
deleted file mode 100644
index 8aedf59..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_old_dep_chain_display.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class OldDepChainDisplayTestCase(TestCase):
-
-	def testOldDepChainDisplay(self):
-		ebuilds = {
-			"dev-libs/A-1": { "DEPEND": "foo? ( dev-libs/B[-bar] )", "IUSE": "+foo", "EAPI": "2" }, 
-			"dev-libs/A-2": { "DEPEND": "foo? ( dev-libs/C )", "IUSE": "+foo", "EAPI": "1" }, 
-			"dev-libs/B-1": { "IUSE": "bar", "DEPEND": "!bar? ( dev-libs/D[-baz] )", "EAPI": "2" },
-			"dev-libs/C-1": { "KEYWORDS": "~x86" },
-			"dev-libs/D-1": { "IUSE": "+baz", "EAPI": "1" },
-			}
-
-		test_cases = (
-			ResolverPlaygroundTestCase(
-				["=dev-libs/A-1"],
-				options = { "--autounmask": 'n' },
-				success = False),
-			ResolverPlaygroundTestCase(
-				["=dev-libs/A-2"],
-				options = { "--autounmask": 'n' },
-				success = False),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_output.py b/portage_with_autodep/pym/portage/tests/resolver/test_output.py
deleted file mode 100644
index 34efe9c..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_output.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class MergelistOutputTestCase(TestCase):
-
-	def testMergelistOutput(self):
-		"""
-		This test doesn't check whether the output is correct, but makes
-		sure that we don't raise a traceback somewhere in the output code.
-		"""
-		ebuilds = {
-			"dev-libs/A-1": { "DEPEND": "dev-libs/B dev-libs/C", "IUSE": "+foo", "EAPI": 1 },
-			"dev-libs/B-1": { "DEPEND": "dev-libs/D", "IUSE": "foo +bar", "EAPI": 1 },
-			"dev-libs/C-1": { "DEPEND": "dev-libs/E", "IUSE": "foo bar" },
-			"dev-libs/D-1": { "IUSE": "" },
-			"dev-libs/E-1": {},
-
-			#reinstall for flags
-			"dev-libs/Z-1": { "IUSE": "+foo", "EAPI": 1 },
-			"dev-libs/Y-1": { "IUSE": "foo", "EAPI": 1 },
-			"dev-libs/X-1": {},
-			"dev-libs/W-1": { "IUSE": "+foo", "EAPI": 1 },
-			}
-
-		installed = {
-			"dev-libs/Z-1": { "USE": "", "IUSE": "foo" },
-			"dev-libs/Y-1": { "USE": "foo", "IUSE": "+foo", "EAPI": 1 },
-			"dev-libs/X-1": { "USE": "foo", "IUSE": "+foo", "EAPI": 1 },
-			"dev-libs/W-1": { },
-		}
-
-		option_combos = (
-			(),
-			("verbose",),
-			("tree",),
-			("tree", "unordered-display",),
-			("verbose",),
-			("verbose", "tree",),
-			("verbose", "tree", "unordered-display",),
-		)
-
-		test_cases = []
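-		# Expand each option combination into the "--flag": True dict that
-		# ResolverPlaygroundTestCase expects.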
-		for options in option_combos:
-			testcase_opts = {}
-			for opt in options:
-				testcase_opts["--" + opt] = True
-
-			test_cases.append(ResolverPlaygroundTestCase(
-				["dev-libs/A"],
-				options = testcase_opts,
-				success = True,
-				ignore_mergelist_order=True,
-				mergelist = ["dev-libs/D-1", "dev-libs/E-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"]))
-
-			test_cases.append(ResolverPlaygroundTestCase(
-				["dev-libs/Z"],
-				options = testcase_opts,
-				success = True,
-				mergelist = ["dev-libs/Z-1"]))
-
-			test_cases.append(ResolverPlaygroundTestCase(
-				["dev-libs/Y"],
-				options = testcase_opts,
-				success = True,
-				mergelist = ["dev-libs/Y-1"]))
-
-			test_cases.append(ResolverPlaygroundTestCase(
-				["dev-libs/X"],
-				options = testcase_opts,
-				success = True,
-				mergelist = ["dev-libs/X-1"]))
-
-			test_cases.append(ResolverPlaygroundTestCase(
-				["dev-libs/W"],
-				options = testcase_opts,
-				success = True,
-				mergelist = ["dev-libs/W-1"]))
-
-		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_rebuild.py b/portage_with_autodep/pym/portage/tests/resolver/test_rebuild.py
deleted file mode 100644
index b9c4d6d..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_rebuild.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# Copyright 2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
-	ResolverPlaygroundTestCase)
-
-class RebuildTestCase(TestCase):
-
-	def testRebuild(self):
-		"""
-		Rebuild packages when dependencies that are used at both build-time and
-		run-time are upgraded.
-		"""
-
-		ebuilds = {
-			"sys-libs/x-1": { },
-			"sys-libs/x-1-r1": { },
-			"sys-libs/x-2": { },
-			"sys-apps/a-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
-			"sys-apps/a-2": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
-			"sys-apps/b-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
-			"sys-apps/b-2": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
-			"sys-apps/c-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : ""},
-			"sys-apps/c-2": { "DEPEND"  : "sys-libs/x", "RDEPEND" : ""},
-			"sys-apps/d-1": { "RDEPEND" : "sys-libs/x"},
-			"sys-apps/d-2": { "RDEPEND" : "sys-libs/x"},
-			"sys-apps/e-2": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
-			"sys-apps/f-2": { "DEPEND"  : "sys-apps/a", "RDEPEND" : "sys-apps/a"},
-			"sys-apps/g-2": { "DEPEND"  : "sys-apps/b sys-libs/x",
-				"RDEPEND" : "sys-apps/b"},
-			}
-
-		installed = {
-			"sys-libs/x-1": { },
-			"sys-apps/a-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
-			"sys-apps/b-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
-			"sys-apps/c-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : ""},
-			"sys-apps/d-1": { "RDEPEND" : "sys-libs/x"},
-			"sys-apps/e-1": { "DEPEND"  : "sys-libs/x", "RDEPEND" : "sys-libs/x"},
-			"sys-apps/f-1": { "DEPEND"  : "sys-apps/a", "RDEPEND" : "sys-apps/a"},
-			"sys-apps/g-1": { "DEPEND"  : "sys-apps/b sys-libs/x",
-				"RDEPEND" : "sys-apps/b"},
-			}
-
-		world = ["sys-apps/a", "sys-apps/b", "sys-apps/c", "sys-apps/d",
-			"sys-apps/e", "sys-apps/f", "sys-apps/g"]
-
-		test_cases = (
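-				# --rebuild-exclude keeps sys-apps/b (and therefore its
-				# reverse dependency sys-apps/g) out of the rebuild list.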
-				ResolverPlaygroundTestCase(
-					["sys-libs/x"],
-					options = {"--rebuild-if-unbuilt" : True,
-						"--rebuild-exclude" : ["sys-apps/b"]},
-					mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/e-2'],
-					ignore_mergelist_order = True,
-					success = True),
-
-				ResolverPlaygroundTestCase(
-					["sys-libs/x"],
-					options = {"--rebuild-if-unbuilt" : True},
-					mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/b-2',
-						'sys-apps/e-2', 'sys-apps/g-2'],
-					ignore_mergelist_order = True,
-					success = True),
-
-				ResolverPlaygroundTestCase(
-					["sys-libs/x"],
-					options = {"--rebuild-if-unbuilt" : True,
-						"--rebuild-ignore" : ["sys-libs/x"]},
-					mergelist = ['sys-libs/x-2'],
-					ignore_mergelist_order = True,
-					success = True),
-
-				ResolverPlaygroundTestCase(
-					["sys-libs/x"],
-					options = {"--rebuild-if-unbuilt" : True,
-						"--rebuild-ignore" : ["sys-apps/b"]},
-					mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/b-2',
-						'sys-apps/e-2'],
-					ignore_mergelist_order = True,
-					success = True),
-
-				ResolverPlaygroundTestCase(
-					["=sys-libs/x-1-r1"],
-					options = {"--rebuild-if-unbuilt" : True},
-					mergelist = ['sys-libs/x-1-r1', 'sys-apps/a-2',
-						'sys-apps/b-2', 'sys-apps/e-2', 'sys-apps/g-2'],
-					ignore_mergelist_order = True,
-					success = True),
-
-				ResolverPlaygroundTestCase(
-					["=sys-libs/x-1-r1"],
-					options = {"--rebuild-if-new-rev" : True},
-					mergelist = ['sys-libs/x-1-r1', 'sys-apps/a-2',
-						'sys-apps/b-2', 'sys-apps/e-2', 'sys-apps/g-2'],
-					ignore_mergelist_order = True,
-					success = True),
-
-				ResolverPlaygroundTestCase(
-					["=sys-libs/x-1-r1"],
-					options = {"--rebuild-if-new-ver" : True},
-					mergelist = ['sys-libs/x-1-r1'],
-					ignore_mergelist_order = True,
-					success = True),
-
-				ResolverPlaygroundTestCase(
-					["sys-libs/x"],
-					options = {"--rebuild-if-new-ver" : True},
-					mergelist = ['sys-libs/x-2', 'sys-apps/a-2',
-						'sys-apps/b-2', 'sys-apps/e-2', 'sys-apps/g-2'],
-					ignore_mergelist_order = True,
-					success = True),
-
-				ResolverPlaygroundTestCase(
-					["=sys-libs/x-1"],
-					options = {"--rebuild-if-new-rev" : True},
-					mergelist = ['sys-libs/x-1'],
-					ignore_mergelist_order = True,
-					success = True),
-
-				ResolverPlaygroundTestCase(
-					["=sys-libs/x-1"],
-					options = {"--rebuild-if-unbuilt" : True},
-					mergelist = ['sys-libs/x-1', 'sys-apps/a-2',
-						'sys-apps/b-2', 'sys-apps/e-2', 'sys-apps/g-2'],
-					ignore_mergelist_order = True,
-					success = True),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds,
-			installed=installed, world=world)
-
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_required_use.py b/portage_with_autodep/pym/portage/tests/resolver/test_required_use.py
deleted file mode 100644
index c8810fa..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_required_use.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class RequiredUSETestCase(TestCase):
-
-	def testRequiredUSE(self):
-		"""
-		Only simple REQUIRED_USE values here. The parser itself is tested in dep/testCheckRequiredUse
-		"""
-
-		ebuilds = {
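-			# A "+" prefix in IUSE enables that flag by default, which is
-			# what makes some of these REQUIRED_USE constraints satisfiable.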
-			"dev-libs/A-1" : {"EAPI": "4", "IUSE": "foo bar",   "REQUIRED_USE": "|| ( foo bar )"},
-			"dev-libs/A-2" : {"EAPI": "4", "IUSE": "foo +bar",  "REQUIRED_USE": "|| ( foo bar )"},
-			"dev-libs/A-3" : {"EAPI": "4", "IUSE": "+foo bar",  "REQUIRED_USE": "|| ( foo bar )"},
-			"dev-libs/A-4" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "|| ( foo bar )"},
-			"dev-libs/A-5" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "|| ( )"},
-
-			"dev-libs/B-1" : {"EAPI": "4", "IUSE": "foo bar",   "REQUIRED_USE": "^^ ( foo bar )"},
-			"dev-libs/B-2" : {"EAPI": "4", "IUSE": "foo +bar",  "REQUIRED_USE": "^^ ( foo bar )"},
-			"dev-libs/B-3" : {"EAPI": "4", "IUSE": "+foo bar",  "REQUIRED_USE": "^^ ( foo bar )"},
-			"dev-libs/B-4" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( foo bar )"},
-			"dev-libs/B-5" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( )"},
-
-			"dev-libs/C-1" : {"EAPI": "4", "IUSE": "+foo bar",  "REQUIRED_USE": "foo? ( !bar )"},
-			"dev-libs/C-2" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "foo? ( !bar )"},
-			"dev-libs/C-3" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "foo? ( bar )"},
-			"dev-libs/C-4" : {"EAPI": "4", "IUSE": "+foo bar",  "REQUIRED_USE": "foo? ( bar )"},
-			"dev-libs/C-5" : {"EAPI": "4", "IUSE": "foo bar",   "REQUIRED_USE": "foo? ( bar )"},
-			"dev-libs/C-6" : {"EAPI": "4", "IUSE": "foo +bar",  "REQUIRED_USE": "foo? ( bar )"},
-			"dev-libs/C-7" : {"EAPI": "4", "IUSE": "foo +bar",  "REQUIRED_USE": "!foo? ( bar )"},
-			"dev-libs/C-8" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "!foo? ( bar )"},
-			"dev-libs/C-9" : {"EAPI": "4", "IUSE": "+foo bar",  "REQUIRED_USE": "!foo? ( bar )"},
-			"dev-libs/C-10": {"EAPI": "4", "IUSE": "foo bar",   "REQUIRED_USE": "!foo? ( bar )"},
-			"dev-libs/C-11": {"EAPI": "4", "IUSE": "foo bar",   "REQUIRED_USE": "!foo? ( !bar )"},
-			"dev-libs/C-12": {"EAPI": "4", "IUSE": "foo +bar",  "REQUIRED_USE": "!foo? ( !bar )"},
-			"dev-libs/C-13": {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "!foo? ( !bar )"},
-			"dev-libs/C-14": {"EAPI": "4", "IUSE": "+foo bar",  "REQUIRED_USE": "!foo? ( !bar )"},
-
-			"dev-libs/D-1" : {"EAPI": "4", "IUSE": "+w +x +y z",    "REQUIRED_USE": "w? ( x || ( y z ) )"},
-			"dev-libs/D-2" : {"EAPI": "4", "IUSE": "+w +x +y +z",   "REQUIRED_USE": "w? ( x || ( y z ) )"},
-			"dev-libs/D-3" : {"EAPI": "4", "IUSE": "+w +x y z",     "REQUIRED_USE": "w? ( x || ( y z ) )"},
-			"dev-libs/D-4" : {"EAPI": "4", "IUSE": "+w x +y +z",    "REQUIRED_USE": "w? ( x || ( y z ) )"},
-			"dev-libs/D-5" : {"EAPI": "4", "IUSE": "w x y z",       "REQUIRED_USE": "w? ( x || ( y z ) )"},
-			}
-
-		test_cases = (
-			ResolverPlaygroundTestCase(["=dev-libs/A-1"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/A-2"], success = True, mergelist=["dev-libs/A-2"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-3"], success = True, mergelist=["dev-libs/A-3"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-4"], success = True, mergelist=["dev-libs/A-4"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-5"], success = True, mergelist=["dev-libs/A-5"]),
-
-			ResolverPlaygroundTestCase(["=dev-libs/B-1"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/B-2"], success = True, mergelist=["dev-libs/B-2"]),
-			ResolverPlaygroundTestCase(["=dev-libs/B-3"], success = True, mergelist=["dev-libs/B-3"]),
-			ResolverPlaygroundTestCase(["=dev-libs/B-4"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/B-5"], success = True, mergelist=["dev-libs/B-5"]),
-
-			ResolverPlaygroundTestCase(["=dev-libs/C-1"],  success = True, mergelist=["dev-libs/C-1"]),
-			ResolverPlaygroundTestCase(["=dev-libs/C-2"],  success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/C-3"],  success = True, mergelist=["dev-libs/C-3"]),
-			ResolverPlaygroundTestCase(["=dev-libs/C-4"],  success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/C-5"],  success = True, mergelist=["dev-libs/C-5"]),
-			ResolverPlaygroundTestCase(["=dev-libs/C-6"],  success = True, mergelist=["dev-libs/C-6"]),
-			ResolverPlaygroundTestCase(["=dev-libs/C-7"],  success = True, mergelist=["dev-libs/C-7"]),
-			ResolverPlaygroundTestCase(["=dev-libs/C-8"],  success = True, mergelist=["dev-libs/C-8"]),
-			ResolverPlaygroundTestCase(["=dev-libs/C-9"],  success = True, mergelist=["dev-libs/C-9"]),
-			ResolverPlaygroundTestCase(["=dev-libs/C-10"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/C-11"], success = True, mergelist=["dev-libs/C-11"]),
-			ResolverPlaygroundTestCase(["=dev-libs/C-12"], success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/C-13"], success = True, mergelist=["dev-libs/C-13"]),
-			ResolverPlaygroundTestCase(["=dev-libs/C-14"], success = True, mergelist=["dev-libs/C-14"]),
-
-			ResolverPlaygroundTestCase(["=dev-libs/D-1"],  success = True, mergelist=["dev-libs/D-1"]),
-			ResolverPlaygroundTestCase(["=dev-libs/D-2"],  success = True, mergelist=["dev-libs/D-2"]),
-			ResolverPlaygroundTestCase(["=dev-libs/D-3"],  success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/D-4"],  success = False),
-			ResolverPlaygroundTestCase(["=dev-libs/D-5"],  success = True, mergelist=["dev-libs/D-5"]),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()
-
-	def testRequiredUseOrDeps(self):
-		
-		ebuilds = {
-			"dev-libs/A-1": { "IUSE": "+x +y", "REQUIRED_USE": "^^ ( x y )", "EAPI": "4" },
-			"dev-libs/B-1": { "IUSE": "+x +y", "REQUIRED_USE": "",           "EAPI": "4" },
-			"app-misc/p-1": { "RDEPEND": "|| ( =dev-libs/A-1 =dev-libs/B-1 )" },
-			}
-
-		test_cases = (
-				# This should fail and show a REQUIRED_USE error for
-				# dev-libs/A-1, since this choice is preferred.
-				ResolverPlaygroundTestCase(
-					["=app-misc/p-1"],
-					success = False),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_simple.py b/portage_with_autodep/pym/portage/tests/resolver/test_simple.py
deleted file mode 100644
index 0bcfc4b..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_simple.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class SimpleResolverTestCase(TestCase):
-
-	def testSimple(self):
-		ebuilds = {
-			"dev-libs/A-1": { "KEYWORDS": "x86" }, 
-			"dev-libs/A-2": { "KEYWORDS": "~x86" },
-			"dev-libs/B-1.2": {},
-
-			"app-misc/Z-1": { "DEPEND": "|| ( app-misc/Y ( app-misc/X app-misc/W ) )", "RDEPEND": "" },
-			"app-misc/Y-1": { "KEYWORDS": "~x86" },
-			"app-misc/X-1": {},
-			"app-misc/W-1": {},
-			}
-		installed = {
-			"dev-libs/A-1": {},
-			"dev-libs/B-1.1": {},
-			}
-
-		test_cases = (
-			ResolverPlaygroundTestCase(["dev-libs/A"], success = True, mergelist = ["dev-libs/A-1"]),
-			ResolverPlaygroundTestCase(["=dev-libs/A-2"], options = { "--autounmask": 'n' }, success = False),
-
-			ResolverPlaygroundTestCase(
-				["dev-libs/A"],
-				options = {"--noreplace": True},
-				success = True,
-				mergelist = []),
-			ResolverPlaygroundTestCase(
-				["dev-libs/B"],
-				options = {"--noreplace": True},
-				success = True,
-				mergelist = []),
-			ResolverPlaygroundTestCase(
-				["dev-libs/B"],
-				options = {"--update": True},
-				success = True,
-				mergelist = ["dev-libs/B-1.2"]),
-
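-			# app-misc/Y is ~x86 and thus unavailable here, so the || dep of
-			# app-misc/Z falls back to the ( app-misc/X app-misc/W ) group.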
-			ResolverPlaygroundTestCase(
-				["app-misc/Z"],
-				success = True,
-				mergelist = ["app-misc/W-1", "app-misc/X-1", "app-misc/Z-1"]),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_slot_collisions.py b/portage_with_autodep/pym/portage/tests/resolver/test_slot_collisions.py
deleted file mode 100644
index 4867cea..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_slot_collisions.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class SlotCollisionTestCase(TestCase):
-
-	def testSlotCollision(self):
-
-		ebuilds = {
-			"dev-libs/A-1": { "PDEPEND": "foo? ( dev-libs/B )", "IUSE": "foo" }, 
-			"dev-libs/B-1": { "IUSE": "foo" },
-			"dev-libs/C-1": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
-			"dev-libs/D-1": { "DEPEND": "dev-libs/A[foo=] dev-libs/B[foo=]", "IUSE": "foo", "EAPI": 2 },
-			"dev-libs/E-1": {  },
-			"dev-libs/E-2": { "IUSE": "foo" },
-
-			"app-misc/Z-1": { },
-			"app-misc/Z-2": { },
-			"app-misc/Y-1": { "DEPEND": "=app-misc/Z-1" },
-			"app-misc/Y-2": { "DEPEND": ">app-misc/Z-1" },
-			"app-misc/X-1": { "DEPEND": "=app-misc/Z-2" },
-			"app-misc/X-2": { "DEPEND": "<app-misc/Z-2" },
-
-			"sci-libs/K-1": { "IUSE": "+foo", "EAPI": 1 },
-			"sci-libs/L-1": { "DEPEND": "sci-libs/K[-foo]", "EAPI": 2 },
-			"sci-libs/M-1": { "DEPEND": "sci-libs/K[foo=]", "IUSE": "+foo", "EAPI": 2 },
-
-			"sci-libs/Q-1": { "SLOT": "1", "IUSE": "+bar foo", "EAPI": 1 },
-			"sci-libs/Q-2": { "SLOT": "2", "IUSE": "+bar +foo", "EAPI": 2, "PDEPEND": "sci-libs/Q:1[bar?,foo?]" },
-			"sci-libs/P-1": { "DEPEND": "sci-libs/Q:1[foo=]", "IUSE": "foo", "EAPI": 2 },
-
-			"sys-libs/A-1": { "RDEPEND": "foo? ( sys-libs/J[foo=] )", "IUSE": "+foo", "EAPI": "4" },
-			"sys-libs/B-1": { "RDEPEND": "bar? ( sys-libs/J[bar=] )", "IUSE": "+bar", "EAPI": "4" },
-			"sys-libs/C-1": { "RDEPEND": "sys-libs/J[bar]", "EAPI": "4" },
-			"sys-libs/D-1": { "RDEPEND": "sys-libs/J[bar?]", "IUSE": "bar", "EAPI": "4" },
-			"sys-libs/E-1": { "RDEPEND": "sys-libs/J[foo(+)?]", "IUSE": "+foo", "EAPI": "4" },
-			"sys-libs/F-1": { "RDEPEND": "sys-libs/J[foo(+)]", "EAPI": "4" },
-			"sys-libs/J-1": { "IUSE": "+foo", "EAPI": "4" },
-			"sys-libs/J-2": { "IUSE": "+bar", "EAPI": "4" },
-
-			"app-misc/A-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
-			"app-misc/B-1": { "DEPEND": "=app-misc/A-1[foo=]", "IUSE": "foo", "EAPI": 2 },
-			"app-misc/C-1": { "DEPEND": "=app-misc/A-1[foo]", "EAPI": 2 },
-			"app-misc/E-1": { "RDEPEND": "dev-libs/E[foo?]", "IUSE": "foo", "EAPI": "2" },
-			"app-misc/F-1": { "RDEPEND": "=dev-libs/E-1", "IUSE": "foo", "EAPI": "2" },
-			}
-		installed = {
-			"dev-libs/A-1": { "PDEPEND": "foo? ( dev-libs/B )", "IUSE": "foo", "USE": "foo" }, 
-			"dev-libs/B-1": { "IUSE": "foo", "USE": "foo" },
-			"dev-libs/C-1": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
-			"dev-libs/D-1": { "DEPEND": "dev-libs/A[foo=] dev-libs/B[foo=]", "IUSE": "foo", "USE": "foo", "EAPI": 2 },
-			
-			"sci-libs/K-1": { "IUSE": "foo", "USE": "" },
-			"sci-libs/L-1": { "DEPEND": "sci-libs/K[-foo]" },
-
-			"sci-libs/Q-1": { "SLOT": "1", "IUSE": "+bar +foo", "USE": "bar foo", "EAPI": 1 },
-			"sci-libs/Q-2": { "SLOT": "2", "IUSE": "+bar +foo", "USE": "bar foo", "EAPI": 2, "PDEPEND": "sci-libs/Q:1[bar?,foo?]" },
-
-			"app-misc/A-1": { "IUSE": "+foo bar", "USE": "foo", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
-			}
-
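-		# slot_collision_solutions lists the USE changes that would resolve
-		# each conflict; an empty list means no flag change can help.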
-		test_cases = (
-			#A qt-*[qt3support]-like mess.
-			ResolverPlaygroundTestCase(
-				["dev-libs/A", "dev-libs/B", "dev-libs/C", "dev-libs/D"],
-				options = { "--autounmask": 'n' },
-				success = False,
-				mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
-				ignore_mergelist_order = True,
-				slot_collision_solutions = [ {"dev-libs/A-1": {"foo": True}, "dev-libs/D-1": {"foo": True}} ]),
-
-			ResolverPlaygroundTestCase(
-				["sys-libs/A", "sys-libs/B", "sys-libs/C", "sys-libs/D", "sys-libs/E", "sys-libs/F"],
-				options = { "--autounmask": 'n' },
-				success = False,
-				ignore_mergelist_order = True,
-				slot_collision_solutions = [],
-				mergelist = ['sys-libs/J-2', 'sys-libs/J-1', 'sys-libs/A-1', 'sys-libs/B-1', 'sys-libs/C-1', 'sys-libs/D-1', 'sys-libs/E-1', 'sys-libs/F-1'],
-				),
-
-			#A version-based conflict; nothing we can do.
-			ResolverPlaygroundTestCase(
-				["=app-misc/X-1", "=app-misc/Y-1"],
-				success = False,
-				mergelist = ["app-misc/Z-1", "app-misc/Z-2", "app-misc/X-1", "app-misc/Y-1"],
-				ignore_mergelist_order = True,
-				slot_collision_solutions = []
-				),
-			ResolverPlaygroundTestCase(
-				["=app-misc/X-2", "=app-misc/Y-2"],
-				success = False,
-				mergelist = ["app-misc/Z-1", "app-misc/Z-2", "app-misc/X-2", "app-misc/Y-2"],
-				ignore_mergelist_order = True,
-				slot_collision_solutions = []
-				),
-
-			ResolverPlaygroundTestCase(
-				["=app-misc/E-1", "=app-misc/F-1"],
-				success = False,
-				mergelist = ["dev-libs/E-1", "dev-libs/E-2", "app-misc/E-1", "app-misc/F-1"],
-				ignore_mergelist_order = True,
-				slot_collision_solutions = []
-				),
-
-			#Simple cases.
-			ResolverPlaygroundTestCase(
-				["sci-libs/L", "sci-libs/M"],
-				success = False,
-				mergelist = ["sci-libs/L-1", "sci-libs/M-1", "sci-libs/K-1"],
-				ignore_mergelist_order = True,
-				slot_collision_solutions = [{"sci-libs/K-1": {"foo": False}, "sci-libs/M-1": {"foo": False}}]
-				),
-
-			#Avoid duplicates.
-			ResolverPlaygroundTestCase(
-				["sci-libs/P", "sci-libs/Q:2"],
-				success = False,
-				options = { "--update": True, "--complete-graph": True, "--autounmask": 'n' },
-				mergelist = ["sci-libs/P-1", "sci-libs/Q-1"],
-				ignore_mergelist_order = True,
-				all_permutations=True,
-				slot_collision_solutions = [{"sci-libs/Q-1": {"foo": True}, "sci-libs/P-1": {"foo": True}}]
-				),
-
-			#Conflict with REQUIRED_USE
-			ResolverPlaygroundTestCase(
-				["=app-misc/C-1", "=app-misc/B-1"],
-				all_permutations = True,
-				slot_collision_solutions = [],
-				mergelist = ["app-misc/A-1", "app-misc/C-1", "app-misc/B-1"],
-				ignore_mergelist_order = True,
-				success = False),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/resolver/test_use_dep_defaults.py b/portage_with_autodep/pym/portage/tests/resolver/test_use_dep_defaults.py
deleted file mode 100644
index 7d17106..0000000
--- a/portage_with_autodep/pym/portage/tests/resolver/test_use_dep_defaults.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
-
-class UseDepDefaultsTestCase(TestCase):
-
-	def testUseDepDefaults(self):
-
-		ebuilds = {
-			"dev-libs/A-1": { "DEPEND": "dev-libs/B[foo]", "RDEPEND": "dev-libs/B[foo]", "EAPI": "2" },
-			"dev-libs/A-2": { "DEPEND": "dev-libs/B[foo(+)]", "RDEPEND": "dev-libs/B[foo(+)]", "EAPI": "4" },
-			"dev-libs/A-3": { "DEPEND": "dev-libs/B[foo(-)]", "RDEPEND": "dev-libs/B[foo(-)]", "EAPI": "4" },
-			"dev-libs/B-1": { "IUSE": "+foo", "EAPI": "1" },
-			"dev-libs/B-2": {},
-			}
-
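-		# The (+)/(-) use-dep defaults tell the resolver how to treat a flag
-		# that is missing from IUSE, so A-2 can use B-2 while A-3 falls back
-		# to B-1.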
-		test_cases = (
-			ResolverPlaygroundTestCase(
-				["=dev-libs/A-1"],
-				success = True,
-				mergelist = ["dev-libs/B-1", "dev-libs/A-1"]),
-			ResolverPlaygroundTestCase(
-				["=dev-libs/A-2"],
-				success = True,
-				mergelist = ["dev-libs/B-2", "dev-libs/A-2"]),
-			ResolverPlaygroundTestCase(
-				["=dev-libs/A-3"],
-				success = True,
-				mergelist = ["dev-libs/B-1", "dev-libs/A-3"]),
-			)
-
-		playground = ResolverPlayground(ebuilds=ebuilds)
-		try:
-			for test_case in test_cases:
-				playground.run_TestCase(test_case)
-				self.assertEqual(test_case.test_success, True, test_case.fail_msg)
-		finally:
-			playground.cleanup()

diff --git a/portage_with_autodep/pym/portage/tests/runTests b/portage_with_autodep/pym/portage/tests/runTests
index 6b3311d..4c10087 100755
--- a/portage_with_autodep/pym/portage/tests/runTests
+++ b/portage_with_autodep/pym/portage/tests/runTests
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python -Wd
 # runTests.py -- Portage Unit Test Functionality
 # Copyright 2006 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
@@ -41,6 +41,4 @@ del path
 
 
 if __name__ == "__main__":
-	result = tests.main()
-	if not result.wasSuccessful():
-		sys.exit(1)
+	sys.exit(tests.main())

diff --git a/portage_with_autodep/pym/portage/tests/sets/__init__.py b/portage_with_autodep/pym/portage/tests/sets/__init__.py
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/sets/base/__init__.py b/portage_with_autodep/pym/portage/tests/sets/base/__init__.py
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/sets/base/__test__ b/portage_with_autodep/pym/portage/tests/sets/base/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/sets/base/testInternalPackageSet.py b/portage_with_autodep/pym/portage/tests/sets/base/testInternalPackageSet.py
deleted file mode 100644
index e0a3478..0000000
--- a/portage_with_autodep/pym/portage/tests/sets/base/testInternalPackageSet.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# testInternalPackageSet.py -- Portage Unit Testing Functionality
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.dep import Atom
-from portage.exception import InvalidAtom
-from portage.tests import TestCase
-from portage._sets.base import InternalPackageSet
-
-class InternalPackageSetTestCase(TestCase):
-	"""Simple Test Case for InternalPackageSet"""
-
-	def testInternalPackageSet(self):
-		i1_atoms = set(("dev-libs/A", ">=dev-libs/A-1", "dev-libs/B"))
-		i2_atoms = set(("dev-libs/A", "dev-libs/*", "dev-libs/C"))
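-		# i2_atoms contains the wildcard "dev-libs/*", which is only valid
-		# when allow_wildcard=True is passed.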
-
-		i1 = InternalPackageSet(initial_atoms=i1_atoms)
-		i2 = InternalPackageSet(initial_atoms=i2_atoms, allow_wildcard=True)
-		self.assertRaises(InvalidAtom, InternalPackageSet, initial_atoms=i2_atoms)
-
-		self.assertEqual(i1.getAtoms(), i1_atoms)
-		self.assertEqual(i2.getAtoms(), i2_atoms)
-
-		new_atom = Atom("*/*", allow_wildcard=True)
-		self.assertRaises(InvalidAtom, i1.add, new_atom)
-		i2.add(new_atom)
-
-		i2_atoms.add(new_atom)
-
-		self.assertEqual(i1.getAtoms(), i1_atoms)
-		self.assertEqual(i2.getAtoms(), i2_atoms)
-
-		removed_atom = Atom("dev-libs/A")
-
-		i1.remove(removed_atom)
-		i2.remove(removed_atom)
-
-		i1_atoms.remove(removed_atom)
-		i2_atoms.remove(removed_atom)
-
-		self.assertEqual(i1.getAtoms(), i1_atoms)
-		self.assertEqual(i2.getAtoms(), i2_atoms)
-
-		update_atoms = [Atom("dev-libs/C"), Atom("dev-*/C", allow_wildcard=True)]
-
-		self.assertRaises(InvalidAtom, i1.update, update_atoms)
-		i2.update(update_atoms)
-
-		i2_atoms.update(update_atoms)
-
-		self.assertEqual(i1.getAtoms(), i1_atoms)
-		self.assertEqual(i2.getAtoms(), i2_atoms)
-
-		replace_atoms = [Atom("dev-libs/D"), Atom("*-libs/C", allow_wildcard=True)]
-
-		self.assertRaises(InvalidAtom, i1.replace, replace_atoms)
-		i2.replace(replace_atoms)
-
-		i2_atoms = set(replace_atoms)
-
-		self.assertEqual(i2.getAtoms(), i2_atoms)

diff --git a/portage_with_autodep/pym/portage/tests/sets/files/__init__.py b/portage_with_autodep/pym/portage/tests/sets/files/__init__.py
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/sets/files/__test__ b/portage_with_autodep/pym/portage/tests/sets/files/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/sets/files/testConfigFileSet.py b/portage_with_autodep/pym/portage/tests/sets/files/testConfigFileSet.py
deleted file mode 100644
index 3ec26a0..0000000
--- a/portage_with_autodep/pym/portage/tests/sets/files/testConfigFileSet.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# testConfigFileSet.py -- Portage Unit Testing Functionality
-# Copyright 2007 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import tempfile
-
-from portage import os
-from portage.tests import TestCase, test_cps
-from portage._sets.files import ConfigFileSet
-
-class ConfigFileSetTestCase(TestCase):
-	"""Simple Test Case for ConfigFileSet"""
-
-	def setUp(self):
-		fd, self.testfile = tempfile.mkstemp(suffix=".testdata", prefix=self.__class__.__name__, text=True)
-		f = os.fdopen(fd, 'w')
-		for i in range(0, len(test_cps)):
-			atom = test_cps[i]
-			if i % 2 == 0:
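-				# Extra tokens after the atom are allowed; only the leading
-				# atom ends up in getAtoms() below.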
-				f.write(atom + ' abc def\n')
-			else:
-				f.write(atom + '\n')
-		f.close()
-
-	def tearDown(self):
-		os.unlink(self.testfile)
-
-	def testConfigStaticFileSet(self):
-		s = ConfigFileSet(self.testfile)
-		s.load()
-		self.assertEqual(set(test_cps), s.getAtoms())
-

diff --git a/portage_with_autodep/pym/portage/tests/sets/files/testStaticFileSet.py b/portage_with_autodep/pym/portage/tests/sets/files/testStaticFileSet.py
deleted file mode 100644
index d515a67..0000000
--- a/portage_with_autodep/pym/portage/tests/sets/files/testStaticFileSet.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# testStaticFileSet.py -- Portage Unit Testing Functionality
-# Copyright 2007 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import tempfile
-
-from portage import os
-from portage.tests import TestCase, test_cps
-from portage._sets.files import StaticFileSet
-
-class StaticFileSetTestCase(TestCase):
-	"""Simple Test Case for StaticFileSet"""
-
-	def setUp(self):
-		fd, self.testfile = tempfile.mkstemp(suffix=".testdata", prefix=self.__class__.__name__, text=True)
-		f = os.fdopen(fd, 'w')
-		f.write("\n".join(test_cps))
-		f.close()
-
-	def tearDown(self):
-		os.unlink(self.testfile)
-
-	def testSampleStaticFileSet(self):
-		s = StaticFileSet(self.testfile)
-		s.load()
-		self.assertEqual(set(test_cps), s.getAtoms())
-

diff --git a/portage_with_autodep/pym/portage/tests/sets/shell/__init__.py b/portage_with_autodep/pym/portage/tests/sets/shell/__init__.py
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/sets/shell/__test__ b/portage_with_autodep/pym/portage/tests/sets/shell/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/sets/shell/testShell.py b/portage_with_autodep/pym/portage/tests/sets/shell/testShell.py
deleted file mode 100644
index 2cdd833..0000000
--- a/portage_with_autodep/pym/portage/tests/sets/shell/testShell.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# testShell.py -- Portage Unit Testing Functionality
-# Copyright 2007 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.process import find_binary
-from portage.tests import TestCase, test_cps
-from portage._sets.shell import CommandOutputSet
-
-class CommandOutputSetTestCase(TestCase):
-	"""Simple Test Case for CommandOutputSet"""
-
-	def setUp(self):
-		pass
-
-	def tearDown(self):
-		pass
-
-	def testCommand(self):
-
-		input_atoms = set(test_cps)
-		command = find_binary("bash")
-		command += " -c '"
-		for a in input_atoms:
-			command += " echo -e \"%s\" ; " % a
-		command += "'"
-		s = CommandOutputSet(command)
-		atoms = s.getAtoms()
-		self.assertEqual(atoms, input_atoms)

diff --git a/portage_with_autodep/pym/portage/tests/unicode/__test__ b/portage_with_autodep/pym/portage/tests/unicode/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/unicode/test_string_format.py b/portage_with_autodep/pym/portage/tests/unicode/test_string_format.py
deleted file mode 100644
index fb6e8e0..0000000
--- a/portage_with_autodep/pym/portage/tests/unicode/test_string_format.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import sys
-
-from portage import _encodings, _unicode_decode
-from portage.exception import PortageException
-from portage.tests import TestCase
-from _emerge.DependencyArg import DependencyArg
-from _emerge.UseFlagDisplay import UseFlagDisplay
-
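-# Python 3 has no basestring builtin; alias it so the isinstance checks
-# below work on both interpreter versions.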
-if sys.hexversion >= 0x3000000:
-	basestring = str
-
-STR_IS_UNICODE = sys.hexversion >= 0x3000000
-
-class StringFormatTestCase(TestCase):
-	"""
-	Test that string formatting works correctly in the current interpreter,
-	which may be either python2 or python3.
-	"""
-
-	# In order to get some unicode test strings in a way that works in
-	# both python2 and python3, write them here as byte strings and
-	# decode them before use. This assumes _encodings['content'] is
-	# utf_8.
-
-	unicode_strings = (
-		b'\xE2\x80\x98',
-		b'\xE2\x80\x99',
-	)
-
-	def testDependencyArg(self):
-
-		self.assertEqual(_encodings['content'], 'utf_8')
-
-		for arg_bytes in self.unicode_strings:
-			arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
-			dependency_arg = DependencyArg(arg=arg_unicode)
-
-			# Force unicode format string so that __unicode__() is
-			# called in python2.
-			formatted_str = _unicode_decode("%s") % (dependency_arg,)
-			self.assertEqual(formatted_str, arg_unicode)
-
-			if STR_IS_UNICODE:
-
-				# Test the __str__ method which returns unicode in python3
-				formatted_str = "%s" % (dependency_arg,)
-				self.assertEqual(formatted_str, arg_unicode)
-
-			else:
-
-				# Test the __str__ method which returns encoded bytes in python2
-				formatted_bytes = "%s" % (dependency_arg,)
-				self.assertEqual(formatted_bytes, arg_bytes)
-
-	def testPortageException(self):
-
-		self.assertEqual(_encodings['content'], 'utf_8')
-
-		for arg_bytes in self.unicode_strings:
-			arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
-			e = PortageException(arg_unicode)
-
-			# Force unicode format string so that __unicode__() is
-			# called in python2.
-			formatted_str = _unicode_decode("%s") % (e,)
-			self.assertEqual(formatted_str, arg_unicode)
-
-			if STR_IS_UNICODE:
-
-				# Test the __str__ method which returns unicode in python3
-				formatted_str = "%s" % (e,)
-				self.assertEqual(formatted_str, arg_unicode)
-
-			else:
-
-				# Test the __str__ method which returns encoded bytes in python2
-				formatted_bytes = "%s" % (e,)
-				self.assertEqual(formatted_bytes, arg_bytes)
-
-	def testUseFlagDisplay(self):
-
-		self.assertEqual(_encodings['content'], 'utf_8')
-
-		for enabled in (True, False):
-			for forced in (True, False):
-				for arg_bytes in self.unicode_strings:
-					arg_unicode = _unicode_decode(arg_bytes, encoding=_encodings['content'])
-					e = UseFlagDisplay(arg_unicode, enabled, forced)
-
-					# Force unicode format string so that __unicode__() is
-					# called in python2.
-					formatted_str = _unicode_decode("%s") % (e,)
-					self.assertEqual(isinstance(formatted_str, basestring), True)
-
-					if STR_IS_UNICODE:
-
-						# Test the __str__ method which returns unicode in python3
-						formatted_str = "%s" % (e,)
-						self.assertEqual(isinstance(formatted_str, str), True)
-
-					else:
-
-						# Test the __str__ method which returns encoded bytes in python2
-						formatted_bytes = "%s" % (e,)
-						self.assertEqual(isinstance(formatted_bytes, bytes), True)

diff --git a/portage_with_autodep/pym/portage/tests/util/__init__.py b/portage_with_autodep/pym/portage/tests/util/__init__.py
deleted file mode 100644
index 69ce189..0000000
--- a/portage_with_autodep/pym/portage/tests/util/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# tests/portage.util/__init__.py -- Portage Unit Test functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-

diff --git a/portage_with_autodep/pym/portage/tests/util/__test__ b/portage_with_autodep/pym/portage/tests/util/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/util/test_digraph.py b/portage_with_autodep/pym/portage/tests/util/test_digraph.py
deleted file mode 100644
index b65c0b1..0000000
--- a/portage_with_autodep/pym/portage/tests/util/test_digraph.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# Copyright 2010-2011 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.util.digraph import digraph
-#~ from portage.util import noiselimit
-import portage.util
-
-class DigraphTest(TestCase):
-
-	def testBackwardCompatibility(self):
-		g = digraph()
-		f = g.copy()
-		g.addnode("A", None)
-		self.assertEqual("A" in g, True)
-		self.assertEqual(bool(g), True)
-		self.assertEqual(g.allnodes(), ["A"])
-		self.assertEqual(g.allzeros(), ["A"])
-		self.assertEqual(g.hasnode("A"), True)
-
-	def testDigraphEmptyGraph(self):
-		g = digraph()
-		f = g.clone()
-		for x in g, f:
-			self.assertEqual(bool(x), False)
-			self.assertEqual(x.contains("A"), False)
-			self.assertEqual(x.firstzero(), None)
-			self.assertRaises(KeyError, x.remove, "A")
-			x.delnode("A")
-			self.assertEqual(list(x), [])
-			self.assertEqual(x.get("A"), None)
-			self.assertEqual(x.get("A", "default"), "default")
-			self.assertEqual(x.all_nodes(), [])
-			self.assertEqual(x.leaf_nodes(), [])
-			self.assertEqual(x.root_nodes(), [])
-			self.assertRaises(KeyError, x.child_nodes, "A")
-			self.assertRaises(KeyError, x.parent_nodes, "A")
-			self.assertEqual(x.hasallzeros(), True)
-			self.assertRaises(KeyError, list, x.bfs("A"))
-			self.assertRaises(KeyError, x.shortest_path, "A", "B")
-			self.assertRaises(KeyError, x.remove_edge, "A", "B")
-			self.assertEqual(x.get_cycles(), [])
-			x.difference_update("A")
-			portage.util.noiselimit = -2
-			x.debug_print()
-			portage.util.noiselimit = 0
-
-	def testDigraphCircle(self):
-		g = digraph()
-		g.add("A", "B", -1)
-		g.add("B", "C", 0)
-		g.add("C", "D", 1)
-		g.add("D", "A", 2)
-
-		f = g.clone()
-		for x in g, f:
-			self.assertEqual(bool(x), True)
-			self.assertEqual(x.contains("A"), True)
-			self.assertEqual(x.firstzero(), None)
-			self.assertRaises(KeyError, x.remove, "Z")
-			x.delnode("Z")
-			self.assertEqual(list(x), ["A", "B", "C", "D"])
-			self.assertEqual(x.get("A"), "A")
-			self.assertEqual(x.get("A", "default"), "A")
-			self.assertEqual(x.all_nodes(), ["A", "B", "C", "D"])
-			self.assertEqual(x.leaf_nodes(), [])
-			self.assertEqual(x.root_nodes(), [])
-			self.assertEqual(x.child_nodes("A"), ["D"])
-			self.assertEqual(x.child_nodes("A", ignore_priority=2), [])
-			self.assertEqual(x.parent_nodes("A"), ["B"])
-			self.assertEqual(x.parent_nodes("A", ignore_priority=-2), ["B"])
-			self.assertEqual(x.parent_nodes("A", ignore_priority=-1), [])
-			self.assertEqual(x.hasallzeros(), False)
-			self.assertEqual(list(x.bfs("A")), [(None, "A"), ("A", "D"), ("D", "C"), ("C", "B")])
-			self.assertEqual(x.shortest_path("A", "D"), ["A", "D"])
-			self.assertEqual(x.shortest_path("D", "A"), ["D", "C", "B", "A"])
-			self.assertEqual(x.shortest_path("A", "D", ignore_priority=2), None)
-			self.assertEqual(x.shortest_path("D", "A", ignore_priority=-2), ["D", "C", "B", "A"])
-			cycles = set(tuple(y) for y in x.get_cycles())
-			self.assertEqual(cycles, set([("D", "C", "B", "A"), ("C", "B", "A", "D"), ("B", "A", "D", "C"), \
-				("A", "D", "C", "B")]))
-			x.remove_edge("A", "B")
-			self.assertEqual(x.get_cycles(), [])
-			x.difference_update(["D"])
-			self.assertEqual(x.all_nodes(), ["A", "B", "C"])
-			portage.util.noiselimit = -2
-			x.debug_print()
-			portage.util.noiselimit = 0
-
-	def testDigraphTree(self):
-		g = digraph()
-		g.add("B", "A", -1)
-		g.add("C", "A", 0)
-		g.add("D", "C", 1)
-		g.add("E", "C", 2)
-
-		f = g.clone()
-		for x in g, f:
-			self.assertEqual(bool(x), True)
-			self.assertEqual(x.contains("A"), True)
-			self.assertEqual(x.firstzero(), "B")
-			self.assertRaises(KeyError, x.remove, "Z")
-			x.delnode("Z")
-			self.assertEqual(set(x), set(["A", "B", "C", "D", "E"]))
-			self.assertEqual(x.get("A"), "A")
-			self.assertEqual(x.get("A", "default"), "A")
-			self.assertEqual(set(x.all_nodes()), set(["A", "B", "C", "D", "E"]))
-			self.assertEqual(set(x.leaf_nodes()), set(["B", "D", "E"]))
-			self.assertEqual(set(x.leaf_nodes(ignore_priority=0)), set(["A", "B", "D", "E"]))
-			self.assertEqual(x.root_nodes(), ["A"])
-			self.assertEqual(set(x.root_nodes(ignore_priority=0)), set(["A", "B", "C"]))
-			self.assertEqual(set(x.child_nodes("A")), set(["B", "C"]))
-			self.assertEqual(x.child_nodes("A", ignore_priority=2), [])
-			self.assertEqual(x.parent_nodes("B"), ["A"])
-			self.assertEqual(x.parent_nodes("B", ignore_priority=-2), ["A"])
-			self.assertEqual(x.parent_nodes("B", ignore_priority=-1), [])
-			self.assertEqual(x.hasallzeros(), False)
-			self.assertEqual(list(x.bfs("A")), [(None, "A"), ("A", "C"), ("A", "B"), ("C", "E"), ("C", "D")])
-			self.assertEqual(x.shortest_path("A", "D"), ["A", "C", "D"])
-			self.assertEqual(x.shortest_path("D", "A"), None)
-			self.assertEqual(x.shortest_path("A", "D", ignore_priority=2), None)
-			cycles = set(tuple(y) for y in x.get_cycles())
-			self.assertEqual(cycles, set())
-			x.remove("D")
-			self.assertEqual(set(x.all_nodes()), set(["A", "B", "C", "E"]))
-			x.remove("C")
-			self.assertEqual(set(x.all_nodes()), set(["A", "B", "E"]))
-			portage.util.noiselimit = -2
-			x.debug_print()
-			portage.util.noiselimit = 0
-			self.assertRaises(KeyError, x.remove_edge, "A", "E")
-
-	def testDigraphCompleteGraph(self):
-		g = digraph()
-		g.add("A", "B", -1)
-		g.add("B", "A", 1)
-		g.add("A", "C", 1)
-		g.add("C", "A", -1)
-		g.add("C", "B", 1)
-		g.add("B", "C", 1)
-
-		f = g.clone()
-		for x in g, f:
-			self.assertEqual(bool(x), True)
-			self.assertEqual(x.contains("A"), True)
-			self.assertEqual(x.firstzero(), None)
-			self.assertRaises(KeyError, x.remove, "Z")
-			x.delnode("Z")
-			self.assertEqual(list(x), ["A", "B", "C"])
-			self.assertEqual(x.get("A"), "A")
-			self.assertEqual(x.get("A", "default"), "A")
-			self.assertEqual(x.all_nodes(), ["A", "B", "C"])
-			self.assertEqual(x.leaf_nodes(), [])
-			self.assertEqual(x.root_nodes(), [])
-			self.assertEqual(set(x.child_nodes("A")), set(["B", "C"]))
-			self.assertEqual(x.child_nodes("A", ignore_priority=0), ["B"])
-			self.assertEqual(set(x.parent_nodes("A")), set(["B", "C"]))
-			self.assertEqual(x.parent_nodes("A", ignore_priority=0), ["C"])
-			self.assertEqual(x.parent_nodes("A", ignore_priority=1), [])
-			self.assertEqual(x.hasallzeros(), False)
-			self.assertEqual(list(x.bfs("A")), [(None, "A"), ("A", "C"), ("A", "B")])
-			self.assertEqual(x.shortest_path("A", "C"), ["A", "C"])
-			self.assertEqual(x.shortest_path("C", "A"), ["C", "A"])
-			self.assertEqual(x.shortest_path("A", "C", ignore_priority=0), ["A", "B", "C"])
-			self.assertEqual(x.shortest_path("C", "A", ignore_priority=0), ["C", "A"])
-			cycles = set(tuple(y) for y in x.get_cycles())
-			self.assertEqual(cycles, set([("C", "A"), ("A", "B"), ("A", "C")]))
-			x.remove_edge("A", "B")
-			self.assertEqual(x.get_cycles(), [["C", "A"], ["A", "C"], ["C", "B"]])
-			x.difference_update(["C"])
-			self.assertEqual(x.all_nodes(), ["A", "B"])
-			portage.util.noiselimit = -2
-			x.debug_print()
-			portage.util.noiselimit = 0
-
-	def testDigraphIgnorePriority(self):
-
-		def always_true(dummy):
-			return True
-
-		def always_false(dummy):
-			return False
-
-		g = digraph()
-		g.add("A", "B")
-
-		self.assertEqual(g.parent_nodes("A"), ["B"])
-		self.assertEqual(g.parent_nodes("A", ignore_priority=always_false), ["B"])
-		self.assertEqual(g.parent_nodes("A", ignore_priority=always_true), [])
-
-		self.assertEqual(g.child_nodes("B"), ["A"])
-		self.assertEqual(g.child_nodes("B", ignore_priority=always_false), ["A"])
-		self.assertEqual(g.child_nodes("B", ignore_priority=always_true), [])
-
-		self.assertEqual(g.leaf_nodes(), ["A"])
-		self.assertEqual(g.leaf_nodes(ignore_priority=always_false), ["A"])
-		self.assertEqual(g.leaf_nodes(ignore_priority=always_true), ["A", "B"])
-
-		self.assertEqual(g.root_nodes(), ["B"])
-		self.assertEqual(g.root_nodes(ignore_priority=always_false), ["B"])
-		self.assertEqual(g.root_nodes(ignore_priority=always_true), ["A", "B"])

diff --git a/portage_with_autodep/pym/portage/tests/util/test_getconfig.py b/portage_with_autodep/pym/portage/tests/util/test_getconfig.py
deleted file mode 100644
index 22e0bfc..0000000
--- a/portage_with_autodep/pym/portage/tests/util/test_getconfig.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage import os
-from portage.const import PORTAGE_BASE_PATH
-from portage.tests import TestCase
-from portage.util import getconfig
-
-class GetConfigTestCase(TestCase):
-	"""
-	Test that getconfig() produces the same result as bash would when
-	sourcing the same input.
-	"""
-
-	_cases = {
-		'FETCHCOMMAND'             : '/usr/bin/wget -t 3 -T 60 --passive-ftp -O "${DISTDIR}/${FILE}" "${URI}"',
-		'FETCHCOMMAND_RSYNC'       : 'rsync -avP "${URI}" "${DISTDIR}/${FILE}"',
-		'FETCHCOMMAND_SFTP'        : 'bash -c "x=\\${2#sftp://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; exec sftp -P \\${port} \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" sftp "${DISTDIR}/${FILE}" "${URI}"',
-		'FETCHCOMMAND_SSH'         : 'bash -c "x=\\${2#ssh://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port=22 ; exec rsync --rsh=\\"ssh -p\\${port}\\" -avP \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" rsync "${DISTDIR}/${FILE}" "${URI}"',
-		'PORTAGE_ELOG_MAILSUBJECT' : '[portage] ebuild log for ${PACKAGE} on ${HOST}'
-	}
-
-	def testGetConfig(self):
-
-		make_globals_file = os.path.join(PORTAGE_BASE_PATH,
-			'cnf', 'make.globals')
-		d = getconfig(make_globals_file)
-		for k, v in self._cases.items():
-			self.assertEqual(d[k], v)

diff --git a/portage_with_autodep/pym/portage/tests/util/test_grabdict.py b/portage_with_autodep/pym/portage/tests/util/test_grabdict.py
deleted file mode 100644
index e62a75d..0000000
--- a/portage_with_autodep/pym/portage/tests/util/test_grabdict.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# test_grabDict.py -- Portage Unit Testing Functionality
-# Copyright 2006-2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-#from portage.util import grabdict
-
-class GrabDictTestCase(TestCase):
-	
-	def testGrabDictPass(self):
-		pass

diff --git a/portage_with_autodep/pym/portage/tests/util/test_normalizedPath.py b/portage_with_autodep/pym/portage/tests/util/test_normalizedPath.py
deleted file mode 100644
index f993886..0000000
--- a/portage_with_autodep/pym/portage/tests/util/test_normalizedPath.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# test_normalizePath.py -- Portage Unit Testing Functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-
-class NormalizePathTestCase(TestCase):
-	
-	def testNormalizePath(self):
-		
-		from portage.util import normalize_path
-		path = "///foo/bar/baz"
-		good = "/foo/bar/baz"
-		self.assertEqual(normalize_path(path), good)

diff --git a/portage_with_autodep/pym/portage/tests/util/test_stackDictList.py b/portage_with_autodep/pym/portage/tests/util/test_stackDictList.py
deleted file mode 100644
index 678001c..0000000
--- a/portage_with_autodep/pym/portage/tests/util/test_stackDictList.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# test_stackDictList.py -- Portage Unit Testing Functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-
-class StackDictListTestCase(TestCase):
-
-	def testStackDictList(self):
-		from portage.util import stack_dictlist
-		
-		tests = [ ({'a':'b'},{'x':'y'},False,{'a':['b'],'x':['y']}) ]
-		tests.append(( {'KEYWORDS':['alpha','x86']},{'KEYWORDS':['-*']},True,{} ))
-		tests.append(( {'KEYWORDS':['alpha','x86']},{'KEYWORDS':['-x86']},True,{'KEYWORDS':['alpha']} ))
-		for test in tests:
-			self.assertEqual(
-			stack_dictlist([test[0],test[1]],incremental=test[2]), test[3] )

diff --git a/portage_with_autodep/pym/portage/tests/util/test_stackDicts.py b/portage_with_autodep/pym/portage/tests/util/test_stackDicts.py
deleted file mode 100644
index 0d2cadd..0000000
--- a/portage_with_autodep/pym/portage/tests/util/test_stackDicts.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# test_stackDicts.py -- Portage Unit Testing Functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.util import stack_dicts
-
-
-class StackDictsTestCase(TestCase):
-	
-	def testStackDictsPass(self):
-		
-		tests = [ ( [ { "a":"b" }, { "b":"c" } ], { "a":"b", "b":"c" },
-			  False, [], False ),
-			  ( [ { "a":"b" }, { "a":"c" } ], { "a":"b c" },
-			  True, [], False ),
-			  ( [ { "a":"b" }, { "a":"c" } ], { "a":"b c" },
-			  False, ["a"], False ),
-			  ( [ { "a":"b" }, None ], { "a":"b" },
-			  False, [], True ),
-			  ( [ None ], {}, False, [], False ),
-			  ( [ None, {}], {}, False, [], True ) ]
-
-
-		for test in tests:
-			result = stack_dicts( test[0], test[2], test[3], test[4] )
-			self.assertEqual( result, test[1] )
-	
-	def testStackDictsFail(self):
-		
-		tests = [ ( [ None, {} ], None, False, [], True ),
-			  ( [ { "a":"b"}, {"a":"c" } ], { "a":"b c" },
-				False, [], False ) ]
-		for test in tests:
-			result = stack_dicts( test[0], test[2], test[3], test[4] )
-			self.assertNotEqual( result , test[1] )

diff --git a/portage_with_autodep/pym/portage/tests/util/test_stackLists.py b/portage_with_autodep/pym/portage/tests/util/test_stackLists.py
deleted file mode 100644
index 8d01ea5..0000000
--- a/portage_with_autodep/pym/portage/tests/util/test_stackLists.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# test_stackLists.py -- Portage Unit Testing Functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.util import stack_lists
-
-class StackListsTestCase(TestCase):
-	
-	def testStackLists(self):
-		
-		tests = [ ( [ ['a','b','c'], ['d','e','f'] ], ['a','c','b','e','d','f'], False ),
-			  ( [ ['a','x'], ['b','x'] ], ['a','x','b'], False ),
-			  ( [ ['a','b','c'], ['-*'] ], [], True ),
-			  ( [ ['a'], ['-a'] ], [], True ) ]
-
-		for test in tests:
-			result = stack_lists( test[0], test[2] )
-			self.assertEqual( result , test[1] )

diff --git a/portage_with_autodep/pym/portage/tests/util/test_uniqueArray.py b/portage_with_autodep/pym/portage/tests/util/test_uniqueArray.py
deleted file mode 100644
index 2a1a209..0000000
--- a/portage_with_autodep/pym/portage/tests/util/test_uniqueArray.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# test_uniqueArray.py -- Portage Unit Testing Functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage import os
-from portage.tests import TestCase
-from portage.util import unique_array
-
-class UniqueArrayTestCase(TestCase):
-	
-	def testUniqueArrayPass(self):
-		"""
-		test portage.util.uniqueArray()
-		"""
-
-		tests = [ ( ["a","a","a",os,os,[],[],[]], ['a',os,[]] ), 
-			  ( [1,1,1,2,3,4,4] , [1,2,3,4]) ]
-
-		for test in tests:
-			result = unique_array( test[0] )
-			for item in test[1]:
-				number = result.count(item)
-				self.assertFalse( number is not 1, msg="%s contains %s of %s, \
-					should be only 1" % (result, number, item) )

diff --git a/portage_with_autodep/pym/portage/tests/util/test_varExpand.py b/portage_with_autodep/pym/portage/tests/util/test_varExpand.py
deleted file mode 100644
index 7b528d6..0000000
--- a/portage_with_autodep/pym/portage/tests/util/test_varExpand.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# test_varExpand.py -- Portage Unit Testing Functionality
-# Copyright 2006-2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.util import varexpand
-
-class VarExpandTestCase(TestCase):
-	
-	def testVarExpandPass(self):
-
-		varDict = { "a":"5", "b":"7", "c":"-5" }
-		for key in varDict:
-			result = varexpand( "$%s" % key, varDict )
-			
-			self.assertFalse( result != varDict[key],
-				msg="Got %s != %s, from varexpand( %s, %s )" % \
-					( result, varDict[key], "$%s" % key, varDict ) )
-			result = varexpand( "${%s}" % key, varDict )
-			self.assertFalse( result != varDict[key],
-				msg="Got %s != %s, from varexpand( %s, %s )" % \
-					( result, varDict[key], "${%s}" % key, varDict ) )
-
-	def testVarExpandBackslashes(self):
-		"""
-		We want to behave like bash does when expanding a variable
-		assignment in a sourced file, in which case it performs
-		backslash removal for \\ and \$ but nothing more. It also
-		removes escaped newline characters. Note that we don't
-		handle escaped quotes here, since getconfig() uses shlex
-		to handle that earlier.
-		"""
-
-		varDict = {}
-		tests = [
-			("\\", "\\"),
-			("\\\\", "\\"),
-			("\\\\\\", "\\\\"),
-			("\\\\\\\\", "\\\\"),
-			("\\$", "$"),
-			("\\\\$", "\\$"),
-			("\\a", "\\a"),
-			("\\b", "\\b"),
-			("\\n", "\\n"),
-			("\\r", "\\r"),
-			("\\t", "\\t"),
-			("\\\n", ""),
-			("\\\"", "\\\""),
-			("\\'", "\\'"),
-		]
-		for test in tests:
-			result = varexpand( test[0], varDict )
-			self.assertFalse( result != test[1],
-				msg="Got %s != %s from varexpand( %s, %s )" \
-				% ( result, test[1], test[0], varDict ) )
-
-	def testVarExpandDoubleQuotes(self):
-		
-		varDict = { "a":"5" }
-		tests = [ ("\"${a}\"", "\"5\"") ]
-		for test in tests:
-			result = varexpand( test[0], varDict )
-			self.assertFalse( result != test[1],
-				msg="Got %s != %s from varexpand( %s, %s )" \
-				% ( result, test[1], test[0], varDict ) )
-
-	def testVarExpandSingleQuotes(self):
-		
-		varDict = { "a":"5" }
-		tests = [ ("\'${a}\'", "\'${a}\'") ]
-		for test in tests:
-			result = varexpand( test[0], varDict )
-			self.assertFalse( result != test[1],
-				msg="Got %s != %s from varexpand( %s, %s )" \
-				% ( result, test[1], test[0], varDict ) )
-
-	def testVarExpandFail(self):
-
-		varDict = { "a":"5", "b":"7", "c":"15" }
-
-		testVars = [ "fail" ]
-
-		for var in testVars:
-			result = varexpand( "$%s" % var, varDict )
-			self.assertFalse( len(result),
-				msg="Got %s == %s, from varexpand( %s, %s )" \
-					% ( result, var, "$%s" % var, varDict ) )
-
-			result = varexpand( "${%s}" % var, varDict )
-			self.assertFalse( len(result),
-				msg="Got %s == %s, from varexpand( %s, %s )" \
-					% ( result, var, "${%s}" % var, varDict ) )

diff --git a/portage_with_autodep/pym/portage/tests/versions/__init__.py b/portage_with_autodep/pym/portage/tests/versions/__init__.py
deleted file mode 100644
index 2b14180..0000000
--- a/portage_with_autodep/pym/portage/tests/versions/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# tests/portage.versions/__init__.py -- Portage Unit Test functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/versions/__test__ b/portage_with_autodep/pym/portage/tests/versions/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/versions/test_cpv_sort_key.py b/portage_with_autodep/pym/portage/tests/versions/test_cpv_sort_key.py
deleted file mode 100644
index a223d78..0000000
--- a/portage_with_autodep/pym/portage/tests/versions/test_cpv_sort_key.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2010 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.versions import cpv_sort_key
-
-class CpvSortKeyTestCase(TestCase):
-
-	def testCpvSortKey(self):
-
-		tests = [ (("a/b-2_alpha", "a", "b", "a/b-2", "a/a-1", "a/b-1"),
-			( "a", "a/a-1", "a/b-1", "a/b-2_alpha", "a/b-2", "b")),
-		]
-
-		for test in tests:
-			self.assertEqual( tuple(sorted(test[0], key=cpv_sort_key())), test[1] )

diff --git a/portage_with_autodep/pym/portage/tests/versions/test_vercmp.py b/portage_with_autodep/pym/portage/tests/versions/test_vercmp.py
deleted file mode 100644
index aa7969c..0000000
--- a/portage_with_autodep/pym/portage/tests/versions/test_vercmp.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# test_vercmp.py -- Portage Unit Testing Functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage.tests import TestCase
-from portage.versions import vercmp
-
-class VerCmpTestCase(TestCase):
-	""" A simple testCase for portage.versions.vercmp()
-	"""
-	
-	def testVerCmpGreater(self):
-		
-		tests = [ ( "6.0", "5.0"), ("5.0","5"),
-			("1.0-r1", "1.0-r0"),
-			("1.0-r1", "1.0"),
-			("cvs.9999", "9999"),
-			("999999999999999999999999999999", "999999999999999999999999999998"),
-			("1.0.0", "1.0"),
-			("1.0.0", "1.0b"),
-			("1b", "1"),
-			("1b_p1", "1_p1"),
-			("1.1b", "1.1"),
-			("12.2.5", "12.2b"),
-		]
-		for test in tests:
-			self.assertFalse( vercmp( test[0], test[1] ) <= 0, msg="%s < %s? Wrong!" % (test[0],test[1]) )
-
-	def testVerCmpLess(self):
-		"""
-		pre < alpha < beta < rc < p -> test each of these, they are inductive (or should be..)
-		"""
-		tests = [ ( "4.0", "5.0"), ("5", "5.0"), ("1.0_pre2","1.0_p2"),
-			("1.0_alpha2", "1.0_p2"),("1.0_alpha1", "1.0_beta1"),("1.0_beta3","1.0_rc3"),
-			("1.001000000000000000001", "1.001000000000000000002"),
-			("1.00100000000", "1.0010000000000000001"),
-			("9999", "cvs.9999"),
-			("999999999999999999999999999998", "999999999999999999999999999999"),
-			("1.01", "1.1"),
-			("1.0-r0", "1.0-r1"),
-			("1.0", "1.0-r1"),
-			("1.0", "1.0.0"),
-			("1.0b", "1.0.0"),
-			("1_p1", "1b_p1"),
-			("1", "1b"),
-			("1.1", "1.1b"),
-			("12.2b", "12.2.5"),
-		]
-		for test in tests:
-			self.assertFalse( vercmp( test[0], test[1]) >= 0, msg="%s > %s? Wrong!" % (test[0],test[1]))
-	
-	
-	def testVerCmpEqual(self):
-		
-		tests = [ ("4.0", "4.0"),
-			("1.0", "1.0"),
-			("1.0-r0", "1.0"),
-			("1.0", "1.0-r0"),
-			("1.0-r0", "1.0-r0"),
-			("1.0-r1", "1.0-r1")]
-		for test in tests:
-			self.assertFalse( vercmp( test[0], test[1]) != 0, msg="%s != %s? Wrong!" % (test[0],test[1]))
-			
-	def testVerNotEqual(self):
-		
-		tests = [ ("1","2"),("1.0_alpha","1.0_pre"),("1.0_beta","1.0_alpha"),
-			("0", "0.0"),
-			("cvs.9999", "9999"),
-			("1.0-r0", "1.0-r1"),
-			("1.0-r1", "1.0-r0"),
-			("1.0", "1.0-r1"),
-			("1.0-r1", "1.0"),
-			("1.0", "1.0.0"),
-			("1_p1", "1b_p1"),
-			("1b", "1"),
-			("1.1b", "1.1"),
-			("12.2b", "12.2"),
-		]
-		for test in tests:
-			self.assertFalse( vercmp( test[0], test[1]) == 0, msg="%s == %s? Wrong!" % (test[0],test[1]))

diff --git a/portage_with_autodep/pym/portage/tests/xpak/__init__.py b/portage_with_autodep/pym/portage/tests/xpak/__init__.py
deleted file mode 100644
index 9c3f524..0000000
--- a/portage_with_autodep/pym/portage/tests/xpak/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# tests/portage.dep/__init__.py -- Portage Unit Test functionality
-# Copyright 2006 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/tests/xpak/__test__ b/portage_with_autodep/pym/portage/tests/xpak/__test__
deleted file mode 100644
index e69de29..0000000

diff --git a/portage_with_autodep/pym/portage/tests/xpak/test_decodeint.py b/portage_with_autodep/pym/portage/tests/xpak/test_decodeint.py
deleted file mode 100644
index 2da5735..0000000
--- a/portage_with_autodep/pym/portage/tests/xpak/test_decodeint.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# xpak/test_decodeint.py
-# Copright Gentoo Foundation 2006
-# Portage Unit Testing Functionality
-
-from portage.tests import TestCase
-from portage.xpak import decodeint, encodeint
-
-class testDecodeIntTestCase(TestCase):
-
-	def testDecodeInt(self):
-		
-		for n in range(1000):
-			self.assertEqual(decodeint(encodeint(n)), n)
-
-		for n in (2 ** 32 - 1,):
-			self.assertEqual(decodeint(encodeint(n)), n)

diff --git a/portage_with_autodep/pym/portage/update.py b/portage_with_autodep/pym/portage/update.py
index 52ab506..34e4663 100644
--- a/portage_with_autodep/pym/portage/update.py
+++ b/portage_with_autodep/pym/portage/update.py
@@ -86,10 +86,11 @@ def fixdbentries(update_iter, dbdir):
 	mydata = {}
 	for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
 		file_path = os.path.join(dbdir, myfile)
-		mydata[myfile] = io.open(_unicode_encode(file_path,
+		with io.open(_unicode_encode(file_path,
 			encoding=_encodings['fs'], errors='strict'),
 			mode='r', encoding=_encodings['repo.content'],
-			errors='replace').read()
+			errors='replace') as f:
+			mydata[myfile] = f.read()
 	updated_items = update_dbentries(update_iter, mydata)
 	for myfile, mycontent in updated_items.items():
 		file_path = os.path.join(dbdir, myfile)
@@ -132,10 +133,11 @@ def grab_updates(updpath, prev_mtimes=None):
 		if update_data or \
 			file_path not in prev_mtimes or \
 			long(prev_mtimes[file_path]) != mystat[stat.ST_MTIME]:
-			content = io.open(_unicode_encode(file_path,
+			f = io.open(_unicode_encode(file_path,
 				encoding=_encodings['fs'], errors='strict'),
-				mode='r', encoding=_encodings['repo.content'], errors='replace'
-				).read()
+				mode='r', encoding=_encodings['repo.content'], errors='replace')
+			content = f.read()
+			f.close()
 			update_data.append((file_path, mystat, content))
 	return update_data
 
@@ -155,6 +157,7 @@ def parse_updates(mycontent):
 			if len(mysplit) != 3:
 				errors.append(_("ERROR: Update command invalid '%s'") % myline)
 				continue
+			valid = True
 			for i in (1, 2):
 				try:
 					atom = Atom(mysplit[i])
@@ -168,7 +171,11 @@ def parse_updates(mycontent):
 				else:
 					errors.append(
 						_("ERROR: Malformed update entry '%s'") % myline)
+					valid = False
 					break
+			if not valid:
+				continue
+
 		if mysplit[0] == "slotmove":
 			if len(mysplit)!=4:
 				errors.append(_("ERROR: Update command invalid '%s'") % myline)
@@ -252,14 +259,19 @@ def update_config_files(config_root, protect, protect_mask, update_iter, match_c
 			recursivefiles.append(x)
 	myxfiles = recursivefiles
 	for x in myxfiles:
+		f = None
 		try:
-			file_contents[x] = io.open(
+			f = io.open(
 				_unicode_encode(os.path.join(abs_user_config, x),
 				encoding=_encodings['fs'], errors='strict'),
 				mode='r', encoding=_encodings['content'],
-				errors='replace').readlines()
+				errors='replace')
+			file_contents[x] = f.readlines()
 		except IOError:
 			continue
+		finally:
+			if f is not None:
+				f.close()
 
 	# update /etc/portage/packages.*
 	ignore_line_re = re.compile(r'^#|^\s*$')

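Note on the update.py hunks above: file reads are now closed deterministically (a with-block in fixdbentries(), explicit close() calls elsewhere), and parse_updates() gains a valid flag so a move entry with a malformed atom is skipped entirely instead of falling through toward the slotmove check. A minimal, self-contained sketch of that flag pattern (parse_atom is a stand-in for the real Atom constructor, not part of the patch):

    def parse_atom(token):
        # Stand-in validator; the real code constructs portage.dep.Atom.
        if "/" not in token:
            raise ValueError("malformed atom: %r" % token)
        return token

    def parse_moves(lines):
        commands, errors = [], []
        for line in lines:
            mysplit = line.split()
            if len(mysplit) != 3 or mysplit[0] != "move":
                errors.append("invalid command: %r" % line)
                continue
            valid = True
            for i in (1, 2):
                try:
                    mysplit[i] = parse_atom(mysplit[i])
                except ValueError as e:
                    errors.append(str(e))
                    valid = False
                    break
            if not valid:
                continue  # skip the whole entry; do not fall through
            commands.append(mysplit)
        return commands, errors

    # parse_moves(["move a/b x/y", "move broken x/y"])
    # -> ([['move', 'a/b', 'x/y']], ["malformed atom: 'broken'"])
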
diff --git a/portage_with_autodep/pym/portage/update.pyo b/portage_with_autodep/pym/portage/update.pyo
new file mode 100644
index 0000000..9628ea9
Binary files /dev/null and b/portage_with_autodep/pym/portage/update.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py b/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py
index 5cb9747..69bd58a 100644
--- a/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py
+++ b/portage_with_autodep/pym/portage/util/ExtractKernelVersion.py
@@ -14,7 +14,7 @@ def ExtractKernelVersion(base_dir):
 	@param base_dir: Path to sources (usually /usr/src/linux)
 	@type base_dir: string
 	@rtype: tuple( version[string], error[string])
-	@returns:
+	@return:
 	1. tuple( version[string], error[string])
 	Either version or error is populated (but never both)
 
@@ -37,6 +37,8 @@ def ExtractKernelVersion(base_dir):
 		return (None, str(details))
 	except IOError as details:
 		return (None, str(details))
+	finally:
+		f.close()
 
 	lines = [l.strip() for l in lines]
 

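The finally clause added above guarantees the Makefile handle is closed whether the reads succeed or raise. A small standalone sketch of the same pattern (path handling simplified; the real function reads base_dir + '/Makefile'):

    def read_head(path, count=4):
        try:
            f = open(path)
        except (IOError, OSError) as details:
            return (None, str(details))
        try:
            lines = [f.readline() for _ in range(count)]
        except IOError as details:
            return (None, str(details))
        finally:
            f.close()  # runs on both the success and the error path
        return ([l.strip() for l in lines], None)
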
diff --git a/portage_with_autodep/pym/portage/util/ExtractKernelVersion.pyo b/portage_with_autodep/pym/portage/util/ExtractKernelVersion.pyo
new file mode 100644
index 0000000..d0302fd
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/ExtractKernelVersion.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/SlotObject.py b/portage_with_autodep/pym/portage/util/SlotObject.py
similarity index 67%
rename from portage_with_autodep/pym/_emerge/SlotObject.py
rename to portage_with_autodep/pym/portage/util/SlotObject.py
index fdc6f35..a59dfc1 100644
--- a/portage_with_autodep/pym/_emerge/SlotObject.py
+++ b/portage_with_autodep/pym/portage/util/SlotObject.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 class SlotObject(object):
@@ -15,9 +15,18 @@ class SlotObject(object):
 			if not slots:
 				continue
 			for myattr in slots:
-				myvalue = kwargs.get(myattr, None)
+				myvalue = kwargs.pop(myattr, None)
+				if myvalue is None and getattr(self, myattr, None) is not None:
+					raise AssertionError(
+						"class '%s' duplicates '%s' value in __slots__ of base class '%s'" %
+						(self.__class__.__name__, myattr, c.__name__))
 				setattr(self, myattr, myvalue)
 
+		if kwargs:
+			raise TypeError(
+				"'%s' is an invalid keyword argument for this constructor" %
+				(next(iter(kwargs)),))
+
 	def copy(self):
 		"""
 		Create a new instance and copy all attributes

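The constructor above is now stricter: keyword arguments are popped rather than read, any leftover keyword raises TypeError, and a slot duplicated between a class and its base raises AssertionError. A self-contained sketch of the same checks (MiniSlotObject and Point are illustrative names, not part of the patch):

    class MiniSlotObject(object):
        __slots__ = ()

        def __init__(self, **kwargs):
            for c in self.__class__.__mro__:
                if c is object:
                    continue
                for attr in getattr(c, "__slots__", ()):
                    value = kwargs.pop(attr, None)
                    if value is None and getattr(self, attr, None) is not None:
                        raise AssertionError(
                            "class '%s' duplicates '%s' in __slots__ of '%s'"
                            % (self.__class__.__name__, attr, c.__name__))
                    setattr(self, attr, value)
            if kwargs:
                raise TypeError(
                    "'%s' is an invalid keyword argument for this constructor"
                    % (next(iter(kwargs)),))

    class Point(MiniSlotObject):
        __slots__ = ("x", "y")

    p = Point(x=1, y=2)   # ok
    # Point(x=1, z=3)     # raises TypeError: 'z' is an invalid keyword ...
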
diff --git a/portage_with_autodep/pym/portage/util/SlotObject.pyo b/portage_with_autodep/pym/portage/util/SlotObject.pyo
new file mode 100644
index 0000000..11d0ec7
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/SlotObject.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/_ShelveUnicodeWrapper.py b/portage_with_autodep/pym/portage/util/_ShelveUnicodeWrapper.py
new file mode 100644
index 0000000..adbd519
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_ShelveUnicodeWrapper.py
@@ -0,0 +1,45 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class ShelveUnicodeWrapper(object):
+	"""
+	Convert unicode to str and back again, since python-2.x shelve
+	module doesn't support unicode.
+	"""
+	def __init__(self, shelve_instance):
+		self._shelve = shelve_instance
+
+	def _encode(self, s):
+		if isinstance(s, unicode):
+			s = s.encode('utf_8')
+		return s
+
+	def __len__(self):
+		return len(self._shelve)
+
+	def __contains__(self, k):
+		return self._encode(k) in self._shelve
+
+	def __iter__(self):
+		return self._shelve.__iter__()
+
+	def items(self):
+		return self._shelve.iteritems()
+
+	def __setitem__(self, k, v):
+		self._shelve[self._encode(k)] = self._encode(v)
+
+	def __getitem__(self, k):
+		return self._shelve[self._encode(k)]
+
+	def __delitem__(self, k):
+		del self._shelve[self._encode(k)]
+
+	def get(self, k, *args):
+		return self._shelve.get(self._encode(k), *args)
+
+	def close(self):
+		self._shelve.close()
+
+	def clear(self):
+		self._shelve.clear()

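Usage is straightforward on Python 2, where the stdlib shelve module rejects unicode keys (on Python 3 the wrapper is unnecessary, and the unicode builtin it tests for does not exist). The import path assumes this patched tree is importable; the shelve path is illustrative:

    import shelve
    from portage.util._ShelveUnicodeWrapper import ShelveUnicodeWrapper

    db = ShelveUnicodeWrapper(shelve.open('/tmp/example.shelve'))
    db[u'greeting'] = u'hello'     # keys/values are encoded to utf_8 str
    print(db.get(u'greeting'))
    db.close()
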
diff --git a/portage_with_autodep/pym/portage/util/__init__.py b/portage_with_autodep/pym/portage/util/__init__.py
index 4aa63d5..2e0a32b 100644
--- a/portage_with_autodep/pym/portage/util/__init__.py
+++ b/portage_with_autodep/pym/portage/util/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2004-2011 Gentoo Foundation
+# Copyright 2004-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = ['apply_permissions', 'apply_recursive_permissions',
@@ -25,6 +25,7 @@ import stat
 import string
 import sys
 import traceback
+import glob
 
 import portage
 portage.proxy.lazyimport.lazyimport(globals(),
@@ -40,7 +41,7 @@ from portage import _os_merge
 from portage import _unicode_encode
 from portage import _unicode_decode
 from portage.exception import InvalidAtom, PortageException, FileNotFound, \
-       OperationNotPermitted, PermissionDenied, ReadOnlyFileSystem
+       OperationNotPermitted, ParseError, PermissionDenied, ReadOnlyFileSystem
 from portage.localization import _
 from portage.proxy.objectproxy import ObjectProxy
 from portage.cache.mappings import UserDict
@@ -318,11 +319,11 @@ def stack_lists(lists, incremental=1, remember_source_file=False,
 		for source_file, tokens in unmatched_removals.items():
 			if len(tokens) > 3:
 				selected = [tokens.pop(), tokens.pop(), tokens.pop()]
-				writemsg(_("--- Unmatch removal atoms in %s: %s and %s more\n") % \
+				writemsg(_("--- Unmatched removal atoms in %s: %s and %s more\n") % \
 					(source_file, ", ".join(selected), len(tokens)),
 					noiselevel=-1)
 			else:
-				writemsg(_("--- Unmatch removal atom(s) in %s: %s\n") % (source_file, ", ".join(tokens)),
+				writemsg(_("--- Unmatched removal atom(s) in %s: %s\n") % (source_file, ", ".join(tokens)),
 					noiselevel=-1)
 
 	if remember_source_file:
@@ -345,12 +346,11 @@ def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
 	@param incremental: Append to the return list, don't overwrite
 	@type incremental: Boolean (integer)
 	@rtype: Dictionary
-	@returns:
+	@return:
 	1.  Returns the lines in a file in a dictionary, for example:
 		'sys-apps/portage x86 amd64 ppc'
 		would return
 		{ "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ]
-		the line syntax is key : [list of values]
 	"""
 	newdict={}
 	for x in grablines(myfilename, recursive):
@@ -387,7 +387,9 @@ def read_corresponding_eapi_file(filename):
 	default = "0"
 	eapi_file = os.path.join(os.path.dirname(filename), "eapi")
 	try:
-		f = open(eapi_file, "r")
+		f = io.open(_unicode_encode(eapi_file,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['repo.content'], errors='replace')
 		lines = f.readlines()
 		if len(lines) == 1:
 			eapi = lines[0].rstrip("\n")
@@ -503,14 +505,15 @@ def writedict(mydict,myfilename,writekey=True):
 
 def shlex_split(s):
 	"""
-	This is equivalent to shlex.split but it temporarily encodes unicode
-	strings to bytes since shlex.split() doesn't handle unicode strings.
+	This is equivalent to shlex.split, but if the current interpreter is
+	python2, it temporarily encodes unicode strings to bytes since python2's
+	shlex.split() doesn't handle unicode strings.
 	"""
-	is_unicode = sys.hexversion < 0x3000000 and isinstance(s, unicode)
-	if is_unicode:
+	convert_to_bytes = sys.hexversion < 0x3000000 and not isinstance(s, bytes)
+	if convert_to_bytes:
 		s = _unicode_encode(s)
 	rval = shlex.split(s)
-	if is_unicode:
+	if convert_to_bytes:
 		rval = [_unicode_decode(x) for x in rval]
 	return rval
 
@@ -534,16 +537,18 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
 	else:
 		expand_map = {}
 	mykeys = {}
+	f = None
 	try:
 		# NOTE: shlex doesn't support unicode objects with Python 2
 		# (produces spurious \0 characters).
 		if sys.hexversion < 0x3000000:
-			content = open(_unicode_encode(mycfg,
-				encoding=_encodings['fs'], errors='strict'), 'rb').read()
+			f = open(_unicode_encode(mycfg,
+				encoding=_encodings['fs'], errors='strict'), 'rb')
 		else:
-			content = open(_unicode_encode(mycfg,
+			f = open(_unicode_encode(mycfg,
 				encoding=_encodings['fs'], errors='strict'), mode='r',
-				encoding=_encodings['content'], errors='replace').read()
+				encoding=_encodings['content'], errors='replace')
+		content = f.read()
 	except IOError as e:
 		if e.errno == PermissionDenied.errno:
 			raise PermissionDenied(mycfg)
@@ -552,6 +557,9 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
 			if e.errno not in (errno.EISDIR,):
 				raise
 		return None
+	finally:
+		if f is not None:
+			f.close()
 
 	# Workaround for avoiding a silent error in shlex that is
 	# triggered by a source statement at the end of the file
@@ -565,6 +573,7 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
 		writemsg(("!!! " + _("Please use dos2unix to convert line endings " + \
 			"in config file: '%s'") + "\n") % mycfg, noiselevel=-1)
 
+	lex = None
 	try:
 		if tolerant:
 			shlex_class = _tolerant_shlex
@@ -588,64 +597,63 @@ def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
 				break;
 			equ=lex.get_token()
 			if (equ==''):
-				#unexpected end of file
-				#lex.error_leader(self.filename,lex.lineno)
+				msg = lex.error_leader() + _("Unexpected EOF")
 				if not tolerant:
-					writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
-						noiselevel=-1)
-					raise Exception(_("ParseError: Unexpected EOF: %s: on/before line %s") % (mycfg, lex.lineno))
+					raise ParseError(msg)
 				else:
+					writemsg("%s\n" % msg, noiselevel=-1)
 					return mykeys
 			elif (equ!='='):
-				#invalid token
-				#lex.error_leader(self.filename,lex.lineno)
+				msg = lex.error_leader() + \
+					_("Invalid token '%s' (not '=')") % (equ,)
 				if not tolerant:
-					raise Exception(_("ParseError: Invalid token "
-						"'%s' (not '='): %s: line %s") % \
-						(equ, mycfg, lex.lineno))
+					raise ParseError(msg)
 				else:
+					writemsg("%s\n" % msg, noiselevel=-1)
 					return mykeys
 			val=lex.get_token()
 			if val is None:
-				#unexpected end of file
-				#lex.error_leader(self.filename,lex.lineno)
+				msg = lex.error_leader() + \
+					_("Unexpected end of config file: variable '%s'") % (key,)
 				if not tolerant:
-					writemsg(_("!!! Unexpected end of config file: variable %s\n") % key,
-						noiselevel=-1)
-					raise portage.exception.CorruptionError(_("ParseError: Unexpected EOF: %s: line %s") % (mycfg, lex.lineno))
+					raise ParseError(msg)
 				else:
+					writemsg("%s\n" % msg, noiselevel=-1)
 					return mykeys
 			key = _unicode_decode(key)
 			val = _unicode_decode(val)
 
 			if _invalid_var_name_re.search(key) is not None:
+				msg = lex.error_leader() + \
+					_("Invalid variable name '%s'") % (key,)
 				if not tolerant:
-					raise Exception(_(
-						"ParseError: Invalid variable name '%s': line %s") % \
-						(key, lex.lineno - 1))
-				writemsg(_("!!! Invalid variable name '%s': line %s in %s\n") \
-					% (key, lex.lineno - 1, mycfg), noiselevel=-1)
+					raise ParseError(msg)
+				writemsg("%s\n" % msg, noiselevel=-1)
 				continue
 
 			if expand:
-				mykeys[key] = varexpand(val, expand_map)
+				mykeys[key] = varexpand(val, mydict=expand_map,
+					error_leader=lex.error_leader)
 				expand_map[key] = mykeys[key]
 			else:
 				mykeys[key] = val
 	except SystemExit as e:
 		raise
 	except Exception as e:
-		raise portage.exception.ParseError(str(e)+" in "+mycfg)
+		if isinstance(e, ParseError) or lex is None:
+			raise
+		msg = _unicode_decode("%s%s") % (lex.error_leader(), e)
+		writemsg("%s\n" % msg, noiselevel=-1)
+		raise
+
 	return mykeys
-	
-#cache expansions of constant strings
-cexpand={}
-def varexpand(mystring, mydict=None):
+
+_varexpand_word_chars = frozenset(string.ascii_letters + string.digits + "_")
+_varexpand_unexpected_eof_msg = "unexpected EOF while looking for matching `}'"
+
+def varexpand(mystring, mydict=None, error_leader=None):
 	if mydict is None:
 		mydict = {}
-	newstring = cexpand.get(" "+mystring, None)
-	if newstring is not None:
-		return newstring
 
 	"""
 	new variable expansion code.  Preserves quotes, handles \n, etc.
@@ -653,36 +661,37 @@ def varexpand(mystring, mydict=None):
 	This would be a good bunch of code to port to C.
 	"""
 	numvars=0
-	mystring=" "+mystring
 	#in single, double quotes
 	insing=0
 	indoub=0
-	pos=1
-	newstring=" "
-	while (pos<len(mystring)):
-		if (mystring[pos]=="'") and (mystring[pos-1]!="\\"):
+	pos = 0
+	length = len(mystring)
+	newstring = []
+	while pos < length:
+		current = mystring[pos]
+		if current == "'":
 			if (indoub):
-				newstring=newstring+"'"
+				newstring.append("'")
 			else:
-				newstring += "'" # Quote removal is handled by shlex.
+				newstring.append("'") # Quote removal is handled by shlex.
 				insing=not insing
 			pos=pos+1
 			continue
-		elif (mystring[pos]=='"') and (mystring[pos-1]!="\\"):
+		elif current == '"':
 			if (insing):
-				newstring=newstring+'"'
+				newstring.append('"')
 			else:
-				newstring += '"' # Quote removal is handled by shlex.
+				newstring.append('"') # Quote removal is handled by shlex.
 				indoub=not indoub
 			pos=pos+1
 			continue
 		if (not insing): 
 			#expansion time
-			if (mystring[pos]=="\n"):
+			if current == "\n":
 				#convert newlines to spaces
-				newstring=newstring+" "
-				pos=pos+1
-			elif (mystring[pos]=="\\"):
+				newstring.append(" ")
+				pos += 1
+			elif current == "\\":
 				# For backslash expansion, this function used to behave like
 				# echo -e, but that's not needed for our purposes. We want to
 				# behave like bash does when expanding a variable assignment
@@ -692,19 +701,27 @@ def varexpand(mystring, mydict=None):
 				# escaped quotes here, since getconfig() uses shlex
 				# to handle that earlier.
 				if (pos+1>=len(mystring)):
-					newstring=newstring+mystring[pos]
+					newstring.append(current)
 					break
 				else:
-					a = mystring[pos + 1]
-					pos = pos + 2
-					if a in ("\\", "$"):
-						newstring = newstring + a
-					elif a == "\n":
+					current = mystring[pos + 1]
+					pos += 2
+					if current == "$":
+						newstring.append(current)
+					elif current == "\\":
+						newstring.append(current)
+						# BUG: This spot appears buggy, but it's intended to
+						# be bug-for-bug compatible with existing behavior.
+						if pos < length and \
+							mystring[pos] in ("'", '"', "$"):
+							newstring.append(mystring[pos])
+							pos += 1
+					elif current == "\n":
 						pass
 					else:
-						newstring = newstring + mystring[pos-2:pos]
+						newstring.append(mystring[pos - 2:pos])
 					continue
-			elif (mystring[pos]=="$") and (mystring[pos-1]!="\\"):
+			elif current == "$":
 				pos=pos+1
 				if mystring[pos]=="{":
 					pos=pos+1
@@ -712,11 +729,13 @@ def varexpand(mystring, mydict=None):
 				else:
 					braced=False
 				myvstart=pos
-				validchars=string.ascii_letters+string.digits+"_"
-				while mystring[pos] in validchars:
+				while mystring[pos] in _varexpand_word_chars:
 					if (pos+1)>=len(mystring):
 						if braced:
-							cexpand[mystring]=""
+							msg = _varexpand_unexpected_eof_msg
+							if error_leader is not None:
+								msg = error_leader() + msg
+							writemsg(msg + "\n", noiselevel=-1)
 							return ""
 						else:
 							pos=pos+1
@@ -725,25 +744,33 @@ def varexpand(mystring, mydict=None):
 				myvarname=mystring[myvstart:pos]
 				if braced:
 					if mystring[pos]!="}":
-						cexpand[mystring]=""
+						msg = _varexpand_unexpected_eof_msg
+						if error_leader is not None:
+							msg = error_leader() + msg
+						writemsg(msg + "\n", noiselevel=-1)
 						return ""
 					else:
 						pos=pos+1
 				if len(myvarname)==0:
-					cexpand[mystring]=""
+					msg = "$"
+					if braced:
+						msg += "{}"
+					msg += ": bad substitution"
+					if error_leader is not None:
+						msg = error_leader() + msg
+					writemsg(msg + "\n", noiselevel=-1)
 					return ""
 				numvars=numvars+1
 				if myvarname in mydict:
-					newstring=newstring+mydict[myvarname] 
+					newstring.append(mydict[myvarname])
 			else:
-				newstring=newstring+mystring[pos]
-				pos=pos+1
+				newstring.append(current)
+				pos += 1
 		else:
-			newstring=newstring+mystring[pos]
-			pos=pos+1
-	if numvars==0:
-		cexpand[mystring]=newstring[1:]
-	return newstring[1:]	
+			newstring.append(current)
+			pos += 1
+
+	return "".join(newstring)
 
 # broken and removed, but can still be imported
 pickle_write = None
@@ -1589,13 +1616,27 @@ def find_updated_config_files(target_root, config_protect):
 					else:
 						yield (x, None)
 
+_ld_so_include_re = re.compile(r'^include\s+(\S.*)')
+
 def getlibpaths(root, env=None):
+	def read_ld_so_conf(path):
+		for l in grabfile(path):
+			include_match = _ld_so_include_re.match(l)
+			if include_match is not None:
+				subpath = os.path.join(os.path.dirname(path),
+					include_match.group(1))
+				for p in glob.glob(subpath):
+					for r in read_ld_so_conf(p):
+						yield r
+			else:
+				yield l
+
 	""" Return a list of paths that are used for library lookups """
 	if env is None:
 		env = os.environ
 	# the following is based on the information from ld.so(8)
 	rval = env.get("LD_LIBRARY_PATH", "").split(":")
-	rval.extend(grabfile(os.path.join(root, "etc", "ld.so.conf")))
+	rval.extend(read_ld_so_conf(os.path.join(root, "etc", "ld.so.conf")))
 	rval.append("/usr/lib")
 	rval.append("/lib")
 

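Several changes land in this file: getconfig() and varexpand() now report errors through lex.error_leader() and ParseError rather than bare Exception, varexpand() builds its result with a list and join() (dropping the old cexpand cache), the "Unmatch" messages are corrected to "Unmatched", and getlibpaths() finally honors include directives in ld.so.conf. A standalone sketch of that recursive reader, with grabfile() replaced by a plain file read for illustration:

    import glob
    import os
    import re

    _include_re = re.compile(r'^include\s+(\S.*)')

    def read_ld_so_conf(path):
        try:
            with open(path) as f:
                lines = f.read().splitlines()
        except IOError:
            return
        for line in lines:
            line = line.split('#', 1)[0].strip()
            if not line:
                continue
            m = _include_re.match(line)
            if m is None:
                yield line
                continue
            # Globs like 'include ld.so.conf.d/*.conf' resolve relative
            # to the including file; each match is read recursively.
            pattern = os.path.join(os.path.dirname(path), m.group(1))
            for sub in glob.glob(pattern):
                for entry in read_ld_so_conf(sub):
                    yield entry

    # list(read_ld_so_conf('/etc/ld.so.conf')) -> e.g. ['/usr/lib64', ...]
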
diff --git a/portage_with_autodep/pym/portage/util/__init__.pyo b/portage_with_autodep/pym/portage/util/__init__.pyo
new file mode 100644
index 0000000..941b286
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/_argparse.py b/portage_with_autodep/pym/portage/util/_argparse.py
new file mode 100644
index 0000000..6ca7852
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_argparse.py
@@ -0,0 +1,42 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['ArgumentParser']
+
+try:
+	from argparse import ArgumentParser
+except ImportError:
+	# Compatibility with Python 2.6 and 3.1
+	from optparse import OptionGroup, OptionParser
+
+	from portage.localization import _
+
+	class ArgumentParser(object):
+		def __init__(self, **kwargs):
+			add_help = kwargs.pop("add_help", None)
+			if add_help is not None:
+				kwargs["add_help_option"] = add_help
+			parser = OptionParser(**kwargs)
+			self._parser = parser
+			self.add_argument = parser.add_option
+			self.print_help = parser.print_help
+			self.error = parser.error
+
+		def add_argument_group(self, title=None, **kwargs):
+			optiongroup = OptionGroup(self._parser, title, **kwargs)
+			self._parser.add_option_group(optiongroup)
+			return _ArgumentGroup(optiongroup)
+
+		def parse_known_args(self, args=None, namespace=None):
+			return self._parser.parse_args(args, namespace)
+
+		def parse_args(self, args=None, namespace=None):
+			args, argv = self.parse_known_args(args, namespace)
+			if argv:
+				msg = _('unrecognized arguments: %s')
+				self.error(msg % ' '.join(argv))
+			return args
+
+	class _ArgumentGroup(object):
+		def __init__(self, optiongroup):
+			self.add_argument = optiongroup.add_option

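A short usage sketch of the wrapper: on Python 2.7 and 3.2+ it is the real argparse.ArgumentParser, while on 2.6/3.1 the optparse-backed shim exposes the same small surface. Option names and values below are illustrative:

    from portage.util._argparse import ArgumentParser

    parser = ArgumentParser(description="demo")
    parser.add_argument("--jobs", type=int, default=1,
        help="number of parallel jobs")
    group = parser.add_argument_group("output")
    group.add_argument("-v", "--verbose", action="store_true")
    opts = parser.parse_args(["--jobs", "4", "-v"])
    print("%s %s" % (opts.jobs, opts.verbose))   # 4 True
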
diff --git a/portage_with_autodep/pym/portage/util/_async/AsyncScheduler.py b/portage_with_autodep/pym/portage/util/_async/AsyncScheduler.py
new file mode 100644
index 0000000..9b96c6f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_async/AsyncScheduler.py
@@ -0,0 +1,102 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.PollScheduler import PollScheduler
+
+class AsyncScheduler(AsynchronousTask, PollScheduler):
+
+	def __init__(self, max_jobs=None, max_load=None, **kwargs):
+		AsynchronousTask.__init__(self)
+		PollScheduler.__init__(self, **kwargs)
+
+		if max_jobs is None:
+			max_jobs = 1
+		self._max_jobs = max_jobs
+		self._max_load = max_load
+		self._error_count = 0
+		self._running_tasks = set()
+		self._remaining_tasks = True
+		self._term_check_id = None
+		self._loadavg_check_id = None
+
+	def _poll(self):
+		if not (self._is_work_scheduled() or self._keep_scheduling()):
+			self.wait()
+		return self.returncode
+
+	def _cancel(self):
+		self._terminated.set()
+		self._termination_check()
+
+	def _terminate_tasks(self):
+		for task in list(self._running_tasks):
+			task.cancel()
+
+	def _next_task(self):
+		raise NotImplementedError(self)
+
+	def _keep_scheduling(self):
+		return self._remaining_tasks and not self._terminated.is_set()
+
+	def _running_job_count(self):
+		return len(self._running_tasks)
+
+	def _schedule_tasks(self):
+		while self._keep_scheduling() and self._can_add_job():
+			try:
+				task = self._next_task()
+			except StopIteration:
+				self._remaining_tasks = False
+			else:
+				self._running_tasks.add(task)
+				task.scheduler = self._sched_iface
+				task.addExitListener(self._task_exit)
+				task.start()
+
+		# Triggers cleanup and exit listeners if there's nothing left to do.
+		self.poll()
+
+	def _task_exit(self, task):
+		self._running_tasks.discard(task)
+		if task.returncode != os.EX_OK:
+			self._error_count += 1
+		self._schedule()
+
+	def _start(self):
+		self._term_check_id = self._event_loop.idle_add(self._termination_check)
+		if self._max_load is not None and \
+			self._loadavg_latency is not None and \
+			(self._max_jobs is True or self._max_jobs > 1):
+			# We have to schedule periodically, in case the load
+			# average has changed since the last call.
+			self._loadavg_check_id = self._event_loop.timeout_add(
+				self._loadavg_latency, self._schedule)
+		self._schedule()
+
+	def _wait(self):
+		# Loop while there are jobs to be scheduled.
+		while self._keep_scheduling():
+			self._event_loop.iteration()
+
+		# Clean shutdown of previously scheduled jobs. In the
+		# case of termination, this allows for basic cleanup
+		# such as flushing of buffered output to logs.
+		while self._is_work_scheduled():
+			self._event_loop.iteration()
+
+		if self._term_check_id is not None:
+			self._event_loop.source_remove(self._term_check_id)
+			self._term_check_id = None
+
+		if self._loadavg_check_id is not None:
+			self._event_loop.source_remove(self._loadavg_check_id)
+			self._loadavg_check_id = None
+
+		if self._error_count > 0:
+			self.returncode = 1
+		else:
+			self.returncode = os.EX_OK 
+
+		return self.returncode

diff --git a/portage_with_autodep/pym/portage/util/_async/FileCopier.py b/portage_with_autodep/pym/portage/util/_async/FileCopier.py
new file mode 100644
index 0000000..27e5ab4
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_async/FileCopier.py
@@ -0,0 +1,17 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage import shutil
+from portage.util._async.ForkProcess import ForkProcess
+
+class FileCopier(ForkProcess):
+	"""
+	Asynchronously copy a file.
+	"""
+
+	__slots__ = ('src_path', 'dest_path')
+
+	def _run(self):
+		shutil.copy(self.src_path, self.dest_path)
+		return os.EX_OK

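A hedged usage sketch, mirroring how portage's own tests drive these tasks; it assumes the global_event_loop helper from portage.util._eventloop, which is part of this tree but outside this excerpt, and an existing source file:

    from portage import os
    from portage.util._async.FileCopier import FileCopier
    from portage.util._eventloop.global_event_loop import global_event_loop

    # assumes /tmp/src.txt exists
    copier = FileCopier(src_path='/tmp/src.txt', dest_path='/tmp/dest.txt',
        scheduler=global_event_loop())
    copier.start()
    assert copier.wait() == os.EX_OK   # the copy ran in a forked child
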
diff --git a/portage_with_autodep/pym/portage/util/_async/FileDigester.py b/portage_with_autodep/pym/portage/util/_async/FileDigester.py
new file mode 100644
index 0000000..881c692
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_async/FileDigester.py
@@ -0,0 +1,73 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.checksum import perform_multiple_checksums
+from portage.util._async.ForkProcess import ForkProcess
+from _emerge.PipeReader import PipeReader
+
+class FileDigester(ForkProcess):
+	"""
+	Asynchronously generate file digests. Pass in file_path and
+	hash_names, and after successful execution, the digests
+	attribute will be a dict containing all of the requested
+	digests.
+	"""
+
+	__slots__ = ('file_path', 'digests', 'hash_names',
+		'_digest_pipe_reader', '_digest_pw')
+
+	def _start(self):
+		pr, pw = os.pipe()
+		self.fd_pipes = {}
+		self.fd_pipes[pw] = pw
+		self._digest_pw = pw
+		self._digest_pipe_reader = PipeReader(
+			input_files={"input":pr},
+			scheduler=self.scheduler)
+		self._digest_pipe_reader.addExitListener(self._digest_pipe_reader_exit)
+		self._digest_pipe_reader.start()
+		ForkProcess._start(self)
+		os.close(pw)
+
+	def _run(self):
+		digests = perform_multiple_checksums(self.file_path,
+			hashes=self.hash_names)
+
+		buf = "".join("%s=%s\n" % item
+			for item in digests.items()).encode('utf_8')
+
+		while buf:
+			buf = buf[os.write(self._digest_pw, buf):]
+
+		return os.EX_OK
+
+	def _parse_digests(self, data):
+
+		digests = {}
+		for line in data.decode('utf_8').splitlines():
+			parts = line.split('=', 1)
+			if len(parts) == 2:
+				digests[parts[0]] = parts[1]
+
+		self.digests = digests
+
+	def _pipe_logger_exit(self, pipe_logger):
+		# Ignore this event, since we want to ensure that we
+		# exit only after _digest_pipe_reader has reached EOF.
+		self._pipe_logger = None
+
+	def _digest_pipe_reader_exit(self, pipe_reader):
+		self._parse_digests(pipe_reader.getvalue())
+		self._digest_pipe_reader = None
+		self._unregister()
+		self.wait()
+
+	def _unregister(self):
+		ForkProcess._unregister(self)
+
+		pipe_reader = self._digest_pipe_reader
+		if pipe_reader is not None:
+			self._digest_pipe_reader = None
+			pipe_reader.removeExitListener(self._digest_pipe_reader_exit)
+			pipe_reader.cancel()

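Usage follows the same pattern: after wait() returns successfully, the digests attribute holds the requested hashes. The hash names, file path, and event-loop import below are assumptions based on the rest of this tree:

    from portage.util._async.FileDigester import FileDigester
    from portage.util._eventloop.global_event_loop import global_event_loop

    digester = FileDigester(file_path='/etc/ld.so.conf',
        hash_names=('MD5', 'SHA1'), scheduler=global_event_loop())
    digester.start()
    digester.wait()
    print(digester.digests)   # e.g. {'MD5': '...', 'SHA1': '...'}
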
diff --git a/portage_with_autodep/pym/portage/util/_async/ForkProcess.py b/portage_with_autodep/pym/portage/util/_async/ForkProcess.py
new file mode 100644
index 0000000..25f72d3
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_async/ForkProcess.py
@@ -0,0 +1,65 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import signal
+import sys
+import traceback
+
+import portage
+from portage import os
+from _emerge.SpawnProcess import SpawnProcess
+
+class ForkProcess(SpawnProcess):
+
+	__slots__ = ()
+
+	def _spawn(self, args, fd_pipes=None, **kwargs):
+		"""
+		Fork a subprocess, apply local settings, and call fetch().
+		"""
+
+		parent_pid = os.getpid()
+		pid = None
+		try:
+			pid = os.fork()
+
+			if pid != 0:
+				if not isinstance(pid, int):
+					raise AssertionError(
+						"fork returned non-integer: %s" % (repr(pid),))
+				return [pid]
+
+			rval = 1
+			try:
+
+				# Use default signal handlers in order to avoid problems
+				# killing subprocesses as reported in bug #353239.
+				signal.signal(signal.SIGINT, signal.SIG_DFL)
+				signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+				portage.locks._close_fds()
+				# We don't exec, so use close_fds=False
+				# (see _setup_pipes docstring).
+				portage.process._setup_pipes(fd_pipes, close_fds=False)
+
+				rval = self._run()
+			except SystemExit:
+				raise
+			except:
+				traceback.print_exc()
+				# os._exit() skips stderr flush!
+				sys.stderr.flush()
+			finally:
+				os._exit(rval)
+
+		finally:
+			if pid == 0 or (pid is None and os.getpid() != parent_pid):
+				# Call os._exit() from a finally block in order
+				# to suppress any finally blocks from earlier
+				# in the call stack (see bug #345289). This
+				# finally block has to be setup before the fork
+				# in order to avoid a race condition.
+				os._exit(1)
+
+	def _run(self):
+		raise NotImplementedError(self)

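Subclasses implement only _run(), which executes in the forked child; its integer return value becomes the child's exit status, and the outer finally block guarantees os._exit() so a raising child can never unwind back into the parent's stack (bug #345289). A minimal hypothetical subclass:

    from portage import os
    from portage.util._async.ForkProcess import ForkProcess

    class TouchProcess(ForkProcess):
        """Create an empty file from a forked child process."""
        __slots__ = ('path',)

        def _run(self):
            open(self.path, 'ab').close()
            return os.EX_OK
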
diff --git a/portage_with_autodep/pym/portage/util/_async/PipeLogger.py b/portage_with_autodep/pym/portage/util/_async/PipeLogger.py
new file mode 100644
index 0000000..aa605d9
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_async/PipeLogger.py
@@ -0,0 +1,163 @@
+# Copyright 2008-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import fcntl
+import errno
+import gzip
+import sys
+
+import portage
+from portage import os, _encodings, _unicode_encode
+from _emerge.AbstractPollTask import AbstractPollTask
+
+class PipeLogger(AbstractPollTask):
+
+	"""
+	This can be used for logging output of a child process,
+	optionally outputting to log_file_path and/or stdout_fd.  It can
+	also monitor for EOF on input_fd, which may be used to detect
+	termination of a child process. If log_file_path ends with
+	'.gz' then the log file is written with compression.
+	"""
+
+	__slots__ = ("input_fd", "log_file_path", "stdout_fd") + \
+		("_log_file", "_log_file_real", "_reg_id")
+
+	def _start(self):
+
+		log_file_path = self.log_file_path
+		if log_file_path is not None:
+
+			self._log_file = open(_unicode_encode(log_file_path,
+				encoding=_encodings['fs'], errors='strict'), mode='ab')
+			if log_file_path.endswith('.gz'):
+				self._log_file_real = self._log_file
+				self._log_file = gzip.GzipFile(filename='', mode='ab',
+					fileobj=self._log_file)
+
+			portage.util.apply_secpass_permissions(log_file_path,
+				uid=portage.portage_uid, gid=portage.portage_gid,
+				mode=0o660)
+
+		if isinstance(self.input_fd, int):
+			fd = self.input_fd
+		else:
+			fd = self.input_fd.fileno()
+
+		fcntl.fcntl(fd, fcntl.F_SETFL,
+			fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+		# FD_CLOEXEC is enabled by default in Python >=3.4.
+		if sys.hexversion < 0x3040000:
+			try:
+				fcntl.FD_CLOEXEC
+			except AttributeError:
+				pass
+			else:
+				fcntl.fcntl(fd, fcntl.F_SETFD,
+					fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+		self._reg_id = self.scheduler.io_add_watch(fd,
+			self._registered_events, self._output_handler)
+		self._registered = True
+
+	def _cancel(self):
+		self._unregister()
+		if self.returncode is None:
+			self.returncode = self._cancelled_returncode
+
+	def _wait(self):
+		if self.returncode is not None:
+			return self.returncode
+		self._wait_loop()
+		self.returncode = os.EX_OK
+		return self.returncode
+
+	def _output_handler(self, fd, event):
+
+		background = self.background
+		stdout_fd = self.stdout_fd
+		log_file = self._log_file 
+
+		while True:
+			buf = self._read_buf(fd, event)
+
+			if buf is None:
+				# not a POLLIN event, EAGAIN, etc...
+				break
+
+			if not buf:
+				# EOF
+				self._unregister()
+				self.wait()
+				break
+
+			else:
+				if not background and stdout_fd is not None:
+					failures = 0
+					stdout_buf = buf
+					while stdout_buf:
+						try:
+							stdout_buf = \
+								stdout_buf[os.write(stdout_fd, stdout_buf):]
+						except OSError as e:
+							if e.errno != errno.EAGAIN:
+								raise
+							del e
+							failures += 1
+							if failures > 50:
+								# Avoid a potentially infinite loop. In
+								# most cases, the failure count is zero
+								# and it's unlikely to exceed 1.
+								raise
+
+							# This means that a subprocess has put an inherited
+							# stdio file descriptor (typically stdin) into
+							# O_NONBLOCK mode. This is not acceptable (see bug
+							# #264435), so revert it. We need to use a loop
+							# here since there's a race condition due to
+							# parallel processes being able to change the
+							# flags on the inherited file descriptor.
+							# TODO: When possible, avoid having child processes
+							# inherit stdio file descriptors from portage
+							# (maybe it can't be avoided with
+							# PROPERTIES=interactive).
+							fcntl.fcntl(stdout_fd, fcntl.F_SETFL,
+								fcntl.fcntl(stdout_fd,
+								fcntl.F_GETFL) ^ os.O_NONBLOCK)
+
+				if log_file is not None:
+					log_file.write(buf)
+					log_file.flush()
+
+		self._unregister_if_appropriate(event)
+
+		return True
+
+	def _unregister(self):
+
+		if self._reg_id is not None:
+			self.scheduler.source_remove(self._reg_id)
+			self._reg_id = None
+
+		if self.input_fd is not None:
+			if isinstance(self.input_fd, int):
+				os.close(self.input_fd)
+			else:
+				self.input_fd.close()
+			self.input_fd = None
+
+		if self.stdout_fd is not None:
+			os.close(self.stdout_fd)
+			self.stdout_fd = None
+
+		if self._log_file is not None:
+			self._log_file.close()
+			self._log_file = None
+
+		if self._log_file_real is not None:
+			# Avoid "ResourceWarning: unclosed file" since python 3.2.
+			self._log_file_real.close()
+			self._log_file_real = None
+
+		self._registered = False

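A sketch of typical wiring, similar to how SpawnProcess uses this class: the logger watches the read end of a pipe, tees to stdout unless backgrounded, and compresses when the log path ends in '.gz'. The paths and event-loop import are assumptions:

    import os
    from portage.util._async.PipeLogger import PipeLogger
    from portage.util._eventloop.global_event_loop import global_event_loop

    pr, pw = os.pipe()
    logger = PipeLogger(input_fd=pr, log_file_path='/tmp/build.log.gz',
        background=True, scheduler=global_event_loop())
    logger.start()
    os.write(pw, b'one line of build output\n')
    os.close(pw)    # EOF on the pipe lets the logger finish
    logger.wait()
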
diff --git a/portage_with_autodep/pym/portage/util/_async/PipeReaderBlockingIO.py b/portage_with_autodep/pym/portage/util/_async/PipeReaderBlockingIO.py
new file mode 100644
index 0000000..b06adf6
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_async/PipeReaderBlockingIO.py
@@ -0,0 +1,91 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+	import threading
+except ImportError:
+	# dummy_threading will not suffice
+	threading = None
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+
+class PipeReaderBlockingIO(AbstractPollTask):
+	"""
+	Reads output from one or more files and saves it in memory, for
+	retrieval via the getvalue() method. This is driven by a thread
+	for each input file, in order to support blocking IO.  This may
+	be useful for using threads to handle blocking IO with Jython,
+	since Jython lacks the fcntl module which is needed for
+	non-blocking IO (see http://bugs.jython.org/issue1074).
+	"""
+
+	__slots__ = ("input_files", "_read_data", "_terminate",
+		"_threads", "_thread_rlock")
+
+	def _start(self):
+		self._terminate = threading.Event()
+		self._threads = {}
+		self._read_data = []
+
+		self._registered = True
+		self._thread_rlock = threading.RLock()
+		with self._thread_rlock:
+			for f in self.input_files.values():
+				t = threading.Thread(target=self._reader_thread, args=(f,))
+				t.daemon = True
+				t.start()
+				self._threads[f] = t
+
+	def _reader_thread(self, f):
+		try:
+			terminated = self._terminate.is_set
+		except AttributeError:
+			# Jython 2.7.0a2
+			terminated = self._terminate.isSet
+		bufsize = self._bufsize
+		while not terminated():
+			buf = f.read(bufsize)
+			with self._thread_rlock:
+				if terminated():
+					break
+				elif buf:
+					self._read_data.append(buf)
+				else:
+					del self._threads[f]
+					if not self._threads:
+						# Thread-safe callback to EventLoop
+						self.scheduler.idle_add(self._eof)
+					break
+		f.close()
+
+	def _eof(self):
+		self._registered = False
+		if self.returncode is None:
+			self.returncode = os.EX_OK
+		self.wait()
+		return False
+
+	def _cancel(self):
+		self._terminate.set()
+		self._registered = False
+		if self.returncode is None:
+			self.returncode = self._cancelled_returncode
+		self.wait()
+
+	def _wait(self):
+		if self.returncode is not None:
+			return self.returncode
+		self._wait_loop()
+		self.returncode = os.EX_OK
+		return self.returncode
+
+	def getvalue(self):
+		"""Retrieve the entire contents"""
+		with self._thread_rlock:
+			return b''.join(self._read_data)
+
+	def close(self):
+		"""Free the memory buffer."""
+		with self._thread_rlock:
+			self._read_data = None

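Usage mirrors the non-blocking PipeReader: each input file gets a daemon reader thread, and getvalue() returns everything read. Illustrative only, again assuming the global event loop helper:

    import os
    from portage.util._async.PipeReaderBlockingIO import PipeReaderBlockingIO
    from portage.util._eventloop.global_event_loop import global_event_loop

    pr, pw = os.pipe()
    reader = PipeReaderBlockingIO(input_files={'input': os.fdopen(pr, 'rb')},
        scheduler=global_event_loop())
    reader.start()
    os.write(pw, b'payload')
    os.close(pw)               # EOF ends the reader thread
    reader.wait()
    print(reader.getvalue())   # b'payload'
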
diff --git a/portage_with_autodep/pym/portage/util/_async/PopenProcess.py b/portage_with_autodep/pym/portage/util/_async/PopenProcess.py
new file mode 100644
index 0000000..2fc56d2
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_async/PopenProcess.py
@@ -0,0 +1,33 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SubProcess import SubProcess
+
+class PopenProcess(SubProcess):
+
+	__slots__ = ("pipe_reader", "proc",)
+
+	def _start(self):
+
+		self.pid = self.proc.pid
+		self._registered = True
+
+		if self.pipe_reader is None:
+			self._reg_id = self.scheduler.child_watch_add(
+				self.pid, self._child_watch_cb)
+		else:
+			try:
+				self.pipe_reader.scheduler = self.scheduler
+			except AttributeError:
+				pass
+			self.pipe_reader.addExitListener(self._pipe_reader_exit)
+			self.pipe_reader.start()
+
+	def _pipe_reader_exit(self, pipe_reader):
+		self._reg_id = self.scheduler.child_watch_add(
+			self.pid, self._child_watch_cb)
+
+	def _child_watch_cb(self, pid, condition, user_data=None):
+		self._reg_id = None
+		self._waitpid_cb(pid, condition)
+		self.wait()

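This lets an ordinary subprocess.Popen object participate in the async task framework, with the event loop reaping the child via child_watch_add. A hedged example:

    import subprocess
    from portage.util._async.PopenProcess import PopenProcess
    from portage.util._eventloop.global_event_loop import global_event_loop

    task = PopenProcess(proc=subprocess.Popen(['true']),
        scheduler=global_event_loop())
    task.start()
    task.wait()
    print(task.returncode)   # 0 when 'true' exits cleanly
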
diff --git a/portage_with_autodep/pym/portage/util/_async/SchedulerInterface.py b/portage_with_autodep/pym/portage/util/_async/SchedulerInterface.py
new file mode 100644
index 0000000..2ab668e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_async/SchedulerInterface.py
@@ -0,0 +1,79 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gzip
+import errno
+
+from portage import _encodings
+from portage import _unicode_encode
+from portage.util import writemsg_level
+from ..SlotObject import SlotObject
+
+class SchedulerInterface(SlotObject):
+
+	_event_loop_attrs = ("IO_ERR", "IO_HUP", "IO_IN",
+		"IO_NVAL", "IO_OUT", "IO_PRI",
+		"child_watch_add", "idle_add", "io_add_watch",
+		"iteration", "source_remove", "timeout_add")
+
+	__slots__ = _event_loop_attrs + ("_event_loop", "_is_background")
+
+	def __init__(self, event_loop, is_background=None, **kwargs):
+		SlotObject.__init__(self, **kwargs)
+		self._event_loop = event_loop
+		if is_background is None:
+			is_background = self._return_false
+		self._is_background = is_background
+		for k in self._event_loop_attrs:
+			setattr(self, k, getattr(event_loop, k))
+
+	@staticmethod
+	def _return_false():
+		return False
+
+	def output(self, msg, log_path=None, background=None,
+		level=0, noiselevel=-1):
+		"""
+		Output msg to stdout if not self._is_background(). If log_path
+		is not None then append msg to the log (appends with
+		compression if the filename extension of log_path corresponds
+		to a supported compression type).
+		"""
+
+		global_background = self._is_background()
+		if background is None or global_background:
+			# Use the global value if the task does not have a local
+			# background value. For example, parallel-fetch tasks run
+			# in the background while other tasks concurrently run in
+			# the foreground.
+			background = global_background
+
+		msg_shown = False
+		if not background:
+			writemsg_level(msg, level=level, noiselevel=noiselevel)
+			msg_shown = True
+
+		if log_path is not None:
+			try:
+				f = open(_unicode_encode(log_path,
+					encoding=_encodings['fs'], errors='strict'),
+					mode='ab')
+				f_real = f
+			except IOError as e:
+				if e.errno not in (errno.ENOENT, errno.ESTALE):
+					raise
+				if not msg_shown:
+					writemsg_level(msg, level=level, noiselevel=noiselevel)
+			else:
+
+				if log_path.endswith('.gz'):
+					# NOTE: The empty filename argument prevents us from
+					# triggering a bug in python3 which causes GzipFile
+					# to raise AttributeError if fileobj.name is bytes
+					# instead of unicode.
+					f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
+
+				f.write(_unicode_encode(msg))
+				f.close()
+				if f_real is not f:
+					f_real.close()

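The output() helper centralizes the console/log split described in its docstring. A brief hedged example (the log path is illustrative):

    from portage.util._async.SchedulerInterface import SchedulerInterface
    from portage.util._eventloop.global_event_loop import global_event_loop

    sched_iface = SchedulerInterface(global_event_loop())
    # Written to stdout (foreground) and appended, gzip-compressed, to the log.
    sched_iface.output('>>> compiling...\n', log_path='/tmp/build.log.gz')
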
diff --git a/portage_with_autodep/pym/portage/util/_async/TaskScheduler.py b/portage_with_autodep/pym/portage/util/_async/TaskScheduler.py
new file mode 100644
index 0000000..35b3875
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_async/TaskScheduler.py
@@ -0,0 +1,20 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from .AsyncScheduler import AsyncScheduler
+
+class TaskScheduler(AsyncScheduler):
+
+	"""
+	A simple way to handle scheduling of AbstractPollTask instances. Simply
+	pass a task iterator into the constructor and call start(). Use the
+	poll, wait, or addExitListener methods to be notified when all of the
+	tasks have completed.
+	"""
+
+	def __init__(self, task_iter, **kwargs):
+		AsyncScheduler.__init__(self, **kwargs)
+		self._task_iter = task_iter
+
+	def _next_task(self):
+		return next(self._task_iter)

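Putting the pieces together: max_jobs bounds concurrency, and wait() returns os.EX_OK only when every task succeeded. The event_loop keyword is assumed to be accepted by PollScheduler in this tree, and the paths are illustrative:

    from portage.util._async.FileCopier import FileCopier
    from portage.util._async.TaskScheduler import TaskScheduler
    from portage.util._eventloop.global_event_loop import global_event_loop

    tasks = iter([
        FileCopier(src_path='/tmp/a', dest_path='/tmp/a.copy'),
        FileCopier(src_path='/tmp/b', dest_path='/tmp/b.copy'),
    ])
    sched = TaskScheduler(tasks, max_jobs=2, event_loop=global_event_loop())
    sched.start()
    print(sched.wait())   # os.EX_OK if both copies succeeded
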
diff --git a/portage_with_autodep/pym/portage/tests/locks/__init__.py b/portage_with_autodep/pym/portage/util/_async/__init__.py
similarity index 65%
rename from portage_with_autodep/pym/portage/tests/locks/__init__.py
rename to portage_with_autodep/pym/portage/util/_async/__init__.py
index 21a391a..418ad86 100644
--- a/portage_with_autodep/pym/portage/tests/locks/__init__.py
+++ b/portage_with_autodep/pym/portage/util/_async/__init__.py
@@ -1,2 +1,2 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/util/_async/run_main_scheduler.py b/portage_with_autodep/pym/portage/util/_async/run_main_scheduler.py
new file mode 100644
index 0000000..10fed34
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_async/run_main_scheduler.py
@@ -0,0 +1,41 @@
+
+import signal
+
+def run_main_scheduler(scheduler):
+	"""
+	Start and run an AsyncScheduler (or compatible object), and handle
+	SIGINT or SIGTERM by calling its terminate() method and waiting
+	for it to clean up after itself. If SIGINT or SIGTERM is received,
+	return signum, else return None. Any previous SIGINT or SIGTERM
+	signal handlers are automatically saved and restored before
+	returning.
+	"""
+
+	received_signal = []
+
+	def sighandler(signum, frame):
+		signal.signal(signal.SIGINT, signal.SIG_IGN)
+		signal.signal(signal.SIGTERM, signal.SIG_IGN)
+		received_signal.append(signum)
+		scheduler.terminate()
+
+	earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+	earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+
+	try:
+		scheduler.start()
+		scheduler.wait()
+	finally:
+		# Restore previous handlers
+		if earlier_sigint_handler is not None:
+			signal.signal(signal.SIGINT, earlier_sigint_handler)
+		else:
+			signal.signal(signal.SIGINT, signal.SIG_DFL)
+		if earlier_sigterm_handler is not None:
+			signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+		else:
+			signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+	if received_signal:
+		return received_signal[0]
+	return None

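Typical front-end use, following the convention in portage's own scripts: run the scheduler, and if a signal terminated it, exit with the conventional 128+signum status. The task and paths are illustrative:

    import sys
    from portage.util._async.FileCopier import FileCopier
    from portage.util._async.TaskScheduler import TaskScheduler
    from portage.util._async.run_main_scheduler import run_main_scheduler
    from portage.util._eventloop.global_event_loop import global_event_loop

    sched = TaskScheduler(iter([FileCopier(src_path='/tmp/a',
        dest_path='/tmp/a.copy')]), event_loop=global_event_loop())
    signum = run_main_scheduler(sched)   # starts, waits, handles SIGINT/SIGTERM
    if signum is not None:
        sys.exit(128 + signum)
    sys.exit(sched.returncode)
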
diff --git a/portage_with_autodep/pym/portage/util/_ctypes.py b/portage_with_autodep/pym/portage/util/_ctypes.py
new file mode 100644
index 0000000..aeceebc
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_ctypes.py
@@ -0,0 +1,47 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+	import ctypes
+	import ctypes.util
+except ImportError:
+	ctypes = None
+else:
+	try:
+		ctypes.cdll
+	except AttributeError:
+		ctypes = None
+
+_library_names = {}
+
+def find_library(name):
+	"""
+	Calls ctype.util.find_library() if the ctypes module is available,
+	and otherwise returns None. Results are cached for future invocations.
+	"""
+	filename = _library_names.get(name)
+	if filename is None:
+		if ctypes is not None:
+			filename = ctypes.util.find_library(name)
+			if filename is None:
+				filename = False
+			_library_names[name] = filename
+
+	if filename is False:
+		return None
+	return filename
+
+_library_handles = {}
+
+def LoadLibrary(name):
+	"""
+	Calls ctypes.cdll.LoadLibrary(name) if the ctypes module is available,
+	and otherwise returns None. Results are cached for future invocations.
+	"""
+	handle = _library_handles.get(name)
+
+	if handle is None and ctypes is not None:
+		handle = ctypes.CDLL(name, use_errno=True)
+		_library_handles[name] = handle
+
+	return handle

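Both helpers degrade to None when ctypes is unavailable and memoize their results across calls. An example (the library name is illustrative):

    from portage.util._ctypes import LoadLibrary, find_library

    filename = find_library('c')   # e.g. 'libc.so.6' on glibc systems
    libc = LoadLibrary(filename) if filename is not None else None
    print(filename)
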
diff --git a/portage_with_autodep/pym/portage/util/_desktop_entry.py b/portage_with_autodep/pym/portage/util/_desktop_entry.py
new file mode 100644
index 0000000..7901780
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_desktop_entry.py
@@ -0,0 +1,75 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import subprocess
+import sys
+
+try:
+	from configparser import Error as ConfigParserError, RawConfigParser
+except ImportError:
+	from ConfigParser import Error as ConfigParserError, RawConfigParser
+
+from portage import _encodings, _unicode_encode, _unicode_decode
+
+def parse_desktop_entry(path):
+	"""
+	Parse the given file with RawConfigParser and return the
+	result. This may raise an IOError from io.open(), or a
+	ParsingError from RawConfigParser.
+	"""
+	parser = RawConfigParser()
+
+	# use read_file/readfp in order to control decoding of unicode
+	try:
+		# Python >=3.2
+		read_file = parser.read_file
+	except AttributeError:
+		read_file = parser.readfp
+
+	with io.open(_unicode_encode(path,
+		encoding=_encodings['fs'], errors='strict'),
+		mode='r', encoding=_encodings['repo.content'],
+		errors='replace') as f:
+		read_file(f)
+
+	return parser
+
+_ignored_service_errors = (
+	'error: required key "Name" in group "Desktop Entry" is not present',
+	'error: key "Actions" is present in group "Desktop Entry", but the type is "Service" while this key is only valid for type "Application"',
+	'error: key "MimeType" is present in group "Desktop Entry", but the type is "Service" while this key is only valid for type "Application"',
+)
+
+def validate_desktop_entry(path):
+	args = ["desktop-file-validate", path]
+	if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
+		# Python 3.0 and 3.1 do not support bytes in Popen args.
+		args = [_unicode_encode(x, errors='strict') for x in args]
+	proc = subprocess.Popen(args,
+		stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+	output_lines = _unicode_decode(proc.communicate()[0]).splitlines()
+	proc.wait()
+
+	if output_lines:
+		try:
+			desktop_entry = parse_desktop_entry(path)
+		except ConfigParserError:
+			pass
+		else:
+			if desktop_entry.has_section("Desktop Entry"):
+				try:
+					entry_type = desktop_entry.get("Desktop Entry", "Type")
+				except ConfigParserError:
+					pass
+				else:
+					if entry_type == "Service":
+						# Filter false errors for Type=Service (bug #414125).
+						filtered_output = []
+						for line in output_lines:
+							if line[len(path)+2:] in _ignored_service_errors:
+								continue
+							filtered_output.append(line)
+						output_lines = filtered_output
+
+	return output_lines
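
The slice line[len(path)+2:] assumes that desktop-file-validate prefixes
every message with the file path followed by ": " (two characters), so
stripping that prefix leaves the bare message for comparison against
_ignored_service_errors. Illustratively:

path = "/usr/share/applications/foo.desktop"
line = path + ': error: required key "Name" in group "Desktop Entry" is not present'
message = line[len(path)+2:]
# message now equals the first entry of _ignored_service_errors exactly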

diff --git a/portage_with_autodep/pym/portage/util/_desktop_entry.pyo b/portage_with_autodep/pym/portage/util/_desktop_entry.pyo
new file mode 100644
index 0000000..7dec17a
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/_desktop_entry.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py b/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py
index 52670d9..e71ac73 100644
--- a/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py
+++ b/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.py
@@ -267,6 +267,7 @@ class LinkageMapELF(object):
 					owner = plibs.pop(fields[1], None)
 					lines.append((owner, "scanelf", ";".join(fields)))
 				proc.wait()
+				proc.stdout.close()
 
 		if plibs:
 			# Preserved libraries that did not appear in the scanelf output.
@@ -286,6 +287,13 @@ class LinkageMapELF(object):
 			l = l.rstrip("\n")
 			if not l:
 				continue
+			if '\0' in l:
+				# os.stat() will raise "TypeError: must be encoded string
+				# without NULL bytes, not str" in this case.
+				writemsg_level(_("\nLine contains null byte(s) " \
+					"in %s: %s\n\n") % (location, l),
+					level=logging.ERROR, noiselevel=-1)
+				continue
 			fields = l.split(";")
 			if len(fields) < 5:
 				writemsg_level(_("\nWrong number of fields " \

diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.pyo b/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.pyo
new file mode 100644
index 0000000..c1e5603
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/_dyn_libs/LinkageMapELF.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py b/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
index 602cf87..4bc64db 100644
--- a/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
+++ b/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.py
@@ -1,8 +1,10 @@
-# Copyright 1998-2011 Gentoo Foundation
+# Copyright 1998-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 import errno
+import json
 import logging
+import stat
 import sys
 
 try:
@@ -27,6 +29,19 @@ if sys.hexversion >= 0x3000000:
 
 class PreservedLibsRegistry(object):
 	""" This class handles the tracking of preserved library objects """
+
+	# JSON read support has been available since portage-2.2.0_alpha89.
+	_json_write = True
+
+	_json_write_opts = {
+		"ensure_ascii": False,
+		"indent": "\t",
+		"sort_keys": True
+	}
+	if sys.hexversion < 0x30200F0:
+		# indent only supports int number of spaces
+		_json_write_opts["indent"] = 4
+
 	def __init__(self, root, filename):
 		""" 
 			@param root: root used to check existence of paths in pruneNonExisting
@@ -55,22 +70,51 @@ class PreservedLibsRegistry(object):
 	def load(self):
 		""" Reload the registry data from file """
 		self._data = None
+		f = None
+		content = None
 		try:
-			self._data = pickle.load(
-				open(_unicode_encode(self._filename,
-					encoding=_encodings['fs'], errors='strict'), 'rb'))
-		except (ValueError, pickle.UnpicklingError) as e:
-			writemsg_level(_("!!! Error loading '%s': %s\n") % \
-				(self._filename, e), level=logging.ERROR, noiselevel=-1)
-		except (EOFError, IOError) as e:
-			if isinstance(e, EOFError) or e.errno == errno.ENOENT:
+			f = open(_unicode_encode(self._filename,
+					encoding=_encodings['fs'], errors='strict'), 'rb')
+			content = f.read()
+		except EnvironmentError as e:
+			if not hasattr(e, 'errno'):
+				raise
+			elif e.errno == errno.ENOENT:
 				pass
 			elif e.errno == PermissionDenied.errno:
 				raise PermissionDenied(self._filename)
 			else:
 				raise
+		finally:
+			if f is not None:
+				f.close()
+
+		# content is empty if it's an empty lock file
+		if content:
+			try:
+				self._data = json.loads(_unicode_decode(content,
+					encoding=_encodings['repo.content'], errors='strict'))
+			except SystemExit:
+				raise
+			except Exception as e:
+				try:
+					self._data = pickle.loads(content)
+				except SystemExit:
+					raise
+				except Exception:
+					writemsg_level(_("!!! Error loading '%s': %s\n") %
+						(self._filename, e), level=logging.ERROR,
+						noiselevel=-1)
+
 		if self._data is None:
 			self._data = {}
+		else:
+			for k, v in self._data.items():
+				if isinstance(v, (list, tuple)) and len(v) == 3 and \
+					isinstance(v[2], set):
+					# convert set to list, for write with JSONEncoder
+					self._data[k] = (v[0], v[1], list(v[2]))
+
 		self._data_orig = self._data.copy()
 		self.pruneNonExisting()
 
@@ -87,7 +131,12 @@ class PreservedLibsRegistry(object):
 			return
 		try:
 			f = atomic_ofstream(self._filename, 'wb')
-			pickle.dump(self._data, f, protocol=2)
+			if self._json_write:
+				f.write(_unicode_encode(
+					json.dumps(self._data, **self._json_write_opts),
+					encoding=_encodings['repo.content'], errors='strict'))
+			else:
+				pickle.dump(self._data, f, protocol=2)
 			f.close()
 		except EnvironmentError as e:
 			if e.errno != PermissionDenied.errno:
@@ -128,6 +177,9 @@ class PreservedLibsRegistry(object):
 				self._normalize_counter(self._data[cps][1]) == counter:
 			del self._data[cps]
 		elif len(paths) > 0:
+			if isinstance(paths, set):
+				# convert set to list, for write with JSONEncoder
+				paths = list(paths)
 			self._data[cps] = (cpv, counter, paths)
 
 	def unregister(self, cpv, slot, counter):
@@ -145,9 +197,38 @@ class PreservedLibsRegistry(object):
 		os = _os_merge
 
 		for cps in list(self._data):
-			cpv, counter, paths = self._data[cps]
-			paths = [f for f in paths \
-				if os.path.exists(os.path.join(self._root, f.lstrip(os.sep)))]
+			cpv, counter, _paths = self._data[cps]
+
+			paths = []
+			hardlinks = set()
+			symlinks = {}
+			for f in _paths:
+				f_abs = os.path.join(self._root, f.lstrip(os.sep))
+				try:
+					lst = os.lstat(f_abs)
+				except OSError:
+					continue
+				if stat.S_ISLNK(lst.st_mode):
+					try:
+						symlinks[f] = os.readlink(f_abs)
+					except OSError:
+						continue
+				elif stat.S_ISREG(lst.st_mode):
+					hardlinks.add(f)
+					paths.append(f)
+
+			# Only count symlinks as preserved if they still point to a hardlink
+			# in the same directory, in order to handle cases where a tool such
+			# as eselect-opengl has updated the symlink to point to a hardlink
+			# in a different directory (see bug #406837). The unused hardlink
+			# is automatically found by _find_unused_preserved_libs, since the
+			# soname symlink no longer points to it. After the hardlink is
+			# removed by _remove_preserved_libs, it calls pruneNonExisting
+			# which eliminates the irrelevant symlink from the registry here.
+			for f, target in symlinks.items():
+				if os.path.join(os.path.dirname(f), target) in hardlinks:
+					paths.append(f)
+
 			if len(paths) > 0:
 				self._data[cps] = (cpv, counter, paths)
 			else:
@@ -161,7 +242,7 @@ class PreservedLibsRegistry(object):
 	
 	def getPreservedLibs(self):
 		""" Return a mapping of packages->preserved objects.
-			@returns mapping of package instances to preserved objects
+			@return mapping of package instances to preserved objects
 			@rtype Dict cpv->list-of-paths
 		"""
 		if self._data is None:
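
The load() path above accepts both on-disk formats: JSON, written since this
change, and the legacy pickle. Condensed to its essentials, and assuming
plain UTF-8 where the real code uses _encodings['repo.content']:

import json
import pickle

def read_registry(content):
	# Prefer the new JSON format; fall back to the old pickle format.
	try:
		return json.loads(content.decode('utf-8'))
	except Exception:
		return pickle.loads(content)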

diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.pyo b/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.pyo
new file mode 100644
index 0000000..8cdd7cb
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/_dyn_libs/PreservedLibsRegistry.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/__init__.pyo b/portage_with_autodep/pym/portage/util/_dyn_libs/__init__.pyo
new file mode 100644
index 0000000..960b66e
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/_dyn_libs/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/_dyn_libs/display_preserved_libs.py b/portage_with_autodep/pym/portage/util/_dyn_libs/display_preserved_libs.py
new file mode 100644
index 0000000..b16478d
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_dyn_libs/display_preserved_libs.py
@@ -0,0 +1,98 @@
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+
+import portage
+from portage.output import colorize
+
+def display_preserved_libs(vardb):
+
+	MAX_DISPLAY = 3
+
+	plibdata = vardb._plib_registry.getPreservedLibs()
+	linkmap = vardb._linkmap
+	consumer_map = {}
+	owners = {}
+
+	try:
+		linkmap.rebuild()
+	except portage.exception.CommandNotFound as e:
+		portage.util.writemsg_level("!!! Command Not Found: %s\n" % (e,),
+			level=logging.ERROR, noiselevel=-1)
+	else:
+		search_for_owners = set()
+		for cpv in plibdata:
+			internal_plib_keys = set(linkmap._obj_key(f) \
+				for f in plibdata[cpv])
+			for f in plibdata[cpv]:
+				if f in consumer_map:
+					continue
+				consumers = []
+				for c in linkmap.findConsumers(f, greedy=False):
+					# Filter out any consumers that are also preserved libs
+					# belonging to the same package as the provider.
+					if linkmap._obj_key(c) not in internal_plib_keys:
+						consumers.append(c)
+				consumers.sort()
+				consumer_map[f] = consumers
+				search_for_owners.update(consumers[:MAX_DISPLAY+1])
+
+		owners = {}
+		for f in search_for_owners:
+			owner_set = set()
+			for owner in linkmap.getOwners(f):
+				owner_dblink = vardb._dblink(owner)
+				if owner_dblink.exists():
+					owner_set.add(owner_dblink)
+			if owner_set:
+				owners[f] = owner_set
+
+	all_preserved = set()
+	all_preserved.update(*plibdata.values())
+
+	for cpv in plibdata:
+		print(colorize("WARN", ">>>") + " package: %s" % cpv)
+		samefile_map = {}
+		for f in plibdata[cpv]:
+			obj_key = linkmap._obj_key(f)
+			alt_paths = samefile_map.get(obj_key)
+			if alt_paths is None:
+				alt_paths = set()
+				samefile_map[obj_key] = alt_paths
+			alt_paths.add(f)
+
+		for alt_paths in samefile_map.values():
+			alt_paths = sorted(alt_paths)
+			for p in alt_paths:
+				print(colorize("WARN", " * ") + " - %s" % (p,))
+			f = alt_paths[0]
+			consumers = consumer_map.get(f, [])
+			consumers_non_preserved = [c for c in consumers
+				if c not in all_preserved]
+			if consumers_non_preserved:
+				# Filter the consumers that are preserved libraries, since
+				# they don't need to be rebuilt (see bug #461908).
+				consumers = consumers_non_preserved
+
+			if len(consumers) == MAX_DISPLAY + 1:
+				# Display 1 extra consumer, instead of displaying
+				# "used by 1 other files".
+				max_display = MAX_DISPLAY + 1
+			else:
+				max_display = MAX_DISPLAY
+			for c in consumers[:max_display]:
+				if c in all_preserved:
+					# The owner is displayed elsewhere due to having
+					# its libs preserved, so distinguish this special
+					# case (see bug #461908).
+					owners_desc = "preserved"
+				else:
+					owners_desc = ", ".join(x.mycpv for x in owners.get(c, []))
+				print(colorize("WARN", " * ") + "     used by %s (%s)" % \
+					(c, owners_desc))
+			if len(consumers) > max_display:
+				print(colorize("WARN", " * ") + "     used by %d other files" %
+					(len(consumers) - max_display))
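
The max_display arithmetic above exists only to avoid the awkward trailing
line "used by 1 other files": when exactly one consumer would be hidden, it
is printed instead. In isolation:

MAX_DISPLAY = 3
for consumers in (["a", "b", "c", "d"], ["a", "b", "c", "d", "e"]):
	if len(consumers) == MAX_DISPLAY + 1:
		shown = len(consumers)  # four consumers: print all four
	else:
		shown = MAX_DISPLAY     # five consumers: print 3, then "2 other files"
	print(consumers[:shown], len(consumers) - shown)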

diff --git a/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.py b/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.py
new file mode 100644
index 0000000..bbbce52
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.py
@@ -0,0 +1,490 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import fcntl
+import logging
+import os
+import select
+import signal
+import time
+
+from portage.util import writemsg_level
+from ..SlotObject import SlotObject
+from .PollConstants import PollConstants
+from .PollSelectAdapter import PollSelectAdapter
+
+class EventLoop(object):
+
+	supports_multiprocessing = True
+
+	# TODO: Find out why SIGCHLD signals aren't delivered during poll
+	# calls, forcing us to wake up in order to receive them.
+	_sigchld_interval = 250
+
+	class _child_callback_class(SlotObject):
+		__slots__ = ("callback", "data", "pid", "source_id")
+
+	class _idle_callback_class(SlotObject):
+		__slots__ = ("args", "callback", "calling", "source_id")
+
+	class _io_handler_class(SlotObject):
+		__slots__ = ("args", "callback", "f", "source_id")
+
+	class _timeout_handler_class(SlotObject):
+		__slots__ = ("args", "function", "calling", "interval", "source_id",
+			"timestamp")
+
+	def __init__(self, main=True):
+		"""
+		@param main: If True then this is a singleton instance for use
+			in the main thread, otherwise it is a local instance which
+		can safely be used in a non-main thread (default is True, so
+			that global_event_loop does not need constructor arguments)
+		@type main: bool
+		"""
+		self._use_signal = main
+		self._poll_event_queue = []
+		self._poll_event_handlers = {}
+		self._poll_event_handler_ids = {}
+		# Increment id for each new handler.
+		self._event_handler_id = 0
+		self._idle_callbacks = {}
+		self._timeout_handlers = {}
+		self._timeout_interval = None
+		self._poll_obj = create_poll_instance()
+
+		self.IO_ERR = PollConstants.POLLERR
+		self.IO_HUP = PollConstants.POLLHUP
+		self.IO_IN = PollConstants.POLLIN
+		self.IO_NVAL = PollConstants.POLLNVAL
+		self.IO_OUT = PollConstants.POLLOUT
+		self.IO_PRI = PollConstants.POLLPRI
+
+		self._child_handlers = {}
+		self._sigchld_read = None
+		self._sigchld_write = None
+		self._sigchld_src_id = None
+		self._pid = os.getpid()
+
+	def _poll(self, timeout=None):
+		"""
+		All poll() calls pass through here. The poll events
+		are added directly to self._poll_event_queue.
+		In order to avoid endless blocking, this raises
+		StopIteration if timeout is None and there are
+		no file descriptors to poll.
+		"""
+
+		if timeout is None and \
+			not self._poll_event_handlers:
+			raise StopIteration(
+				"timeout is None and there are no poll() event handlers")
+
+		while True:
+			try:
+				self._poll_event_queue.extend(self._poll_obj.poll(timeout))
+				break
+			except select.error as e:
+				# Silently handle EINTR, which is normal when we have
+				# received a signal such as SIGINT.
+				if not (e.args and e.args[0] == errno.EINTR):
+					writemsg_level("\n!!! select error: %s\n" % (e,),
+						level=logging.ERROR, noiselevel=-1)
+				del e
+
+				# This typically means that we've received a SIGINT, so
+				# raise StopIteration in order to break out of our current
+				# iteration and respond appropriately to the signal as soon
+				# as possible.
+				raise StopIteration("interrupted")
+
+	def iteration(self, *args):
+		"""
+		Like glib.MainContext.iteration(), runs a single iteration.
+		@type may_block: bool
+		@param may_block: if True the call may block waiting for an event
+			(default is True).
+		@rtype: bool
+		@return: True if events were dispatched.
+		"""
+
+		may_block = True
+
+		if args:
+			if len(args) > 1:
+				raise TypeError(
+					"expected at most 1 argument (%s given)" % len(args))
+			may_block = args[0]
+
+		event_queue = self._poll_event_queue
+		event_handlers = self._poll_event_handlers
+		events_handled = 0
+
+		if not event_handlers:
+			if self._run_timeouts():
+				events_handled += 1
+			if not event_handlers:
+				if not events_handled and may_block and \
+					self._timeout_interval is not None:
+					# Block so that we don't waste cpu time by looping too
+					# quickly. This makes EventLoop useful for code that needs
+					# to wait for timeout callbacks regardless of whether or
+					# not any IO handlers are currently registered.
+					try:
+						self._poll(timeout=self._timeout_interval)
+					except StopIteration:
+						pass
+					if self._run_timeouts():
+						events_handled += 1
+
+			# If any timeouts have executed, then return immediately,
+			# in order to minimize latency in termination of iteration
+			# loops that they may control.
+			if events_handled or not event_handlers:
+				return bool(events_handled)
+
+		if not event_queue:
+
+			if may_block:
+				if self._child_handlers:
+					if self._timeout_interval is None:
+						timeout = self._sigchld_interval
+					else:
+						timeout = min(self._sigchld_interval,
+							self._timeout_interval)
+				else:
+					timeout = self._timeout_interval
+			else:
+				timeout = 0
+
+			try:
+				self._poll(timeout=timeout)
+			except StopIteration:
+				# This can be triggered by EINTR which is caused by signals.
+				pass
+
+		# NOTE: IO event handlers may be re-entrant, in case something
+		# like AbstractPollTask._wait_loop() needs to be called inside
+		# a handler for some reason.
+		while event_queue:
+			events_handled += 1
+			f, event = event_queue.pop()
+			x = event_handlers[f]
+			if not x.callback(f, event, *x.args):
+				self.source_remove(x.source_id)
+
+		# Run timeouts last, in order to minimize latency in
+		# termination of iteration loops that they may control.
+		if self._run_timeouts():
+			events_handled += 1
+
+		return bool(events_handled)
+
+	def child_watch_add(self, pid, callback, data=None):
+		"""
+		Like glib.child_watch_add(), sets callback to be called with the
+		user data specified by data when the child indicated by pid exits.
+		The signature for the callback is:
+
+			def callback(pid, condition, user_data)
+
+		where pid is the child process id, condition is the status
+		information about the child process and user_data is data.
+
+		@type pid: int
+		@param pid: process id of a child process to watch
+		@type callback: callable
+		@param callback: a function to call
+		@type data: object
+		@param data: the optional data to pass to function
+		@rtype: int
+		@return: an integer ID
+		"""
+		self._event_handler_id += 1
+		source_id = self._event_handler_id
+		self._child_handlers[source_id] = self._child_callback_class(
+			callback=callback, data=data, pid=pid, source_id=source_id)
+
+		if self._use_signal:
+			if self._sigchld_read is None:
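+				# Self-pipe trick: the SIGCHLD handler writes a byte to
+				# this non-blocking pipe, waking up poll() so that exited
+				# children can be reaped outside of signal handler context.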
+				self._sigchld_read, self._sigchld_write = os.pipe()
+				fcntl.fcntl(self._sigchld_read, fcntl.F_SETFL,
+					fcntl.fcntl(self._sigchld_read,
+					fcntl.F_GETFL) | os.O_NONBLOCK)
+
+			# The IO watch is dynamically registered and unregistered as
+			# needed, since we don't want to consider it as a valid source
+			# of events when there are no child listeners. It's important
+			# to distinguish when there are no valid sources of IO events,
+			# in order to avoid an endless poll call if there's no timeout.
+			if self._sigchld_src_id is None:
+				self._sigchld_src_id = self.io_add_watch(
+					self._sigchld_read, self.IO_IN, self._sigchld_io_cb)
+				signal.signal(signal.SIGCHLD, self._sigchld_sig_cb)
+
+		# poll now, in case the SIGCHLD has already arrived
+		self._poll_child_processes()
+		return source_id
+
+	def _sigchld_sig_cb(self, signum, frame):
+		# If this signal handler was not installed by the
+		# current process then the signal doesn't belong to
+		# this EventLoop instance.
+		if os.getpid() == self._pid:
+			os.write(self._sigchld_write, b'\0')
+
+	def _sigchld_io_cb(self, fd, events):
+		try:
+			while True:
+				os.read(self._sigchld_read, 4096)
+		except OSError:
+			# read until EAGAIN
+			pass
+		self._poll_child_processes()
+		return True
+
+	def _poll_child_processes(self):
+		if not self._child_handlers:
+			return False
+
+		calls = 0
+
+		for x in list(self._child_handlers.values()):
+			if x.source_id not in self._child_handlers:
+				# it's already been called via re-entrance
+				continue
+			try:
+				wait_retval = os.waitpid(x.pid, os.WNOHANG)
+			except OSError as e:
+				if e.errno != errno.ECHILD:
+					raise
+				del e
+				self.source_remove(x.source_id)
+			else:
+				# With waitpid and WNOHANG, only check the
+				# first element of the tuple since the second
+				# element may vary (bug #337465).
+				if wait_retval[0] != 0:
+					calls += 1
+					self.source_remove(x.source_id)
+					x.callback(x.pid, wait_retval[1], x.data)
+
+		return bool(calls)
+
+	def idle_add(self, callback, *args):
+		"""
+		Like glib.idle_add(), if callback returns False it is
+		automatically removed from the list of event sources and will
+		not be called again.
+
+		@type callback: callable
+		@param callback: a function to call
+		@rtype: int
+		@return: an integer ID
+		"""
+		self._event_handler_id += 1
+		source_id = self._event_handler_id
+		self._idle_callbacks[source_id] = self._idle_callback_class(
+			args=args, callback=callback, source_id=source_id)
+		return source_id
+
+	def _run_idle_callbacks(self):
+		if not self._idle_callbacks:
+			return
+		# Iterate over our local list, since self._idle_callbacks can be
+		# modified during the execution of these callbacks.
+		for x in list(self._idle_callbacks.values()):
+			if x.source_id not in self._idle_callbacks:
+				# it got cancelled while executing another callback
+				continue
+			if x.calling:
+				# don't call it recursively
+				continue
+			x.calling = True
+			try:
+				if not x.callback(*x.args):
+					self.source_remove(x.source_id)
+			finally:
+				x.calling = False
+
+	def timeout_add(self, interval, function, *args):
+		"""
+		Like glib.timeout_add(), the interval argument is the number of
+		milliseconds between calls to your function, and your function
+		should return False to stop being called, or True to continue
+		being called. Any additional positional arguments given here
+		are passed to your function when it's called.
+		"""
+		self._event_handler_id += 1
+		source_id = self._event_handler_id
+		self._timeout_handlers[source_id] = \
+			self._timeout_handler_class(
+				interval=interval, function=function, args=args,
+				source_id=source_id, timestamp=time.time())
+		if self._timeout_interval is None or self._timeout_interval > interval:
+			self._timeout_interval = interval
+		return source_id
+
+	def _run_timeouts(self):
+
+		calls = 0
+		if not self._use_signal:
+			if self._poll_child_processes():
+				calls += 1
+
+		self._run_idle_callbacks()
+
+		if not self._timeout_handlers:
+			return bool(calls)
+
+		ready_timeouts = []
+		current_time = time.time()
+		for x in self._timeout_handlers.values():
+			elapsed_seconds = current_time - x.timestamp
+			# elapsed_seconds < 0 means the system clock has been adjusted
+			if elapsed_seconds < 0 or \
+				(x.interval - 1000 * elapsed_seconds) <= 0:
+				ready_timeouts.append(x)
+
+		# Iterate over our local list, since self._timeout_handlers can be
+		# modified during the execution of these callbacks.
+		for x in ready_timeouts:
+			if x.source_id not in self._timeout_handlers:
+				# it got cancelled while executing another timeout
+				continue
+			if x.calling:
+				# don't call it recursively
+				continue
+			calls += 1
+			x.calling = True
+			try:
+				x.timestamp = time.time()
+				if not x.function(*x.args):
+					self.source_remove(x.source_id)
+			finally:
+				x.calling = False
+
+		return bool(calls)
+
+	def io_add_watch(self, f, condition, callback, *args):
+		"""
+		Like glib.io_add_watch(), your function should return False to
+		stop being called, or True to continue being called. Any
+		additional positional arguments given here are passed to your
+		function when it's called.
+
+		@type f: int or object with fileno() method
+		@param f: a file descriptor to monitor
+		@type condition: int
+		@param condition: a condition mask
+		@type callback: callable
+		@param callback: a function to call
+		@rtype: int
+		@return: an integer ID of the event source
+		"""
+		if f in self._poll_event_handlers:
+			raise AssertionError("fd %d is already registered" % f)
+		self._event_handler_id += 1
+		source_id = self._event_handler_id
+		self._poll_event_handler_ids[source_id] = f
+		self._poll_event_handlers[f] = self._io_handler_class(
+			args=args, callback=callback, f=f, source_id=source_id)
+		self._poll_obj.register(f, condition)
+		return source_id
+
+	def source_remove(self, reg_id):
+		"""
+		Like glib.source_remove(), this returns True if the given reg_id
+		is found and removed, and False if the reg_id is invalid or has
+		already been removed.
+		"""
+		x = self._child_handlers.pop(reg_id, None)
+		if x is not None:
+			if not self._child_handlers and self._use_signal:
+				signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+				self.source_remove(self._sigchld_src_id)
+				self._sigchld_src_id = None
+			return True
+		idle_callback = self._idle_callbacks.pop(reg_id, None)
+		if idle_callback is not None:
+			return True
+		timeout_handler = self._timeout_handlers.pop(reg_id, None)
+		if timeout_handler is not None:
+			if timeout_handler.interval == self._timeout_interval:
+				if self._timeout_handlers:
+					self._timeout_interval = \
+						min(x.interval for x in self._timeout_handlers.values())
+				else:
+					self._timeout_interval = None
+			return True
+		f = self._poll_event_handler_ids.pop(reg_id, None)
+		if f is None:
+			return False
+		self._poll_obj.unregister(f)
+		if self._poll_event_queue:
+			# Discard any unhandled events that belong to this file,
+			# in order to prevent these events from being erroneously
+			# delivered to a future handler that is using a reallocated
+			# file descriptor of the same numeric value (causing
+			# extremely confusing bugs).
+			remaining_events = []
+			discarded_events = False
+			for event in self._poll_event_queue:
+				if event[0] == f:
+					discarded_events = True
+				else:
+					remaining_events.append(event)
+
+			if discarded_events:
+				self._poll_event_queue[:] = remaining_events
+
+		del self._poll_event_handlers[f]
+		return True
+
+_can_poll_device = None
+
+def can_poll_device():
+	"""
+	Test if it's possible to use poll() on a device such as a pty. This
+	is known to fail on Darwin.
+	@rtype: bool
+	@return: True if poll() on a device succeeds, False otherwise.
+	"""
+
+	global _can_poll_device
+	if _can_poll_device is not None:
+		return _can_poll_device
+
+	if not hasattr(select, "poll"):
+		_can_poll_device = False
+		return _can_poll_device
+
+	try:
+		dev_null = open('/dev/null', 'rb')
+	except IOError:
+		_can_poll_device = False
+		return _can_poll_device
+
+	p = select.poll()
+	p.register(dev_null.fileno(), PollConstants.POLLIN)
+
+	invalid_request = False
+	for f, event in p.poll():
+		if event & PollConstants.POLLNVAL:
+			invalid_request = True
+			break
+	dev_null.close()
+
+	_can_poll_device = not invalid_request
+	return _can_poll_device
+
+def create_poll_instance():
+	"""
+	Create an instance of select.poll, or an instance of
+	PollSelectAdapter if there is no poll() implementation or
+	it is broken somehow.
+	"""
+	if can_poll_device():
+		return select.poll()
+	return PollSelectAdapter()
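
A small usage sketch of the API above: register a one-shot timeout and an IO
watch on a pipe, then drive the loop with iteration() until the read callback
fires (all names here are local to the example):

import os
from portage.util._eventloop.EventLoop import EventLoop

loop = EventLoop(main=False)
r, w = os.pipe()
done = []

def on_timeout():
	os.write(w, b"ping")  # runs roughly 100 ms after registration
	return False          # returning False removes the timeout source

def on_ready(fd, event):
	done.append(os.read(fd, 16))
	return False          # returning False removes the IO watch

loop.timeout_add(100, on_timeout)
loop.io_add_watch(r, loop.IO_IN, on_ready)
while not done:
	loop.iteration()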

diff --git a/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.pyo b/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.pyo
new file mode 100644
index 0000000..6ce2883
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/_eventloop/EventLoop.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/_eventloop/GlibEventLoop.py b/portage_with_autodep/pym/portage/util/_eventloop/GlibEventLoop.py
new file mode 100644
index 0000000..f2f5c5e
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_eventloop/GlibEventLoop.py
@@ -0,0 +1,23 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class GlibEventLoop(object):
+
+	# TODO: Support multiprocessing by using a separate glib.MainContext
+	# instance for each process.
+	supports_multiprocessing = False
+
+	def __init__(self):
+		import gi.repository.GLib as glib
+		self.IO_ERR = glib.IO_ERR
+		self.IO_HUP = glib.IO_HUP
+		self.IO_IN = glib.IO_IN
+		self.IO_NVAL = glib.IO_NVAL
+		self.IO_OUT = glib.IO_OUT
+		self.IO_PRI = glib.IO_PRI
+		self.iteration = glib.main_context_default().iteration
+		self.child_watch_add = glib.child_watch_add
+		self.idle_add = glib.idle_add
+		self.io_add_watch = glib.io_add_watch
+		self.timeout_add = glib.timeout_add
+		self.source_remove = glib.source_remove

diff --git a/portage_with_autodep/pym/portage/util/_eventloop/GlibEventLoop.pyo b/portage_with_autodep/pym/portage/util/_eventloop/GlibEventLoop.pyo
new file mode 100644
index 0000000..d3453a4
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/_eventloop/GlibEventLoop.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/PollConstants.py b/portage_with_autodep/pym/portage/util/_eventloop/PollConstants.py
similarity index 100%
rename from portage_with_autodep/pym/_emerge/PollConstants.py
rename to portage_with_autodep/pym/portage/util/_eventloop/PollConstants.py

diff --git a/portage_with_autodep/pym/portage/util/_eventloop/PollConstants.pyo b/portage_with_autodep/pym/portage/util/_eventloop/PollConstants.pyo
new file mode 100644
index 0000000..6c7c953
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/_eventloop/PollConstants.pyo differ

diff --git a/portage_with_autodep/pym/_emerge/PollSelectAdapter.py b/portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.py
similarity index 93%
rename from portage_with_autodep/pym/_emerge/PollSelectAdapter.py
rename to portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.py
index c11dab8..17e63d9 100644
--- a/portage_with_autodep/pym/_emerge/PollSelectAdapter.py
+++ b/portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.py
@@ -1,9 +1,10 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-from _emerge.PollConstants import PollConstants
+from .PollConstants import PollConstants
 import select
-class PollSelectAdapter(PollConstants):
+
+class PollSelectAdapter(object):
 
 	"""
 	Use select to emulate a poll object, for

diff --git a/portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.pyo b/portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.pyo
new file mode 100644
index 0000000..e9ecc51
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/_eventloop/PollSelectAdapter.pyo differ

diff --git a/portage_with_autodep/pym/portage/tests/unicode/__init__.py b/portage_with_autodep/pym/portage/util/_eventloop/__init__.py
similarity index 65%
rename from portage_with_autodep/pym/portage/tests/unicode/__init__.py
rename to portage_with_autodep/pym/portage/util/_eventloop/__init__.py
index 21a391a..418ad86 100644
--- a/portage_with_autodep/pym/portage/tests/unicode/__init__.py
+++ b/portage_with_autodep/pym/portage/util/_eventloop/__init__.py
@@ -1,2 +1,2 @@
-# Copyright 2010 Gentoo Foundation
+# Copyright 2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2

diff --git a/portage_with_autodep/pym/portage/util/_eventloop/__init__.pyo b/portage_with_autodep/pym/portage/util/_eventloop/__init__.pyo
new file mode 100644
index 0000000..69864a6
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/_eventloop/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/_eventloop/global_event_loop.py b/portage_with_autodep/pym/portage/util/_eventloop/global_event_loop.py
new file mode 100644
index 0000000..502dab8
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_eventloop/global_event_loop.py
@@ -0,0 +1,35 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+
+from .EventLoop import EventLoop
+
+_default_constructor = EventLoop
+#from .GlibEventLoop import GlibEventLoop as _default_constructor
+
+# If _default_constructor doesn't support multiprocessing,
+# then _multiprocessing_constructor is used in subprocesses.
+_multiprocessing_constructor = EventLoop
+
+_MAIN_PID = os.getpid()
+_instances = {}
+
+def global_event_loop():
+	"""
+	Get a global EventLoop (or compatible object) instance which
+	belongs exclusively to the current process.
+	"""
+
+	pid = os.getpid()
+	instance = _instances.get(pid)
+	if instance is not None:
+		return instance
+
+	constructor = _default_constructor
+	if not constructor.supports_multiprocessing and pid != _MAIN_PID:
+		constructor = _multiprocessing_constructor
+
+	instance = constructor()
+	_instances[pid] = instance
+	return instance
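
Because instances are keyed by os.getpid(), a forked child transparently
gets its own loop instead of sharing poll state with its parent:

import os
from portage.util._eventloop.global_event_loop import global_event_loop

parent_loop = global_event_loop()
pid = os.fork()
if pid == 0:
	# Child: os.getpid() differs, so a fresh EventLoop is constructed.
	assert global_event_loop() is not parent_loop
	os._exit(0)
os.waitpid(pid, 0)
assert global_event_loop() is parent_loop  # the parent keeps its instance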

diff --git a/portage_with_autodep/pym/portage/util/_eventloop/global_event_loop.pyo b/portage_with_autodep/pym/portage/util/_eventloop/global_event_loop.pyo
new file mode 100644
index 0000000..3d57192
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/_eventloop/global_event_loop.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/_get_vm_info.py b/portage_with_autodep/pym/portage/util/_get_vm_info.py
new file mode 100644
index 0000000..e8ad938
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_get_vm_info.py
@@ -0,0 +1,80 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import platform
+import subprocess
+
+from portage import _unicode_decode
+
+def get_vm_info():
+
+	vm_info = {}
+
+	if platform.system() == 'Linux':
+		try:
+			proc = subprocess.Popen(["free"],
+				stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+		except OSError:
+			pass
+		else:
+			output = _unicode_decode(proc.communicate()[0])
+			if proc.wait() == os.EX_OK:
+				for line in output.splitlines():
+					line = line.split()
+					if len(line) < 2:
+						continue
+					if line[0] == "Mem:":
+						try:
+							vm_info["ram.total"] = int(line[1]) * 1024
+						except ValueError:
+							pass
+						if len(line) > 3:
+							try:
+								vm_info["ram.free"] = int(line[3]) * 1024
+							except ValueError:
+								pass
+					elif line[0] == "Swap:":
+						try:
+							vm_info["swap.total"] = int(line[1]) * 1024
+						except ValueError:
+							pass
+						if len(line) > 3:
+							try:
+								vm_info["swap.free"] = int(line[3]) * 1024
+							except ValueError:
+								pass
+
+	else:
+
+		try:
+			proc = subprocess.Popen(["sysctl", "-a"],
+				stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+		except OSError:
+			pass
+		else:
+			output = _unicode_decode(proc.communicate()[0])
+			if proc.wait() == os.EX_OK:
+				for line in output.splitlines():
+					line = line.split(":", 1)
+					if len(line) != 2:
+						continue
+					line[1] = line[1].strip()
+					if line[0] == "hw.physmem":
+						try:
+							vm_info["ram.total"] = int(line[1])
+						except ValueError:
+							pass
+					elif line[0] == "vm.swap_total":
+						try:
+							vm_info["swap.total"] = int(line[1])
+						except ValueError:
+							pass
+					elif line[0] == "Free Memory Pages":
+						if line[1][-1] == "K":
+							try:
+								vm_info["ram.free"] = int(line[1][:-1]) * 1024
+							except ValueError:
+								pass
+
+	return vm_info
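
get_vm_info() returns byte counts keyed by resource; free(1) reports
kilobytes, hence the * 1024 scaling above. A typical (illustrative) result
on a Linux host:

from portage.util._get_vm_info import get_vm_info

info = get_vm_info()
# e.g. {'ram.total': 8254390272, 'ram.free': 1957064704,
#       'swap.total': 8589930496, 'swap.free': 8589930496}
print(info.get("ram.free"))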

diff --git a/portage_with_autodep/pym/portage/util/_info_files.py b/portage_with_autodep/pym/portage/util/_info_files.py
new file mode 100644
index 0000000..fabf74b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_info_files.py
@@ -0,0 +1,138 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import re
+import stat
+import subprocess
+
+import portage
+from portage import os
+
+def chk_updated_info_files(root, infodirs, prev_mtimes):
+
+	if os.path.exists("/usr/bin/install-info"):
+		out = portage.output.EOutput()
+		regen_infodirs = []
+		for z in infodirs:
+			if z == '':
+				continue
+			inforoot = portage.util.normalize_path(root + z)
+			if os.path.isdir(inforoot) and \
+				not [x for x in os.listdir(inforoot) \
+				if x.startswith('.keepinfodir')]:
+					infomtime = os.stat(inforoot)[stat.ST_MTIME]
+					if inforoot not in prev_mtimes or \
+						prev_mtimes[inforoot] != infomtime:
+							regen_infodirs.append(inforoot)
+
+		if not regen_infodirs:
+			portage.util.writemsg_stdout("\n")
+			if portage.util.noiselimit >= 0:
+				out.einfo("GNU info directory index is up-to-date.")
+		else:
+			portage.util.writemsg_stdout("\n")
+			if portage.util.noiselimit >= 0:
+				out.einfo("Regenerating GNU info directory index...")
+
+			dir_extensions = ("", ".gz", ".bz2")
+			icount = 0
+			badcount = 0
+			errmsg = ""
+			for inforoot in regen_infodirs:
+				if inforoot == '':
+					continue
+
+				if not os.path.isdir(inforoot) or \
+					not os.access(inforoot, os.W_OK):
+					continue
+
+				file_list = os.listdir(inforoot)
+				file_list.sort()
+				dir_file = os.path.join(inforoot, "dir")
+				moved_old_dir = False
+				processed_count = 0
+				for x in file_list:
+					if x.startswith(".") or \
+						os.path.isdir(os.path.join(inforoot, x)):
+						continue
+					if x.startswith("dir"):
+						skip = False
+						for ext in dir_extensions:
+							if x == "dir" + ext or \
+								x == "dir" + ext + ".old":
+								skip = True
+								break
+						if skip:
+							continue
+					if processed_count == 0:
+						for ext in dir_extensions:
+							try:
+								os.rename(dir_file + ext, dir_file + ext + ".old")
+								moved_old_dir = True
+							except EnvironmentError as e:
+								if e.errno != errno.ENOENT:
+									raise
+								del e
+					processed_count += 1
+					try:
+						proc = subprocess.Popen(
+							['/usr/bin/install-info',
+							'--dir-file=%s' % os.path.join(inforoot, "dir"),
+							os.path.join(inforoot, x)],
+							env=dict(os.environ, LANG="C", LANGUAGE="C"),
+							stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+					except OSError:
+						myso = None
+					else:
+						myso = portage._unicode_decode(
+							proc.communicate()[0]).rstrip("\n")
+						proc.wait()
+					existsstr = "already exists, for file `"
+					if myso:
+						if re.search(existsstr, myso):
+							# Already exists... Don't increment the count for this.
+							pass
+						elif myso[:44] == "install-info: warning: no info dir entry in ":
+							# This info file doesn't contain a DIR-header: install-info produces this
+							# (harmless) warning (the --quiet switch doesn't seem to work).
+							# Don't increment the count for this.
+							pass
+						else:
+							badcount += 1
+							errmsg += myso + "\n"
+					icount += 1
+
+				if moved_old_dir and not os.path.exists(dir_file):
+					# We didn't generate a new dir file, so put the old file
+					# back where it was originally found.
+					for ext in dir_extensions:
+						try:
+							os.rename(dir_file + ext + ".old", dir_file + ext)
+						except EnvironmentError as e:
+							if e.errno != errno.ENOENT:
+								raise
+							del e
+
+				# Clean up dir.old cruft so that it doesn't prevent
+				# unmerge of otherwise empty directories.
+				for ext in dir_extensions:
+					try:
+						os.unlink(dir_file + ext + ".old")
+					except EnvironmentError as e:
+						if e.errno != errno.ENOENT:
+							raise
+						del e
+
+				# Update mtime so we can potentially avoid regenerating.
+				prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]
+
+			if badcount:
+				out.eerror("Processed %d info files; %d errors." % \
+					(icount, badcount))
+				portage.util.writemsg_level(errmsg,
+					level=logging.ERROR, noiselevel=-1)
+			else:
+				if icount > 0 and portage.util.noiselimit >= 0:
+					out.einfo("Processed %d info files." % (icount,))

diff --git a/portage_with_autodep/pym/portage/util/_path.py b/portage_with_autodep/pym/portage/util/_path.py
new file mode 100644
index 0000000..6fbcb43
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_path.py
@@ -0,0 +1,27 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import stat
+
+from portage import os
+from portage.exception import PermissionDenied
+
+def exists_raise_eaccess(path):
+	try:
+		os.stat(path)
+	except OSError as e:
+		if e.errno == PermissionDenied.errno:
+			raise PermissionDenied("stat('%s')" % path)
+		return False
+	else:
+		return True
+
+def isdir_raise_eaccess(path):
+	try:
+		st = os.stat(path)
+	except OSError as e:
+		if e.errno == PermissionDenied.errno:
+			raise PermissionDenied("stat('%s')" % path)
+		return False
+	else:
+		return stat.S_ISDIR(st.st_mode)
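
Unlike os.path.exists() and os.path.isdir(), which return False on any
OSError, these helpers keep EACCES visible so callers can distinguish
"absent" from "unreadable". For instance:

from portage.exception import PermissionDenied
from portage.util._path import isdir_raise_eaccess

try:
	if isdir_raise_eaccess("/etc/portage"):
		pass  # path exists and is a directory
except PermissionDenied:
	pass      # stat() was denied, so existence could not be determined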

diff --git a/portage_with_autodep/pym/portage/util/_pty.py b/portage_with_autodep/pym/portage/util/_pty.py
index f45ff0a..11c8b92 100644
--- a/portage_with_autodep/pym/portage/util/_pty.py
+++ b/portage_with_autodep/pym/portage/util/_pty.py
@@ -1,144 +1,20 @@
 # Copyright 2010-2011 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-import array
-import fcntl
 import platform
 import pty
-import select
-import sys
 import termios
 
-from portage import os, _unicode_decode, _unicode_encode
+from portage import os
 from portage.output import get_term_size, set_term_size
-from portage.process import spawn_bash
 from portage.util import writemsg
 
-def _can_test_pty_eof():
-	"""
-	The _test_pty_eof() function seems to hang on most
-	kernels other than Linux.
-	This was reported for the following kernels which used to work fine
-	without this EOF test: Darwin, AIX, FreeBSD.  They seem to hang on
-	the slave_file.close() call.  Note that Python's implementation of
-	openpty on Solaris already caused random hangs without this EOF test
-	and hence is globally disabled.
-	@rtype: bool
-	@returns: True if _test_pty_eof() won't hang, False otherwise.
-	"""
-	return platform.system() in ("Linux",)
-
-def _test_pty_eof(fdopen_buffered=False):
-	"""
-	Returns True if this issues is fixed for the currently
-	running version of python: http://bugs.python.org/issue5380
-	Raises an EnvironmentError from openpty() if it fails.
-
-	NOTE: This issue is only problematic when array.fromfile()
-	is used, rather than os.read(). However, array.fromfile()
-	is preferred since it is approximately 10% faster.
-
-	New development: It appears that array.fromfile() is usable
-	with python3 as long as fdopen is called with a bufsize
-	argument of 0.
-	"""
-
-	use_fork = False
-
-	test_string = 2 * "blah blah blah\n"
-	test_string = _unicode_decode(test_string,
-		encoding='utf_8', errors='strict')
-
-	# may raise EnvironmentError
-	master_fd, slave_fd = pty.openpty()
-
-	# Non-blocking mode is required for Darwin kernel.
-	fcntl.fcntl(master_fd, fcntl.F_SETFL,
-		fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
-
-	# Disable post-processing of output since otherwise weird
-	# things like \n -> \r\n transformations may occur.
-	mode = termios.tcgetattr(slave_fd)
-	mode[1] &= ~termios.OPOST
-	termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
-
-	# Simulate a subprocess writing some data to the
-	# slave end of the pipe, and then exiting.
-	pid = None
-	if use_fork:
-		pids = spawn_bash(_unicode_encode("echo -n '%s'" % test_string,
-			encoding='utf_8', errors='strict'), env=os.environ,
-			fd_pipes={0:sys.stdin.fileno(), 1:slave_fd, 2:slave_fd},
-			returnpid=True)
-		if isinstance(pids, int):
-			os.close(master_fd)
-			os.close(slave_fd)
-			raise EnvironmentError('spawn failed')
-		pid = pids[0]
-	else:
-		os.write(slave_fd, _unicode_encode(test_string,
-			encoding='utf_8', errors='strict'))
-	os.close(slave_fd)
-
-	# If using a fork, we must wait for the child here,
-	# in order to avoid a race condition that would
-	# lead to inconsistent results.
-	if pid is not None:
-		os.waitpid(pid, 0)
-
-	if fdopen_buffered:
-		master_file = os.fdopen(master_fd, 'rb')
-	else:
-		master_file = os.fdopen(master_fd, 'rb', 0)
-	eof = False
-	data = []
-	iwtd = [master_file]
-	owtd = []
-	ewtd = []
-
-	while not eof:
-
-		events = select.select(iwtd, owtd, ewtd)
-		if not events[0]:
-			eof = True
-			break
-
-		buf = array.array('B')
-		try:
-			buf.fromfile(master_file, 1024)
-		except (EOFError, IOError):
-			eof = True
-
-		if not buf:
-			eof = True
-		else:
-			data.append(_unicode_decode(buf.tostring(),
-				encoding='utf_8', errors='strict'))
-
-	master_file.close()
-
-	return test_string == ''.join(data)
-
-# If _test_pty_eof() can't be used for runtime detection of
-# http://bugs.python.org/issue5380, openpty can't safely be used
-# unless we can guarantee that the current version of python has
-# been fixed (affects all current versions of python3). When
-# this issue is fixed in python3, we can add another sys.hexversion
-# conditional to enable openpty support in the fixed versions.
-if sys.hexversion >= 0x3000000 and not _can_test_pty_eof():
-	_disable_openpty = True
-else:
-	# Disable the use of openpty on Solaris as it seems Python's openpty
-	# implementation doesn't play nice on Solaris with Portage's
-	# behaviour causing hangs/deadlocks.
-	# Additional note for the future: on Interix, pipes do NOT work, so
-	# _disable_openpty on Interix must *never* be True
-	_disable_openpty = platform.system() in ("SunOS",)
-_tested_pty = False
-
-if not _can_test_pty_eof():
-	# Skip _test_pty_eof() on systems where it hangs.
-	_tested_pty = True
+# Disable the use of openpty on Solaris as it seems Python's openpty
+# implementation doesn't play nice on Solaris with Portage's
+# behaviour causing hangs/deadlocks.
+# Additional note for the future: on Interix, pipes do NOT work, so
+# _disable_openpty on Interix must *never* be True
+_disable_openpty = platform.system() in ("SunOS",)
 
 _fbsd_test_pty = platform.system() == 'FreeBSD'
 
@@ -151,24 +27,14 @@ def _create_pty_or_pipe(copy_term_size=None):
 		then the term size will be copied to the pty.
 	@type copy_term_size: int
 	@rtype: tuple
-	@returns: A tuple of (is_pty, master_fd, slave_fd) where
+	@return: A tuple of (is_pty, master_fd, slave_fd) where
 		is_pty is True if a pty was successfully allocated, and
 		False if a normal pipe was allocated.
 	"""
 
 	got_pty = False
 
-	global _disable_openpty, _fbsd_test_pty, _tested_pty
-	if not (_tested_pty or _disable_openpty):
-		try:
-			if not _test_pty_eof():
-				_disable_openpty = True
-		except EnvironmentError as e:
-			_disable_openpty = True
-			writemsg("openpty failed: '%s'\n" % str(e),
-				noiselevel=-1)
-			del e
-		_tested_pty = True
+	global _disable_openpty, _fbsd_test_pty
 
 	if _fbsd_test_pty and not _disable_openpty:
 		# Test for python openpty breakage after freebsd7 to freebsd8

diff --git a/portage_with_autodep/pym/portage/util/_pty.pyo b/portage_with_autodep/pym/portage/util/_pty.pyo
new file mode 100644
index 0000000..70b5eb0
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/_pty.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/_urlopen.py b/portage_with_autodep/pym/portage/util/_urlopen.py
new file mode 100644
index 0000000..307624b
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/_urlopen.py
@@ -0,0 +1,42 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+try:
+	from urllib.request import urlopen as _urlopen
+	import urllib.parse as urllib_parse
+	import urllib.request as urllib_request
+	from urllib.parse import splituser as urllib_parse_splituser
+except ImportError:
+	from urllib import urlopen as _urlopen
+	import urlparse as urllib_parse
+	import urllib2 as urllib_request
+	from urllib import splituser as urllib_parse_splituser
+
+def urlopen(url):
+	try:
+		return _urlopen(url)
+	except SystemExit:
+		raise
+	except Exception:
+		if sys.hexversion < 0x3000000:
+			raise
+		parse_result = urllib_parse.urlparse(url)
+		if parse_result.scheme not in ("http", "https") or \
+			not parse_result.username:
+			raise
+
+	return _new_urlopen(url)
+
+def _new_urlopen(url):
+	# This is experimental code for bug #413983.
+	parse_result = urllib_parse.urlparse(url)
+	netloc = urllib_parse_splituser(parse_result.netloc)[1]
+	url = urllib_parse.urlunparse((parse_result.scheme, netloc, parse_result.path, parse_result.params, parse_result.query, parse_result.fragment))
+	password_manager = urllib_request.HTTPPasswordMgrWithDefaultRealm()
+	if parse_result.username is not None:
+		password_manager.add_password(None, url, parse_result.username, parse_result.password)
+	auth_handler = urllib_request.HTTPBasicAuthHandler(password_manager)
+	opener = urllib_request.build_opener(auth_handler)
+	return opener.open(url)
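
On Python 3 the stock urlopen() ignores credentials embedded in the URL, so
the except branch above falls through to _new_urlopen(), which strips them
from the netloc and resubmits them via HTTPBasicAuthHandler. The host and
credentials below are hypothetical:

from portage.util._urlopen import urlopen

# splituser() reduces "user:secret@example.org" to "example.org", then the
# credentials are registered with HTTPPasswordMgrWithDefaultRealm and the
# request is retried through build_opener(auth_handler).
f = urlopen("https://user:secret@example.org/distfiles/foo.tar.gz")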

diff --git a/portage_with_autodep/pym/portage/util/_urlopen.pyo b/portage_with_autodep/pym/portage/util/_urlopen.pyo
new file mode 100644
index 0000000..9f51de8
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/_urlopen.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/digraph.py b/portage_with_autodep/pym/portage/util/digraph.py
index 1bbe10f..f3ae658 100644
--- a/portage_with_autodep/pym/portage/util/digraph.py
+++ b/portage_with_autodep/pym/portage/util/digraph.py
@@ -317,16 +317,23 @@ class digraph(object):
 		"""
 		all_cycles = []
 		for node in self.nodes:
+			# If we have multiple paths of the same length, we have to
+			# return them all, so that we always get the same results
+			# even with PYTHONHASHSEED="random" enabled.
 			shortest_path = None
+			candidates = []
 			for child in self.child_nodes(node, ignore_priority):
 				path = self.shortest_path(child, node, ignore_priority)
 				if path is None:
 					continue
-				if not shortest_path or len(shortest_path) > len(path):
+				if not shortest_path or len(shortest_path) >= len(path):
 					shortest_path = path
-			if shortest_path:
-				if not max_length or len(shortest_path) <= max_length:
-					all_cycles.append(shortest_path)
+					candidates.append(path)
+			if shortest_path and \
+				(not max_length or len(shortest_path) <= max_length):
+				for path in candidates:
+					if len(path) == len(shortest_path):
+						all_cycles.append(path)
 		return all_cycles
 
 	# Backward compatibility
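
With the candidates list, every shortest cycle through a node is collected,
so the result no longer depends on dict iteration order under
PYTHONHASHSEED="random". A toy two-node cycle (digraph.add(child, parent)
records a parent -> child edge):

from portage.util.digraph import digraph

g = digraph()
g.add("b", "a")  # edge a -> b
g.add("a", "b")  # edge b -> a, closing the cycle
print(g.get_cycles())  # both length-2 rotations of the a/b cycle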

diff --git a/portage_with_autodep/pym/portage/util/digraph.pyo b/portage_with_autodep/pym/portage/util/digraph.pyo
new file mode 100644
index 0000000..8e503a6
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/digraph.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/env_update.py b/portage_with_autodep/pym/portage/util/env_update.py
index eb8a0d9..ace4077 100644
--- a/portage_with_autodep/pym/portage/util/env_update.py
+++ b/portage_with_autodep/pym/portage/util/env_update.py
@@ -19,12 +19,14 @@ from portage.process import find_binary
 from portage.util import atomic_ofstream, ensure_dirs, getconfig, \
 	normalize_path, writemsg
 from portage.util.listdir import listdir
+from portage.dbapi.vartree import vartree
+from portage.package.ebuild.config import config
 
 if sys.hexversion >= 0x3000000:
 	long = int
 
 def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
-	env=None, writemsg_level=None):
+	env=None, writemsg_level=None, vardbapi=None):
 	"""
 	Parse /etc/env.d and use it to generate /etc/profile.env, csh.env,
 	ld.so.conf, and prelink.conf. Finally, run ldconfig. When ldconfig is
@@ -39,6 +41,40 @@ def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
 		defaults to portage.settings["ROOT"].
 	@type target_root: String (Path)
 	"""
+	if vardbapi is None:
+		if isinstance(env, config):
+			vardbapi = vartree(settings=env).dbapi
+		else:
+			if target_root is None:
+				eprefix = portage.settings["EPREFIX"]
+				target_root = portage.settings["ROOT"]
+				target_eroot = portage.settings['EROOT']
+			else:
+				eprefix = portage.const.EPREFIX
+				target_eroot = os.path.join(target_root,
+					eprefix.lstrip(os.sep))
+				target_eroot = target_eroot.rstrip(os.sep) + os.sep
+			if hasattr(portage, "db") and target_eroot in portage.db:
+				vardbapi = portage.db[target_eroot]["vartree"].dbapi
+			else:
+				settings = config(config_root=target_root,
+					target_root=target_root, eprefix=eprefix)
+				target_root = settings["ROOT"]
+				if env is None:
+					env = settings
+				vardbapi = vartree(settings=settings).dbapi
+
+	# Lock the config memory file to prevent symlink creation
+	# in merge_contents from overlapping with env-update.
+	vardbapi._fs_lock()
+	try:
+		return _env_update(makelinks, target_root, prev_mtimes, contents,
+			env, writemsg_level)
+	finally:
+		vardbapi._fs_unlock()
+
+def _env_update(makelinks, target_root, prev_mtimes, contents, env,
+	writemsg_level):
 	if writemsg_level is None:
 		writemsg_level = portage.util.writemsg_level
 	if target_root is None:
@@ -46,8 +82,13 @@ def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
 	if prev_mtimes is None:
 		prev_mtimes = portage.mtimedb["ldpath"]
 	if env is None:
-		env = os.environ
-	envd_dir = os.path.join(target_root, "etc", "env.d")
+		settings = portage.settings
+	else:
+		settings = env
+
+	eprefix = settings.get("EPREFIX", "")
+	eprefix_lstrip = eprefix.lstrip(os.sep)
+	envd_dir = os.path.join(target_root, eprefix_lstrip, "etc", "env.d")
 	ensure_dirs(envd_dir, mode=0o755)
 	fns = listdir(envd_dir, EmptyOnError=1)
 	fns.sort()
@@ -123,7 +164,8 @@ def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
 		they won't be overwritten by this dict.update call."""
 		env.update(myconfig)
 
-	ldsoconf_path = os.path.join(target_root, "etc", "ld.so.conf")
+	ldsoconf_path = os.path.join(
+		target_root, eprefix_lstrip, "etc", "ld.so.conf")
 	try:
 		myld = io.open(_unicode_encode(ldsoconf_path,
 			encoding=_encodings['fs'], errors='strict'),
@@ -141,8 +183,6 @@ def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
 			raise
 		oldld = None
 
-	ld_cache_update=False
-
 	newld = specials["LDPATH"]
 	if (oldld != newld):
 		#ld.so.conf needs updating and ldconfig needs to be run
@@ -152,12 +192,11 @@ def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
 		for x in specials["LDPATH"]:
 			myfd.write(x + "\n")
 		myfd.close()
-		ld_cache_update=True
 
 	# Update prelink.conf if we are prelink-enabled
 	if prelink_capable:
-		newprelink = atomic_ofstream(
-			os.path.join(target_root, "etc", "prelink.conf"))
+		newprelink = atomic_ofstream(os.path.join(
+			target_root, eprefix_lstrip, "etc", "prelink.conf"))
 		newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
 		newprelink.write("# contents of /etc/env.d directory\n")
 
@@ -193,7 +232,7 @@ def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
 	lib_dirs = set()
 	for lib_dir in set(specials["LDPATH"] + \
 		['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
-		x = os.path.join(target_root, lib_dir.lstrip(os.sep))
+		x = os.path.join(target_root, eprefix_lstrip, lib_dir.lstrip(os.sep))
 		try:
 			newldpathtime = os.stat(x)[stat.ST_MTIME]
 			lib_dirs.add(normalize_path(x))
@@ -223,11 +262,8 @@ def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
 			prev_mtimes[x] = newldpathtime
 			mtime_changed = True
 
-	if mtime_changed:
-		ld_cache_update = True
-
 	if makelinks and \
-		not ld_cache_update and \
+		not mtime_changed and \
 		contents is not None:
 		libdir_contents_changed = False
 		for mypath, mydata in contents.items():
@@ -241,12 +277,12 @@ def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
 			makelinks = False
 
 	ldconfig = "/sbin/ldconfig"
-	if "CHOST" in env and "CBUILD" in env and \
-		env["CHOST"] != env["CBUILD"]:
-		ldconfig = find_binary("%s-ldconfig" % env["CHOST"])
+	if "CHOST" in settings and "CBUILD" in settings and \
+		settings["CHOST"] != settings["CBUILD"]:
+		ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])
 
 	# Only run ldconfig as needed
-	if (ld_cache_update or makelinks) and ldconfig:
+	if makelinks and ldconfig and not eprefix:
 		# ldconfig has very different behaviour between FreeBSD and Linux
 		if ostype == "Linux" or ostype.lower().endswith("gnu"):
 			# We can't update links if we haven't cleaned other versions first, as
@@ -272,7 +308,8 @@ def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
 	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
 
 	#create /etc/profile.env for bash support
-	outfile = atomic_ofstream(os.path.join(target_root, "etc", "profile.env"))
+	outfile = atomic_ofstream(os.path.join(
+		target_root, eprefix_lstrip, "etc", "profile.env"))
 	outfile.write(penvnotice)
 
 	env_keys = [ x for x in env if x != "LDPATH" ]
@@ -286,7 +323,8 @@ def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
 	outfile.close()
 
 	#create /etc/csh.env for (t)csh support
-	outfile = atomic_ofstream(os.path.join(target_root, "etc", "csh.env"))
+	outfile = atomic_ofstream(os.path.join(
+		target_root, eprefix_lstrip, "etc", "csh.env"))
 	outfile.write(cenvnotice)
 	for x in env_keys:
 		outfile.write("setenv %s '%s'\n" % (x, env[x]))
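
A note on the recurring change in the hunks above: env_update() now takes the
vardbapi filesystem lock before delegating to _env_update(), and every
configuration path (env.d, ld.so.conf, prelink.conf, profile.env, csh.env) is
joined through the stripped EPREFIX so prefix installs land under
${ROOT}${EPREFIX}. A minimal standalone sketch of the path pattern (the helper
name target_path is hypothetical, for illustration only):

    import os

    def target_path(target_root, eprefix, *parts):
        # Strip the leading separator from EPREFIX; otherwise
        # os.path.join would discard target_root entirely.
        eprefix_lstrip = eprefix.lstrip(os.sep)
        return os.path.join(target_root, eprefix_lstrip, *parts)

    # target_path("/", "/usr/local", "etc", "profile.env")
    # yields "/usr/local/etc/profile.env"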

diff --git a/portage_with_autodep/pym/portage/util/env_update.pyo b/portage_with_autodep/pym/portage/util/env_update.pyo
new file mode 100644
index 0000000..ee3b187
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/env_update.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/lafilefixer.py b/portage_with_autodep/pym/portage/util/lafilefixer.py
index 2b093d8..54ff20d 100644
--- a/portage_with_autodep/pym/portage/util/lafilefixer.py
+++ b/portage_with_autodep/pym/portage/util/lafilefixer.py
@@ -80,7 +80,7 @@ def rewrite_lafile(contents):
 	@param contents: the contents of a libtool archive file
 	@type contents: bytes
 	@rtype: tuple
-	@returns: (True, fixed_contents) if something needed to be
+	@return: (True, fixed_contents) if something needed to be
 		fixed, (False, None) otherwise.
 	"""
 	#Parse the 'dependency_libs' and 'inherited_linker_flags' lines.

diff --git a/portage_with_autodep/pym/portage/util/lafilefixer.pyo b/portage_with_autodep/pym/portage/util/lafilefixer.pyo
new file mode 100644
index 0000000..a6e06ab
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/lafilefixer.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/listdir.py b/portage_with_autodep/pym/portage/util/listdir.py
index 5753d2f..c2628cb 100644
--- a/portage_with_autodep/pym/portage/util/listdir.py
+++ b/portage_with_autodep/pym/portage/util/listdir.py
@@ -109,7 +109,7 @@ def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelis
 	@param dirsonly: Only return directories.
 	@type dirsonly: Boolean
 	@rtype: List
-	@returns: A list of files and directories (or just files or just directories) or an empty list.
+	@return: A list of files and directories (or just files or just directories) or an empty list.
 	"""
 
 	list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)

diff --git a/portage_with_autodep/pym/portage/util/listdir.pyo b/portage_with_autodep/pym/portage/util/listdir.pyo
new file mode 100644
index 0000000..0f02d6d
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/listdir.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/movefile.py b/portage_with_autodep/pym/portage/util/movefile.py
index 30cb6f1..10577b5 100644
--- a/portage_with_autodep/pym/portage/util/movefile.py
+++ b/portage_with_autodep/pym/portage/util/movefile.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = ['movefile']
@@ -7,33 +7,98 @@ import errno
 import os as _os
 import shutil as _shutil
 import stat
+import subprocess
+import textwrap
 
 import portage
 from portage import bsd_chflags, _encodings, _os_overrides, _selinux, \
-	_unicode_decode, _unicode_func_wrapper, _unicode_module_wrapper
+	_unicode_decode, _unicode_encode, _unicode_func_wrapper,\
+	_unicode_module_wrapper
 from portage.const import MOVE_BINARY
+from portage.exception import OperationNotSupported
 from portage.localization import _
 from portage.process import spawn
 from portage.util import writemsg
 
+def _apply_stat(src_stat, dest):
+	_os.chown(dest, src_stat.st_uid, src_stat.st_gid)
+	_os.chmod(dest, stat.S_IMODE(src_stat.st_mode))
+
+if hasattr(_os, "getxattr"):
+	# Python >=3.3 and GNU/Linux
+	def _copyxattr(src, dest):
+		# Copy every extended attribute from src to dest; a
+		# failing setxattr call means the filesystem holding
+		# dest does not support extended attributes.
+		for attr in _os.listxattr(src):
+			try:
+				_os.setxattr(dest, attr, _os.getxattr(src, attr))
+			except OSError:
+				raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
+else:
+	try:
+		import xattr
+	except ImportError:
+		xattr = None
+	if xattr is not None:
+		# Fall back to the external xattr module on Python
+		# versions that lack os.getxattr/os.setxattr (< 3.3).
+		def _copyxattr(src, dest):
+			for attr in xattr.list(src):
+				try:
+					xattr.set(dest, attr, xattr.get(src, attr))
+				except IOError:
+					# xattr.set raises IOError when the filesystem lacks support.
+					raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
+	else:
+		_devnull = open("/dev/null", "wb")
+		try:
+			subprocess.call(["getfattr", "--version"], stdout=_devnull)
+			subprocess.call(["setfattr", "--version"], stdout=_devnull)
+			_has_getfattr_and_setfattr = True
+		except OSError:
+			_has_getfattr_and_setfattr = False
+		_devnull.close()
+		if _has_getfattr_and_setfattr:
+			def _copyxattr(src, dest):
+				getfattr_process = subprocess.Popen(["getfattr", "-d", "--absolute-names", src], stdout=subprocess.PIPE)
+				# Drain stdout before wait(); waiting first can
+				# deadlock once the pipe buffer fills.
+				extended_attributes = getfattr_process.stdout.readlines()
+				getfattr_process.stdout.close()
+				getfattr_process.wait()
+				if extended_attributes:
+					extended_attributes[0] = b"# file: " + _unicode_encode(dest) + b"\n"
+					setfattr_process = subprocess.Popen(["setfattr", "--restore=-"], stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+					setfattr_process.communicate(input=b"".join(extended_attributes))
+					if setfattr_process.returncode != 0:
+						raise OperationNotSupported("Filesystem containing file '%s' does not support extended attributes" % dest)
+		else:
+			def _copyxattr(src, dest):
+				pass
+
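+# Annotation: the definitions above select one of four
+# implementations for _copyxattr, in order of preference:
+# 1. os.getxattr/os.setxattr (Python >= 3.3 on GNU/Linux),
+# 2. the external xattr module,
+# 3. the getfattr/setfattr command-line tools,
+# 4. a no-op stub when none of these are available.
+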
 def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
 		hardlink_candidates=None, encoding=_encodings['fs']):
 	"""moves a file from src to dest, preserving all permissions and attributes; mtime will
 	be preserved even when moving across filesystems.  Returns true on success and false on
 	failure.  Move is atomic."""
-	#print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
 
 	if mysettings is None:
 		mysettings = portage.settings
 
+	src_bytes = _unicode_encode(src, encoding=encoding, errors='strict')
+	dest_bytes = _unicode_encode(dest, encoding=encoding, errors='strict')
+	xattr_enabled = "xattr" in mysettings.features
 	selinux_enabled = mysettings.selinux_enabled()
 	if selinux_enabled:
 		selinux = _unicode_module_wrapper(_selinux, encoding=encoding)
+		_copyfile = selinux.copyfile
+		_rename = selinux.rename
+	else:
+		_copyfile = _shutil.copyfile
+		_rename = _os.rename
 
 	lchown = _unicode_func_wrapper(portage.data.lchown, encoding=encoding)
 	os = _unicode_module_wrapper(_os,
 		encoding=encoding, overrides=_os_overrides)
-	shutil = _unicode_module_wrapper(_shutil, encoding=encoding)
 
 	try:
 		if not sstat:
@@ -42,8 +107,9 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
 	except SystemExit as e:
 		raise
 	except Exception as e:
-		print(_("!!! Stating source file failed... movefile()"))
-		print("!!!",e)
+		writemsg("!!! %s\n" % _("Stating source file failed... movefile()"),
+			noiselevel=-1)
+		writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
 		return None
 
 	destexists=1
@@ -75,9 +141,9 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
 	if stat.S_ISLNK(sstat[stat.ST_MODE]):
 		try:
 			target=os.readlink(src)
-			if mysettings and mysettings["D"]:
-				if target.find(mysettings["D"])==0:
-					target=target[len(mysettings["D"]):]
+			if mysettings and "D" in mysettings and \
+				target.startswith(mysettings["D"]):
+				target = target[len(mysettings["D"])-1:]
 			if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
 				os.unlink(dest)
 			try:
@@ -100,9 +166,10 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
 		except SystemExit as e:
 			raise
 		except Exception as e:
-			print(_("!!! failed to properly create symlink:"))
-			print("!!!",dest,"->",target)
-			print("!!!",e)
+			writemsg("!!! %s\n" % _("failed to properly create symlink:"),
+				noiselevel=-1)
+			writemsg("!!! %s -> %s\n" % (dest, target), noiselevel=-1)
+			writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
 			return None
 
 	hardlinked = False
@@ -152,26 +219,40 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
 		except OSError as e:
 			if e.errno != errno.EXDEV:
 				# Some random error.
-				print(_("!!! Failed to move %(src)s to %(dest)s") % {"src": src, "dest": dest})
-				print("!!!",e)
+				writemsg("!!! %s\n" % _("Failed to move %(src)s to %(dest)s") %
+					{"src": src, "dest": dest}, noiselevel=-1)
+				writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
 				return None
 			# Invalid cross-device-link 'bind' mounted or actually Cross-Device
 	if renamefailed:
-		didcopy=0
 		if stat.S_ISREG(sstat[stat.ST_MODE]):
+			dest_tmp = dest + "#new"
+			dest_tmp_bytes = _unicode_encode(dest_tmp, encoding=encoding,
+				errors='strict')
 			try: # For safety copy then move it over.
-				if selinux_enabled:
-					selinux.copyfile(src, dest + "#new")
-					selinux.rename(dest + "#new", dest)
-				else:
-					shutil.copyfile(src,dest+"#new")
-					os.rename(dest+"#new",dest)
-				didcopy=1
+				_copyfile(src_bytes, dest_tmp_bytes)
+				if xattr_enabled:
+					try:
+						_copyxattr(src_bytes, dest_tmp_bytes)
+					except SystemExit:
+						raise
+					except:
+						msg = _("Failed to copy extended attributes. "
+							"In order to avoid this error, set "
+							"FEATURES=\"-xattr\" in make.conf.")
+						msg = textwrap.wrap(msg, 65)
+						for line in msg:
+							writemsg("!!! %s\n" % (line,), noiselevel=-1)
+						raise
+				_apply_stat(sstat, dest_tmp_bytes)
+				_rename(dest_tmp_bytes, dest_bytes)
+				_os.unlink(src_bytes)
 			except SystemExit as e:
 				raise
 			except Exception as e:
-				print(_('!!! copy %(src)s -> %(dest)s failed.') % {"src": src, "dest": dest})
-				print("!!!",e)
+				writemsg("!!! %s\n" % _('copy %(src)s -> %(dest)s failed.') %
+					{"src": src, "dest": dest}, noiselevel=-1)
+				writemsg(_unicode_decode("!!! %s\n") % (e,), noiselevel=-1)
 				return None
 		else:
 			#we don't yet handle special, so we need to fall back to /bin/mv
@@ -183,21 +264,6 @@ def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
 					"dest": _unicode_decode(dest, encoding=encoding)}, noiselevel=-1)
 				writemsg("!!! %s\n" % a, noiselevel=-1)
 				return None # failure
-		try:
-			if didcopy:
-				if stat.S_ISLNK(sstat[stat.ST_MODE]):
-					lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
-				else:
-					os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
-				os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
-				os.unlink(src)
-		except SystemExit as e:
-			raise
-		except Exception as e:
-			print(_("!!! Failed to chown/chmod/unlink in movefile()"))
-			print("!!!",dest)
-			print("!!!",e)
-			return None
 
 	# Always use stat_obj[stat.ST_MTIME] for the integral timestamp which
 	# is returned, since the stat_obj.st_mtime float attribute rounds *up*
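
The EXDEV fallback above replaces the old didcopy bookkeeping with a
straight-line sequence: copy to a temporary "#new" name, copy extended
attributes, restore ownership and mode via _apply_stat(), rename atomically,
then unlink the source. A condensed standalone sketch of that sequence
(hypothetical helper name; error handling and xattr copying omitted):

    import os
    import shutil
    import stat

    def cross_device_move(src, dest):
        # Used when os.rename() fails with EXDEV: build the file
        # under a temporary name so dest never exposes partially
        # written content.
        src_stat = os.stat(src)
        dest_tmp = dest + "#new"
        shutil.copyfile(src, dest_tmp)
        os.chown(dest_tmp, src_stat.st_uid, src_stat.st_gid)  # may need privileges
        os.chmod(dest_tmp, stat.S_IMODE(src_stat.st_mode))  # chown resets the sticky bit
        os.rename(dest_tmp, dest)  # atomic within one filesystem
        os.unlink(src)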

diff --git a/portage_with_autodep/pym/portage/util/movefile.pyo b/portage_with_autodep/pym/portage/util/movefile.pyo
new file mode 100644
index 0000000..1228ee7
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/movefile.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/mtimedb.py b/portage_with_autodep/pym/portage/util/mtimedb.py
index 67f93e8..30922a9 100644
--- a/portage_with_autodep/pym/portage/util/mtimedb.py
+++ b/portage_with_autodep/pym/portage/util/mtimedb.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = ['MtimeDB']
@@ -9,35 +9,77 @@ try:
 except ImportError:
 	import pickle
 
+import errno
+import io
+import json
+import sys
+
 import portage
+from portage import _encodings
+from portage import _unicode_decode
 from portage import _unicode_encode
 from portage.data import portage_gid, uid
 from portage.localization import _
 from portage.util import apply_secpass_permissions, atomic_ofstream, writemsg
 
 class MtimeDB(dict):
+
+	# JSON read support has been available since portage-2.1.10.49.
+	_json_write = True
+
+	_json_write_opts = {
+		"ensure_ascii": False,
+		"indent": "\t",
+		"sort_keys": True
+	}
+	if sys.hexversion < 0x30200F0:
+		# json.dumps on Python < 3.2 accepts only an int (number of spaces) for indent
+		_json_write_opts["indent"] = 4
+
 	def __init__(self, filename):
 		dict.__init__(self)
 		self.filename = filename
 		self._load(filename)
 
 	def _load(self, filename):
+		f = None
+		content = None
 		try:
 			f = open(_unicode_encode(filename), 'rb')
-			mypickle = pickle.Unpickler(f)
-			try:
-				mypickle.find_global = None
-			except AttributeError:
-				# TODO: If py3k, override Unpickler.find_class().
+			content = f.read()
+		except EnvironmentError as e:
+			if getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
 				pass
-			d = mypickle.load()
-			f.close()
-			del f
-		except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
-			if isinstance(e, pickle.UnpicklingError):
+			else:
 				writemsg(_("!!! Error loading '%s': %s\n") % \
-					(filename, str(e)), noiselevel=-1)
-			del e
+					(filename, e), noiselevel=-1)
+		finally:
+			if f is not None:
+				f.close()
+
+		d = None
+		if content:
+			try:
+				d = json.loads(_unicode_decode(content,
+					encoding=_encodings['repo.content'], errors='strict'))
+			except SystemExit:
+				raise
+			except Exception as e:
+				try:
+					mypickle = pickle.Unpickler(io.BytesIO(content))
+					try:
+						mypickle.find_global = None
+					except AttributeError:
+						# Python >=3
+						pass
+					d = mypickle.load()
+				except SystemExit:
+					raise
+				except Exception:
+					writemsg(_("!!! Error loading '%s': %s\n") % \
+						(filename, e), noiselevel=-1)
+
+		if d is None:
 			d = {}
 
 		if "old" in d:
@@ -74,7 +116,12 @@ class MtimeDB(dict):
 			except EnvironmentError:
 				pass
 			else:
-				pickle.dump(d, f, protocol=2)
+				if self._json_write:
+					f.write(_unicode_encode(
+						json.dumps(d, **self._json_write_opts),
+						encoding=_encodings['repo.content'], errors='strict'))
+				else:
+					pickle.dump(d, f, protocol=2)
 				f.close()
 				apply_secpass_permissions(self.filename,
 					uid=uid, gid=portage_gid, mode=0o644)
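
The loader above migrates the on-disk format: it tries JSON first and falls
back to the legacy pickle only when decoding fails, while writes now always
emit JSON (with an int indent on Python < 3.2). A minimal sketch of that
strategy (the function name load_serialized is hypothetical; the real code
additionally disables pickle.find_global and normalizes the decoded keys):

    import io
    import json
    import pickle

    def load_serialized(content):
        # Prefer the new JSON encoding; fall back to the legacy
        # pickle format for databases written by older portage.
        try:
            return json.loads(content.decode("utf-8"))
        except ValueError:
            # UnicodeDecodeError and JSON decode errors both
            # derive from ValueError.
            return pickle.load(io.BytesIO(content))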

diff --git a/portage_with_autodep/pym/portage/util/mtimedb.pyo b/portage_with_autodep/pym/portage/util/mtimedb.pyo
new file mode 100644
index 0000000..fda479a
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/mtimedb.pyo differ

diff --git a/portage_with_autodep/pym/portage/util/whirlpool.py b/portage_with_autodep/pym/portage/util/whirlpool.py
new file mode 100644
index 0000000..c696f6f
--- /dev/null
+++ b/portage_with_autodep/pym/portage/util/whirlpool.py
@@ -0,0 +1,794 @@
+## whirlpool.py - pure Python implementation of the Whirlpool algorithm.
+## Bjorn Edstrom <be@bjrn.se>, 16 December 2007.
+##
+## Copyrights
+## ==========
+##
+## This code is based on the reference implementation by
+## Paulo S.L.M. Barreto and Vincent Rijmen. The reference implementation
+## is placed in the public domain but has the following headers:
+##
+## * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+## * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+## * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+## * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+## * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+## * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+## * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+## * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+## * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+## * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+## * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+## *
+## */
+## /* The code contained in this file (Whirlpool.c) is in the public domain. */
+##
+## This Python implementation is therefore also placed in the public domain.
+
+import sys
+if sys.hexversion >= 0x3000000:
+    xrange = range
+
+#block_size = 64
+digest_size = 64
+digestsize = 64
+
+class Whirlpool:
+    """Return a new Whirlpool object. An optional string argument
+    may be provided; if present, this string will be automatically
+    hashed."""
+    def __init__(self, arg=None):
+        self.ctx = WhirlpoolStruct()
+        if arg:
+            self.update(arg)
+        self.digest_status = 0
+
+    def update(self, arg):
+        """Update the hash object with the string arg."""
+        WhirlpoolAdd(arg, len(arg)*8, self.ctx)
+        self.digest_status = 0
+
+    def digest(self):
+        """Return the digest of the data hashed so far."""
+        if self.digest_status == 0:
+            self.dig = WhirlpoolFinalize(self.ctx)
+        self.digest_status = 1
+        return self.dig
+
+    def hexdigest(self):
+        """Return the digest as a string of hexadecimal digits."""
+        dig = self.digest()
+        hexstr = ''
+        for d in dig:
+            # Render each byte as two lowercase hex digits.
+            hexstr += '%02x' % ord(d)
+        return hexstr
+
+    def copy(self):
+        """Return a deep copy of the hash object."""
+        import copy
+        return copy.deepcopy(self)
+
+
+def new(init=None):
+    """Return a new Whirlpool object. An optional string argument
+    may be provided; if present, this string will be automatically
+    hashed."""
+    return Whirlpool(init)
+
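+# Usage sketch (this mirrors the hashlib-style interface defined
+# above; the result is a 128-character hex string, since the
+# digest is 512 bits; no specific test vector is implied):
+#
+#     w = new("abc")
+#     w.update("def")
+#     print(w.hexdigest())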
+#
+# Private.
+#
+
+R = 10
+
+C0 = [
+0x18186018c07830d8, 0x23238c2305af4626, 0xc6c63fc67ef991b8, 0xe8e887e8136fcdfb,
+0x878726874ca113cb, 0xb8b8dab8a9626d11, 0x0101040108050209, 0x4f4f214f426e9e0d,
+0x3636d836adee6c9b, 0xa6a6a2a6590451ff, 0xd2d26fd2debdb90c, 0xf5f5f3f5fb06f70e,
+0x7979f979ef80f296, 0x6f6fa16f5fcede30, 0x91917e91fcef3f6d, 0x52525552aa07a4f8,
+0x60609d6027fdc047, 0xbcbccabc89766535, 0x9b9b569baccd2b37, 0x8e8e028e048c018a,
+0xa3a3b6a371155bd2, 0x0c0c300c603c186c, 0x7b7bf17bff8af684, 0x3535d435b5e16a80,
+0x1d1d741de8693af5, 0xe0e0a7e05347ddb3, 0xd7d77bd7f6acb321, 0xc2c22fc25eed999c,
+0x2e2eb82e6d965c43, 0x4b4b314b627a9629, 0xfefedffea321e15d, 0x575741578216aed5,
+0x15155415a8412abd, 0x7777c1779fb6eee8, 0x3737dc37a5eb6e92, 0xe5e5b3e57b56d79e,
+0x9f9f469f8cd92313, 0xf0f0e7f0d317fd23, 0x4a4a354a6a7f9420, 0xdada4fda9e95a944,
+0x58587d58fa25b0a2, 0xc9c903c906ca8fcf, 0x2929a429558d527c, 0x0a0a280a5022145a,
+0xb1b1feb1e14f7f50, 0xa0a0baa0691a5dc9, 0x6b6bb16b7fdad614, 0x85852e855cab17d9,
+0xbdbdcebd8173673c, 0x5d5d695dd234ba8f, 0x1010401080502090, 0xf4f4f7f4f303f507,
+0xcbcb0bcb16c08bdd, 0x3e3ef83eedc67cd3, 0x0505140528110a2d, 0x676781671fe6ce78,
+0xe4e4b7e47353d597, 0x27279c2725bb4e02, 0x4141194132588273, 0x8b8b168b2c9d0ba7,
+0xa7a7a6a7510153f6, 0x7d7de97dcf94fab2, 0x95956e95dcfb3749, 0xd8d847d88e9fad56,
+0xfbfbcbfb8b30eb70, 0xeeee9fee2371c1cd, 0x7c7ced7cc791f8bb, 0x6666856617e3cc71,
+0xdddd53dda68ea77b, 0x17175c17b84b2eaf, 0x4747014702468e45, 0x9e9e429e84dc211a,
+0xcaca0fca1ec589d4, 0x2d2db42d75995a58, 0xbfbfc6bf9179632e, 0x07071c07381b0e3f,
+0xadad8ead012347ac, 0x5a5a755aea2fb4b0, 0x838336836cb51bef, 0x3333cc3385ff66b6,
+0x636391633ff2c65c, 0x02020802100a0412, 0xaaaa92aa39384993, 0x7171d971afa8e2de,
+0xc8c807c80ecf8dc6, 0x19196419c87d32d1, 0x494939497270923b, 0xd9d943d9869aaf5f,
+0xf2f2eff2c31df931, 0xe3e3abe34b48dba8, 0x5b5b715be22ab6b9, 0x88881a8834920dbc,
+0x9a9a529aa4c8293e, 0x262698262dbe4c0b, 0x3232c8328dfa64bf, 0xb0b0fab0e94a7d59,
+0xe9e983e91b6acff2, 0x0f0f3c0f78331e77, 0xd5d573d5e6a6b733, 0x80803a8074ba1df4,
+0xbebec2be997c6127, 0xcdcd13cd26de87eb, 0x3434d034bde46889, 0x48483d487a759032,
+0xffffdbffab24e354, 0x7a7af57af78ff48d, 0x90907a90f4ea3d64, 0x5f5f615fc23ebe9d,
+0x202080201da0403d, 0x6868bd6867d5d00f, 0x1a1a681ad07234ca, 0xaeae82ae192c41b7,
+0xb4b4eab4c95e757d, 0x54544d549a19a8ce, 0x93937693ece53b7f, 0x222288220daa442f,
+0x64648d6407e9c863, 0xf1f1e3f1db12ff2a, 0x7373d173bfa2e6cc, 0x12124812905a2482,
+0x40401d403a5d807a, 0x0808200840281048, 0xc3c32bc356e89b95, 0xecec97ec337bc5df,
+0xdbdb4bdb9690ab4d, 0xa1a1bea1611f5fc0, 0x8d8d0e8d1c830791, 0x3d3df43df5c97ac8,
+0x97976697ccf1335b, 0x0000000000000000, 0xcfcf1bcf36d483f9, 0x2b2bac2b4587566e,
+0x7676c57697b3ece1, 0x8282328264b019e6, 0xd6d67fd6fea9b128, 0x1b1b6c1bd87736c3,
+0xb5b5eeb5c15b7774, 0xafaf86af112943be, 0x6a6ab56a77dfd41d, 0x50505d50ba0da0ea,
+0x45450945124c8a57, 0xf3f3ebf3cb18fb38, 0x3030c0309df060ad, 0xefef9bef2b74c3c4,
+0x3f3ffc3fe5c37eda, 0x55554955921caac7, 0xa2a2b2a2791059db, 0xeaea8fea0365c9e9,
+0x656589650fecca6a, 0xbabad2bab9686903, 0x2f2fbc2f65935e4a, 0xc0c027c04ee79d8e,
+0xdede5fdebe81a160, 0x1c1c701ce06c38fc, 0xfdfdd3fdbb2ee746, 0x4d4d294d52649a1f,
+0x92927292e4e03976, 0x7575c9758fbceafa, 0x06061806301e0c36, 0x8a8a128a249809ae,
+0xb2b2f2b2f940794b, 0xe6e6bfe66359d185, 0x0e0e380e70361c7e, 0x1f1f7c1ff8633ee7,
+0x6262956237f7c455, 0xd4d477d4eea3b53a, 0xa8a89aa829324d81, 0x96966296c4f43152,
+0xf9f9c3f99b3aef62, 0xc5c533c566f697a3, 0x2525942535b14a10, 0x59597959f220b2ab,
+0x84842a8454ae15d0, 0x7272d572b7a7e4c5, 0x3939e439d5dd72ec, 0x4c4c2d4c5a619816,
+0x5e5e655eca3bbc94, 0x7878fd78e785f09f, 0x3838e038ddd870e5, 0x8c8c0a8c14860598,
+0xd1d163d1c6b2bf17, 0xa5a5aea5410b57e4, 0xe2e2afe2434dd9a1, 0x616199612ff8c24e,
+0xb3b3f6b3f1457b42, 0x2121842115a54234, 0x9c9c4a9c94d62508, 0x1e1e781ef0663cee,
+0x4343114322528661, 0xc7c73bc776fc93b1, 0xfcfcd7fcb32be54f, 0x0404100420140824,
+0x51515951b208a2e3, 0x99995e99bcc72f25, 0x6d6da96d4fc4da22, 0x0d0d340d68391a65,
+0xfafacffa8335e979, 0xdfdf5bdfb684a369, 0x7e7ee57ed79bfca9, 0x242490243db44819,
+0x3b3bec3bc5d776fe, 0xabab96ab313d4b9a, 0xcece1fce3ed181f0, 0x1111441188552299,
+0x8f8f068f0c890383, 0x4e4e254e4a6b9c04, 0xb7b7e6b7d1517366, 0xebeb8beb0b60cbe0,
+0x3c3cf03cfdcc78c1, 0x81813e817cbf1ffd, 0x94946a94d4fe3540, 0xf7f7fbf7eb0cf31c,
+0xb9b9deb9a1676f18, 0x13134c13985f268b, 0x2c2cb02c7d9c5851, 0xd3d36bd3d6b8bb05,
+0xe7e7bbe76b5cd38c, 0x6e6ea56e57cbdc39, 0xc4c437c46ef395aa, 0x03030c03180f061b,
+0x565645568a13acdc, 0x44440d441a49885e, 0x7f7fe17fdf9efea0, 0xa9a99ea921374f88,
+0x2a2aa82a4d825467, 0xbbbbd6bbb16d6b0a, 0xc1c123c146e29f87, 0x53535153a202a6f1,
+0xdcdc57dcae8ba572, 0x0b0b2c0b58271653, 0x9d9d4e9d9cd32701, 0x6c6cad6c47c1d82b,
+0x3131c43195f562a4, 0x7474cd7487b9e8f3, 0xf6f6fff6e309f115, 0x464605460a438c4c,
+0xacac8aac092645a5, 0x89891e893c970fb5, 0x14145014a04428b4, 0xe1e1a3e15b42dfba,
+0x16165816b04e2ca6, 0x3a3ae83acdd274f7, 0x6969b9696fd0d206, 0x09092409482d1241,
+0x7070dd70a7ade0d7, 0xb6b6e2b6d954716f, 0xd0d067d0ceb7bd1e, 0xeded93ed3b7ec7d6,
+0xcccc17cc2edb85e2, 0x424215422a578468, 0x98985a98b4c22d2c, 0xa4a4aaa4490e55ed,
+0x2828a0285d885075, 0x5c5c6d5cda31b886, 0xf8f8c7f8933fed6b, 0x8686228644a411c2,
+]
+C1 = [
+0xd818186018c07830, 0x2623238c2305af46, 0xb8c6c63fc67ef991, 0xfbe8e887e8136fcd,
+0xcb878726874ca113, 0x11b8b8dab8a9626d, 0x0901010401080502, 0x0d4f4f214f426e9e,
+0x9b3636d836adee6c, 0xffa6a6a2a6590451, 0x0cd2d26fd2debdb9, 0x0ef5f5f3f5fb06f7,
+0x967979f979ef80f2, 0x306f6fa16f5fcede, 0x6d91917e91fcef3f, 0xf852525552aa07a4,
+0x4760609d6027fdc0, 0x35bcbccabc897665, 0x379b9b569baccd2b, 0x8a8e8e028e048c01,
+0xd2a3a3b6a371155b, 0x6c0c0c300c603c18, 0x847b7bf17bff8af6, 0x803535d435b5e16a,
+0xf51d1d741de8693a, 0xb3e0e0a7e05347dd, 0x21d7d77bd7f6acb3, 0x9cc2c22fc25eed99,
+0x432e2eb82e6d965c, 0x294b4b314b627a96, 0x5dfefedffea321e1, 0xd5575741578216ae,
+0xbd15155415a8412a, 0xe87777c1779fb6ee, 0x923737dc37a5eb6e, 0x9ee5e5b3e57b56d7,
+0x139f9f469f8cd923, 0x23f0f0e7f0d317fd, 0x204a4a354a6a7f94, 0x44dada4fda9e95a9,
+0xa258587d58fa25b0, 0xcfc9c903c906ca8f, 0x7c2929a429558d52, 0x5a0a0a280a502214,
+0x50b1b1feb1e14f7f, 0xc9a0a0baa0691a5d, 0x146b6bb16b7fdad6, 0xd985852e855cab17,
+0x3cbdbdcebd817367, 0x8f5d5d695dd234ba, 0x9010104010805020, 0x07f4f4f7f4f303f5,
+0xddcbcb0bcb16c08b, 0xd33e3ef83eedc67c, 0x2d0505140528110a, 0x78676781671fe6ce,
+0x97e4e4b7e47353d5, 0x0227279c2725bb4e, 0x7341411941325882, 0xa78b8b168b2c9d0b,
+0xf6a7a7a6a7510153, 0xb27d7de97dcf94fa, 0x4995956e95dcfb37, 0x56d8d847d88e9fad,
+0x70fbfbcbfb8b30eb, 0xcdeeee9fee2371c1, 0xbb7c7ced7cc791f8, 0x716666856617e3cc,
+0x7bdddd53dda68ea7, 0xaf17175c17b84b2e, 0x454747014702468e, 0x1a9e9e429e84dc21,
+0xd4caca0fca1ec589, 0x582d2db42d75995a, 0x2ebfbfc6bf917963, 0x3f07071c07381b0e,
+0xacadad8ead012347, 0xb05a5a755aea2fb4, 0xef838336836cb51b, 0xb63333cc3385ff66,
+0x5c636391633ff2c6, 0x1202020802100a04, 0x93aaaa92aa393849, 0xde7171d971afa8e2,
+0xc6c8c807c80ecf8d, 0xd119196419c87d32, 0x3b49493949727092, 0x5fd9d943d9869aaf,
+0x31f2f2eff2c31df9, 0xa8e3e3abe34b48db, 0xb95b5b715be22ab6, 0xbc88881a8834920d,
+0x3e9a9a529aa4c829, 0x0b262698262dbe4c, 0xbf3232c8328dfa64, 0x59b0b0fab0e94a7d,
+0xf2e9e983e91b6acf, 0x770f0f3c0f78331e, 0x33d5d573d5e6a6b7, 0xf480803a8074ba1d,
+0x27bebec2be997c61, 0xebcdcd13cd26de87, 0x893434d034bde468, 0x3248483d487a7590,
+0x54ffffdbffab24e3, 0x8d7a7af57af78ff4, 0x6490907a90f4ea3d, 0x9d5f5f615fc23ebe,
+0x3d202080201da040, 0x0f6868bd6867d5d0, 0xca1a1a681ad07234, 0xb7aeae82ae192c41,
+0x7db4b4eab4c95e75, 0xce54544d549a19a8, 0x7f93937693ece53b, 0x2f222288220daa44,
+0x6364648d6407e9c8, 0x2af1f1e3f1db12ff, 0xcc7373d173bfa2e6, 0x8212124812905a24,
+0x7a40401d403a5d80, 0x4808082008402810, 0x95c3c32bc356e89b, 0xdfecec97ec337bc5,
+0x4ddbdb4bdb9690ab, 0xc0a1a1bea1611f5f, 0x918d8d0e8d1c8307, 0xc83d3df43df5c97a,
+0x5b97976697ccf133, 0x0000000000000000, 0xf9cfcf1bcf36d483, 0x6e2b2bac2b458756,
+0xe17676c57697b3ec, 0xe68282328264b019, 0x28d6d67fd6fea9b1, 0xc31b1b6c1bd87736,
+0x74b5b5eeb5c15b77, 0xbeafaf86af112943, 0x1d6a6ab56a77dfd4, 0xea50505d50ba0da0,
+0x5745450945124c8a, 0x38f3f3ebf3cb18fb, 0xad3030c0309df060, 0xc4efef9bef2b74c3,
+0xda3f3ffc3fe5c37e, 0xc755554955921caa, 0xdba2a2b2a2791059, 0xe9eaea8fea0365c9,
+0x6a656589650fecca, 0x03babad2bab96869, 0x4a2f2fbc2f65935e, 0x8ec0c027c04ee79d,
+0x60dede5fdebe81a1, 0xfc1c1c701ce06c38, 0x46fdfdd3fdbb2ee7, 0x1f4d4d294d52649a,
+0x7692927292e4e039, 0xfa7575c9758fbcea, 0x3606061806301e0c, 0xae8a8a128a249809,
+0x4bb2b2f2b2f94079, 0x85e6e6bfe66359d1, 0x7e0e0e380e70361c, 0xe71f1f7c1ff8633e,
+0x556262956237f7c4, 0x3ad4d477d4eea3b5, 0x81a8a89aa829324d, 0x5296966296c4f431,
+0x62f9f9c3f99b3aef, 0xa3c5c533c566f697, 0x102525942535b14a, 0xab59597959f220b2,
+0xd084842a8454ae15, 0xc57272d572b7a7e4, 0xec3939e439d5dd72, 0x164c4c2d4c5a6198,
+0x945e5e655eca3bbc, 0x9f7878fd78e785f0, 0xe53838e038ddd870, 0x988c8c0a8c148605,
+0x17d1d163d1c6b2bf, 0xe4a5a5aea5410b57, 0xa1e2e2afe2434dd9, 0x4e616199612ff8c2,
+0x42b3b3f6b3f1457b, 0x342121842115a542, 0x089c9c4a9c94d625, 0xee1e1e781ef0663c,
+0x6143431143225286, 0xb1c7c73bc776fc93, 0x4ffcfcd7fcb32be5, 0x2404041004201408,
+0xe351515951b208a2, 0x2599995e99bcc72f, 0x226d6da96d4fc4da, 0x650d0d340d68391a,
+0x79fafacffa8335e9, 0x69dfdf5bdfb684a3, 0xa97e7ee57ed79bfc, 0x19242490243db448,
+0xfe3b3bec3bc5d776, 0x9aabab96ab313d4b, 0xf0cece1fce3ed181, 0x9911114411885522,
+0x838f8f068f0c8903, 0x044e4e254e4a6b9c, 0x66b7b7e6b7d15173, 0xe0ebeb8beb0b60cb,
+0xc13c3cf03cfdcc78, 0xfd81813e817cbf1f, 0x4094946a94d4fe35, 0x1cf7f7fbf7eb0cf3,
+0x18b9b9deb9a1676f, 0x8b13134c13985f26, 0x512c2cb02c7d9c58, 0x05d3d36bd3d6b8bb,
+0x8ce7e7bbe76b5cd3, 0x396e6ea56e57cbdc, 0xaac4c437c46ef395, 0x1b03030c03180f06,
+0xdc565645568a13ac, 0x5e44440d441a4988, 0xa07f7fe17fdf9efe, 0x88a9a99ea921374f,
+0x672a2aa82a4d8254, 0x0abbbbd6bbb16d6b, 0x87c1c123c146e29f, 0xf153535153a202a6,
+0x72dcdc57dcae8ba5, 0x530b0b2c0b582716, 0x019d9d4e9d9cd327, 0x2b6c6cad6c47c1d8,
+0xa43131c43195f562, 0xf37474cd7487b9e8, 0x15f6f6fff6e309f1, 0x4c464605460a438c,
+0xa5acac8aac092645, 0xb589891e893c970f, 0xb414145014a04428, 0xbae1e1a3e15b42df,
+0xa616165816b04e2c, 0xf73a3ae83acdd274, 0x066969b9696fd0d2, 0x4109092409482d12,
+0xd77070dd70a7ade0, 0x6fb6b6e2b6d95471, 0x1ed0d067d0ceb7bd, 0xd6eded93ed3b7ec7,
+0xe2cccc17cc2edb85, 0x68424215422a5784, 0x2c98985a98b4c22d, 0xeda4a4aaa4490e55,
+0x752828a0285d8850, 0x865c5c6d5cda31b8, 0x6bf8f8c7f8933fed, 0xc28686228644a411,
+]
+C2 = [
+0x30d818186018c078, 0x462623238c2305af, 0x91b8c6c63fc67ef9, 0xcdfbe8e887e8136f,
+0x13cb878726874ca1, 0x6d11b8b8dab8a962, 0x0209010104010805, 0x9e0d4f4f214f426e,
+0x6c9b3636d836adee, 0x51ffa6a6a2a65904, 0xb90cd2d26fd2debd, 0xf70ef5f5f3f5fb06,
+0xf2967979f979ef80, 0xde306f6fa16f5fce, 0x3f6d91917e91fcef, 0xa4f852525552aa07,
+0xc04760609d6027fd, 0x6535bcbccabc8976, 0x2b379b9b569baccd, 0x018a8e8e028e048c,
+0x5bd2a3a3b6a37115, 0x186c0c0c300c603c, 0xf6847b7bf17bff8a, 0x6a803535d435b5e1,
+0x3af51d1d741de869, 0xddb3e0e0a7e05347, 0xb321d7d77bd7f6ac, 0x999cc2c22fc25eed,
+0x5c432e2eb82e6d96, 0x96294b4b314b627a, 0xe15dfefedffea321, 0xaed5575741578216,
+0x2abd15155415a841, 0xeee87777c1779fb6, 0x6e923737dc37a5eb, 0xd79ee5e5b3e57b56,
+0x23139f9f469f8cd9, 0xfd23f0f0e7f0d317, 0x94204a4a354a6a7f, 0xa944dada4fda9e95,
+0xb0a258587d58fa25, 0x8fcfc9c903c906ca, 0x527c2929a429558d, 0x145a0a0a280a5022,
+0x7f50b1b1feb1e14f, 0x5dc9a0a0baa0691a, 0xd6146b6bb16b7fda, 0x17d985852e855cab,
+0x673cbdbdcebd8173, 0xba8f5d5d695dd234, 0x2090101040108050, 0xf507f4f4f7f4f303,
+0x8bddcbcb0bcb16c0, 0x7cd33e3ef83eedc6, 0x0a2d050514052811, 0xce78676781671fe6,
+0xd597e4e4b7e47353, 0x4e0227279c2725bb, 0x8273414119413258, 0x0ba78b8b168b2c9d,
+0x53f6a7a7a6a75101, 0xfab27d7de97dcf94, 0x374995956e95dcfb, 0xad56d8d847d88e9f,
+0xeb70fbfbcbfb8b30, 0xc1cdeeee9fee2371, 0xf8bb7c7ced7cc791, 0xcc716666856617e3,
+0xa77bdddd53dda68e, 0x2eaf17175c17b84b, 0x8e45474701470246, 0x211a9e9e429e84dc,
+0x89d4caca0fca1ec5, 0x5a582d2db42d7599, 0x632ebfbfc6bf9179, 0x0e3f07071c07381b,
+0x47acadad8ead0123, 0xb4b05a5a755aea2f, 0x1bef838336836cb5, 0x66b63333cc3385ff,
+0xc65c636391633ff2, 0x041202020802100a, 0x4993aaaa92aa3938, 0xe2de7171d971afa8,
+0x8dc6c8c807c80ecf, 0x32d119196419c87d, 0x923b494939497270, 0xaf5fd9d943d9869a,
+0xf931f2f2eff2c31d, 0xdba8e3e3abe34b48, 0xb6b95b5b715be22a, 0x0dbc88881a883492,
+0x293e9a9a529aa4c8, 0x4c0b262698262dbe, 0x64bf3232c8328dfa, 0x7d59b0b0fab0e94a,
+0xcff2e9e983e91b6a, 0x1e770f0f3c0f7833, 0xb733d5d573d5e6a6, 0x1df480803a8074ba,
+0x6127bebec2be997c, 0x87ebcdcd13cd26de, 0x68893434d034bde4, 0x903248483d487a75,
+0xe354ffffdbffab24, 0xf48d7a7af57af78f, 0x3d6490907a90f4ea, 0xbe9d5f5f615fc23e,
+0x403d202080201da0, 0xd00f6868bd6867d5, 0x34ca1a1a681ad072, 0x41b7aeae82ae192c,
+0x757db4b4eab4c95e, 0xa8ce54544d549a19, 0x3b7f93937693ece5, 0x442f222288220daa,
+0xc86364648d6407e9, 0xff2af1f1e3f1db12, 0xe6cc7373d173bfa2, 0x248212124812905a,
+0x807a40401d403a5d, 0x1048080820084028, 0x9b95c3c32bc356e8, 0xc5dfecec97ec337b,
+0xab4ddbdb4bdb9690, 0x5fc0a1a1bea1611f, 0x07918d8d0e8d1c83, 0x7ac83d3df43df5c9,
+0x335b97976697ccf1, 0x0000000000000000, 0x83f9cfcf1bcf36d4, 0x566e2b2bac2b4587,
+0xece17676c57697b3, 0x19e68282328264b0, 0xb128d6d67fd6fea9, 0x36c31b1b6c1bd877,
+0x7774b5b5eeb5c15b, 0x43beafaf86af1129, 0xd41d6a6ab56a77df, 0xa0ea50505d50ba0d,
+0x8a5745450945124c, 0xfb38f3f3ebf3cb18, 0x60ad3030c0309df0, 0xc3c4efef9bef2b74,
+0x7eda3f3ffc3fe5c3, 0xaac755554955921c, 0x59dba2a2b2a27910, 0xc9e9eaea8fea0365,
+0xca6a656589650fec, 0x6903babad2bab968, 0x5e4a2f2fbc2f6593, 0x9d8ec0c027c04ee7,
+0xa160dede5fdebe81, 0x38fc1c1c701ce06c, 0xe746fdfdd3fdbb2e, 0x9a1f4d4d294d5264,
+0x397692927292e4e0, 0xeafa7575c9758fbc, 0x0c3606061806301e, 0x09ae8a8a128a2498,
+0x794bb2b2f2b2f940, 0xd185e6e6bfe66359, 0x1c7e0e0e380e7036, 0x3ee71f1f7c1ff863,
+0xc4556262956237f7, 0xb53ad4d477d4eea3, 0x4d81a8a89aa82932, 0x315296966296c4f4,
+0xef62f9f9c3f99b3a, 0x97a3c5c533c566f6, 0x4a102525942535b1, 0xb2ab59597959f220,
+0x15d084842a8454ae, 0xe4c57272d572b7a7, 0x72ec3939e439d5dd, 0x98164c4c2d4c5a61,
+0xbc945e5e655eca3b, 0xf09f7878fd78e785, 0x70e53838e038ddd8, 0x05988c8c0a8c1486,
+0xbf17d1d163d1c6b2, 0x57e4a5a5aea5410b, 0xd9a1e2e2afe2434d, 0xc24e616199612ff8,
+0x7b42b3b3f6b3f145, 0x42342121842115a5, 0x25089c9c4a9c94d6, 0x3cee1e1e781ef066,
+0x8661434311432252, 0x93b1c7c73bc776fc, 0xe54ffcfcd7fcb32b, 0x0824040410042014,
+0xa2e351515951b208, 0x2f2599995e99bcc7, 0xda226d6da96d4fc4, 0x1a650d0d340d6839,
+0xe979fafacffa8335, 0xa369dfdf5bdfb684, 0xfca97e7ee57ed79b, 0x4819242490243db4,
+0x76fe3b3bec3bc5d7, 0x4b9aabab96ab313d, 0x81f0cece1fce3ed1, 0x2299111144118855,
+0x03838f8f068f0c89, 0x9c044e4e254e4a6b, 0x7366b7b7e6b7d151, 0xcbe0ebeb8beb0b60,
+0x78c13c3cf03cfdcc, 0x1ffd81813e817cbf, 0x354094946a94d4fe, 0xf31cf7f7fbf7eb0c,
+0x6f18b9b9deb9a167, 0x268b13134c13985f, 0x58512c2cb02c7d9c, 0xbb05d3d36bd3d6b8,
+0xd38ce7e7bbe76b5c, 0xdc396e6ea56e57cb, 0x95aac4c437c46ef3, 0x061b03030c03180f,
+0xacdc565645568a13, 0x885e44440d441a49, 0xfea07f7fe17fdf9e, 0x4f88a9a99ea92137,
+0x54672a2aa82a4d82, 0x6b0abbbbd6bbb16d, 0x9f87c1c123c146e2, 0xa6f153535153a202,
+0xa572dcdc57dcae8b, 0x16530b0b2c0b5827, 0x27019d9d4e9d9cd3, 0xd82b6c6cad6c47c1,
+0x62a43131c43195f5, 0xe8f37474cd7487b9, 0xf115f6f6fff6e309, 0x8c4c464605460a43,
+0x45a5acac8aac0926, 0x0fb589891e893c97, 0x28b414145014a044, 0xdfbae1e1a3e15b42,
+0x2ca616165816b04e, 0x74f73a3ae83acdd2, 0xd2066969b9696fd0, 0x124109092409482d,
+0xe0d77070dd70a7ad, 0x716fb6b6e2b6d954, 0xbd1ed0d067d0ceb7, 0xc7d6eded93ed3b7e,
+0x85e2cccc17cc2edb, 0x8468424215422a57, 0x2d2c98985a98b4c2, 0x55eda4a4aaa4490e,
+0x50752828a0285d88, 0xb8865c5c6d5cda31, 0xed6bf8f8c7f8933f, 0x11c28686228644a4,
+]
+C3 = [
+0x7830d818186018c0, 0xaf462623238c2305, 0xf991b8c6c63fc67e, 0x6fcdfbe8e887e813,
+0xa113cb878726874c, 0x626d11b8b8dab8a9, 0x0502090101040108, 0x6e9e0d4f4f214f42,
+0xee6c9b3636d836ad, 0x0451ffa6a6a2a659, 0xbdb90cd2d26fd2de, 0x06f70ef5f5f3f5fb,
+0x80f2967979f979ef, 0xcede306f6fa16f5f, 0xef3f6d91917e91fc, 0x07a4f852525552aa,
+0xfdc04760609d6027, 0x766535bcbccabc89, 0xcd2b379b9b569bac, 0x8c018a8e8e028e04,
+0x155bd2a3a3b6a371, 0x3c186c0c0c300c60, 0x8af6847b7bf17bff, 0xe16a803535d435b5,
+0x693af51d1d741de8, 0x47ddb3e0e0a7e053, 0xacb321d7d77bd7f6, 0xed999cc2c22fc25e,
+0x965c432e2eb82e6d, 0x7a96294b4b314b62, 0x21e15dfefedffea3, 0x16aed55757415782,
+0x412abd15155415a8, 0xb6eee87777c1779f, 0xeb6e923737dc37a5, 0x56d79ee5e5b3e57b,
+0xd923139f9f469f8c, 0x17fd23f0f0e7f0d3, 0x7f94204a4a354a6a, 0x95a944dada4fda9e,
+0x25b0a258587d58fa, 0xca8fcfc9c903c906, 0x8d527c2929a42955, 0x22145a0a0a280a50,
+0x4f7f50b1b1feb1e1, 0x1a5dc9a0a0baa069, 0xdad6146b6bb16b7f, 0xab17d985852e855c,
+0x73673cbdbdcebd81, 0x34ba8f5d5d695dd2, 0x5020901010401080, 0x03f507f4f4f7f4f3,
+0xc08bddcbcb0bcb16, 0xc67cd33e3ef83eed, 0x110a2d0505140528, 0xe6ce78676781671f,
+0x53d597e4e4b7e473, 0xbb4e0227279c2725, 0x5882734141194132, 0x9d0ba78b8b168b2c,
+0x0153f6a7a7a6a751, 0x94fab27d7de97dcf, 0xfb374995956e95dc, 0x9fad56d8d847d88e,
+0x30eb70fbfbcbfb8b, 0x71c1cdeeee9fee23, 0x91f8bb7c7ced7cc7, 0xe3cc716666856617,
+0x8ea77bdddd53dda6, 0x4b2eaf17175c17b8, 0x468e454747014702, 0xdc211a9e9e429e84,
+0xc589d4caca0fca1e, 0x995a582d2db42d75, 0x79632ebfbfc6bf91, 0x1b0e3f07071c0738,
+0x2347acadad8ead01, 0x2fb4b05a5a755aea, 0xb51bef838336836c, 0xff66b63333cc3385,
+0xf2c65c636391633f, 0x0a04120202080210, 0x384993aaaa92aa39, 0xa8e2de7171d971af,
+0xcf8dc6c8c807c80e, 0x7d32d119196419c8, 0x70923b4949394972, 0x9aaf5fd9d943d986,
+0x1df931f2f2eff2c3, 0x48dba8e3e3abe34b, 0x2ab6b95b5b715be2, 0x920dbc88881a8834,
+0xc8293e9a9a529aa4, 0xbe4c0b262698262d, 0xfa64bf3232c8328d, 0x4a7d59b0b0fab0e9,
+0x6acff2e9e983e91b, 0x331e770f0f3c0f78, 0xa6b733d5d573d5e6, 0xba1df480803a8074,
+0x7c6127bebec2be99, 0xde87ebcdcd13cd26, 0xe468893434d034bd, 0x75903248483d487a,
+0x24e354ffffdbffab, 0x8ff48d7a7af57af7, 0xea3d6490907a90f4, 0x3ebe9d5f5f615fc2,
+0xa0403d202080201d, 0xd5d00f6868bd6867, 0x7234ca1a1a681ad0, 0x2c41b7aeae82ae19,
+0x5e757db4b4eab4c9, 0x19a8ce54544d549a, 0xe53b7f93937693ec, 0xaa442f222288220d,
+0xe9c86364648d6407, 0x12ff2af1f1e3f1db, 0xa2e6cc7373d173bf, 0x5a24821212481290,
+0x5d807a40401d403a, 0x2810480808200840, 0xe89b95c3c32bc356, 0x7bc5dfecec97ec33,
+0x90ab4ddbdb4bdb96, 0x1f5fc0a1a1bea161, 0x8307918d8d0e8d1c, 0xc97ac83d3df43df5,
+0xf1335b97976697cc, 0x0000000000000000, 0xd483f9cfcf1bcf36, 0x87566e2b2bac2b45,
+0xb3ece17676c57697, 0xb019e68282328264, 0xa9b128d6d67fd6fe, 0x7736c31b1b6c1bd8,
+0x5b7774b5b5eeb5c1, 0x2943beafaf86af11, 0xdfd41d6a6ab56a77, 0x0da0ea50505d50ba,
+0x4c8a574545094512, 0x18fb38f3f3ebf3cb, 0xf060ad3030c0309d, 0x74c3c4efef9bef2b,
+0xc37eda3f3ffc3fe5, 0x1caac75555495592, 0x1059dba2a2b2a279, 0x65c9e9eaea8fea03,
+0xecca6a656589650f, 0x686903babad2bab9, 0x935e4a2f2fbc2f65, 0xe79d8ec0c027c04e,
+0x81a160dede5fdebe, 0x6c38fc1c1c701ce0, 0x2ee746fdfdd3fdbb, 0x649a1f4d4d294d52,
+0xe0397692927292e4, 0xbceafa7575c9758f, 0x1e0c360606180630, 0x9809ae8a8a128a24,
+0x40794bb2b2f2b2f9, 0x59d185e6e6bfe663, 0x361c7e0e0e380e70, 0x633ee71f1f7c1ff8,
+0xf7c4556262956237, 0xa3b53ad4d477d4ee, 0x324d81a8a89aa829, 0xf4315296966296c4,
+0x3aef62f9f9c3f99b, 0xf697a3c5c533c566, 0xb14a102525942535, 0x20b2ab59597959f2,
+0xae15d084842a8454, 0xa7e4c57272d572b7, 0xdd72ec3939e439d5, 0x6198164c4c2d4c5a,
+0x3bbc945e5e655eca, 0x85f09f7878fd78e7, 0xd870e53838e038dd, 0x8605988c8c0a8c14,
+0xb2bf17d1d163d1c6, 0x0b57e4a5a5aea541, 0x4dd9a1e2e2afe243, 0xf8c24e616199612f,
+0x457b42b3b3f6b3f1, 0xa542342121842115, 0xd625089c9c4a9c94, 0x663cee1e1e781ef0,
+0x5286614343114322, 0xfc93b1c7c73bc776, 0x2be54ffcfcd7fcb3, 0x1408240404100420,
+0x08a2e351515951b2, 0xc72f2599995e99bc, 0xc4da226d6da96d4f, 0x391a650d0d340d68,
+0x35e979fafacffa83, 0x84a369dfdf5bdfb6, 0x9bfca97e7ee57ed7, 0xb44819242490243d,
+0xd776fe3b3bec3bc5, 0x3d4b9aabab96ab31, 0xd181f0cece1fce3e, 0x5522991111441188,
+0x8903838f8f068f0c, 0x6b9c044e4e254e4a, 0x517366b7b7e6b7d1, 0x60cbe0ebeb8beb0b,
+0xcc78c13c3cf03cfd, 0xbf1ffd81813e817c, 0xfe354094946a94d4, 0x0cf31cf7f7fbf7eb,
+0x676f18b9b9deb9a1, 0x5f268b13134c1398, 0x9c58512c2cb02c7d, 0xb8bb05d3d36bd3d6,
+0x5cd38ce7e7bbe76b, 0xcbdc396e6ea56e57, 0xf395aac4c437c46e, 0x0f061b03030c0318,
+0x13acdc565645568a, 0x49885e44440d441a, 0x9efea07f7fe17fdf, 0x374f88a9a99ea921,
+0x8254672a2aa82a4d, 0x6d6b0abbbbd6bbb1, 0xe29f87c1c123c146, 0x02a6f153535153a2,
+0x8ba572dcdc57dcae, 0x2716530b0b2c0b58, 0xd327019d9d4e9d9c, 0xc1d82b6c6cad6c47,
+0xf562a43131c43195, 0xb9e8f37474cd7487, 0x09f115f6f6fff6e3, 0x438c4c464605460a,
+0x2645a5acac8aac09, 0x970fb589891e893c, 0x4428b414145014a0, 0x42dfbae1e1a3e15b,
+0x4e2ca616165816b0, 0xd274f73a3ae83acd, 0xd0d2066969b9696f, 0x2d12410909240948,
+0xade0d77070dd70a7, 0x54716fb6b6e2b6d9, 0xb7bd1ed0d067d0ce, 0x7ec7d6eded93ed3b,
+0xdb85e2cccc17cc2e, 0x578468424215422a, 0xc22d2c98985a98b4, 0x0e55eda4a4aaa449,
+0x8850752828a0285d, 0x31b8865c5c6d5cda, 0x3fed6bf8f8c7f893, 0xa411c28686228644,
+]
+C4 = [
+0xc07830d818186018, 0x05af462623238c23, 0x7ef991b8c6c63fc6, 0x136fcdfbe8e887e8,
+0x4ca113cb87872687, 0xa9626d11b8b8dab8, 0x0805020901010401, 0x426e9e0d4f4f214f,
+0xadee6c9b3636d836, 0x590451ffa6a6a2a6, 0xdebdb90cd2d26fd2, 0xfb06f70ef5f5f3f5,
+0xef80f2967979f979, 0x5fcede306f6fa16f, 0xfcef3f6d91917e91, 0xaa07a4f852525552,
+0x27fdc04760609d60, 0x89766535bcbccabc, 0xaccd2b379b9b569b, 0x048c018a8e8e028e,
+0x71155bd2a3a3b6a3, 0x603c186c0c0c300c, 0xff8af6847b7bf17b, 0xb5e16a803535d435,
+0xe8693af51d1d741d, 0x5347ddb3e0e0a7e0, 0xf6acb321d7d77bd7, 0x5eed999cc2c22fc2,
+0x6d965c432e2eb82e, 0x627a96294b4b314b, 0xa321e15dfefedffe, 0x8216aed557574157,
+0xa8412abd15155415, 0x9fb6eee87777c177, 0xa5eb6e923737dc37, 0x7b56d79ee5e5b3e5,
+0x8cd923139f9f469f, 0xd317fd23f0f0e7f0, 0x6a7f94204a4a354a, 0x9e95a944dada4fda,
+0xfa25b0a258587d58, 0x06ca8fcfc9c903c9, 0x558d527c2929a429, 0x5022145a0a0a280a,
+0xe14f7f50b1b1feb1, 0x691a5dc9a0a0baa0, 0x7fdad6146b6bb16b, 0x5cab17d985852e85,
+0x8173673cbdbdcebd, 0xd234ba8f5d5d695d, 0x8050209010104010, 0xf303f507f4f4f7f4,
+0x16c08bddcbcb0bcb, 0xedc67cd33e3ef83e, 0x28110a2d05051405, 0x1fe6ce7867678167,
+0x7353d597e4e4b7e4, 0x25bb4e0227279c27, 0x3258827341411941, 0x2c9d0ba78b8b168b,
+0x510153f6a7a7a6a7, 0xcf94fab27d7de97d, 0xdcfb374995956e95, 0x8e9fad56d8d847d8,
+0x8b30eb70fbfbcbfb, 0x2371c1cdeeee9fee, 0xc791f8bb7c7ced7c, 0x17e3cc7166668566,
+0xa68ea77bdddd53dd, 0xb84b2eaf17175c17, 0x02468e4547470147, 0x84dc211a9e9e429e,
+0x1ec589d4caca0fca, 0x75995a582d2db42d, 0x9179632ebfbfc6bf, 0x381b0e3f07071c07,
+0x012347acadad8ead, 0xea2fb4b05a5a755a, 0x6cb51bef83833683, 0x85ff66b63333cc33,
+0x3ff2c65c63639163, 0x100a041202020802, 0x39384993aaaa92aa, 0xafa8e2de7171d971,
+0x0ecf8dc6c8c807c8, 0xc87d32d119196419, 0x7270923b49493949, 0x869aaf5fd9d943d9,
+0xc31df931f2f2eff2, 0x4b48dba8e3e3abe3, 0xe22ab6b95b5b715b, 0x34920dbc88881a88,
+0xa4c8293e9a9a529a, 0x2dbe4c0b26269826, 0x8dfa64bf3232c832, 0xe94a7d59b0b0fab0,
+0x1b6acff2e9e983e9, 0x78331e770f0f3c0f, 0xe6a6b733d5d573d5, 0x74ba1df480803a80,
+0x997c6127bebec2be, 0x26de87ebcdcd13cd, 0xbde468893434d034, 0x7a75903248483d48,
+0xab24e354ffffdbff, 0xf78ff48d7a7af57a, 0xf4ea3d6490907a90, 0xc23ebe9d5f5f615f,
+0x1da0403d20208020, 0x67d5d00f6868bd68, 0xd07234ca1a1a681a, 0x192c41b7aeae82ae,
+0xc95e757db4b4eab4, 0x9a19a8ce54544d54, 0xece53b7f93937693, 0x0daa442f22228822,
+0x07e9c86364648d64, 0xdb12ff2af1f1e3f1, 0xbfa2e6cc7373d173, 0x905a248212124812,
+0x3a5d807a40401d40, 0x4028104808082008, 0x56e89b95c3c32bc3, 0x337bc5dfecec97ec,
+0x9690ab4ddbdb4bdb, 0x611f5fc0a1a1bea1, 0x1c8307918d8d0e8d, 0xf5c97ac83d3df43d,
+0xccf1335b97976697, 0x0000000000000000, 0x36d483f9cfcf1bcf, 0x4587566e2b2bac2b,
+0x97b3ece17676c576, 0x64b019e682823282, 0xfea9b128d6d67fd6, 0xd87736c31b1b6c1b,
+0xc15b7774b5b5eeb5, 0x112943beafaf86af, 0x77dfd41d6a6ab56a, 0xba0da0ea50505d50,
+0x124c8a5745450945, 0xcb18fb38f3f3ebf3, 0x9df060ad3030c030, 0x2b74c3c4efef9bef,
+0xe5c37eda3f3ffc3f, 0x921caac755554955, 0x791059dba2a2b2a2, 0x0365c9e9eaea8fea,
+0x0fecca6a65658965, 0xb9686903babad2ba, 0x65935e4a2f2fbc2f, 0x4ee79d8ec0c027c0,
+0xbe81a160dede5fde, 0xe06c38fc1c1c701c, 0xbb2ee746fdfdd3fd, 0x52649a1f4d4d294d,
+0xe4e0397692927292, 0x8fbceafa7575c975, 0x301e0c3606061806, 0x249809ae8a8a128a,
+0xf940794bb2b2f2b2, 0x6359d185e6e6bfe6, 0x70361c7e0e0e380e, 0xf8633ee71f1f7c1f,
+0x37f7c45562629562, 0xeea3b53ad4d477d4, 0x29324d81a8a89aa8, 0xc4f4315296966296,
+0x9b3aef62f9f9c3f9, 0x66f697a3c5c533c5, 0x35b14a1025259425, 0xf220b2ab59597959,
+0x54ae15d084842a84, 0xb7a7e4c57272d572, 0xd5dd72ec3939e439, 0x5a6198164c4c2d4c,
+0xca3bbc945e5e655e, 0xe785f09f7878fd78, 0xddd870e53838e038, 0x148605988c8c0a8c,
+0xc6b2bf17d1d163d1, 0x410b57e4a5a5aea5, 0x434dd9a1e2e2afe2, 0x2ff8c24e61619961,
+0xf1457b42b3b3f6b3, 0x15a5423421218421, 0x94d625089c9c4a9c, 0xf0663cee1e1e781e,
+0x2252866143431143, 0x76fc93b1c7c73bc7, 0xb32be54ffcfcd7fc, 0x2014082404041004,
+0xb208a2e351515951, 0xbcc72f2599995e99, 0x4fc4da226d6da96d, 0x68391a650d0d340d,
+0x8335e979fafacffa, 0xb684a369dfdf5bdf, 0xd79bfca97e7ee57e, 0x3db4481924249024,
+0xc5d776fe3b3bec3b, 0x313d4b9aabab96ab, 0x3ed181f0cece1fce, 0x8855229911114411,
+0x0c8903838f8f068f, 0x4a6b9c044e4e254e, 0xd1517366b7b7e6b7, 0x0b60cbe0ebeb8beb,
+0xfdcc78c13c3cf03c, 0x7cbf1ffd81813e81, 0xd4fe354094946a94, 0xeb0cf31cf7f7fbf7,
+0xa1676f18b9b9deb9, 0x985f268b13134c13, 0x7d9c58512c2cb02c, 0xd6b8bb05d3d36bd3,
+0x6b5cd38ce7e7bbe7, 0x57cbdc396e6ea56e, 0x6ef395aac4c437c4, 0x180f061b03030c03,
+0x8a13acdc56564556, 0x1a49885e44440d44, 0xdf9efea07f7fe17f, 0x21374f88a9a99ea9,
+0x4d8254672a2aa82a, 0xb16d6b0abbbbd6bb, 0x46e29f87c1c123c1, 0xa202a6f153535153,
+0xae8ba572dcdc57dc, 0x582716530b0b2c0b, 0x9cd327019d9d4e9d, 0x47c1d82b6c6cad6c,
+0x95f562a43131c431, 0x87b9e8f37474cd74, 0xe309f115f6f6fff6, 0x0a438c4c46460546,
+0x092645a5acac8aac, 0x3c970fb589891e89, 0xa04428b414145014, 0x5b42dfbae1e1a3e1,
+0xb04e2ca616165816, 0xcdd274f73a3ae83a, 0x6fd0d2066969b969, 0x482d124109092409,
+0xa7ade0d77070dd70, 0xd954716fb6b6e2b6, 0xceb7bd1ed0d067d0, 0x3b7ec7d6eded93ed,
+0x2edb85e2cccc17cc, 0x2a57846842421542, 0xb4c22d2c98985a98, 0x490e55eda4a4aaa4,
+0x5d8850752828a028, 0xda31b8865c5c6d5c, 0x933fed6bf8f8c7f8, 0x44a411c286862286,
+]
+C5 = [
+0x18c07830d8181860, 0x2305af462623238c, 0xc67ef991b8c6c63f, 0xe8136fcdfbe8e887,
+0x874ca113cb878726, 0xb8a9626d11b8b8da, 0x0108050209010104, 0x4f426e9e0d4f4f21,
+0x36adee6c9b3636d8, 0xa6590451ffa6a6a2, 0xd2debdb90cd2d26f, 0xf5fb06f70ef5f5f3,
+0x79ef80f2967979f9, 0x6f5fcede306f6fa1, 0x91fcef3f6d91917e, 0x52aa07a4f8525255,
+0x6027fdc04760609d, 0xbc89766535bcbcca, 0x9baccd2b379b9b56, 0x8e048c018a8e8e02,
+0xa371155bd2a3a3b6, 0x0c603c186c0c0c30, 0x7bff8af6847b7bf1, 0x35b5e16a803535d4,
+0x1de8693af51d1d74, 0xe05347ddb3e0e0a7, 0xd7f6acb321d7d77b, 0xc25eed999cc2c22f,
+0x2e6d965c432e2eb8, 0x4b627a96294b4b31, 0xfea321e15dfefedf, 0x578216aed5575741,
+0x15a8412abd151554, 0x779fb6eee87777c1, 0x37a5eb6e923737dc, 0xe57b56d79ee5e5b3,
+0x9f8cd923139f9f46, 0xf0d317fd23f0f0e7, 0x4a6a7f94204a4a35, 0xda9e95a944dada4f,
+0x58fa25b0a258587d, 0xc906ca8fcfc9c903, 0x29558d527c2929a4, 0x0a5022145a0a0a28,
+0xb1e14f7f50b1b1fe, 0xa0691a5dc9a0a0ba, 0x6b7fdad6146b6bb1, 0x855cab17d985852e,
+0xbd8173673cbdbdce, 0x5dd234ba8f5d5d69, 0x1080502090101040, 0xf4f303f507f4f4f7,
+0xcb16c08bddcbcb0b, 0x3eedc67cd33e3ef8, 0x0528110a2d050514, 0x671fe6ce78676781,
+0xe47353d597e4e4b7, 0x2725bb4e0227279c, 0x4132588273414119, 0x8b2c9d0ba78b8b16,
+0xa7510153f6a7a7a6, 0x7dcf94fab27d7de9, 0x95dcfb374995956e, 0xd88e9fad56d8d847,
+0xfb8b30eb70fbfbcb, 0xee2371c1cdeeee9f, 0x7cc791f8bb7c7ced, 0x6617e3cc71666685,
+0xdda68ea77bdddd53, 0x17b84b2eaf17175c, 0x4702468e45474701, 0x9e84dc211a9e9e42,
+0xca1ec589d4caca0f, 0x2d75995a582d2db4, 0xbf9179632ebfbfc6, 0x07381b0e3f07071c,
+0xad012347acadad8e, 0x5aea2fb4b05a5a75, 0x836cb51bef838336, 0x3385ff66b63333cc,
+0x633ff2c65c636391, 0x02100a0412020208, 0xaa39384993aaaa92, 0x71afa8e2de7171d9,
+0xc80ecf8dc6c8c807, 0x19c87d32d1191964, 0x497270923b494939, 0xd9869aaf5fd9d943,
+0xf2c31df931f2f2ef, 0xe34b48dba8e3e3ab, 0x5be22ab6b95b5b71, 0x8834920dbc88881a,
+0x9aa4c8293e9a9a52, 0x262dbe4c0b262698, 0x328dfa64bf3232c8, 0xb0e94a7d59b0b0fa,
+0xe91b6acff2e9e983, 0x0f78331e770f0f3c, 0xd5e6a6b733d5d573, 0x8074ba1df480803a,
+0xbe997c6127bebec2, 0xcd26de87ebcdcd13, 0x34bde468893434d0, 0x487a75903248483d,
+0xffab24e354ffffdb, 0x7af78ff48d7a7af5, 0x90f4ea3d6490907a, 0x5fc23ebe9d5f5f61,
+0x201da0403d202080, 0x6867d5d00f6868bd, 0x1ad07234ca1a1a68, 0xae192c41b7aeae82,
+0xb4c95e757db4b4ea, 0x549a19a8ce54544d, 0x93ece53b7f939376, 0x220daa442f222288,
+0x6407e9c86364648d, 0xf1db12ff2af1f1e3, 0x73bfa2e6cc7373d1, 0x12905a2482121248,
+0x403a5d807a40401d, 0x0840281048080820, 0xc356e89b95c3c32b, 0xec337bc5dfecec97,
+0xdb9690ab4ddbdb4b, 0xa1611f5fc0a1a1be, 0x8d1c8307918d8d0e, 0x3df5c97ac83d3df4,
+0x97ccf1335b979766, 0x0000000000000000, 0xcf36d483f9cfcf1b, 0x2b4587566e2b2bac,
+0x7697b3ece17676c5, 0x8264b019e6828232, 0xd6fea9b128d6d67f, 0x1bd87736c31b1b6c,
+0xb5c15b7774b5b5ee, 0xaf112943beafaf86, 0x6a77dfd41d6a6ab5, 0x50ba0da0ea50505d,
+0x45124c8a57454509, 0xf3cb18fb38f3f3eb, 0x309df060ad3030c0, 0xef2b74c3c4efef9b,
+0x3fe5c37eda3f3ffc, 0x55921caac7555549, 0xa2791059dba2a2b2, 0xea0365c9e9eaea8f,
+0x650fecca6a656589, 0xbab9686903babad2, 0x2f65935e4a2f2fbc, 0xc04ee79d8ec0c027,
+0xdebe81a160dede5f, 0x1ce06c38fc1c1c70, 0xfdbb2ee746fdfdd3, 0x4d52649a1f4d4d29,
+0x92e4e03976929272, 0x758fbceafa7575c9, 0x06301e0c36060618, 0x8a249809ae8a8a12,
+0xb2f940794bb2b2f2, 0xe66359d185e6e6bf, 0x0e70361c7e0e0e38, 0x1ff8633ee71f1f7c,
+0x6237f7c455626295, 0xd4eea3b53ad4d477, 0xa829324d81a8a89a, 0x96c4f43152969662,
+0xf99b3aef62f9f9c3, 0xc566f697a3c5c533, 0x2535b14a10252594, 0x59f220b2ab595979,
+0x8454ae15d084842a, 0x72b7a7e4c57272d5, 0x39d5dd72ec3939e4, 0x4c5a6198164c4c2d,
+0x5eca3bbc945e5e65, 0x78e785f09f7878fd, 0x38ddd870e53838e0, 0x8c148605988c8c0a,
+0xd1c6b2bf17d1d163, 0xa5410b57e4a5a5ae, 0xe2434dd9a1e2e2af, 0x612ff8c24e616199,
+0xb3f1457b42b3b3f6, 0x2115a54234212184, 0x9c94d625089c9c4a, 0x1ef0663cee1e1e78,
+0x4322528661434311, 0xc776fc93b1c7c73b, 0xfcb32be54ffcfcd7, 0x0420140824040410,
+0x51b208a2e3515159, 0x99bcc72f2599995e, 0x6d4fc4da226d6da9, 0x0d68391a650d0d34,
+0xfa8335e979fafacf, 0xdfb684a369dfdf5b, 0x7ed79bfca97e7ee5, 0x243db44819242490,
+0x3bc5d776fe3b3bec, 0xab313d4b9aabab96, 0xce3ed181f0cece1f, 0x1188552299111144,
+0x8f0c8903838f8f06, 0x4e4a6b9c044e4e25, 0xb7d1517366b7b7e6, 0xeb0b60cbe0ebeb8b,
+0x3cfdcc78c13c3cf0, 0x817cbf1ffd81813e, 0x94d4fe354094946a, 0xf7eb0cf31cf7f7fb,
+0xb9a1676f18b9b9de, 0x13985f268b13134c, 0x2c7d9c58512c2cb0, 0xd3d6b8bb05d3d36b,
+0xe76b5cd38ce7e7bb, 0x6e57cbdc396e6ea5, 0xc46ef395aac4c437, 0x03180f061b03030c,
+0x568a13acdc565645, 0x441a49885e44440d, 0x7fdf9efea07f7fe1, 0xa921374f88a9a99e,
+0x2a4d8254672a2aa8, 0xbbb16d6b0abbbbd6, 0xc146e29f87c1c123, 0x53a202a6f1535351,
+0xdcae8ba572dcdc57, 0x0b582716530b0b2c, 0x9d9cd327019d9d4e, 0x6c47c1d82b6c6cad,
+0x3195f562a43131c4, 0x7487b9e8f37474cd, 0xf6e309f115f6f6ff, 0x460a438c4c464605,
+0xac092645a5acac8a, 0x893c970fb589891e, 0x14a04428b4141450, 0xe15b42dfbae1e1a3,
+0x16b04e2ca6161658, 0x3acdd274f73a3ae8, 0x696fd0d2066969b9, 0x09482d1241090924,
+0x70a7ade0d77070dd, 0xb6d954716fb6b6e2, 0xd0ceb7bd1ed0d067, 0xed3b7ec7d6eded93,
+0xcc2edb85e2cccc17, 0x422a578468424215, 0x98b4c22d2c98985a, 0xa4490e55eda4a4aa,
+0x285d8850752828a0, 0x5cda31b8865c5c6d, 0xf8933fed6bf8f8c7, 0x8644a411c2868622,
+]
+C6 = [
+0x6018c07830d81818, 0x8c2305af46262323, 0x3fc67ef991b8c6c6, 0x87e8136fcdfbe8e8,
+0x26874ca113cb8787, 0xdab8a9626d11b8b8, 0x0401080502090101, 0x214f426e9e0d4f4f,
+0xd836adee6c9b3636, 0xa2a6590451ffa6a6, 0x6fd2debdb90cd2d2, 0xf3f5fb06f70ef5f5,
+0xf979ef80f2967979, 0xa16f5fcede306f6f, 0x7e91fcef3f6d9191, 0x5552aa07a4f85252,
+0x9d6027fdc0476060, 0xcabc89766535bcbc, 0x569baccd2b379b9b, 0x028e048c018a8e8e,
+0xb6a371155bd2a3a3, 0x300c603c186c0c0c, 0xf17bff8af6847b7b, 0xd435b5e16a803535,
+0x741de8693af51d1d, 0xa7e05347ddb3e0e0, 0x7bd7f6acb321d7d7, 0x2fc25eed999cc2c2,
+0xb82e6d965c432e2e, 0x314b627a96294b4b, 0xdffea321e15dfefe, 0x41578216aed55757,
+0x5415a8412abd1515, 0xc1779fb6eee87777, 0xdc37a5eb6e923737, 0xb3e57b56d79ee5e5,
+0x469f8cd923139f9f, 0xe7f0d317fd23f0f0, 0x354a6a7f94204a4a, 0x4fda9e95a944dada,
+0x7d58fa25b0a25858, 0x03c906ca8fcfc9c9, 0xa429558d527c2929, 0x280a5022145a0a0a,
+0xfeb1e14f7f50b1b1, 0xbaa0691a5dc9a0a0, 0xb16b7fdad6146b6b, 0x2e855cab17d98585,
+0xcebd8173673cbdbd, 0x695dd234ba8f5d5d, 0x4010805020901010, 0xf7f4f303f507f4f4,
+0x0bcb16c08bddcbcb, 0xf83eedc67cd33e3e, 0x140528110a2d0505, 0x81671fe6ce786767,
+0xb7e47353d597e4e4, 0x9c2725bb4e022727, 0x1941325882734141, 0x168b2c9d0ba78b8b,
+0xa6a7510153f6a7a7, 0xe97dcf94fab27d7d, 0x6e95dcfb37499595, 0x47d88e9fad56d8d8,
+0xcbfb8b30eb70fbfb, 0x9fee2371c1cdeeee, 0xed7cc791f8bb7c7c, 0x856617e3cc716666,
+0x53dda68ea77bdddd, 0x5c17b84b2eaf1717, 0x014702468e454747, 0x429e84dc211a9e9e,
+0x0fca1ec589d4caca, 0xb42d75995a582d2d, 0xc6bf9179632ebfbf, 0x1c07381b0e3f0707,
+0x8ead012347acadad, 0x755aea2fb4b05a5a, 0x36836cb51bef8383, 0xcc3385ff66b63333,
+0x91633ff2c65c6363, 0x0802100a04120202, 0x92aa39384993aaaa, 0xd971afa8e2de7171,
+0x07c80ecf8dc6c8c8, 0x6419c87d32d11919, 0x39497270923b4949, 0x43d9869aaf5fd9d9,
+0xeff2c31df931f2f2, 0xabe34b48dba8e3e3, 0x715be22ab6b95b5b, 0x1a8834920dbc8888,
+0x529aa4c8293e9a9a, 0x98262dbe4c0b2626, 0xc8328dfa64bf3232, 0xfab0e94a7d59b0b0,
+0x83e91b6acff2e9e9, 0x3c0f78331e770f0f, 0x73d5e6a6b733d5d5, 0x3a8074ba1df48080,
+0xc2be997c6127bebe, 0x13cd26de87ebcdcd, 0xd034bde468893434, 0x3d487a7590324848,
+0xdbffab24e354ffff, 0xf57af78ff48d7a7a, 0x7a90f4ea3d649090, 0x615fc23ebe9d5f5f,
+0x80201da0403d2020, 0xbd6867d5d00f6868, 0x681ad07234ca1a1a, 0x82ae192c41b7aeae,
+0xeab4c95e757db4b4, 0x4d549a19a8ce5454, 0x7693ece53b7f9393, 0x88220daa442f2222,
+0x8d6407e9c8636464, 0xe3f1db12ff2af1f1, 0xd173bfa2e6cc7373, 0x4812905a24821212,
+0x1d403a5d807a4040, 0x2008402810480808, 0x2bc356e89b95c3c3, 0x97ec337bc5dfecec,
+0x4bdb9690ab4ddbdb, 0xbea1611f5fc0a1a1, 0x0e8d1c8307918d8d, 0xf43df5c97ac83d3d,
+0x6697ccf1335b9797, 0x0000000000000000, 0x1bcf36d483f9cfcf, 0xac2b4587566e2b2b,
+0xc57697b3ece17676, 0x328264b019e68282, 0x7fd6fea9b128d6d6, 0x6c1bd87736c31b1b,
+0xeeb5c15b7774b5b5, 0x86af112943beafaf, 0xb56a77dfd41d6a6a, 0x5d50ba0da0ea5050,
+0x0945124c8a574545, 0xebf3cb18fb38f3f3, 0xc0309df060ad3030, 0x9bef2b74c3c4efef,
+0xfc3fe5c37eda3f3f, 0x4955921caac75555, 0xb2a2791059dba2a2, 0x8fea0365c9e9eaea,
+0x89650fecca6a6565, 0xd2bab9686903baba, 0xbc2f65935e4a2f2f, 0x27c04ee79d8ec0c0,
+0x5fdebe81a160dede, 0x701ce06c38fc1c1c, 0xd3fdbb2ee746fdfd, 0x294d52649a1f4d4d,
+0x7292e4e039769292, 0xc9758fbceafa7575, 0x1806301e0c360606, 0x128a249809ae8a8a,
+0xf2b2f940794bb2b2, 0xbfe66359d185e6e6, 0x380e70361c7e0e0e, 0x7c1ff8633ee71f1f,
+0x956237f7c4556262, 0x77d4eea3b53ad4d4, 0x9aa829324d81a8a8, 0x6296c4f431529696,
+0xc3f99b3aef62f9f9, 0x33c566f697a3c5c5, 0x942535b14a102525, 0x7959f220b2ab5959,
+0x2a8454ae15d08484, 0xd572b7a7e4c57272, 0xe439d5dd72ec3939, 0x2d4c5a6198164c4c,
+0x655eca3bbc945e5e, 0xfd78e785f09f7878, 0xe038ddd870e53838, 0x0a8c148605988c8c,
+0x63d1c6b2bf17d1d1, 0xaea5410b57e4a5a5, 0xafe2434dd9a1e2e2, 0x99612ff8c24e6161,
+0xf6b3f1457b42b3b3, 0x842115a542342121, 0x4a9c94d625089c9c, 0x781ef0663cee1e1e,
+0x1143225286614343, 0x3bc776fc93b1c7c7, 0xd7fcb32be54ffcfc, 0x1004201408240404,
+0x5951b208a2e35151, 0x5e99bcc72f259999, 0xa96d4fc4da226d6d, 0x340d68391a650d0d,
+0xcffa8335e979fafa, 0x5bdfb684a369dfdf, 0xe57ed79bfca97e7e, 0x90243db448192424,
+0xec3bc5d776fe3b3b, 0x96ab313d4b9aabab, 0x1fce3ed181f0cece, 0x4411885522991111,
+0x068f0c8903838f8f, 0x254e4a6b9c044e4e, 0xe6b7d1517366b7b7, 0x8beb0b60cbe0ebeb,
+0xf03cfdcc78c13c3c, 0x3e817cbf1ffd8181, 0x6a94d4fe35409494, 0xfbf7eb0cf31cf7f7,
+0xdeb9a1676f18b9b9, 0x4c13985f268b1313, 0xb02c7d9c58512c2c, 0x6bd3d6b8bb05d3d3,
+0xbbe76b5cd38ce7e7, 0xa56e57cbdc396e6e, 0x37c46ef395aac4c4, 0x0c03180f061b0303,
+0x45568a13acdc5656, 0x0d441a49885e4444, 0xe17fdf9efea07f7f, 0x9ea921374f88a9a9,
+0xa82a4d8254672a2a, 0xd6bbb16d6b0abbbb, 0x23c146e29f87c1c1, 0x5153a202a6f15353,
+0x57dcae8ba572dcdc, 0x2c0b582716530b0b, 0x4e9d9cd327019d9d, 0xad6c47c1d82b6c6c,
+0xc43195f562a43131, 0xcd7487b9e8f37474, 0xfff6e309f115f6f6, 0x05460a438c4c4646,
+0x8aac092645a5acac, 0x1e893c970fb58989, 0x5014a04428b41414, 0xa3e15b42dfbae1e1,
+0x5816b04e2ca61616, 0xe83acdd274f73a3a, 0xb9696fd0d2066969, 0x2409482d12410909,
+0xdd70a7ade0d77070, 0xe2b6d954716fb6b6, 0x67d0ceb7bd1ed0d0, 0x93ed3b7ec7d6eded,
+0x17cc2edb85e2cccc, 0x15422a5784684242, 0x5a98b4c22d2c9898, 0xaaa4490e55eda4a4,
+0xa0285d8850752828, 0x6d5cda31b8865c5c, 0xc7f8933fed6bf8f8, 0x228644a411c28686,
+]
+C7 = [
+0x186018c07830d818, 0x238c2305af462623, 0xc63fc67ef991b8c6, 0xe887e8136fcdfbe8,
+0x8726874ca113cb87, 0xb8dab8a9626d11b8, 0x0104010805020901, 0x4f214f426e9e0d4f,
+0x36d836adee6c9b36, 0xa6a2a6590451ffa6, 0xd26fd2debdb90cd2, 0xf5f3f5fb06f70ef5,
+0x79f979ef80f29679, 0x6fa16f5fcede306f, 0x917e91fcef3f6d91, 0x525552aa07a4f852,
+0x609d6027fdc04760, 0xbccabc89766535bc, 0x9b569baccd2b379b, 0x8e028e048c018a8e,
+0xa3b6a371155bd2a3, 0x0c300c603c186c0c, 0x7bf17bff8af6847b, 0x35d435b5e16a8035,
+0x1d741de8693af51d, 0xe0a7e05347ddb3e0, 0xd77bd7f6acb321d7, 0xc22fc25eed999cc2,
+0x2eb82e6d965c432e, 0x4b314b627a96294b, 0xfedffea321e15dfe, 0x5741578216aed557,
+0x155415a8412abd15, 0x77c1779fb6eee877, 0x37dc37a5eb6e9237, 0xe5b3e57b56d79ee5,
+0x9f469f8cd923139f, 0xf0e7f0d317fd23f0, 0x4a354a6a7f94204a, 0xda4fda9e95a944da,
+0x587d58fa25b0a258, 0xc903c906ca8fcfc9, 0x29a429558d527c29, 0x0a280a5022145a0a,
+0xb1feb1e14f7f50b1, 0xa0baa0691a5dc9a0, 0x6bb16b7fdad6146b, 0x852e855cab17d985,
+0xbdcebd8173673cbd, 0x5d695dd234ba8f5d, 0x1040108050209010, 0xf4f7f4f303f507f4,
+0xcb0bcb16c08bddcb, 0x3ef83eedc67cd33e, 0x05140528110a2d05, 0x6781671fe6ce7867,
+0xe4b7e47353d597e4, 0x279c2725bb4e0227, 0x4119413258827341, 0x8b168b2c9d0ba78b,
+0xa7a6a7510153f6a7, 0x7de97dcf94fab27d, 0x956e95dcfb374995, 0xd847d88e9fad56d8,
+0xfbcbfb8b30eb70fb, 0xee9fee2371c1cdee, 0x7ced7cc791f8bb7c, 0x66856617e3cc7166,
+0xdd53dda68ea77bdd, 0x175c17b84b2eaf17, 0x47014702468e4547, 0x9e429e84dc211a9e,
+0xca0fca1ec589d4ca, 0x2db42d75995a582d, 0xbfc6bf9179632ebf, 0x071c07381b0e3f07,
+0xad8ead012347acad, 0x5a755aea2fb4b05a, 0x8336836cb51bef83, 0x33cc3385ff66b633,
+0x6391633ff2c65c63, 0x020802100a041202, 0xaa92aa39384993aa, 0x71d971afa8e2de71,
+0xc807c80ecf8dc6c8, 0x196419c87d32d119, 0x4939497270923b49, 0xd943d9869aaf5fd9,
+0xf2eff2c31df931f2, 0xe3abe34b48dba8e3, 0x5b715be22ab6b95b, 0x881a8834920dbc88,
+0x9a529aa4c8293e9a, 0x2698262dbe4c0b26, 0x32c8328dfa64bf32, 0xb0fab0e94a7d59b0,
+0xe983e91b6acff2e9, 0x0f3c0f78331e770f, 0xd573d5e6a6b733d5, 0x803a8074ba1df480,
+0xbec2be997c6127be, 0xcd13cd26de87ebcd, 0x34d034bde4688934, 0x483d487a75903248,
+0xffdbffab24e354ff, 0x7af57af78ff48d7a, 0x907a90f4ea3d6490, 0x5f615fc23ebe9d5f,
+0x2080201da0403d20, 0x68bd6867d5d00f68, 0x1a681ad07234ca1a, 0xae82ae192c41b7ae,
+0xb4eab4c95e757db4, 0x544d549a19a8ce54, 0x937693ece53b7f93, 0x2288220daa442f22,
+0x648d6407e9c86364, 0xf1e3f1db12ff2af1, 0x73d173bfa2e6cc73, 0x124812905a248212,
+0x401d403a5d807a40, 0x0820084028104808, 0xc32bc356e89b95c3, 0xec97ec337bc5dfec,
+0xdb4bdb9690ab4ddb, 0xa1bea1611f5fc0a1, 0x8d0e8d1c8307918d, 0x3df43df5c97ac83d,
+0x976697ccf1335b97, 0x0000000000000000, 0xcf1bcf36d483f9cf, 0x2bac2b4587566e2b,
+0x76c57697b3ece176, 0x82328264b019e682, 0xd67fd6fea9b128d6, 0x1b6c1bd87736c31b,
+0xb5eeb5c15b7774b5, 0xaf86af112943beaf, 0x6ab56a77dfd41d6a, 0x505d50ba0da0ea50,
+0x450945124c8a5745, 0xf3ebf3cb18fb38f3, 0x30c0309df060ad30, 0xef9bef2b74c3c4ef,
+0x3ffc3fe5c37eda3f, 0x554955921caac755, 0xa2b2a2791059dba2, 0xea8fea0365c9e9ea,
+0x6589650fecca6a65, 0xbad2bab9686903ba, 0x2fbc2f65935e4a2f, 0xc027c04ee79d8ec0,
+0xde5fdebe81a160de, 0x1c701ce06c38fc1c, 0xfdd3fdbb2ee746fd, 0x4d294d52649a1f4d,
+0x927292e4e0397692, 0x75c9758fbceafa75, 0x061806301e0c3606, 0x8a128a249809ae8a,
+0xb2f2b2f940794bb2, 0xe6bfe66359d185e6, 0x0e380e70361c7e0e, 0x1f7c1ff8633ee71f,
+0x62956237f7c45562, 0xd477d4eea3b53ad4, 0xa89aa829324d81a8, 0x966296c4f4315296,
+0xf9c3f99b3aef62f9, 0xc533c566f697a3c5, 0x25942535b14a1025, 0x597959f220b2ab59,
+0x842a8454ae15d084, 0x72d572b7a7e4c572, 0x39e439d5dd72ec39, 0x4c2d4c5a6198164c,
+0x5e655eca3bbc945e, 0x78fd78e785f09f78, 0x38e038ddd870e538, 0x8c0a8c148605988c,
+0xd163d1c6b2bf17d1, 0xa5aea5410b57e4a5, 0xe2afe2434dd9a1e2, 0x6199612ff8c24e61,
+0xb3f6b3f1457b42b3, 0x21842115a5423421, 0x9c4a9c94d625089c, 0x1e781ef0663cee1e,
+0x4311432252866143, 0xc73bc776fc93b1c7, 0xfcd7fcb32be54ffc, 0x0410042014082404,
+0x515951b208a2e351, 0x995e99bcc72f2599, 0x6da96d4fc4da226d, 0x0d340d68391a650d,
+0xfacffa8335e979fa, 0xdf5bdfb684a369df, 0x7ee57ed79bfca97e, 0x2490243db4481924,
+0x3bec3bc5d776fe3b, 0xab96ab313d4b9aab, 0xce1fce3ed181f0ce, 0x1144118855229911,
+0x8f068f0c8903838f, 0x4e254e4a6b9c044e, 0xb7e6b7d1517366b7, 0xeb8beb0b60cbe0eb,
+0x3cf03cfdcc78c13c, 0x813e817cbf1ffd81, 0x946a94d4fe354094, 0xf7fbf7eb0cf31cf7,
+0xb9deb9a1676f18b9, 0x134c13985f268b13, 0x2cb02c7d9c58512c, 0xd36bd3d6b8bb05d3,
+0xe7bbe76b5cd38ce7, 0x6ea56e57cbdc396e, 0xc437c46ef395aac4, 0x030c03180f061b03,
+0x5645568a13acdc56, 0x440d441a49885e44, 0x7fe17fdf9efea07f, 0xa99ea921374f88a9,
+0x2aa82a4d8254672a, 0xbbd6bbb16d6b0abb, 0xc123c146e29f87c1, 0x535153a202a6f153,
+0xdc57dcae8ba572dc, 0x0b2c0b582716530b, 0x9d4e9d9cd327019d, 0x6cad6c47c1d82b6c,
+0x31c43195f562a431, 0x74cd7487b9e8f374, 0xf6fff6e309f115f6, 0x4605460a438c4c46,
+0xac8aac092645a5ac, 0x891e893c970fb589, 0x145014a04428b414, 0xe1a3e15b42dfbae1,
+0x165816b04e2ca616, 0x3ae83acdd274f73a, 0x69b9696fd0d20669, 0x092409482d124109,
+0x70dd70a7ade0d770, 0xb6e2b6d954716fb6, 0xd067d0ceb7bd1ed0, 0xed93ed3b7ec7d6ed,
+0xcc17cc2edb85e2cc, 0x4215422a57846842, 0x985a98b4c22d2c98, 0xa4aaa4490e55eda4,
+0x28a0285d88507528, 0x5c6d5cda31b8865c, 0xf8c7f8933fed6bf8, 0x86228644a411c286,
+]
+
+rc = [
+0x0000000000000000,
+0x1823c6e887b8014f,
+0x36a6d2f5796f9152,
+0x60bc9b8ea30c7b35,
+0x1de0d7c22e4bfe57,
+0x157737e59ff04ada,
+0x58c9290ab1a06b85,
+0xbd5d10f4cb3e0567,
+0xe427418ba77d95d8,
+0xfbee7c66dd17479e,
+0xca2dbf07ad5a8333
+]
+
+DIGESTBYTES = 64
+class WhirlpoolStruct:
+    def __init__(self):
+        self.bitLength = [0]*32
+        self.buffer = [0]*64
+        self.bufferBits = 0
+        self.bufferPos = 0
+        self.hash = [0]*8
+
+def WhirlpoolInit(ctx):
+    ctx = WhirlpoolStruct()
+    return
+
+def WhirlpoolAdd(source, sourceBits, ctx):
+    if sys.hexversion < 0x3000000:
+        source = [ord(s)&0xff for s in source]
+
+    carry = 0
+    value = sourceBits
+    i = 31
+    while i >= 0 and (carry != 0 or value != 0):
+        carry += ctx.bitLength[i] + ((value % 0x100000000) & 0xff)
+        ctx.bitLength[i] = carry % 0x100
+        carry >>= 8
+        value >>= 8
+        i -= 1
+
+    bufferBits = ctx.bufferBits
+    bufferPos = ctx.bufferPos
+    sourcePos = 0
+    sourceGap = (8 - (sourceBits & 7)) & 7
+    bufferRem = ctx.bufferBits & 7
+    buffr = ctx.buffer
+
+    while sourceBits > 8:
+        b = ((source[sourcePos] << sourceGap) & 0xff) | ((source[sourcePos + 1] & 0xff) >> (8 - sourceGap))
+        buffr[bufferPos] |= (b >> bufferRem) % 0x100
+        bufferPos += 1
+        bufferBits += 8 - bufferRem
+        if bufferBits == 512:
+            processBuffer(ctx)
+            bufferBits = 0
+            bufferPos = 0
+
+        buffr[bufferPos] = b << (8 - bufferRem)
+        bufferBits += bufferRem
+
+        sourceBits -= 8
+        sourcePos += 1
+
+    b = (source[sourcePos] << sourceGap) & 0xff
+    buffr[bufferPos] |= b >> bufferRem
+    if bufferRem + sourceBits < 8:
+        bufferBits += sourceBits
+    else:
+        bufferPos += 1
+        bufferBits += 8 - bufferRem
+        sourceBits -= 8 - bufferRem
+        if bufferBits == 512:
+            processBuffer(ctx)
+            bufferBits = 0
+            bufferPos = 0
+        buffr[bufferPos] = b << (8 - bufferRem)
+        bufferBits += sourceBits
+    ctx.bufferBits = bufferBits
+    ctx.bufferPos = bufferPos
+
+def WhirlpoolFinalize(ctx):
+    bufferPos = ctx.bufferPos
+    ctx.buffer[bufferPos] |= 0x80 >> (ctx.bufferBits & 7)
+    bufferPos += 1
+    if bufferPos > 32:
+        if bufferPos < 64:
+            for i in xrange(64 - bufferPos):
+                ctx.buffer[bufferPos+i] = 0
+        processBuffer(ctx)
+        bufferPos = 0
+    if bufferPos < 32:
+        for i in xrange(32 - bufferPos):
+            ctx.buffer[bufferPos+i] = 0
+    bufferPos = 32
+    for i in xrange(32):
+        ctx.buffer[32+i] = ctx.bitLength[i]
+    processBuffer(ctx)
+    digest = ''
+    for i in xrange(8):
+        digest += chr((ctx.hash[i] >> 56) % 0x100)
+        digest += chr((ctx.hash[i] >> 48) % 0x100)
+        digest += chr((ctx.hash[i] >> 40) % 0x100)
+        digest += chr((ctx.hash[i] >> 32) % 0x100)
+        digest += chr((ctx.hash[i] >> 24) % 0x100)
+        digest += chr((ctx.hash[i] >> 16) % 0x100)
+        digest += chr((ctx.hash[i] >> 8) % 0x100)
+        digest += chr((ctx.hash[i]) % 0x100)
+    ctx.bufferPos = bufferPos
+    return digest
+
+def CDo(buf, a0, a1, a2, a3, a4, a5, a6, a7):
+    return C0[((buf[a0] >> 56) % 0x100000000) & 0xff] ^ \
+           C1[((buf[a1] >> 48) % 0x100000000) & 0xff] ^ \
+           C2[((buf[a2] >> 40) % 0x100000000) & 0xff] ^ \
+           C3[((buf[a3] >> 32) % 0x100000000) & 0xff] ^ \
+           C4[((buf[a4] >> 24) % 0x100000000) & 0xff] ^ \
+           C5[((buf[a5] >> 16) % 0x100000000) & 0xff] ^ \
+           C6[((buf[a6] >>  8) % 0x100000000) & 0xff] ^ \
+           C7[((buf[a7] >>  0) % 0x100000000) & 0xff]
+
+def processBuffer(ctx):
+    i, r = 0, 0
+    K = [0]*8
+    block = [0]*8
+    state = [0]*8
+    L = [0]*8
+    buffr = ctx.buffer
+
+    buf_cnt = 0
+    for i in xrange(8):
+        block[i] = ((buffr[buf_cnt+0] & 0xff) << 56) ^ \
+                   ((buffr[buf_cnt+1] & 0xff) << 48) ^ \
+                   ((buffr[buf_cnt+2] & 0xff) << 40) ^ \
+                   ((buffr[buf_cnt+3] & 0xff) << 32) ^ \
+                   ((buffr[buf_cnt+4] & 0xff) << 24) ^ \
+                   ((buffr[buf_cnt+5] & 0xff) << 16) ^ \
+                   ((buffr[buf_cnt+6] & 0xff) <<  8) ^ \
+                   ((buffr[buf_cnt+7] & 0xff) <<  0)
+        buf_cnt += 8
+    for i in xrange(8):
+        K[i] = ctx.hash[i]
+        state[i] = block[i] ^ K[i]
+
+    for r in xrange(1, R+1):
+        L[0] = CDo(K, 0, 7, 6, 5, 4, 3, 2, 1) ^ rc[r]
+        L[1] = CDo(K, 1, 0, 7, 6, 5, 4, 3, 2)
+        L[2] = CDo(K, 2, 1, 0, 7, 6, 5, 4, 3)
+        L[3] = CDo(K, 3, 2, 1, 0, 7, 6, 5, 4)
+        L[4] = CDo(K, 4, 3, 2, 1, 0, 7, 6, 5)
+        L[5] = CDo(K, 5, 4, 3, 2, 1, 0, 7, 6)
+        L[6] = CDo(K, 6, 5, 4, 3, 2, 1, 0, 7)
+        L[7] = CDo(K, 7, 6, 5, 4, 3, 2, 1, 0)
+        for i in xrange(8):
+            K[i] = L[i]
+        L[0] = CDo(state, 0, 7, 6, 5, 4, 3, 2, 1) ^ K[0]
+        L[1] = CDo(state, 1, 0, 7, 6, 5, 4, 3, 2) ^ K[1]
+        L[2] = CDo(state, 2, 1, 0, 7, 6, 5, 4, 3) ^ K[2]
+        L[3] = CDo(state, 3, 2, 1, 0, 7, 6, 5, 4) ^ K[3]
+        L[4] = CDo(state, 4, 3, 2, 1, 0, 7, 6, 5) ^ K[4]
+        L[5] = CDo(state, 5, 4, 3, 2, 1, 0, 7, 6) ^ K[5]
+        L[6] = CDo(state, 6, 5, 4, 3, 2, 1, 0, 7) ^ K[6]
+        L[7] = CDo(state, 7, 6, 5, 4, 3, 2, 1, 0) ^ K[7]
+        for i in xrange(8):
+            state[i] = L[i]
+    # apply the Miyaguchi-Preneel compression function
+    for i in xrange(8):
+        ctx.hash[i] ^= state[i] ^ block[i]
+    return
+
+#
+# Tests.
+#
+
+if __name__ == '__main__':
+    assert Whirlpool(b'The quick brown fox jumps over the lazy dog').hexdigest() == \
+        'b97de512e91e3828b40d2b0fdce9ceb3c4a71f9bea8d88e75c4fa854df36725fd2b52eb6544edcacd6f8beddfea403cb55ae31f03ad62a5ef54e42ee82c3fb35'
+    assert Whirlpool(b'The quick brown fox jumps over the lazy eog').hexdigest() == \
+        'c27ba124205f72e6847f3e19834f925cc666d0974167af915bb462420ed40cc50900d85a1f923219d832357750492d5c143011a76988344c2635e69d06f2d38c'
+    assert Whirlpool(b'').hexdigest() == \
+        '19fa61d75522a4669b44e39c1d2e1726c530232130d407f89afee0964997f7a73e83be698b288febcf88e3e03c4f0757ea8964e59b63d93708b138cc42a66eb3'
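
A minimal usage sketch of the pure-Python fallback above, mirroring its
self-tests; the import path is taken from the diff header, and the Whirlpool
class itself is defined earlier in the file (not shown in this hunk):

    from portage.util.whirlpool import Whirlpool

    w = Whirlpool(b'The quick brown fox jumps over the lazy dog')
    digest = w.hexdigest()   # 128 hex chars, i.e. DIGESTBYTES (64) bytes
    assert digest.startswith('b97de512')

As written, WhirlpoolInit() rebinds its ctx argument and returns, so it is a
no-op for callers; the Whirlpool class presumably constructs its own
WhirlpoolStruct instead of relying on it.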

diff --git a/portage_with_autodep/pym/portage/util/whirlpool.pyo b/portage_with_autodep/pym/portage/util/whirlpool.pyo
new file mode 100644
index 0000000..4bcf49a
Binary files /dev/null and b/portage_with_autodep/pym/portage/util/whirlpool.pyo differ

diff --git a/portage_with_autodep/pym/portage/versions.py b/portage_with_autodep/pym/portage/versions.py
index f8691d1..db14e99 100644
--- a/portage_with_autodep/pym/portage/versions.py
+++ b/portage_with_autodep/pym/portage/versions.py
@@ -1,5 +1,5 @@
 # versions.py -- core Portage functionality
-# Copyright 1998-2010 Gentoo Foundation
+# Copyright 1998-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 __all__ = [
@@ -9,14 +9,26 @@ __all__ = [
 ]
 
 import re
+import sys
 import warnings
 
+if sys.hexversion < 0x3000000:
+	_unicode = unicode
+else:
+	_unicode = str
+
 import portage
 portage.proxy.lazyimport.lazyimport(globals(),
-	'portage.util:cmp_sort_key'
+	'portage.repository.config:_gen_valid_repo',
+	'portage.util:cmp_sort_key',
 )
+from portage import _unicode_decode
+from portage.eapi import eapi_allows_dots_in_PN
+from portage.exception import InvalidData
 from portage.localization import _
 
+_unknown_repo = "__unknown__"
+
 # \w is [a-zA-Z0-9_]
 
 # 2.1.1 A category name may contain any of the characters [A-Za-z0-9+_.-].
@@ -26,15 +38,27 @@ _cat = r'[\w+][\w+.-]*'
 # 2.1.2 A package name may contain any of the characters [A-Za-z0-9+_-].
 # It must not begin with a hyphen,
 # and must not end in a hyphen followed by one or more digits.
-_pkg = r'[\w+][\w+-]*?'
+_pkg = {
+	"dots_disallowed_in_PN": r'[\w+][\w+-]*?',
+	"dots_allowed_in_PN":    r'[\w+][\w+.-]*?',
+}
 
 _v = r'(cvs\.)?(\d+)((\.\d+)*)([a-z]?)((_(pre|p|beta|alpha|rc)\d*)*)'
 _rev = r'\d+'
 _vr = _v + '(-r(' + _rev + '))?'
 
-_cp = '(' + _cat + '/' + _pkg + '(-' + _vr + ')?)'
-_cpv = '(' + _cp + '-' + _vr + ')'
-_pv = '(?P<pn>' + _pkg + '(?P<pn_inval>-' + _vr + ')?)' + '-(?P<ver>' + _v + ')(-r(?P<rev>' + _rev + '))?'
+_cp = {
+	"dots_disallowed_in_PN": '(' + _cat + '/' + _pkg['dots_disallowed_in_PN'] + '(-' + _vr + ')?)',
+	"dots_allowed_in_PN":    '(' + _cat + '/' + _pkg['dots_allowed_in_PN']    + '(-' + _vr + ')?)',
+}
+_cpv = {
+	"dots_disallowed_in_PN": '(' + _cp['dots_disallowed_in_PN'] + '-' + _vr + ')',
+	"dots_allowed_in_PN":    '(' + _cp['dots_allowed_in_PN']    + '-' + _vr + ')',
+}
+_pv = {
+	"dots_disallowed_in_PN": '(?P<pn>' + _pkg['dots_disallowed_in_PN'] + '(?P<pn_inval>-' + _vr + ')?)' + '-(?P<ver>' + _v + ')(-r(?P<rev>' + _rev + '))?',
+	"dots_allowed_in_PN":    '(?P<pn>' + _pkg['dots_allowed_in_PN']    + '(?P<pn_inval>-' + _vr + ')?)' + '-(?P<ver>' + _v + ')(-r(?P<rev>' + _rev + '))?',
+}
 
 ver_regexp = re.compile("^" + _vr + "$")
 suffix_regexp = re.compile("^(alpha|beta|rc|pre|p)(\\d*)$")
@@ -49,7 +73,6 @@ def ververify(myver, silent=1):
 			print(_("!!! syntax error in version: %s") % myver)
 		return 0
 
-vercmp_cache = {}
 def vercmp(ver1, ver2, silent=1):
 	"""
 	Compare two versions
@@ -76,11 +99,7 @@ def vercmp(ver1, ver2, silent=1):
 
 	if ver1 == ver2:
 		return 0
-	mykey=ver1+":"+ver2
-	try:
-		return vercmp_cache[mykey]
-	except KeyError:
-		pass
+
 	match1 = ver_regexp.match(ver1)
 	match2 = ver_regexp.match(ver2)
 	
@@ -96,10 +115,8 @@ def vercmp(ver1, ver2, silent=1):
 
 	# shortcut for cvs ebuilds (new style)
 	if match1.group(1) and not match2.group(1):
-		vercmp_cache[mykey] = 1
 		return 1
 	elif match2.group(1) and not match1.group(1):
-		vercmp_cache[mykey] = -1
 		return -1
 	
 	# building lists of the version parts before the suffix
@@ -153,16 +170,13 @@ def vercmp(ver1, ver2, silent=1):
 
 	for i in range(0, max(len(list1), len(list2))):
 		if len(list1) <= i:
-			vercmp_cache[mykey] = -1
 			return -1
 		elif len(list2) <= i:
-			vercmp_cache[mykey] = 1
 			return 1
 		elif list1[i] != list2[i]:
 			a = list1[i]
 			b = list2[i]
 			rval = (a > b) - (a < b)
-			vercmp_cache[mykey] = rval
 			return rval
 
 	# main version is equal, so now compare the _suffix part
@@ -183,7 +197,6 @@ def vercmp(ver1, ver2, silent=1):
 			a = suffix_value[s1[0]]
 			b = suffix_value[s2[0]]
 			rval = (a > b) - (a < b)
-			vercmp_cache[mykey] = rval
 			return rval
 		if s1[1] != s2[1]:
 			# it's possible that the s(1|2)[1] == ''
@@ -198,7 +211,6 @@ def vercmp(ver1, ver2, silent=1):
 				r2 = 0
 			rval = (r1 > r2) - (r1 < r2)
 			if rval:
-				vercmp_cache[mykey] = rval
 				return rval
 
 	# the suffix part is equal to, so finally check the revision
@@ -211,7 +223,6 @@ def vercmp(ver1, ver2, silent=1):
 	else:
 		r2 = 0
 	rval = (r1 > r2) - (r1 < r2)
-	vercmp_cache[mykey] = rval
 	return rval
 	
 def pkgcmp(pkg1, pkg2):
@@ -240,16 +251,25 @@ def pkgcmp(pkg1, pkg2):
 		return None
 	return vercmp("-".join(pkg1[1:]), "-".join(pkg2[1:]))
 
-_pv_re = re.compile('^' + _pv + '$', re.VERBOSE)
+_pv_re = {
+	"dots_disallowed_in_PN": re.compile('^' + _pv['dots_disallowed_in_PN'] + '$', re.VERBOSE),
+	"dots_allowed_in_PN":    re.compile('^' + _pv['dots_allowed_in_PN']    + '$', re.VERBOSE),
+}
 
-def _pkgsplit(mypkg):
+def _get_pv_re(eapi):
+	if eapi is None or eapi_allows_dots_in_PN(eapi):
+		return _pv_re["dots_allowed_in_PN"]
+	else:
+		return _pv_re["dots_disallowed_in_PN"]
+
+def _pkgsplit(mypkg, eapi=None):
 	"""
 	@param mypkg: pv
 	@return:
 	1. None if input is invalid.
 	2. (pn, ver, rev) if input is pv
 	"""
-	m = _pv_re.match(mypkg)
+	m = _get_pv_re(eapi).match(mypkg)
 	if m is None:
 		return None
 
@@ -266,8 +286,8 @@ def _pkgsplit(mypkg):
 
 _cat_re = re.compile('^%s$' % _cat)
 _missing_cat = 'null'
-catcache={}
-def catpkgsplit(mydata,silent=1):
+
+def catpkgsplit(mydata, silent=1, eapi=None):
 	"""
 	Takes a Category/Package-Version-Rev and returns a list of each.
 	
@@ -281,28 +301,65 @@ def catpkgsplit(mydata,silent=1):
 	2.  If cat is not specified in mydata, cat will be "null"
 	3.  If rev does not exist it will be '-r0'
 	"""
-
 	try:
-		return catcache[mydata]
-	except KeyError:
+		return mydata.cpv_split
+	except AttributeError:
 		pass
 	mysplit = mydata.split('/', 1)
 	p_split=None
 	if len(mysplit)==1:
 		cat = _missing_cat
-		p_split = _pkgsplit(mydata)
+		p_split = _pkgsplit(mydata, eapi=eapi)
 	elif len(mysplit)==2:
 		cat = mysplit[0]
 		if _cat_re.match(cat) is not None:
-			p_split = _pkgsplit(mysplit[1])
+			p_split = _pkgsplit(mysplit[1], eapi=eapi)
 	if not p_split:
-		catcache[mydata]=None
 		return None
 	retval = (cat, p_split[0], p_split[1], p_split[2])
-	catcache[mydata]=retval
 	return retval
 
-def pkgsplit(mypkg, silent=1):
+class _pkg_str(_unicode):
+	"""
+	This class represents a cpv. It inherits from str (unicode in python2) and
+	has attributes that cache results for use by functions like catpkgsplit and
+	cpv_getkey which are called frequently (especially in match_from_list).
+	Instances are typically created in dbapi.cp_list() or the Atom constructor,
+	and propagate from there. Generally, code that pickles these objects will
+	manually convert them to a plain unicode object first.
+	"""
+
+	def __new__(cls, cpv, slot=None, repo=None, eapi=None):
+		return _unicode.__new__(cls, cpv)
+
+	def __init__(self, cpv, slot=None, repo=None, eapi=None):
+		if not isinstance(cpv, _unicode):
+			# Avoid TypeError from _unicode.__init__ with PyPy.
+			cpv = _unicode_decode(cpv)
+		_unicode.__init__(cpv)
+		self.__dict__['cpv_split'] = catpkgsplit(cpv, eapi=eapi)
+		if self.cpv_split is None:
+			raise InvalidData(cpv)
+		self.__dict__['cp'] = self.cpv_split[0] + '/' + self.cpv_split[1]
+		if self.cpv_split[-1] == "r0" and cpv[-3:] != "-r0":
+			self.__dict__['version'] = "-".join(self.cpv_split[2:-1])
+		else:
+			self.__dict__['version'] = "-".join(self.cpv_split[2:])
+		# for match_from_list introspection
+		self.__dict__['cpv'] = self
+		if slot is not None:
+			self.__dict__['slot'] = slot
+		if repo is not None:
+			repo = _gen_valid_repo(repo)
+			if not repo:
+				repo = _unknown_repo
+			self.__dict__['repo'] = repo
+
+	def __setattr__(self, name, value):
+		raise AttributeError("_pkg_str instances are immutable",
+			self.__class__, name, value)
+
+def pkgsplit(mypkg, silent=1, eapi=None):
 	"""
 	@param mypkg: either a pv or cpv
 	@return:
@@ -310,7 +367,7 @@ def pkgsplit(mypkg, silent=1):
 	2. (pn, ver, rev) if input is pv
 	3. (cp, ver, rev) if input is a cpv
 	"""
-	catpsplit = catpkgsplit(mypkg)
+	catpsplit = catpkgsplit(mypkg, eapi=eapi)
 	if catpsplit is None:
 		return None
 	cat, pn, ver, rev = catpsplit
@@ -319,9 +376,13 @@ def pkgsplit(mypkg, silent=1):
 	else:
 		return (cat + '/' + pn, ver, rev)
 
-def cpv_getkey(mycpv):
+def cpv_getkey(mycpv, eapi=None):
 	"""Calls catpkgsplit on a cpv and returns only the cp."""
-	mysplit = catpkgsplit(mycpv)
+	try:
+		return mycpv.cp
+	except AttributeError:
+		pass
+	mysplit = catpkgsplit(mycpv, eapi=eapi)
 	if mysplit is not None:
 		return mysplit[0] + '/' + mysplit[1]
 
@@ -330,7 +391,7 @@ def cpv_getkey(mycpv):
 		DeprecationWarning, stacklevel=2)
 
 	myslash = mycpv.split("/", 1)
-	mysplit = _pkgsplit(myslash[-1])
+	mysplit = _pkgsplit(myslash[-1], eapi=eapi)
 	if mysplit is None:
 		return None
 	mylen = len(myslash)
@@ -339,14 +400,18 @@ def cpv_getkey(mycpv):
 	else:
 		return mysplit[0]
 
-def cpv_getversion(mycpv):
+def cpv_getversion(mycpv, eapi=None):
 	"""Returns the v (including revision) from an cpv."""
-	cp = cpv_getkey(mycpv)
+	try:
+		return mycpv.version
+	except AttributeError:
+		pass
+	cp = cpv_getkey(mycpv, eapi=eapi)
 	if cp is None:
 		return None
 	return mycpv[len(cp+"-"):]
 
-def cpv_sort_key():
+def cpv_sort_key(eapi=None):
 	"""
 	Create an object for sorting cpvs, to be used as the 'key' parameter
 	in places like list.sort() or sorted(). This calls catpkgsplit() once for
@@ -365,39 +430,55 @@ def cpv_sort_key():
 
 		split1 = split_cache.get(cpv1, False)
 		if split1 is False:
-			split1 = catpkgsplit(cpv1)
-			if split1 is not None:
-				split1 = (split1[:2], '-'.join(split1[2:]))
+			split1 = None
+			try:
+				split1 = cpv1.cpv
+			except AttributeError:
+				try:
+					split1 = _pkg_str(cpv1, eapi=eapi)
+				except InvalidData:
+					pass
 			split_cache[cpv1] = split1
 
 		split2 = split_cache.get(cpv2, False)
 		if split2 is False:
-			split2 = catpkgsplit(cpv2)
-			if split2 is not None:
-				split2 = (split2[:2], '-'.join(split2[2:]))
+			split2 = None
+			try:
+				split2 = cpv2.cpv
+			except AttributeError:
+				try:
+					split2 = _pkg_str(cpv2, eapi=eapi)
+				except InvalidData:
+					pass
 			split_cache[cpv2] = split2
 
-		if split1 is None or split2 is None or split1[0] != split2[0]:
+		if split1 is None or split2 is None or split1.cp != split2.cp:
 			return (cpv1 > cpv2) - (cpv1 < cpv2)
 
-		return vercmp(split1[1], split2[1])
+		return vercmp(split1.version, split2.version)
 
 	return cmp_sort_key(cmp_cpv)
 
 def catsplit(mydep):
         return mydep.split("/", 1)
 
-def best(mymatches):
+def best(mymatches, eapi=None):
 	"""Accepts None arguments; assumes matches are valid."""
 	if not mymatches:
 		return ""
 	if len(mymatches) == 1:
 		return mymatches[0]
 	bestmatch = mymatches[0]
-	p2 = catpkgsplit(bestmatch)[1:]
+	try:
+		v2 = bestmatch.version
+	except AttributeError:
+		v2 = _pkg_str(bestmatch, eapi=eapi).version
 	for x in mymatches[1:]:
-		p1 = catpkgsplit(x)[1:]
-		if pkgcmp(p1, p2) > 0:
+		try:
+			v1 = x.version
+		except AttributeError:
+			v1 = _pkg_str(x, eapi=eapi).version
+		if vercmp(v1, v2) > 0:
 			bestmatch = x
-			p2 = catpkgsplit(bestmatch)[1:]
+			v2 = v1
 	return bestmatch
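
Taken together, these hunks drop the module-level vercmp_cache and catcache
in favor of per-instance caching on the new _pkg_str class. A hedged sketch
of the resulting behavior (_pkg_str is internal, so this is illustration,
not a supported API):

    from portage.versions import _pkg_str, vercmp

    p = _pkg_str('app-misc/foo-1.2.3-r1')
    p.cp                           # 'app-misc/foo', split once at construction
    p.version                      # '1.2.3-r1'
    vercmp('1.2.3-r1', '1.2.3')    # positive: revision -r1 sorts later
    vercmp('1.0_alpha', '1.0')     # negative: _alpha sorts before the release

Since __setattr__ raises AttributeError, instances are immutable, which is
presumably what makes them safe to pass around as pre-split cache carriers
in match_from_list.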

diff --git a/portage_with_autodep/pym/portage/versions.pyo b/portage_with_autodep/pym/portage/versions.pyo
new file mode 100644
index 0000000..eae2743
Binary files /dev/null and b/portage_with_autodep/pym/portage/versions.pyo differ

diff --git a/portage_with_autodep/pym/portage/xml/__init__.pyo b/portage_with_autodep/pym/portage/xml/__init__.pyo
new file mode 100644
index 0000000..15f1b77
Binary files /dev/null and b/portage_with_autodep/pym/portage/xml/__init__.pyo differ

diff --git a/portage_with_autodep/pym/portage/xml/metadata.py b/portage_with_autodep/pym/portage/xml/metadata.py
index 7acc1f3..25f801a 100644
--- a/portage_with_autodep/pym/portage/xml/metadata.py
+++ b/portage_with_autodep/pym/portage/xml/metadata.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 """Provides an easy-to-use python interface to Gentoo's metadata.xml file.
@@ -30,16 +30,40 @@
 
 __all__ = ('MetaDataXML',)
 
-try:
-	import xml.etree.cElementTree as etree
-except ImportError:
+import sys
+
+if sys.hexversion < 0x2070000 or \
+	(sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000):
+	# Our _MetadataTreeBuilder usage is incompatible with
+	# cElementTree in Python 2.6, 3.0, and 3.1:
+	#  File "/usr/lib/python2.6/xml/etree/ElementTree.py", line 644, in findall
+	#    assert self._root is not None
 	import xml.etree.ElementTree as etree
+else:
+	try:
+		import xml.etree.cElementTree as etree
+	except (ImportError, SystemError):
+		import xml.etree.ElementTree as etree
+
+try:
+	from xml.parsers.expat import ExpatError
+except (ImportError, SystemError):
+	ExpatError = SyntaxError
 
 import re
+import xml.etree.ElementTree
 import portage
-from portage import os
+from portage import os, _unicode_decode
 from portage.util import unique_everseen
 
+class _MetadataTreeBuilder(xml.etree.ElementTree.TreeBuilder):
+	"""
+	Implements doctype() as required to avoid deprecation warnings with
+	Python >=2.7.
+	"""
+	def doctype(self, name, pubid, system):
+		pass
+
 class _Maintainer(object):
 	"""An object for representing one maintainer.
 
@@ -63,8 +87,7 @@ class _Maintainer(object):
 		self.description = None
 		self.restrict = node.get('restrict')
 		self.status = node.get('status')
-		maint_attrs = node.getchildren()
-		for attr in maint_attrs:
+		for attr in node:
 			setattr(self, attr.tag, attr.text)
 
 	def __repr__(self):
@@ -174,9 +197,12 @@ class MetaDataXML(object):
 		self._xml_tree = None
 
 		try:
-			self._xml_tree = etree.parse(metadata_xml_path)
+			self._xml_tree = etree.parse(metadata_xml_path,
+				parser=etree.XMLParser(target=_MetadataTreeBuilder()))
 		except ImportError:
 			pass
+		except ExpatError as e:
+			raise SyntaxError(_unicode_decode("%s") % (e,))
 
 		if isinstance(herds, etree.ElementTree):
 			herds_etree = herds
@@ -209,7 +235,8 @@ class MetaDataXML(object):
 
 		if self._herdstree is None:
 			try:
-				self._herdstree = etree.parse(self._herds_path)
+				self._herdstree = etree.parse(self._herds_path,
+					parser=etree.XMLParser(target=_MetadataTreeBuilder()))
 			except (ImportError, IOError, SyntaxError):
 				return None
 
@@ -217,7 +244,13 @@ class MetaDataXML(object):
 		if herd in ('no-herd', 'maintainer-wanted', 'maintainer-needed'):
 			return None
 
-		for node in self._herdstree.getiterator('herd'):
+		try:
+			# Python 2.7 or >=3.2
+			iterate = self._herdstree.iter
+		except AttributeError:
+			iterate = self._herdstree.getiterator
+
+		for node in iterate('herd'):
 			if node.findtext('name') == herd:
 				return node.findtext('email')
 
@@ -292,8 +325,13 @@ class MetaDataXML(object):
 			if self._xml_tree is None:
 				self._useflags = tuple()
 			else:
+				try:
+					# Python 2.7 or >=3.2
+					iterate = self._xml_tree.iter
+				except AttributeError:
+					iterate = self._xml_tree.getiterator
 				self._useflags = tuple(_Useflag(node) \
-					for node in self._xml_tree.getiterator('flag'))
+					for node in iterate('flag'))
 
 		return self._useflags
 

diff --git a/portage_with_autodep/pym/portage/xml/metadata.pyo b/portage_with_autodep/pym/portage/xml/metadata.pyo
new file mode 100644
index 0000000..0103456
Binary files /dev/null and b/portage_with_autodep/pym/portage/xml/metadata.pyo differ

diff --git a/portage_with_autodep/pym/portage/xpak.py b/portage_with_autodep/pym/portage/xpak.py
index 7487d67..3262326 100644
--- a/portage_with_autodep/pym/portage/xpak.py
+++ b/portage_with_autodep/pym/portage/xpak.py
@@ -1,4 +1,4 @@
-# Copyright 2001-2010 Gentoo Foundation
+# Copyright 2001-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 
@@ -22,11 +22,11 @@ __all__ = ['addtolist', 'decodeint', 'encodeint', 'getboth',
 
 import array
 import errno
-import shutil
 import sys
 
 import portage
 from portage import os
+from portage import shutil
 from portage import normalize_path
 from portage import _encodings
 from portage import _unicode_decode
@@ -62,11 +62,15 @@ def encodeint(myint):
 	"""Takes a 4 byte integer and converts it into a string of 4 characters.
 	Returns the characters in a string."""
 	a = array.array('B')
-	a.append((myint >> 24 ) & 0xff)
-	a.append((myint >> 16 ) & 0xff)
-	a.append((myint >> 8 ) & 0xff)
+	a.append((myint >> 24) & 0xff)
+	a.append((myint >> 16) & 0xff)
+	a.append((myint >>  8) & 0xff)
 	a.append(myint & 0xff)
-	return a.tostring()
+	try:
+		# Python >= 3.2
+		return a.tobytes()
+	except AttributeError:
+		return a.tostring()
 
 def decodeint(mystring):
 	"""Takes a 4 byte string and converts it into a 4 byte integer.
@@ -80,12 +84,12 @@ def decodeint(mystring):
 	myint += mystring[0] << 24
 	return myint
 
-def xpak(rootdir,outfile=None):
-	"""(rootdir,outfile) -- creates an xpak segment of the directory 'rootdir'
+def xpak(rootdir, outfile=None):
+	"""(rootdir, outfile) -- creates an xpak segment of the directory 'rootdir'
 	and under the name 'outfile' if it is specified. Otherwise it returns the
 	xpak segment."""
 
-	mylist=[]
+	mylist = []
 
 	addtolist(mylist, rootdir)
 	mylist.sort()
@@ -95,7 +99,8 @@ def xpak(rootdir,outfile=None):
 			# CONTENTS is generated during the merge process.
 			continue
 		x = _unicode_encode(x, encoding=_encodings['fs'], errors='strict')
-		mydata[x] = open(os.path.join(rootdir, x), 'rb').read()
+		with open(os.path.join(rootdir, x), 'rb') as f:
+			mydata[x] = f.read()
 
 	xpak_segment = xpak_mem(mydata)
 	if outfile:
@@ -107,7 +112,7 @@ def xpak(rootdir,outfile=None):
 		return xpak_segment
 
 def xpak_mem(mydata):
-	"""Create an xpack segement from a map object."""
+	"""Create an xpack segment from a map object."""
 
 	mydata_encoded = {}
 	for k, v in mydata.items():
@@ -120,21 +125,21 @@ def xpak_mem(mydata):
 	del mydata_encoded
 
 	indexglob = b''
-	indexpos=0
+	indexpos = 0
 	dataglob = b''
-	datapos=0
+	datapos = 0
 	for x, newglob in mydata.items():
-		mydatasize=len(newglob)
-		indexglob=indexglob+encodeint(len(x))+x+encodeint(datapos)+encodeint(mydatasize)
-		indexpos=indexpos+4+len(x)+4+4
-		dataglob=dataglob+newglob
-		datapos=datapos+mydatasize
+		mydatasize = len(newglob)
+		indexglob = indexglob + encodeint(len(x)) + x + encodeint(datapos) + encodeint(mydatasize)
+		indexpos = indexpos + 4 + len(x) + 4 + 4
+		dataglob = dataglob + newglob
+		datapos = datapos + mydatasize
 	return b'XPAKPACK' \
-	+ encodeint(len(indexglob)) \
-	+ encodeint(len(dataglob)) \
-	+ indexglob \
-	+ dataglob \
-	+ b'XPAKSTOP'
+		+ encodeint(len(indexglob)) \
+		+ encodeint(len(dataglob)) \
+		+ indexglob \
+		+ dataglob \
+		+ b'XPAKSTOP'
 
 def xsplit(infile):
 	"""(infile) -- Splits the infile into two files.
@@ -144,7 +149,7 @@ def xsplit(infile):
 		encoding=_encodings['fs'], errors='strict')
 	myfile = open(_unicode_encode(infile,
 		encoding=_encodings['fs'], errors='strict'), 'rb')
-	mydat=myfile.read()
+	mydat = myfile.read()
 	myfile.close()
 	
 	splits = xsplit_mem(mydat)
@@ -166,35 +171,35 @@ def xsplit_mem(mydat):
 		return None
 	if mydat[-8:] != b'XPAKSTOP':
 		return None
-	indexsize=decodeint(mydat[8:12])
-	return (mydat[16:indexsize+16], mydat[indexsize+16:-8])
+	indexsize = decodeint(mydat[8:12])
+	return (mydat[16:indexsize + 16], mydat[indexsize + 16:-8])
 
 def getindex(infile):
 	"""(infile) -- grabs the index segment from the infile and returns it."""
 	myfile = open(_unicode_encode(infile,
 		encoding=_encodings['fs'], errors='strict'), 'rb')
-	myheader=myfile.read(16)
+	myheader = myfile.read(16)
 	if myheader[0:8] != b'XPAKPACK':
 		myfile.close()
 		return
-	indexsize=decodeint(myheader[8:12])
-	myindex=myfile.read(indexsize)
+	indexsize = decodeint(myheader[8:12])
+	myindex = myfile.read(indexsize)
 	myfile.close()
 	return myindex
 
 def getboth(infile):
 	"""(infile) -- grabs the index and data segments from the infile.
-	Returns an array [indexSegment,dataSegment]"""
+	Returns an array [indexSegment, dataSegment]"""
 	myfile = open(_unicode_encode(infile,
 		encoding=_encodings['fs'], errors='strict'), 'rb')
-	myheader=myfile.read(16)
+	myheader = myfile.read(16)
 	if myheader[0:8] != b'XPAKPACK':
 		myfile.close()
 		return
-	indexsize=decodeint(myheader[8:12])
-	datasize=decodeint(myheader[12:16])
-	myindex=myfile.read(indexsize)
-	mydata=myfile.read(datasize)
+	indexsize = decodeint(myheader[8:12])
+	datasize = decodeint(myheader[12:16])
+	myindex = myfile.read(indexsize)
+	mydata = myfile.read(datasize)
 	myfile.close()
 	return myindex, mydata
 
@@ -205,83 +210,82 @@ def listindex(myindex):
 
 def getindex_mem(myindex):
 	"""Returns the filenames listed in the indexglob passed in."""
-	myindexlen=len(myindex)
-	startpos=0
-	myret=[]
-	while ((startpos+8)<myindexlen):
-		mytestlen=decodeint(myindex[startpos:startpos+4])
-		myret=myret+[myindex[startpos+4:startpos+4+mytestlen]]
-		startpos=startpos+mytestlen+12
+	myindexlen = len(myindex)
+	startpos = 0
+	myret = []
+	while ((startpos + 8) < myindexlen):
+		mytestlen = decodeint(myindex[startpos:startpos + 4])
+		myret = myret + [myindex[startpos + 4:startpos + 4 + mytestlen]]
+		startpos = startpos + mytestlen + 12
 	return myret
 
-def searchindex(myindex,myitem):
-	"""(index,item) -- Finds the offset and length of the file 'item' in the
+def searchindex(myindex, myitem):
+	"""(index, item) -- Finds the offset and length of the file 'item' in the
 	datasegment via the index 'index' provided."""
 	myitem = _unicode_encode(myitem,
 		encoding=_encodings['repo.content'], errors='backslashreplace')
-	mylen=len(myitem)
-	myindexlen=len(myindex)
-	startpos=0
-	while ((startpos+8)<myindexlen):
-		mytestlen=decodeint(myindex[startpos:startpos+4])
-		if mytestlen==mylen:
-			if myitem==myindex[startpos+4:startpos+4+mytestlen]:
+	mylen = len(myitem)
+	myindexlen = len(myindex)
+	startpos = 0
+	while ((startpos + 8) < myindexlen):
+		mytestlen = decodeint(myindex[startpos:startpos + 4])
+		if mytestlen == mylen:
+			if myitem == myindex[startpos + 4:startpos + 4 + mytestlen]:
 				#found
-				datapos=decodeint(myindex[startpos+4+mytestlen:startpos+8+mytestlen]);
-				datalen=decodeint(myindex[startpos+8+mytestlen:startpos+12+mytestlen]);
+				datapos = decodeint(myindex[startpos + 4 + mytestlen:startpos + 8 + mytestlen])
+				datalen = decodeint(myindex[startpos + 8 + mytestlen:startpos + 12 + mytestlen])
 				return datapos, datalen
-		startpos=startpos+mytestlen+12
+		startpos = startpos + mytestlen + 12
 		
-def getitem(myid,myitem):
-	myindex=myid[0]
-	mydata=myid[1]
-	myloc=searchindex(myindex,myitem)
+def getitem(myid, myitem):
+	myindex = myid[0]
+	mydata = myid[1]
+	myloc = searchindex(myindex, myitem)
 	if not myloc:
 		return None
-	return mydata[myloc[0]:myloc[0]+myloc[1]]
-
-def xpand(myid,mydest):
-	myindex=myid[0]
-	mydata=myid[1]
-	try:
-		origdir=os.getcwd()
-	except SystemExit as e:
-		raise
-	except:
-		os.chdir("/")
-		origdir="/"
-	os.chdir(mydest)
-	myindexlen=len(myindex)
-	startpos=0
-	while ((startpos+8)<myindexlen):
-		namelen=decodeint(myindex[startpos:startpos+4])
-		datapos=decodeint(myindex[startpos+4+namelen:startpos+8+namelen]);
-		datalen=decodeint(myindex[startpos+8+namelen:startpos+12+namelen]);
-		myname=myindex[startpos+4:startpos+4+namelen]
-		dirname=os.path.dirname(myname)
+	return mydata[myloc[0]:myloc[0] + myloc[1]]
+
+def xpand(myid, mydest):
+	mydest = normalize_path(mydest) + os.sep
+	myindex = myid[0]
+	mydata = myid[1]
+	myindexlen = len(myindex)
+	startpos = 0
+	while ((startpos + 8) < myindexlen):
+		namelen = decodeint(myindex[startpos:startpos + 4])
+		datapos = decodeint(myindex[startpos + 4 + namelen:startpos + 8 + namelen])
+		datalen = decodeint(myindex[startpos + 8 + namelen:startpos + 12 + namelen])
+		myname = myindex[startpos + 4:startpos + 4 + namelen]
+		myname = _unicode_decode(myname,
+			encoding=_encodings['repo.content'], errors='replace')
+		filename = os.path.join(mydest, myname.lstrip(os.sep))
+		filename = normalize_path(filename)
+		if not filename.startswith(mydest):
+			# myname contains invalid ../ component(s)
+			continue
+		dirname = os.path.dirname(filename)
 		if dirname:
 			if not os.path.exists(dirname):
 				os.makedirs(dirname)
-		mydat = open(_unicode_encode(myname,
+		mydat = open(_unicode_encode(filename,
 			encoding=_encodings['fs'], errors='strict'), 'wb')
-		mydat.write(mydata[datapos:datapos+datalen])
+		mydat.write(mydata[datapos:datapos + datalen])
 		mydat.close()
-		startpos=startpos+namelen+12
-	os.chdir(origdir)
+		startpos = startpos + namelen + 12
 
 class tbz2(object):
-	def __init__(self,myfile):
-		self.file=myfile
-		self.filestat=None
+	def __init__(self, myfile):
+		self.file = myfile
+		self.filestat = None
 		self.index = b''
-		self.infosize=0
-		self.xpaksize=0
-		self.indexsize=None
-		self.datasize=None
-		self.indexpos=None
-		self.datapos=None
-
-	def decompose(self,datadir,cleanup=1):
+		self.infosize = 0
+		self.xpaksize = 0
+		self.indexsize = None
+		self.datasize = None
+		self.indexpos = None
+		self.datapos = None
+
+	def decompose(self, datadir, cleanup=1):
 		"""Alias for unpackinfo() --- Complement to recompose() but optionally
 		deletes the destination directory. Extracts the xpak from the tbz2 into
 		the directory provided. Raises IOError if scan() fails.
@@ -293,9 +297,9 @@ class tbz2(object):
 		if not os.path.exists(datadir):
 			os.makedirs(datadir)
 		return self.unpackinfo(datadir)
-	def compose(self,datadir,cleanup=0):
+	def compose(self, datadir, cleanup=0):
 		"""Alias for recompose()."""
-		return self.recompose(datadir,cleanup)
+		return self.recompose(datadir, cleanup)
 
 	def recompose(self, datadir, cleanup=0, break_hardlinks=True):
 		"""Creates an xpak segment from the datadir provided, truncates the tbz2
@@ -333,9 +337,9 @@ class tbz2(object):
 			encoding=_encodings['fs'], errors='strict'), 'ab+')
 		if not myfile:
 			raise IOError
-		myfile.seek(-self.xpaksize,2) # 0,2 or -0,2 just mean EOF.
+		myfile.seek(-self.xpaksize, 2) # 0,2 or -0,2 just mean EOF.
 		myfile.truncate()
-		myfile.write(xpdata+encodeint(len(xpdata)) + b'STOP')
+		myfile.write(xpdata + encodeint(len(xpdata)) + b'STOP')
 		myfile.flush()
 		myfile.close()
 		return 1
@@ -356,47 +360,47 @@ class tbz2(object):
 	def scan(self):
 		"""Scans the tbz2 to locate the xpak segment and setup internal values.
 		This function is called by relevant functions already."""
+		a = None
 		try:
-			mystat=os.stat(self.file)
+			mystat = os.stat(self.file)
 			if self.filestat:
-				changed=0
+				changed = 0
 				if mystat.st_size != self.filestat.st_size \
 					or mystat.st_mtime != self.filestat.st_mtime \
 					or mystat.st_ctime != self.filestat.st_ctime:
 					changed = True
 				if not changed:
 					return 1
-			self.filestat=mystat
+			self.filestat = mystat
 			a = open(_unicode_encode(self.file,
 				encoding=_encodings['fs'], errors='strict'), 'rb')
-			a.seek(-16,2)
-			trailer=a.read()
-			self.infosize=0
-			self.xpaksize=0
+			a.seek(-16, 2)
+			trailer = a.read()
+			self.infosize = 0
+			self.xpaksize = 0
 			if trailer[-4:] != b'STOP':
-				a.close()
 				return 0
 			if trailer[0:8] != b'XPAKSTOP':
-				a.close()
 				return 0
-			self.infosize=decodeint(trailer[8:12])
-			self.xpaksize=self.infosize+8
-			a.seek(-(self.xpaksize),2)
-			header=a.read(16)
+			self.infosize = decodeint(trailer[8:12])
+			self.xpaksize = self.infosize + 8
+			a.seek(-(self.xpaksize), 2)
+			header = a.read(16)
 			if header[0:8] != b'XPAKPACK':
-				a.close()
 				return 0
-			self.indexsize=decodeint(header[8:12])
-			self.datasize=decodeint(header[12:16])
-			self.indexpos=a.tell()
-			self.index=a.read(self.indexsize)
-			self.datapos=a.tell()
-			a.close()
+			self.indexsize = decodeint(header[8:12])
+			self.datasize = decodeint(header[12:16])
+			self.indexpos = a.tell()
+			self.index = a.read(self.indexsize)
+			self.datapos = a.tell()
 			return 2
-		except SystemExit as e:
+		except SystemExit:
 			raise
 		except:
 			return 0
+		finally:
+			if a is not None:
+				a.close()
 
 	def filelist(self):
 		"""Return an array of each file listed in the index."""
@@ -404,63 +408,60 @@ class tbz2(object):
 			return None
 		return getindex_mem(self.index)
 
-	def getfile(self,myfile,mydefault=None):
+	def getfile(self, myfile, mydefault=None):
 		"""Finds 'myfile' in the data segment and returns it."""
 		if not self.scan():
 			return None
-		myresult=searchindex(self.index,myfile)
+		myresult = searchindex(self.index, myfile)
 		if not myresult:
 			return mydefault
 		a = open(_unicode_encode(self.file,
 			encoding=_encodings['fs'], errors='strict'), 'rb')
-		a.seek(self.datapos+myresult[0],0)
-		myreturn=a.read(myresult[1])
+		a.seek(self.datapos + myresult[0], 0)
+		myreturn = a.read(myresult[1])
 		a.close()
 		return myreturn
 
-	def getelements(self,myfile):
+	def getelements(self, myfile):
 		"""A split/array representation of tbz2.getfile()"""
-		mydat=self.getfile(myfile)
+		mydat = self.getfile(myfile)
 		if not mydat:
 			return []
 		return mydat.split()
 
-	def unpackinfo(self,mydest):
+	def unpackinfo(self, mydest):
 		"""Unpacks all the files from the dataSegment into 'mydest'."""
 		if not self.scan():
 			return 0
-		try:
-			origdir=os.getcwd()
-		except SystemExit as e:
-			raise
-		except:
-			os.chdir("/")
-			origdir="/"
+		mydest = normalize_path(mydest) + os.sep
 		a = open(_unicode_encode(self.file,
 			encoding=_encodings['fs'], errors='strict'), 'rb')
 		if not os.path.exists(mydest):
 			os.makedirs(mydest)
-		os.chdir(mydest)
-		startpos=0
-		while ((startpos+8)<self.indexsize):
-			namelen=decodeint(self.index[startpos:startpos+4])
-			datapos=decodeint(self.index[startpos+4+namelen:startpos+8+namelen]);
-			datalen=decodeint(self.index[startpos+8+namelen:startpos+12+namelen]);
-			myname=self.index[startpos+4:startpos+4+namelen]
+		startpos = 0
+		while ((startpos + 8) < self.indexsize):
+			namelen = decodeint(self.index[startpos:startpos + 4])
+			datapos = decodeint(self.index[startpos + 4 + namelen:startpos + 8 + namelen])
+			datalen = decodeint(self.index[startpos + 8 + namelen:startpos + 12 + namelen])
+			myname = self.index[startpos + 4:startpos + 4 + namelen]
 			myname = _unicode_decode(myname,
 				encoding=_encodings['repo.content'], errors='replace')
-			dirname=os.path.dirname(myname)
+			filename = os.path.join(mydest, myname.lstrip(os.sep))
+			filename = normalize_path(filename)
+			if not filename.startswith(mydest):
+				# myname contains invalid ../ component(s)
+				continue
+			dirname = os.path.dirname(filename)
 			if dirname:
 				if not os.path.exists(dirname):
 					os.makedirs(dirname)
-			mydat = open(_unicode_encode(myname,
+			mydat = open(_unicode_encode(filename,
 				encoding=_encodings['fs'], errors='strict'), 'wb')
-			a.seek(self.datapos+datapos)
+			a.seek(self.datapos + datapos)
 			mydat.write(a.read(datalen))
 			mydat.close()
-			startpos=startpos+namelen+12
+			startpos = startpos + namelen + 12
 		a.close()
-		os.chdir(origdir)
 		return 1
 
 	def get_data(self):
@@ -470,28 +471,27 @@ class tbz2(object):
 		a = open(_unicode_encode(self.file,
 			encoding=_encodings['fs'], errors='strict'), 'rb')
 		mydata = {}
-		startpos=0
-		while ((startpos+8)<self.indexsize):
-			namelen=decodeint(self.index[startpos:startpos+4])
-			datapos=decodeint(self.index[startpos+4+namelen:startpos+8+namelen]);
-			datalen=decodeint(self.index[startpos+8+namelen:startpos+12+namelen]);
-			myname=self.index[startpos+4:startpos+4+namelen]
-			a.seek(self.datapos+datapos)
+		startpos = 0
+		while ((startpos + 8) < self.indexsize):
+			namelen = decodeint(self.index[startpos:startpos + 4])
+			datapos = decodeint(self.index[startpos + 4 + namelen:startpos + 8 + namelen])
+			datalen = decodeint(self.index[startpos + 8 + namelen:startpos + 12 + namelen])
+			myname = self.index[startpos + 4:startpos + 4 + namelen]
+			a.seek(self.datapos + datapos)
 			mydata[myname] = a.read(datalen)
-			startpos=startpos+namelen+12
+			startpos = startpos + namelen + 12
 		a.close()
 		return mydata
 
 	def getboth(self):
-		"""Returns an array [indexSegment,dataSegment]"""
+		"""Returns an array [indexSegment, dataSegment]"""
 		if not self.scan():
 			return None
 
 		a = open(_unicode_encode(self.file,
 			encoding=_encodings['fs'], errors='strict'), 'rb')
 		a.seek(self.datapos)
-		mydata =a.read(self.datasize)
+		mydata = a.read(self.datasize)
 		a.close()
 
 		return self.index, mydata
-
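
For reference, the container format that xpak_mem(), xsplit_mem() and scan()
all agree on, plus a round trip through the integer helpers (import path per
the diff header):

    # Segment layout:
    #   b'XPAKPACK' + encodeint(len(index)) + encodeint(len(data))
    #               + index + data + b'XPAKSTOP'
    # Each index record: encodeint(len(name)) + name
    #                    + encodeint(offset) + encodeint(length)
    from portage.xpak import encodeint, decodeint

    n = 0x0A0B0C0D
    assert encodeint(n) == b'\x0a\x0b\x0c\x0d'   # big-endian, 4 bytes
    assert decodeint(encodeint(n)) == n          # lossless round trip

The other substantive change here is that xpand() and unpackinfo() now join
each archive name against a normalized destination and skip any entry that
escapes it, rather than chdir'ing into the destination: that closes a '../'
path traversal hole and removes the global cwd side effect.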

diff --git a/portage_with_autodep/pym/portage/xpak.pyo b/portage_with_autodep/pym/portage/xpak.pyo
new file mode 100644
index 0000000..361a709
Binary files /dev/null and b/portage_with_autodep/pym/portage/xpak.pyo differ

diff --git a/portage_with_autodep/pym/repoman/__init__.pyo b/portage_with_autodep/pym/repoman/__init__.pyo
new file mode 100644
index 0000000..b443f38
Binary files /dev/null and b/portage_with_autodep/pym/repoman/__init__.pyo differ

diff --git a/portage_with_autodep/pym/repoman/checks.py b/portage_with_autodep/pym/repoman/checks.py
index e7472df..77df603 100644
--- a/portage_with_autodep/pym/repoman/checks.py
+++ b/portage_with_autodep/pym/repoman/checks.py
@@ -1,5 +1,5 @@
 # repoman: Checks
-# Copyright 2007, 2011 Gentoo Foundation
+# Copyright 2007-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 """This module contains functions used in Repoman to ascertain the quality
@@ -8,6 +8,7 @@ and correctness of an ebuild."""
 import re
 import time
 import repoman.errors as errors
+import portage
 from portage.eapi import eapi_supports_prefix, eapi_has_implicit_rdepend, \
 	eapi_has_src_prepare_and_src_configure, eapi_has_dosed_dohard, \
 	eapi_exports_AA, eapi_exports_KV
@@ -77,6 +78,7 @@ class EbuildHeader(LineCheck):
 	# gentoo_license = re.compile(r'^# Distributed under the terms of the GNU General Public License v2$')
 	gentoo_license = '# Distributed under the terms of the GNU General Public License v2'
 	cvs_header = re.compile(r'^# \$Header: .*\$$')
+	ignore_comment = False
 
 	def new(self, pkg):
 		if pkg.mtime is None:
@@ -282,21 +284,35 @@ class EbuildUselessCdS(LineCheck):
 			self.check_next_line = True
 
 class EapiDefinition(LineCheck):
-	""" Check that EAPI is defined before inherits"""
+	"""
+	Check that EAPI assignment conforms to PMS section 7.3.1
+	(first non-comment, non-blank line).
+	"""
 	repoman_check_name = 'EAPI.definition'
-
-	eapi_re = re.compile(r'^EAPI=')
-	inherit_re = re.compile(r'^\s*inherit\s')
+	ignore_comment = True
+	_eapi_re = portage._pms_eapi_re
 
 	def new(self, pkg):
-		self.inherit_line = None
+		self._cached_eapi = pkg.metadata['EAPI']
+		self._parsed_eapi = None
+		self._eapi_line_num = None
 
 	def check(self, num, line):
-		if self.eapi_re.match(line) is not None:
-			if self.inherit_line is not None:
-				return errors.EAPI_DEFINED_AFTER_INHERIT
-		elif self.inherit_re.match(line) is not None:
-			self.inherit_line = line
+		if self._eapi_line_num is None and line.strip():
+			self._eapi_line_num = num + 1
+			m = self._eapi_re.match(line)
+			if m is not None:
+				self._parsed_eapi = m.group(2)
+
+	def end(self):
+		if self._parsed_eapi is None:
+			if self._cached_eapi != "0":
+				yield "valid EAPI assignment must occur on or before line: %s" % \
+					self._eapi_line_num
+		elif self._parsed_eapi != self._cached_eapi:
+			yield ("bash returned EAPI '%s' which does not match "
+				"assignment on line: %s") % \
+				(self._cached_eapi, self._eapi_line_num)
 
 class EbuildPatches(LineCheck):
 	"""Ensure ebuilds use bash arrays for PATCHES to ensure white space safety"""
@@ -341,7 +357,7 @@ class NoOffsetWithHelpers(LineCheck):
 	repoman_check_name = 'variable.usedwithhelpers'
 	# Ignore matches in quoted strings like this:
 	# elog "installed into ${ROOT}usr/share/php5/apc/."
-	re = re.compile(r'^[^#"\']*\b(dodir|dohard|exeinto|insinto|into)\s+"?\$\{?(D|ROOT|ED|EROOT|EPREFIX)\b.*')
+	re = re.compile(r'^[^#"\']*\b(docinto|docompress|dodir|dohard|exeinto|fowners|fperms|insinto|into)\s+"?\$\{?(D|ROOT|ED|EROOT|EPREFIX)\b.*')
 	error = errors.NO_OFFSET_WITH_HELPERS
 
 class ImplicitRuntimeDeps(LineCheck):
@@ -384,6 +400,7 @@ class InheritDeprecated(LineCheck):
 
 	# deprecated eclass : new eclass (False if no new eclass)
 	deprecated_classes = {
+		"bash-completion": "bash-completion-r1",
 		"gems": "ruby-fakegem",
 		"git": "git-2",
 		"mozconfig-2": "mozconfig-3",
@@ -646,13 +663,17 @@ class Eapi4GoneVars(LineCheck):
 
 class PortageInternal(LineCheck):
 	repoman_check_name = 'portage.internal'
-	re = re.compile(r'[^#]*\b(ecompress|ecompressdir|prepall|prepalldocs|preplib)\b')
+	ignore_comment = True
+	# Match when the command is preceded only by leading whitespace or a shell
+	# operator such as (, {, |, ||, or &&. This prevents false positives in
+	# things like elog messages, as reported in bug #413285.
+	re = re.compile(r'^(\s*|.*[|&{(]+\s*)\b(ecompress|ecompressdir|env-update|prepall|prepalldocs|preplib)\b')
 
 	def check(self, num, line):
 		"""Run the check on line and return error if there is one"""
 		m = self.re.match(line)
 		if m is not None:
-			return ("'%s'" % m.group(1)) + " called on line: %d"
+			return ("'%s'" % m.group(2)) + " called on line: %d"
 
 _constant_checks = tuple((c() for c in (
 	EbuildHeader, EbuildWhitespace, EbuildBlankLine, EbuildQuote,
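
The rewritten PortageInternal pattern deserves a demonstration: a helper now
matches only when it opens the line or follows a shell operator, so mentions
inside quoted output no longer trip the check (bug #413285). A self-contained
sketch of the same regex:

    import re

    portage_internal_re = re.compile(
        r'^(\s*|.*[|&{(]+\s*)\b'
        r'(ecompress|ecompressdir|env-update|prepall|prepalldocs|preplib)\b')

    assert portage_internal_re.match('\tprepalldocs')                 # command line
    assert portage_internal_re.match('cd /usr && ecompressdir share') # after &&
    assert portage_internal_re.match('elog "run prepalldocs later"') is None

This is also why the report switched to m.group(2): group(1) is now the
whitespace/operator prefix.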

diff --git a/portage_with_autodep/pym/repoman/checks.pyo b/portage_with_autodep/pym/repoman/checks.pyo
new file mode 100644
index 0000000..1db2838
Binary files /dev/null and b/portage_with_autodep/pym/repoman/checks.pyo differ

diff --git a/portage_with_autodep/pym/repoman/errors.pyo b/portage_with_autodep/pym/repoman/errors.pyo
new file mode 100644
index 0000000..b7c2b8d
Binary files /dev/null and b/portage_with_autodep/pym/repoman/errors.pyo differ

diff --git a/portage_with_autodep/pym/repoman/herdbase.py b/portage_with_autodep/pym/repoman/herdbase.py
index 64b59e6..91a32cb 100644
--- a/portage_with_autodep/pym/repoman/herdbase.py
+++ b/portage_with_autodep/pym/repoman/herdbase.py
@@ -1,13 +1,13 @@
 # -*- coding: utf-8 -*-
 # repoman: Herd database analysis
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2 or later
 
 import errno
 import xml.etree.ElementTree
 try:
 	from xml.parsers.expat import ExpatError
-except ImportError:
+except (ImportError, SystemError):
 	# This means that python is built without xml support.
 	# We tolerate global scope import failures for optional
 	# modules, so that ImportModulesTestCase can succeed (or
@@ -19,8 +19,6 @@ __all__ = [
 	"make_herd_base"
 ]
 
-_SPECIAL_HERDS = set(('no-herd',))
-
 def _make_email(nick_name):
 	if not nick_name.endswith('@gentoo.org'):
 		nick_name = nick_name + '@gentoo.org'
@@ -33,8 +31,6 @@ class HerdBase(object):
 		self.all_emails = all_emails
 
 	def known_herd(self, herd_name):
-		if herd_name in _SPECIAL_HERDS:
-			return True
 		return herd_name in self.herd_to_emails
 
 	def known_maintainer(self, nick_name):
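
With the _SPECIAL_HERDS shortcut removed, 'no-herd' is only known if the
herds file actually lists it. A rough sketch, with the constructor signature
inferred from the attributes shown:

    hb = HerdBase({'python': ['python@gentoo.org']},
                  set(['python@gentoo.org']))
    hb.known_herd('python')    # True
    hb.known_herd('no-herd')   # False now that the hardcoded set is gone
    _make_email('larry')       # 'larry@gentoo.org'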

diff --git a/portage_with_autodep/pym/repoman/herdbase.pyo b/portage_with_autodep/pym/repoman/herdbase.pyo
new file mode 100644
index 0000000..112152f
Binary files /dev/null and b/portage_with_autodep/pym/repoman/herdbase.pyo differ

diff --git a/portage_with_autodep/pym/repoman/utilities.py b/portage_with_autodep/pym/repoman/utilities.py
index 232739e..bee67aa 100644
--- a/portage_with_autodep/pym/repoman/utilities.py
+++ b/portage_with_autodep/pym/repoman/utilities.py
@@ -1,10 +1,12 @@
 # repoman: Utilities
-# Copyright 2007-2011 Gentoo Foundation
+# Copyright 2007-2012 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 """This module contains utility functions to help repoman find ebuilds to
 scan"""
 
+from __future__ import print_function
+
 __all__ = [
 	"detect_vcs_conflicts",
 	"editor_is_executable",
@@ -14,17 +16,30 @@ __all__ = [
 	"format_qa_output",
 	"get_commit_message_with_editor",
 	"get_commit_message_with_stdin",
+	"get_committer_name",
+	"have_ebuild_dir",
 	"have_profile_dir",
 	"parse_metadata_use",
 	"UnknownHerdsError",
-	"check_metadata"
+	"check_metadata",
+	"UpdateChangeLog"
 ]
 
 import errno
 import io
+from itertools import chain
 import logging
+import pwd
+import re
+import stat
 import sys
+import time
+import textwrap
+import difflib
+from tempfile import mkstemp
+
 from portage import os
+from portage import shutil
 from portage import subprocess_getstatusoutput
 from portage import _encodings
 from portage import _unicode_decode
@@ -60,7 +75,7 @@ def detect_vcs_conflicts(options, vcs):
 	if vcs == 'cvs':
 		logging.info("Performing a " + output.green("cvs -n up") + \
 			" with a little magic grep to check for updates.")
-		retval = subprocess_getstatusoutput("cvs -n up 2>&1 | " + \
+		retval = subprocess_getstatusoutput("cvs -n up 2>/dev/null | " + \
 			"egrep '^[^\?] .*' | " + \
 			"egrep -v '^. .*/digest-[^/]+|^cvs server: .* -- ignored$'")
 	if vcs == 'svn':
@@ -113,6 +128,33 @@ def have_profile_dir(path, maxdepth=3, filename="profiles.desc"):
 		path = normalize_path(path + "/..")
 		maxdepth -= 1
 
+def have_ebuild_dir(path, maxdepth=3):
+	""" 
+	Try to figure out if 'path' or a subdirectory contains one or more
+	ebuild files named appropriately for their parent directory.
+	"""
+	stack = [(normalize_path(path), 1)]
+	while stack:
+		path, depth = stack.pop()
+		basename = os.path.basename(path)
+		try:
+			listdir = os.listdir(path)
+		except OSError:
+			continue
+		for filename in listdir:
+			abs_filename = os.path.join(path, filename)
+			try:
+				st = os.stat(abs_filename)
+			except OSError:
+				continue
+			if stat.S_ISDIR(st.st_mode):
+				if depth < maxdepth:
+					stack.append((abs_filename, depth + 1))
+			elif stat.S_ISREG(st.st_mode):
+				if filename.endswith(".ebuild") and \
+					filename.startswith(basename + "-"):
+					return os.path.dirname(os.path.dirname(path))
+
 def parse_metadata_use(xml_tree):
 	"""
 	Records are wrapped in XML as per GLEP 56
@@ -285,7 +327,7 @@ def editor_is_executable(editor):
 	@param editor: An EDITOR value from the environment.
 	@type: string
 	@rtype: bool
-	@returns: True if an executable is found, False otherwise.
+	@return: True if an executable is found, False otherwise.
 	"""
 	editor_split = util.shlex_split(editor)
 	if not editor_split:
@@ -306,9 +348,8 @@ def get_commit_message_with_editor(editor, message=None):
 	@param message: An iterable of lines to show in the editor.
 	@type: iterable
 	@rtype: string or None
-	@returns: A string on success or None if an error occurs.
+	@return: A string on success or None if an error occurs.
 	"""
-	from tempfile import mkstemp
 	fd, filename = mkstemp()
 	try:
 		os.write(fd, _unicode_encode(_(
@@ -348,7 +389,7 @@ def get_commit_message_with_stdin():
 	Read a commit message from the user and return it.
 
 	@rtype: string or None
-	@returns: A string on success or None if an error occurs.
+	@return: A string on success or None if an error occurs.
 	"""
 	print("Please enter a commit message. Use Ctrl-d to finish or Ctrl-c to abort.")
 	commitmessage = []
@@ -435,6 +476,8 @@ def FindPortdir(settings):
 	# file.
 	if not portdir_overlay:
 		portdir_overlay = have_profile_dir(location, filename="repo_name")
+		if not portdir_overlay:
+			portdir_overlay = have_ebuild_dir(location)
 		if portdir_overlay:
 			subdir = location[len(portdir_overlay):]
 			if subdir and subdir[-1] != os.sep:
@@ -472,7 +515,7 @@ def FindVCS():
 	outvcs = []
 
 	def seek(depth = None):
-		""" Seek for distributed VCSes. """
+		""" Seek for VCSes that have a top-level data directory only. """
 		retvcs = []
 		pathprep = ''
 
@@ -483,6 +526,8 @@ def FindVCS():
 				retvcs.append('bzr')
 			if os.path.isdir(os.path.join(pathprep, '.hg')):
 				retvcs.append('hg')
+			if os.path.isdir(os.path.join(pathprep, '.svn')):  # >=1.7
+				retvcs.append('svn')
 
 			if retvcs:
 				break
@@ -497,7 +542,7 @@ def FindVCS():
 	# Level zero VCS-es.
 	if os.path.isdir('CVS'):
 		outvcs.append('cvs')
-	if os.path.isdir('.svn'):
+	if os.path.isdir('.svn'):  # <1.7
 		outvcs.append('svn')
 
 	# If we already found one of 'level zeros', just take a quick look
@@ -508,4 +553,325 @@ def FindVCS():
 	else:
 		outvcs = seek()
 
+	if len(outvcs) > 1:
+		# eliminate duplicates, like for svn in bug #391199
+		outvcs = list(set(outvcs))
+
 	return outvcs
+
+_copyright_re1 = re.compile(br'^(# Copyright \d\d\d\d)-\d\d\d\d ')
+_copyright_re2 = re.compile(br'^(# Copyright )(\d\d\d\d) ')
+
+
+class _copyright_repl(object):
+	__slots__ = ('year',)
+	def __init__(self, year):
+		self.year = year
+	def __call__(self, matchobj):
+		if matchobj.group(2) == self.year:
+			return matchobj.group(0)
+		else:
+			return matchobj.group(1) + matchobj.group(2) + \
+				b'-' + self.year + b' '
+
+def _update_copyright_year(year, line):
+	"""
+	These two regexes are taken from echangelog
+	update_copyright(), except that we don't hardcode
+	1999 here (in order to be more generic).
+	"""
+	is_bytes = isinstance(line, bytes)
+	if is_bytes:
+		if not line.startswith(b'# Copyright '):
+			return line
+	else:
+		if not line.startswith('# Copyright '):
+			return line
+
+	year = _unicode_encode(year)
+	line = _unicode_encode(line)
+
+	line = _copyright_re1.sub(br'\1-' + year + b' ', line)
+	line = _copyright_re2.sub(_copyright_repl(year), line)
+	if not is_bytes:
+		line = _unicode_decode(line)
+	return line
+
+def update_copyright(fn_path, year, pretend=False):
+	"""
+	Check file for a Copyright statement, and update its year.  The
+	patterns used for replacing copyrights are taken from echangelog.
+	Only the first lines of each file that start with a hash ('#') are
+	considered, until a line is found that doesn't start with a hash.
+	Files are read and written in binary mode, so that this function
+	will work correctly with files encoded in any character set, as
+	long as the copyright statements consist of plain ASCII.
+	"""
+
+	try:
+		fn_hdl = io.open(_unicode_encode(fn_path,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='rb')
+	except EnvironmentError:
+		return
+
+	orig_header = []
+	new_header = []
+
+	for line in fn_hdl:
+		line_strip = line.strip()
+		orig_header.append(line)
+		if not line_strip or line_strip[:1] != b'#':
+			new_header.append(line)
+			break
+
+		line = _update_copyright_year(year, line)
+		new_header.append(line)
+
+	difflines = 0
+	for line in difflib.unified_diff(
+		[_unicode_decode(line) for line in orig_header],
+		[_unicode_decode(line) for line in new_header],
+			fromfile=fn_path, tofile=fn_path, n=0):
+		util.writemsg_stdout(line, noiselevel=-1)
+		difflines += 1
+	util.writemsg_stdout("\n", noiselevel=-1)
+
+	# unified diff has three lines to start with
+	if difflines > 3 and not pretend:
+		# write new file with changed header
+		f, fnnew_path = mkstemp()
+		f = io.open(f, mode='wb')
+		for line in new_header:
+			f.write(line)
+		for line in fn_hdl:
+			f.write(line)
+		f.close()
+		try:
+			fn_stat = os.stat(fn_path)
+		except OSError:
+			fn_stat = None
+
+		shutil.move(fnnew_path, fn_path)
+
+		if fn_stat is None:
+			util.apply_permissions(fn_path, mode=0o644)
+		else:
+			util.apply_stat_permissions(fn_path, fn_stat)
+	fn_hdl.close()
+
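
Note the write-to-temp-then-rename pattern above: the rewritten header
and the untouched remainder of the file go to a mkstemp() file, which
is then moved over the original, restoring the original permissions
(or falling back to mode 0644 if the file could not be stat'ed).  The
same pattern in isolation, as a sketch that substitutes plain
os.chmod() for portage's util.apply_*_permissions() helpers:

    import io, os, shutil
    from tempfile import mkstemp

    def replace_file_safely(path, new_bytes):
        fd, tmp_path = mkstemp()
        with io.open(fd, mode='wb') as f:  # io.open() adopts the descriptor
            f.write(new_bytes)
        try:
            old_stat = os.stat(path)
        except OSError:
            old_stat = None  # target did not exist or was unreadable
        shutil.move(tmp_path, path)
        os.chmod(path, old_stat.st_mode if old_stat is not None else 0o644)
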
+def get_committer_name(env=None):
+	"""Generate a committer string like echangelog does."""
+	if env is None:
+		env = os.environ
+	if 'GENTOO_COMMITTER_NAME' in env and \
+		'GENTOO_COMMITTER_EMAIL' in env:
+		user = '%s <%s>' % (env['GENTOO_COMMITTER_NAME'],
+			env['GENTOO_COMMITTER_EMAIL'])
+	elif 'GENTOO_AUTHOR_NAME' in env and \
+			'GENTOO_AUTHOR_EMAIL' in env:
+		user = '%s <%s>' % (env['GENTOO_AUTHOR_NAME'],
+			env['GENTOO_AUTHOR_EMAIL'])
+	elif 'ECHANGELOG_USER' in env:
+		user = env['ECHANGELOG_USER']
+	else:
+		pwd_struct = pwd.getpwuid(os.getuid())
+		gecos = pwd_struct.pw_gecos.split(',')[0]  # bug #80011
+		user = '%s <%s@gentoo.org>' % (gecos, pwd_struct.pw_name)
+	return user
+
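
The lookup order is: GENTOO_COMMITTER_*, then GENTOO_AUTHOR_*, then
ECHANGELOG_USER, then the GECOS field from passwd combined with an
@gentoo.org address.  For example, with a hypothetical environment:

    env = {'GENTOO_COMMITTER_NAME': 'Larry the Cow',
           'GENTOO_COMMITTER_EMAIL': 'larry@gentoo.org',
           'ECHANGELOG_USER': 'shadowed'}  # COMMITTER_* takes precedence
    assert get_committer_name(env=env) == 'Larry the Cow <larry@gentoo.org>'
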
+def UpdateChangeLog(pkgdir, user, msg, skel_path, category, package,
+	new=(), removed=(), changed=(), pretend=False):
+	"""
+	Write an entry to an existing ChangeLog, or create a new one.
+	Updates the copyright year on new and changed files, and seeds the
+	header of a newly created ChangeLog from skel.ChangeLog.
+	"""
+
+	if '<root@' in user:
+		err = 'Please set ECHANGELOG_USER or run as non-root'
+		logging.critical(err)
+		return None
+
+	# ChangeLog times are in UTC
+	gmtime = time.gmtime()
+	year = time.strftime('%Y', gmtime)
+	date = time.strftime('%d %b %Y', gmtime)
+
+	# update the copyright year in new and changed files; patches and
+	# diffs (identified by .patch and .diff) are excluded
+	for fn in chain(new, changed):
+		if fn.endswith('.diff') or fn.endswith('.patch'):
+			continue
+		update_copyright(os.path.join(pkgdir, fn), year, pretend=pretend)
+
+	cl_path = os.path.join(pkgdir, 'ChangeLog')
+	clold_lines = []
+	clnew_lines = []
+	old_header_lines = []
+	header_lines = []
+
+	try:
+		clold_file = io.open(_unicode_encode(cl_path,
+			encoding=_encodings['fs'], errors='strict'),
+			mode='r', encoding=_encodings['repo.content'], errors='replace')
+	except EnvironmentError:
+		clold_file = None
+
+	clskel_file = None
+	if clold_file is None:
+		# we will only need the ChangeLog skeleton if there is no
+		# ChangeLog yet
+		try:
+			clskel_file = io.open(_unicode_encode(skel_path,
+				encoding=_encodings['fs'], errors='strict'),
+				mode='r', encoding=_encodings['repo.content'],
+				errors='replace')
+		except EnvironmentError:
+			pass
+
+	f, clnew_path = mkstemp()
+
+	# construct correct header first
+	try:
+		if clold_file is not None:
+			# retain header from old ChangeLog
+			for line in clold_file:
+				line_strip = line.strip()
+				if line_strip and line[:1] != "#":
+					clold_lines.append(line)
+					break
+				old_header_lines.append(line)
+				header_lines.append(_update_copyright_year(year, line))
+				if not line_strip:
+					break
+
+		elif clskel_file is not None:
+			# read skel.ChangeLog up to first empty line
+			for line in clskel_file:
+				line_strip = line.strip()
+				if not line_strip:
+					break
+				line = line.replace('<CATEGORY>', category)
+				line = line.replace('<PACKAGE_NAME>', package)
+				line = _update_copyright_year(year, line)
+				header_lines.append(line)
+			header_lines.append(_unicode_decode('\n'))
+			clskel_file.close()
+
+		# write new ChangeLog entry
+		clnew_lines.extend(header_lines)
+		newebuild = False
+		for fn in new:
+			if not fn.endswith('.ebuild'):
+				continue
+			ebuild = fn.split(os.sep)[-1][0:-7]  # strip the '.ebuild' suffix
+			clnew_lines.append(_unicode_decode('*%s (%s)\n' % (ebuild, date)))
+			newebuild = True
+		if newebuild:
+			clnew_lines.append(_unicode_decode('\n'))
+		trivial_files = ('ChangeLog', 'Manifest')
+		display_new = ['+' + elem for elem in new
+			if elem not in trivial_files]
+		display_removed = ['-' + elem for elem in removed]
+		display_changed = [elem for elem in changed
+			if elem not in trivial_files]
+		if not (display_new or display_removed or display_changed):
+			# If there's nothing else to display, show one of the
+			# trivial files.
+			for fn in trivial_files:
+				if fn in new:
+					display_new = ['+' + fn]
+					break
+				elif fn in changed:
+					display_changed = [fn]
+					break
+
+		display_new.sort()
+		display_removed.sort()
+		display_changed.sort()
+
+		mesg = '%s; %s %s:' % (date, user, ', '.join(chain(
+			display_new, display_removed, display_changed)))
+		for line in textwrap.wrap(mesg, 80, \
+				initial_indent='  ', subsequent_indent='  ', \
+				break_on_hyphens=False):
+			clnew_lines.append(_unicode_decode('%s\n' % line))
+		for line in textwrap.wrap(msg, 80, \
+				initial_indent='  ', subsequent_indent='  '):
+			clnew_lines.append(_unicode_decode('%s\n' % line))
+		clnew_lines.append(_unicode_decode('\n'))
+
+		f = io.open(f, mode='w', encoding=_encodings['repo.content'],
+			errors='backslashreplace')
+
+		for line in clnew_lines:
+			f.write(line)
+
+		# append stuff from old ChangeLog
+		if clold_file is not None:
+
+			if clold_lines:
+				# clold_lines may contain a saved non-header line
+				# that we want to write first.
+				# Also, append this line to clnew_lines so that the
+				# unified_diff call doesn't show it as removed.
+				for line in clold_lines:
+					f.write(line)
+					clnew_lines.append(line)
+
+			else:
+				# ensure that there is no more than one blank
+				# line after our new entry
+				for line in clold_file:
+					if line.strip():
+						f.write(line)
+						break
+
+			# Now prepend old_header_lines to clold_lines, for use
+			# in the unified_diff call below.
+			clold_lines = old_header_lines + clold_lines
+
+			for line in clold_file:
+				f.write(line)
+			clold_file.close()
+		f.close()
+
+		# show diff (do we want to keep on doing this, or only when
+		# pretend?)
+		for line in difflib.unified_diff(clold_lines, clnew_lines,
+			fromfile=cl_path, tofile=cl_path, n=0):
+			util.writemsg_stdout(line, noiselevel=-1)
+		util.writemsg_stdout("\n", noiselevel=-1)
+
+		if pretend:
+			# remove what we've done
+			os.remove(clnew_path)
+		else:
+			# rename to ChangeLog, and set permissions
+			try:
+				clold_stat = os.stat(cl_path)
+			except OSError:
+				clold_stat = None
+
+			shutil.move(clnew_path, cl_path)
+
+			if clold_stat is None:
+				util.apply_permissions(cl_path, mode=0o644)
+			else:
+				util.apply_stat_permissions(cl_path, clold_stat)
+
+		if clold_file is None:
+			return True
+		else:
+			return False
+	except IOError as e:
+		err = 'Repoman is unable to create/write to ChangeLog.new file: %s' % (e,)
+		logging.critical(err)
+		# try to remove if possible
+		try:
+			os.remove(clnew_path)
+		except OSError:
+			pass
+		return None
+
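
A hypothetical invocation, to make the return protocol concrete (all
paths and names below are placeholders):

    created = UpdateChangeLog(
        '/usr/portage/app-misc/foo',         # pkgdir
        get_committer_name(),                # user
        'Version bump.',                     # msg
        '/usr/portage/skel.ChangeLog',       # skel_path
        'app-misc', 'foo',                   # category, package
        new=('foo-1.1.ebuild',),
        pretend=True)                        # print the diff, change nothing
    # True:  a new ChangeLog was created from skel.ChangeLog
    # False: an existing ChangeLog was updated
    # None:  failure (e.g. running as root without ECHANGELOG_USER)
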

diff --git a/portage_with_autodep/pym/repoman/utilities.pyo b/portage_with_autodep/pym/repoman/utilities.pyo
new file mode 100644
index 0000000..c9f6734
Binary files /dev/null and b/portage_with_autodep/pym/repoman/utilities.pyo differ

