diff -uNr portage-orig/pym/plugins/__init__.py portage-plugin-framework/pym/plugins/__init__.py
--- portage-orig/pym/plugins/__init__.py	1970-01-01 09:00:00.000000000 +0900
+++ portage-plugin-framework/pym/plugins/__init__.py	2005-11-12 21:52:15.000000000 +0900
@@ -0,0 +1,80 @@
+class registry:
+
+	class _plugin_loader:
+
+		def __init__(self, path):
+			try:
+				import sys
+				for prefix in sys.path:
+					if path.startswith(prefix):
+						mod_name = path[len(prefix)+1:] # os.sep separated; relies on __import__ accepting path fragments (python2 quirk)
+						self._plugin = __import__(mod_name)
+						getattr(self._plugin, "plugin_class")
+						break
+			except (ImportError, AttributeError), e:
+				raise ImportError(str(e))
+			self._path = path
+			self._loaded = {}
+
+		def __getitem__(self, name):
+			if name in self._loaded:
+				return self._loaded[name]
+			try:
+				import os
+				plugin_path = os.path.join(self._path, name)
+				import sys
+				for prefix in sys.path:
+					if plugin_path.startswith(prefix):
+						plugin_name = plugin_path[len(prefix)+1:]
+						plugin = __import__(plugin_name)
+						break
+				plugin = getattr(plugin, name)
+				if issubclass(plugin, self._plugin.plugin_class):
+					self._loaded[name] = plugin
+					return plugin
+			except (ImportError, AttributeError), e:
+				pass
+			raise KeyError(name)
+
+		def keys(self):
+			import os
+			for name in os.listdir(self._path):
+				(name, ext) = os.path.splitext(name)
+				if name.startswith("_") or not ext.startswith(".py"):
+					continue
+				if name not in self._loaded:
+					try:
+						self[name]
+					except KeyError:
+						pass
+			keys = self._loaded.keys()
+			keys.sort()
+			return keys
+
+	def __init__(self):
+		import os
+		self._path = os.path.dirname(__file__)
+		self._loaded = {}
+
+	def __getitem__(self, name):
+		if name not in self._loaded:
+			try:
+				import os
+				self._loaded[name] = self._plugin_loader(os.path.join(self._path, name))
+			except ImportError, e:
+				raise KeyError, name
+		return self._loaded[name]
+
+	def keys(self):
+		import os
+		for name in os.listdir(self._path):
+			if name not in self._loaded:
+				try:
+					self[name]
+				except KeyError:
+					pass
+		keys = self._loaded.keys()
+		keys.sort()
+		return keys
+
+registry = registry() # module level singleton; intentionally shadows the class name
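
For reference, callers never import a backend module directly any more; everything
goes through the module level singleton. A minimal sketch of consumer usage,
assuming pym/ is on sys.path as portage already arranges:

	from plugins import registry

	# "cache" names the pym/plugins/cache package; the backend module is only
	# imported on first access, and is validated against that package's
	# declared plugin_class before being handed back.
	db_class = registry["cache"]["flat_hash"]
	print registry["cache"].keys()	# sorted names of every loadable cache plugin
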
diff -uNr portage-orig/pym/plugins/cache/__init__.py portage-plugin-framework/pym/plugins/cache/__init__.py
--- portage-orig/pym/plugins/cache/__init__.py	1970-01-01 09:00:00.000000000 +0900
+++ portage-plugin-framework/pym/plugins/cache/__init__.py	2005-11-12 21:52:15.000000000 +0900
@@ -0,0 +1 @@
+from cache.template import database as plugin_class
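
A plugin group is just a package whose __init__ binds the required base class to
the name plugin_class. A hypothetical third party backend (not part of this patch)
dropped into pym/plugins/cache/ then only has to subclass it; note the class must
carry the module's own name, since the loader resolves it via getattr(module, name):

	# pym/plugins/cache/memory.py -- illustrative throwaway in-memory backend
	from cache import template

	class memory(template.database):
		autocommits = True

		def __init__(self, *args, **config):
			super(memory, self).__init__(*args, **config)
			self._data = {}

		def _getitem(self, cpv):
			return self._data[cpv]

		def _setitem(self, cpv, values):
			self._data[cpv] = dict(values)

		def _delitem(self, cpv):
			del self._data[cpv]

		def has_key(self, cpv):
			return cpv in self._data

		def iterkeys(self):
			return self._data.iterkeys()
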
diff -uNr portage-orig/pym/plugins/cache/anydbm.py portage-plugin-framework/pym/plugins/cache/anydbm.py
--- portage-orig/pym/plugins/cache/anydbm.py	1970-01-01 09:00:00.000000000 +0900
+++ portage-plugin-framework/pym/plugins/cache/anydbm.py	2005-11-12 21:52:15.000000000 +0900
@@ -0,0 +1,75 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id: anydbm.py 1911 2005-08-25 03:44:21Z ferringb $
+
+anydbm_module = __import__("anydbm")
+try:
+	import cPickle as pickle
+except ImportError:
+	import pickle
+import os
+from cache import fs_template
+from cache import cache_errors
+
+
+class anydbm(fs_template.FsBased):
+
+	autocommits = True
+	cleanse_keys = True
+
+	def __init__(self, *args, **config):
+		super(anydbm,self).__init__(*args, **config)
+
+		default_db = config.get("dbtype","anydbm")
+		if not default_db.startswith("."):
+			default_db = '.' + default_db
+
+		self._db_path = os.path.join(self.location, fs_template.gen_label(self.location, self.label)+default_db)
+		self.__db = None
+		try:
+			self.__db = anydbm_module.open(self._db_path, "w", self._perms)
+				
+		except anydbm_module.error:
+			# XXX handle this at some point
+			try:
+				self._ensure_dirs()
+				self._ensure_dirs(self._db_path)
+				self._ensure_access(self._db_path)
+			except (OSError, IOError), e:
+				raise cache_errors.InitializationError(self.__class__, e)
+
+			# try again if failed
+			try:
+				if self.__db is None:
+					self.__db = anydbm_module.open(self._db_path, "c", self._perms)
+			except anydbm_module.error, e:
+				raise cache_errors.InitializationError(self.__class__, e)
+
+	def iteritems(self):
+		return self.__db.iteritems()
+
+	def __getitem__(self, cpv):
+		# we override getitem because it's just a cpickling of the data handed in.
+		return pickle.loads(self.__db[cpv])
+
+
+	def _setitem(self, cpv, values):
+		self.__db[cpv] = pickle.dumps(values,pickle.HIGHEST_PROTOCOL)
+
+	def _delitem(self, cpv):
+		del self.__db[cpv]
+
+
+	def iterkeys(self):
+		return iter(self.__db)
+
+
+	def has_key(self, cpv):
+		return cpv in self.__db
+
+
+	def __del__(self):
+		if self.__dict__.get("_anydbm__db") is not None: # __db is stored name-mangled as _anydbm__db
+			self.__db.sync()
+			self.__db.close()
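
Since anydbm stores one pickled dict per cpv, __getitem__ short-circuits the
template's usual key handling and just unpickles. The round trip it performs,
in isolation (hypothetical path and data):

	import anydbm
	import cPickle as pickle

	db = anydbm.open("/tmp/example.anydbm", "c")
	db["sys-apps/portage-2.0.53"] = pickle.dumps({"SLOT": "0"}, pickle.HIGHEST_PROTOCOL)
	print pickle.loads(db["sys-apps/portage-2.0.53"])["SLOT"]
	db.close()
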
diff -uNr portage-orig/pym/plugins/cache/flat_hash.py portage-plugin-framework/pym/plugins/cache/flat_hash.py
--- portage-orig/pym/plugins/cache/flat_hash.py	1970-01-01 09:00:00.000000000 +0900
+++ portage-plugin-framework/pym/plugins/cache/flat_hash.py	2005-11-12 21:52:15.000000000 +0900
@@ -0,0 +1,118 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id: flat_list.py 1911 2005-08-25 03:44:21Z ferringb $
+
+from cache import fs_template
+from cache import cache_errors
+import os, stat
+from cache.mappings import LazyLoad, ProtectedDict
+from cache.template import reconstruct_eclasses
+class flat_hash(fs_template.FsBased):
+
+	autocommits = True
+
+	def __init__(self, *args, **config):
+		super(flat_hash,self).__init__(*args, **config)
+		self.location = os.path.join(self.location, 
+			self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+		if not os.path.exists(self.location):
+			self._ensure_dirs()
+
+	def __getitem__(self, cpv):
+		fp = os.path.join(self.location, cpv)
+		try:
+			def curry(*args):
+				def callit(*args2):
+					return args[0](*args[1:]+args2)
+				return callit
+			return ProtectedDict(LazyLoad(curry(self._pull, fp, cpv), initial_items=[("_mtime_", os.stat(fp).st_mtime)]))
+		except OSError:
+			raise KeyError(cpv)
+
+	def _pull(self, fp, cpv):
+		try:
+			myf = open(fp,"r")
+		except IOError:
+			raise KeyError(cpv)
+		except OSError, e:
+			raise cache_errors.CacheCorruption(cpv, e)
+		try:
+			d = self._parse_data(myf, cpv)
+		except (OSError, ValueError), e:
+			myf.close()
+			raise cache_errors.CacheCorruption(cpv, e)
+		myf.close()
+		return d
+
+
+	def _parse_data(self, data, cpv, mtime=0):
+		d = dict(map(lambda x:x.rstrip().split("=", 1), data))
+		if mtime != 0:
+			d["_mtime_"] = long(mtime)
+		if "_eclasses_" in d:
+			d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"])
+		return d
+
+
+	def _setitem(self, cpv, values):
+		s = cpv.rfind("/")
+		fp = os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+		try:	myf=open(fp, "w")
+		except (OSError, IOError), e:
+			if e.errno == 2:
+				try:
+					self._ensure_dirs(cpv)
+					myf=open(fp,"w")
+				except (OSError, IOError), e:
+					raise cache_errors.CacheCorruption(cpv, e)
+			else:
+				raise cache_errors.CacheCorruption(cpv, e)
+		
+		for k, v in values.items():
+			if k != "_mtime_":
+				myf.write("%s=%s\n" % (k, v))
+
+		myf.close()
+		self._ensure_access(fp, mtime=values["_mtime_"])
+
+		#update written.  now we move it.
+
+		new_fp = os.path.join(self.location,cpv)
+		try:	os.rename(fp, new_fp)
+		except (OSError, IOError), e:
+			os.remove(fp)
+			raise cache_errors.CacheCorruption(cpv, e)
+
+
+	def _delitem(self, cpv):
+		try:
+			os.remove(os.path.join(self.location,cpv))
+		except OSError, e:
+			if e.errno == 2:
+				raise KeyError(cpv)
+			else:
+				raise cache_errors.CacheCorruption(cpv, e)
+
+
+	def has_key(self, cpv):
+		return os.path.exists(os.path.join(self.location, cpv))
+
+
+	def iterkeys(self):
+		"""generator for walking the dir struct"""
+		dirs = [self.location]
+		len_base = len(self.location)
+		while len(dirs):
+			for l in os.listdir(dirs[0]):
+				if l.endswith(".cpickle"):
+					continue
+				p = os.path.join(dirs[0],l)
+				st = os.lstat(p)
+				if stat.S_ISDIR(st.st_mode):
+					dirs.append(p)
+					continue
+				yield p[len_base+1:]
+			dirs.pop(0)
+
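
The curry helper in __getitem__ is plain partial application: combined with
LazyLoad, the common "is _mtime_ still current" check is answered from the stat
alone, and the flat file is only parsed when some other key is touched. The same
pattern in isolation, with a stand-in loader and hypothetical values:

	def curry(*args):
		def callit(*args2):
			return args[0](*args[1:]+args2)
		return callit

	def pull(fp, cpv):
		print "parsing", fp	# runs once, on the first non-_mtime_ access
		return {"SLOT": "0"}

	loader = curry(pull, "/var/cache/edb/dep/sys-apps/example-1.0", "sys-apps/example-1.0")
	print loader()["SLOT"]
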
diff -uNr portage-orig/pym/plugins/cache/flat_list.py portage-plugin-framework/pym/plugins/cache/flat_list.py
--- portage-orig/pym/plugins/cache/flat_list.py	1970-01-01 09:00:00.000000000 +0900
+++ portage-plugin-framework/pym/plugins/cache/flat_list.py	2005-11-12 21:52:15.000000000 +0900
@@ -0,0 +1,109 @@
+from cache import fs_template
+from cache import cache_errors
+import os, stat
+
+# store the current key order *here*.
+class flat_list(fs_template.FsBased):
+
+	autocommits = True
+
+	# do not screw with this ordering. _eclasses_ needs to be last
+	auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+		'RESTRICT',  'HOMEPAGE',  'LICENSE', 'DESCRIPTION',
+		'KEYWORDS',  'IUSE', 'CDEPEND',
+		'PDEPEND',   'PROVIDE','_eclasses_')
+
+	def __init__(self, label, auxdbkeys, **config):
+		super(flat_list,self).__init__(label, auxdbkeys, **config)
+		self._base = os.path.join(self._base, 
+			self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+		if len(self._known_keys) > len(self.auxdbkey_order) + 2:
+			raise Exception("fewer ordered keys than auxdbkeys")
+		if not os.path.exists(self._base):
+			self._ensure_dirs()
+
+
+	def _getitem(self, cpv):
+		d = {}
+		try:
+			myf = open(os.path.join(self._base, cpv),"r")
+			for k,v in zip(self.auxdbkey_order, myf):
+				d[k] = v.rstrip("\n")
+		except (OSError, IOError),e:
+			if isinstance(e,IOError) and e.errno == 2:
+#				print "caught for %s" % cpv, e
+#				l=os.listdir(os.path.dirname(os.path.join(self._base,cpv)))
+#				l.sort()
+#				print l
+				raise KeyError(cpv)
+			raise cache_errors.CacheCorruption(cpv, e)
+
+		try:	d["_mtime_"] = os.fstat(myf.fileno()).st_mtime
+		except OSError, e:	
+			myf.close()
+			raise cache_errors.CacheCorruption(cpv, e)
+		myf.close()
+		return d
+
+
+	def _setitem(self, cpv, values):
+		s = cpv.rfind("/")
+		fp=os.path.join(self._base,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+		try:	myf=open(fp, "w")
+		except (OSError, IOError), e:
+			if e.errno == 2:
+				try:
+					self._ensure_dirs(cpv)
+					myf=open(fp,"w")
+				except (OSError, IOError),e:
+					raise cache_errors.CacheCorruption(cpv, e)
+			else:
+				raise cache_errors.CacheCorruption(cpv, e)
+		
+
+		for x in self.auxdbkey_order:
+			myf.write(values.get(x,"")+"\n")
+
+		myf.close()
+		self._ensure_access(fp, mtime=values["_mtime_"])
+		#update written.  now we move it.
+		new_fp = os.path.join(self._base,cpv)
+		try:	os.rename(fp, new_fp)
+		except (OSError, IOError), e:
+			os.remove(fp)
+			raise cache_errors.CacheCorruption(cpv, e)
+
+
+	def _delitem(self, cpv):
+		try:
+			os.remove(os.path.join(self._base,cpv))
+		except OSError, e:
+			if e.errno == 2:
+				raise KeyError(cpv)
+			else:
+				raise cache_errors.CacheCorruption(cpv, e)
+
+
+	def has_key(self, cpv):
+		return os.path.exists(os.path.join(self._base, cpv))
+
+
+	def iterkeys(self):
+		"""generator for walking the dir struct"""
+		dirs = [self._base]
+		len_base = len(self._base)
+		while len(dirs):
+			for l in os.listdir(dirs[0]):
+				if l.endswith(".cpickle"):
+					continue
+				p = os.path.join(dirs[0],l)
+				st = os.lstat(p)
+				if stat.S_ISDIR(st.st_mode):
+					dirs.append(p)
+					continue
+				yield p[len_base+1:]
+			dirs.pop(0)
+
+
+	def commit(self):	pass
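
Unlike flat_hash, flat_list writes no keys at all; line position carries the
meaning, which is why auxdbkey_order must never be reshuffled. A standalone
reader for the format, mirroring _getitem above (sketch, assuming the same
key tuple):

	def read_entry(path, auxdbkey_order):
		myf = open(path, "r")
		d = {}
		for k, v in zip(auxdbkey_order, myf):
			d[k] = v.rstrip("\n")
		myf.close()
		return d
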
diff -uNr portage-orig/pym/plugins/cache/metadata.py portage-plugin-framework/pym/plugins/cache/metadata.py
--- portage-orig/pym/plugins/cache/metadata.py	1970-01-01 09:00:00.000000000 +0900
+++ portage-plugin-framework/pym/plugins/cache/metadata.py	2005-11-12 21:52:15.000000000 +0900
@@ -0,0 +1,88 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id: metadata.py 1964 2005-09-03 00:16:16Z ferringb $
+
+import os, stat
+from plugins.cache import flat_hash
+from cache import cache_errors
+import eclass_cache 
+from cache.template import reconstruct_eclasses, serialize_eclasses
+from cache.mappings import ProtectedDict, LazyLoad
+
+# this is the old cache format, flat_list.  count maintained here.
+magic_line_count = 22
+
+# store the current key order *here*.
+class metadata(flat_hash.flat_hash):
+	complete_eclass_entries = False
+	auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+		'RESTRICT',  'HOMEPAGE',  'LICENSE', 'DESCRIPTION',
+		'KEYWORDS',  'INHERITED', 'IUSE', 'CDEPEND',
+		'PDEPEND',   'PROVIDE', 'EAPI')
+
+	autocommits = True
+
+	def __init__(self, location, *args, **config):
+		loc = location
+		super(metadata, self).__init__(location, *args, **config)
+		self.location = os.path.join(loc, "metadata","cache")
+		self.ec = eclass_cache.cache(loc)
+
+	def __getitem__(self, cpv):
+		return flat_hash.flat_hash.__getitem__(self, cpv)
+
+
+	def _parse_data(self, data, cpv, mtime=0):
+		# easy attempt first.
+		data = list(data)
+		if len(data) != magic_line_count:
+			d = flat_hash.flat_hash._parse_data(self, data, cpv, mtime)
+		else:
+			# this one's interesting.
+			d = {}
+
+			for line in data:
+				# yes, meant to iterate over a string.
+				hashed = False
+				# poor mans enumerate.  replace when python 2.3 is required
+				for idx, c in zip(range(len(line)), line):
+					if not c.isalpha():
+						if c == "=" and idx > 0:
+							hashed = True
+							d[line[:idx]] = line[idx + 1:]
+						elif c == "_" or c.isdigit():
+							continue
+						break
+					elif not c.isupper():
+						break
+			
+				if not hashed:
+					# non hashed.
+					d.clear()
+					# poor mans enumerate.  replace when python 2.3 is required
+					for idx, key in zip(range(len(self.auxdbkey_order)), self.auxdbkey_order):
+						d[key] = data[idx].strip()
+					break
+
+		if "_eclasses_" not in d:
+			if "INHERITED" in d:
+				d["_eclasses_"] = self.ec.get_eclass_data(d["INHERITED"].split(), from_master_only=True)
+				del d["INHERITED"]
+		else:
+			d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"])
+
+		return d
+
+
+		
+	def _setitem(self, cpv, values):
+		values = ProtectedDict(values)
+		
+		# hack.  proper solution is to make this a __setitem__ override, since template.__setitem__ 
+		# serializes _eclasses_, then we reconstruct it.
+		if "_eclasses_" in values:
+			values["INHERITED"] = ' '.join(reconstruct_eclasses(cpv, values["_eclasses_"]).keys())
+			del values["_eclasses_"]
+
+		flat_hash.flat_hash._setitem(self, cpv, values)
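
metadata accepts two input shapes: exactly magic_line_count positional lines
(the old flat_list layout) or KEY=value lines. The character loop that sniffs
each line boils down to "one or more of [A-Z0-9_], then =". An equivalent
restatement as a regex (not part of the patch):

	import re
	_hashed_re = re.compile(r'^[A-Z0-9_]+=')

	def is_key_value(line):
		"""true for new-style KEY=value metadata lines"""
		return _hashed_re.match(line) is not None
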
diff -uNr portage-orig/pym/plugins/cache/sqlite.py portage-plugin-framework/pym/plugins/cache/sqlite.py
--- portage-orig/pym/plugins/cache/sqlite.py	1970-01-01 09:00:00.000000000 +0900
+++ portage-plugin-framework/pym/plugins/cache/sqlite.py	2005-11-12 21:52:15.000000000 +0900
@@ -0,0 +1,67 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+# $Id: sqlite.py 1911 2005-08-25 03:44:21Z ferringb $
+
+sqlite_module = __import__("sqlite")
+import os
+from cache import sql_template, fs_template
+from cache import cache_errors
+
+class sqlite(fs_template.FsBased, sql_template.SQLDatabase):
+
+	SCHEMA_DELETE_NAME	= "delete_package_values"
+	SCHEMA_DELETE_TRIGGER	= """CREATE TRIGGER %s AFTER DELETE on %s
+	begin
+	DELETE FROM %s WHERE pkgid=old.pkgid;
+	end;""" % (SCHEMA_DELETE_NAME, sql_template.SQLDatabase.SCHEMA_PACKAGE_NAME, 
+		sql_template.SQLDatabase.SCHEMA_VALUES_NAME)
+
+	_BaseError = sqlite_module.Error
+	_dbClass = sqlite_module
+	_supports_replace = True
+
+	def _dbconnect(self, config):
+		self._dbpath = os.path.join(self.location, fs_template.gen_label(self.location, self.label)+".sqldb")
+		try:
+			self.db = sqlite_module.connect(self._dbpath, mode=self._perms, autocommit=False)
+			if not self._ensure_access(self._dbpath):
+				raise cache_errors.InitializationError(self.__class__, "can't ensure perms on %s" % self._dbpath)
+			self.con = self.db.cursor()
+		except self._BaseError, e:
+			raise cache_errors.InitializationError(self.__class__, e)
+
+		
+	def _initdb_con(self, config):
+		sql_template.SQLDatabase._initdb_con(self, config)
+		try:
+			self.con.execute("SELECT name FROM sqlite_master WHERE type=\"trigger\" AND name=%s" % \
+				self._sfilter(self.SCHEMA_DELETE_NAME))
+			if self.con.rowcount == 0:
+				self.con.execute(self.SCHEMA_DELETE_TRIGGER)
+				self.db.commit()
+		except self._BaseError, e:
+			raise cache_errors.InitializationError(self.__class__, e)
+
+	def _table_exists(self, tbl):
+		"""return true/false dependant on a tbl existing"""
+		try:	self.con.execute("SELECT name FROM sqlite_master WHERE type=\"table\" AND name=%s" % 
+			self._sfilter(tbl))
+		except self._BaseError, e:
+			# XXX crappy.
+			return False
+		return len(self.con.fetchall()) == 1
+
+	# we can do it minus a query via rowid.
+	def _insert_cpv(self, cpv):
+		cpv = self._sfilter(cpv)
+		try:	self.con.execute(self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1) % \
+			(self.label, cpv))
+		except self._BaseError, e:
+			raise cache_errors.CacheCorruption(cpv, "tried to insert a cpv, but failed: %s" % str(e))
+
+		# rowcount picks up the implicit delete from the REPLACE as well
+		if self.con.rowcount <= 0 or self.con.rowcount > 2:
+			raise cache_errors.CacheCorruption(cpv, "tried to insert a cpv, but failed- %i rows modified" % self.con.rowcount)
+		return self.con.lastrowid
+
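
The trigger supplies the delete cascade that sqlite of this vintage does not
enforce by itself: dropping a package row sweeps its values rows in the same
statement. The pattern in isolation, with hypothetical table names (pysqlite 1.x
API, matching the connect() call above):

	import sqlite

	db = sqlite.connect("/tmp/example.sqldb", autocommit=False)
	con = db.cursor()
	con.execute("CREATE TABLE pkg (pkgid INTEGER PRIMARY KEY, cpv TEXT)")
	con.execute("CREATE TABLE vals (pkgid INTEGER, key TEXT, value TEXT)")
	con.execute("""CREATE TRIGGER delete_pkg_vals AFTER DELETE ON pkg
	begin
	DELETE FROM vals WHERE pkgid=old.pkgid;
	end;""")
	db.commit()
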
diff -uNr portage-orig/pym/portage.py portage-plugin-framework/pym/portage.py
--- portage-orig/pym/portage.py	2005-11-12 21:53:05.000000000 +0900
+++ portage-plugin-framework/pym/portage.py	2005-11-12 21:52:15.000000000 +0900
@@ -28,7 +28,6 @@
 	import commands
 	from time import sleep
 	from random import shuffle
-	from cache.cache_errors import CacheError
 except SystemExit, e:
 	raise
 except Exception, e:
@@ -67,6 +66,8 @@
 	import xpak
 	import getbinpkg
 	import portage_dep
+	import plugins
+	from cache.cache_errors import CacheError
 
 	# XXX: This needs to get cleaned up.
 	import output
@@ -136,26 +137,6 @@
 signal.signal(signal.SIGTERM, exithandler)
 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
 
-def load_mod(name):
-	modname = string.join(string.split(name,".")[:-1],".")
-	mod = __import__(modname)
-	components = name.split('.')
-	for comp in components[1:]:
-		mod = getattr(mod, comp)
-	return mod
-
-def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
-	for x in key_order:
-		if top_dict.has_key(x) and top_dict[x].has_key(key):
-			if FullCopy:
-				return copy.deepcopy(top_dict[x][key])
-			else:
-				return top_dict[x][key]
-	if EmptyOnError:
-		return ""
-	else:
-		raise KeyError, "Key not found in list; '%s'" % key
-
 def getcwd():
 	"this fixes situations where the current directory doesn't exist"
 	try:
@@ -923,8 +904,8 @@
 			if self.modules["user"] == None:
 				self.modules["user"] = {}
 			self.modules["default"] = {
-				"portdbapi.metadbmodule": "cache.metadata.database",
-				"portdbapi.auxdbmodule":  "cache.flat_hash.database",
+				"portdbapi.metadbmodule": "cache.metadata",
+				"portdbapi.auxdbmodule":  "cache.flat_hash",
 			}
 
 			self.usemask=[]
@@ -1229,8 +1210,18 @@
 		self.virtuals = self.getvirtuals(root)
 
 	def load_best_module(self,property_string):
-		best_mod = best_from_dict(property_string,self.modules,self.module_priority)
-		return load_mod(best_mod)
+		for prio in self.module_priority:
+			if property_string not in self.modules[prio]:
+				continue
+			mod_split = self.modules[prio][property_string].split(".", 1)
+			if len(mod_split) == 2:
+				(mod_type, mod_name) = mod_split
+				try:
+					return plugins.registry[mod_type][mod_name]
+				except KeyError:
+					pass
+			writemsg("%s module %s could not be loaded.\n" % (property_string, self.modules[prio][property_string]))
+		raise KeyError(property_string)
 
 	def lock(self):
 		self.locked = 1
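
With this change, /etc/portage/modules values keep their old shape minus the
trailing class name: "cache.flat_hash.database" becomes "cache.flat_hash", split
once into plugin group and plugin name. The resolution load_best_module now
performs, spelled out:

	(mod_type, mod_name) = "cache.flat_hash".split(".", 1)
	db_class = plugins.registry[mod_type][mod_name]	# the flat_hash class from plugins/cache/flat_hash.py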