public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
* [gentoo-commits] proj/gentoo-openbsd:master commit in: eclass/
@ 2011-04-25 13:46 Maxim Koltsov
  0 siblings, 0 replies; only message in thread
From: Maxim Koltsov @ 2011-04-25 13:46 UTC (permalink / raw)
  To: gentoo-commits

commit:     c002b65c6b84aad1659f9e3430ab9781b43cd486
Author:     Maxim <kolmax94 <AT> gmail <DOT> com>
AuthorDate: Mon Apr 25 13:45:28 2011 +0000
Commit:     Maxim Koltsov <maksbotan <AT> gentoo <DOT> org>
CommitDate: Mon Apr 25 13:45:28 2011 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/gentoo-openbsd.git;a=commit;h=c002b65c

Add modified toolchain-binutils.eclass

---
 eclass/autotools.eclass          |  369 +++++++
 eclass/eutils.eclass             | 2008 ++++++++++++++++++++++++++++++++++++++
 eclass/flag-o-matic.eclass       |  780 +++++++++++++++
 eclass/gnuconfig.eclass          |  103 ++
 eclass/libtool.eclass            |  466 +++++++++
 eclass/multilib.eclass           |  455 +++++++++
 eclass/portability.eclass        |  183 ++++
 eclass/prefix.eclass             |   52 +
 eclass/toolchain-binutils.eclass |  418 ++++++++
 eclass/toolchain-funcs.eclass    |  694 +++++++++++++
 eclass/versionator.eclass        |  577 +++++++++++
 11 files changed, 6105 insertions(+), 0 deletions(-)

diff --git a/eclass/autotools.eclass b/eclass/autotools.eclass
new file mode 100644
index 0000000..d58bb80
--- /dev/null
+++ b/eclass/autotools.eclass
@@ -0,0 +1,369 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/eclass/autotools.eclass,v 1.101 2010/08/21 19:39:52 vapier Exp $
+
+# @ECLASS: autotools.eclass
+# @MAINTAINER:
+# base-system@gentoo.org
+# @BLURB: Regenerates auto* build scripts
+# @DESCRIPTION:
+# This eclass is for safely handling autotooled software packages that need to
+# regenerate their build scripts.  All functions will abort in case of errors.
+#
+# NB:  If you add anything, please comment it!
+
+inherit eutils libtool
+
+# @ECLASS-VARIABLE: WANT_AUTOCONF
+# @DESCRIPTION:
+# The major version of autoconf your package needs
+: ${WANT_AUTOCONF:=latest}
+
+# @ECLASS-VARIABLE: WANT_AUTOMAKE
+# @DESCRIPTION:
+# The major version of automake your package needs
+: ${WANT_AUTOMAKE:=latest}
+
+# @ECLASS-VARIABLE: _LATEST_AUTOMAKE
+# @INTERNAL
+# @DESCRIPTION:
+# CONSTANT!
+# The latest major version/slot of automake available on each arch.
+# If a newer version is stable on any arch, and is NOT reflected in this list,
+# then circular dependencies may arise during emerge @system bootstraps.
+# Do NOT change this variable in your ebuilds!
+_LATEST_AUTOMAKE='1.11'
+
+_automake_atom="sys-devel/automake"
+_autoconf_atom="sys-devel/autoconf"
+if [[ -n ${WANT_AUTOMAKE} ]]; then
+	case ${WANT_AUTOMAKE} in
+		none)   _automake_atom="" ;; # some packages don't require automake at all
+		# if you change the "latest" version here, change also autotools_run_tool
+		# this MUST reflect the latest stable major version for each arch!
+		latest) _automake_atom="|| ( `printf '=sys-devel/automake-%s* ' ${_LATEST_AUTOMAKE}` )" ;;
+		*)      _automake_atom="=sys-devel/automake-${WANT_AUTOMAKE}*" ;;
+	esac
+	export WANT_AUTOMAKE
+fi
+
+if [[ -n ${WANT_AUTOCONF} ]] ; then
+	case ${WANT_AUTOCONF} in
+		none)       _autoconf_atom="" ;; # some packages don't require autoconf at all
+		2.1)        _autoconf_atom="=sys-devel/autoconf-${WANT_AUTOCONF}*" ;;
+		# if you change the "latest" version here, change also autotools_run_tool
+		latest|2.5) _autoconf_atom=">=sys-devel/autoconf-2.61" ;;
+		*)          _autoconf_atom="INCORRECT-WANT_AUTOCONF-SETTING-IN-EBUILD" ;;
+	esac
+	export WANT_AUTOCONF
+fi
+
+AUTOTOOLS_DEPEND="${_automake_atom} ${_autoconf_atom}"
+[[ ${CATEGORY}/${PN} != "sys-devel/libtool" ]] && AUTOTOOLS_DEPEND="${AUTOTOOLS_DEPEND} >=sys-devel/libtool-2.2.6b"
+RDEPEND=""
+
+# @ECLASS-VARIABLE: AUTOTOOLS_AUTO_DEPEND
+# @DESCRIPTION:
+# Set to 'no' to disable automatically adding to DEPEND.  This lets
+# ebuilds form conditional depends by using ${AUTOTOOLS_DEPEND} in
+# their own DEPEND string.
+: ${AUTOTOOLS_AUTO_DEPEND:=yes}
+if [[ ${AUTOTOOLS_AUTO_DEPEND} != "no" ]] ; then
+	DEPEND=${AUTOTOOLS_DEPEND}
+fi
+
+unset _automake_atom _autoconf_atom
+
+# @ECLASS-VARIABLE: AM_OPTS
+# @DEFAULT_UNSET
+# @DESCRIPTION:
+# Additional options to pass to automake during
+# eautoreconf call.
+
+# @ECLASS-VARIABLE: AT_NOELIBTOOLIZE
+# @DEFAULT_UNSET
+# @DESCRIPTION:
+# Don't run elibtoolize command if set to 'yes',
+# useful when elibtoolize needs to be ran with
+# particular options
+
+# XXX: M4DIR should be deprecated
+# @ECLASS-VARIABLE: AT_M4DIR
+# @DESCRIPTION:
+# Additional director(y|ies) aclocal should search
+: ${AT_M4DIR:=${M4DIR}}
+
+# @FUNCTION: eautoreconf
+# @DESCRIPTION:
+# This function mimes the behavior of autoreconf, but uses the different
+# eauto* functions to run the tools. It doesn't accept parameters, but
+# the directory with include files can be specified with AT_M4DIR variable.
+#
+# Should do a full autoreconf - normally what most people will be interested in.
+# Also should handle additional directories specified by AC_CONFIG_SUBDIRS.
+eautoreconf() {
+	local x auxdir g
+
+	if [[ -z ${AT_NO_RECURSIVE} ]]; then
+		# Take care of subdirs
+		for x in $(autotools_get_subdirs); do
+			if [[ -d ${x} ]] ; then
+				pushd "${x}" >/dev/null
+				AT_NOELIBTOOLIZE="yes" eautoreconf
+				popd >/dev/null
+			fi
+		done
+	fi
+
+	auxdir=$(autotools_get_auxdir)
+
+	einfo "Running eautoreconf in '${PWD}' ..."
+	[[ -n ${auxdir} ]] && mkdir -p ${auxdir}
+	eaclocal
+	[[ ${CHOST} == *-darwin* ]] && g=g
+	if ${LIBTOOLIZE:-${g}libtoolize} -n --install >& /dev/null ; then
+		_elibtoolize --copy --force --install
+	else
+		_elibtoolize --copy --force
+	fi
+	eautoconf
+	eautoheader
+	FROM_EAUTORECONF="yes" eautomake ${AM_OPTS}
+
+	[[ ${AT_NOELIBTOOLIZE} == "yes" ]] && return 0
+
+	# Call it here to prevent failures due to elibtoolize called _before_
+	# eautoreconf.  We set $S because elibtoolize runs on that #265319
+	S=${PWD} elibtoolize --force
+
+	return 0
+}
+
+# @FUNCTION: eaclocal
+# @DESCRIPTION:
+# These functions runs the autotools using autotools_run_tool with the
+# specified parametes. The name of the tool run is the same of the function
+# without e prefix.
+# They also force installing the support files for safety.
+# Respects AT_M4DIR for additional directories to search for macro's.
+# Always adds ${EPREFIX}/usr/share/aclocal to accommodate situations where
+# aclocal comes from another EPREFIX (for example cross-EPREFIX builds).
+eaclocal() {
+	local aclocal_opts
+
+	local amflags_file
+	for amflags_file in GNUmakefile.am Makefile.am GNUmakefile.in Makefile.in ; do
+		[[ -e ${amflags_file} ]] || continue
+		aclocal_opts=$(sed -n '/^ACLOCAL_AMFLAGS[[:space:]]*=/s:[^=]*=::p' ${amflags_file})
+		eval aclocal_opts=\"${aclocal_opts}\"
+		break
+	done
+
+	if [[ -n ${AT_M4DIR} ]] ; then
+		for x in ${AT_M4DIR} ; do
+			case "${x}" in
+			"-I")
+				# We handle it below
+				;;
+			*)
+				[[ ! -d ${x} ]] && ewarn "eaclocal: '${x}' does not exist"
+				aclocal_opts="${aclocal_opts} -I ${x}"
+				;;
+			esac
+		done
+	fi
+
+	# in some cases (cross-eprefix build), EPREFIX may not be included
+	# by aclocal by default, since it only knows about BPREFIX.
+	local eprefix_include=
+	[[ -d ${EPREFIX}/usr/share/aclocal ]] &&
+		eprefix_include="-I ${EPREFIX}/usr/share/aclocal"
+
+	[[ ! -f aclocal.m4 || -n $(grep -e 'generated.*by aclocal' aclocal.m4) ]] && \
+		autotools_run_tool aclocal "$@" ${aclocal_opts} ${eprefix_include}
+}
+
+# @FUNCTION: _elibtoolize
+# @DESCRIPTION:
+# Runs libtoolize.  Note the '_' prefix .. to not collide with elibtoolize() from
+# libtool.eclass.
+_elibtoolize() {
+	local opts g=
+
+	# Check if we should run libtoolize (AM_PROG_LIBTOOL is an older macro,
+	# check for both it and the current AC_PROG_LIBTOOL)
+	[[ -n $(autotools_check_macro AC_PROG_LIBTOOL AM_PROG_LIBTOOL LT_INIT) ]] || return 0
+
+	[[ -f GNUmakefile.am || -f Makefile.am ]] && opts="--automake"
+
+	[[ ${CHOST} == *-darwin* ]] && g=g
+	autotools_run_tool ${LIBTOOLIZE:-${g}libtoolize} "$@" ${opts}
+
+	# Need to rerun aclocal
+	eaclocal
+}
+
+# @FUNCTION: eautoheader
+# @DESCRIPTION:
+# Runs autoheader.
+eautoheader() {
+	# Check if we should run autoheader
+	[[ -n $(autotools_check_macro "AC_CONFIG_HEADERS") ]] || return 0
+	NO_FAIL=1 autotools_run_tool autoheader "$@"
+}
+
+# @FUNCTION: eautoconf
+# @DESCRIPTION:
+# Runs autoconf.
+eautoconf() {
+	if [[ ! -f configure.ac && ! -f configure.in ]] ; then
+		echo
+		eerror "No configure.{ac,in} present in '${PWD}'!"
+		echo
+		die "No configure.{ac,in} present!"
+	fi
+
+	autotools_run_tool autoconf "$@"
+}
+
+# @FUNCTION: eautomake
+# @DESCRIPTION:
+# Runs automake.
+eautomake() {
+	local extra_opts
+	local makefile_name
+
+	# Run automake if:
+	#  - a Makefile.am type file exists
+	#  - a Makefile.in type file exists and the configure
+	#    script is using the AM_INIT_AUTOMAKE directive
+	for makefile_name in {GNUmakefile,{M,m}akefile}.{am,in} "" ; do
+		[[ -f ${makefile_name} ]] && break
+	done
+	[[ -z ${makefile_name} ]] && return 0
+
+	if [[ ${makefile_name} == *.in ]] ; then
+		if ! grep -qs AM_INIT_AUTOMAKE configure.?? ; then
+			return 0
+		fi
+
+	elif [[ -z ${FROM_EAUTORECONF} && -f ${makefile_name%.am}.in ]]; then
+		local used_automake
+		local installed_automake
+
+		installed_automake=$(WANT_AUTOMAKE= automake --version | head -n 1 | \
+			sed -e 's:.*(GNU automake) ::')
+		used_automake=$(head -n 1 < ${makefile_name%.am}.in | \
+			sed -e 's:.*by automake \(.*\) from .*:\1:')
+
+		if [[ ${installed_automake} != ${used_automake} ]]; then
+			einfo "Automake used for the package (${used_automake}) differs from"
+			einfo "the installed version (${installed_automake})."
+			eautoreconf
+			return 0
+		fi
+	fi
+
+	[[ -f INSTALL && -f AUTHORS && -f ChangeLog && -f NEWS ]] \
+		|| extra_opts="${extra_opts} --foreign"
+
+	# --force-missing seems not to be recognized by some flavours of automake
+	autotools_run_tool automake --add-missing --copy ${extra_opts} "$@"
+}
+
+# @FUNCTION: eautopoint
+# @DESCRIPTION:
+# Runs autopoint (from the gettext package).
+eautopoint() {
+	autotools_run_tool autopoint "$@"
+}
+
+# Internal function to run an autotools' tool
+autotools_run_tool() {
+	if [[ ${EBUILD_PHASE} != "unpack" && ${EBUILD_PHASE} != "prepare" ]]; then
+		ewarn "QA Warning: running $1 in ${EBUILD_PHASE} phase"
+	fi
+
+	# We do the "latest" -> version switch here because it solves
+	# possible order problems, see bug #270010 as an example.
+	# During bootstrap in prefix there might be no automake merged yet
+	# due to --nodeps, but still available somewhere in PATH.
+	# For example, ncurses needs local libtool on aix and hpux.
+	if [[ ${WANT_AUTOMAKE} == "latest" ]] &&
+		ROOT=/ has_version "sys-devel/automake"; then
+		local pv
+		for pv in ${_LATEST_AUTOMAKE} ; do
+			# has_version respects ROOT, but in this case, we don't want it to,
+			# thus "ROOT=/" prefix:
+			ROOT=/ has_version "=sys-devel/automake-${pv}*" && export WANT_AUTOMAKE="$pv"
+		done
+		[[ ${WANT_AUTOMAKE} == "latest" ]] && \
+			die "Cannot find the latest automake! Tried ${_LATEST_AUTOMAKE}"
+	fi
+	[[ ${WANT_AUTOCONF} == "latest" ]] && export WANT_AUTOCONF=2.5
+
+	local STDERR_TARGET="${T}/$1.out"
+	# most of the time, there will only be one run, but if there are
+	# more, make sure we get unique log filenames
+	if [[ -e ${STDERR_TARGET} ]] ; then
+		STDERR_TARGET="${T}/$1-$$.out"
+	fi
+
+	printf "***** $1 *****\n***** PWD: ${PWD}\n***** $*\n\n" > "${STDERR_TARGET}"
+
+	ebegin "Running $@"
+	"$@" >> "${STDERR_TARGET}" 2>&1
+	eend $?
+
+	if [[ $? != 0 && ${NO_FAIL} != 1 ]] ; then
+		echo
+		eerror "Failed Running $1 !"
+		eerror
+		eerror "Include in your bugreport the contents of:"
+		eerror
+		eerror "  ${STDERR_TARGET}"
+		echo
+		die "Failed Running $1 !"
+	fi
+}
+
+# Internal function to check for support
+autotools_check_macro() {
+	[[ -f configure.ac || -f configure.in ]] || return 0
+	local macro
+	for macro ; do
+		WANT_AUTOCONF="2.5" autoconf --trace="${macro}" 2>/dev/null
+	done
+	return 0
+}
+
+# Internal function to get additional subdirs to configure
+autotools_get_subdirs() {
+	local subdirs_scan_out
+
+	subdirs_scan_out=$(autotools_check_macro "AC_CONFIG_SUBDIRS")
+	[[ -n ${subdirs_scan_out} ]] || return 0
+
+	echo "${subdirs_scan_out}" | gawk \
+	'($0 !~ /^[[:space:]]*(#|dnl)/) {
+		if (match($0, /AC_CONFIG_SUBDIRS:(.*)$/, res))
+			print res[1]
+	}' | uniq
+
+	return 0
+}
+
+autotools_get_auxdir() {
+	local auxdir_scan_out
+
+	auxdir_scan_out=$(autotools_check_macro "AC_CONFIG_AUX_DIR")
+	[[ -n ${auxdir_scan_out} ]] || return 0
+
+	echo ${auxdir_scan_out} | gawk \
+	'($0 !~ /^[[:space:]]*(#|dnl)/) {
+		if (match($0, /AC_CONFIG_AUX_DIR:(.*)$/, res))
+			print res[1]
+	}' | uniq
+
+	return 0
+}

diff --git a/eclass/eutils.eclass b/eclass/eutils.eclass
new file mode 100644
index 0000000..a28c7e2
--- /dev/null
+++ b/eclass/eutils.eclass
@@ -0,0 +1,2008 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/eclass/eutils.eclass,v 1.355 2011/03/18 20:36:37 vapier Exp $
+
+# @ECLASS: eutils.eclass
+# @MAINTAINER:
+# base-system@gentoo.org
+# @BLURB: many extra (but common) functions that are used in ebuilds
+# @DESCRIPTION:
+# The eutils eclass contains a suite of functions that complement
+# the ones that ebuild.sh already contain.  The idea is that the functions
+# are not required in all ebuilds but enough utilize them to have a common
+# home rather than having multiple ebuilds implementing the same thing.
+#
+# Due to the nature of this eclass, some functions may have maintainers
+# different from the overall eclass!
+
+inherit multilib portability
+
+DESCRIPTION="Based on the ${ECLASS} eclass"
+
+if has "${EAPI:-0}" 0 1 2; then
+
+# @FUNCTION: epause
+# @USAGE: [seconds]
+# @DESCRIPTION:
+# Sleep for the specified number of seconds (default of 5 seconds).  Useful when
+# printing a message the user should probably be reading and often used in
+# conjunction with the ebeep function.  If the EPAUSE_IGNORE env var is set,
+# don't wait at all. Defined in EAPIs 0 1 and 2.
+epause() {
+	[[ -z ${EPAUSE_IGNORE} ]] && sleep ${1:-5}
+}
+
+# @FUNCTION: ebeep
+# @USAGE: [number of beeps]
+# @DESCRIPTION:
+# Issue the specified number of beeps (default of 5 beeps).  Useful when
+# printing a message the user should probably be reading and often used in
+# conjunction with the epause function.  If the EBEEP_IGNORE env var is set,
+# don't beep at all. Defined in EAPIs 0 1 and 2.
+ebeep() {
+	local n
+	if [[ -z ${EBEEP_IGNORE} ]] ; then
+		for ((n=1 ; n <= ${1:-5} ; n++)) ; do
+			echo -ne "\a"
+			sleep 0.1 &>/dev/null ; sleep 0,1 &>/dev/null
+			echo -ne "\a"
+			sleep 1
+		done
+	fi
+}
+
+else
+
+ebeep() {
+	ewarn "QA Notice: ebeep is not defined in EAPI=${EAPI}, please file a bug at http://bugs.gentoo.org"
+}
+
+epause() {
+	ewarn "QA Notice: epause is not defined in EAPI=${EAPI}, please file a bug at http://bugs.gentoo.org"
+}
+
+fi
+
+# @FUNCTION: eqawarn
+# @USAGE: [message]
+# @DESCRIPTION:
+# Proxy to einfo for package managers that don't provide eqawarn and use the PM
+# implementation if available.
+if ! declare -F eqawarn >/dev/null ; then
+	eqawarn() {
+		einfo "$@"
+	}
+fi
+
+# @FUNCTION: ecvs_clean
+# @USAGE: [list of dirs]
+# @DESCRIPTION:
+# Remove CVS directories recursiveley.  Useful when a source tarball contains
+# internal CVS directories.  Defaults to $PWD.
+ecvs_clean() {
+	[[ -z $* ]] && set -- .
+	find "$@" -type d -name 'CVS' -prune -print0 | xargs -0 rm -rf
+	find "$@" -type f -name '.cvs*' -print0 | xargs -0 rm -rf
+}
+
+# @FUNCTION: esvn_clean
+# @USAGE: [list of dirs]
+# @DESCRIPTION:
+# Remove .svn directories recursiveley.  Useful when a source tarball contains
+# internal Subversion directories.  Defaults to $PWD.
+esvn_clean() {
+	[[ -z $* ]] && set -- .
+	find "$@" -type d -name '.svn' -prune -print0 | xargs -0 rm -rf
+}
+
+# @FUNCTION: eshopts_push
+# @USAGE: [options to `set` or `shopt`]
+# @DESCRIPTION:
+# Often times code will want to enable a shell option to change code behavior.
+# Since changing shell options can easily break other pieces of code (which
+# assume the default state), eshopts_push is used to (1) push the current shell
+# options onto a stack and (2) pass the specified arguments to set.
+#
+# If the first argument is '-s' or '-u', we assume you want to call `shopt`
+# rather than `set` as there are some options only available via that.
+#
+# A common example is to disable shell globbing so that special meaning/care
+# may be used with variables/arguments to custom functions.  That would be:
+# @CODE
+#		eshopts_push -o noglob
+#		for x in ${foo} ; do
+#			if ...some check... ; then
+#				eshopts_pop
+#				return 0
+#			fi
+#		done
+#		eshopts_pop
+# @CODE
+eshopts_push() {
+	# have to assume __ESHOPTS_SAVE__ isn't screwed with
+	# as a `declare -a` here will reset its value
+	local i=${#__ESHOPTS_SAVE__[@]}
+	if [[ $1 == -[su] ]] ; then
+		__ESHOPTS_SAVE__[$i]=$(shopt -p)
+		[[ $# -eq 0 ]] && return 0
+		shopt "$@" || die "eshopts_push: bad options to shopt: $*"
+	else
+		__ESHOPTS_SAVE__[$i]=$-
+		[[ $# -eq 0 ]] && return 0
+		set "$@" || die "eshopts_push: bad options to set: $*"
+	fi
+}
+
+# @FUNCTION: eshopts_pop
+# @USAGE:
+# @DESCRIPTION:
+# Restore the shell options to the state saved with the corresponding
+# eshopts_push call.  See that function for more details.
+eshopts_pop() {
+	[[ $# -ne 0 ]] && die "eshopts_pop takes no arguments"
+	local i=$(( ${#__ESHOPTS_SAVE__[@]} - 1 ))
+	[[ ${i} -eq -1 ]] && die "eshopts_{push,pop}: unbalanced pair"
+	local s=${__ESHOPTS_SAVE__[$i]}
+	unset __ESHOPTS_SAVE__[$i]
+	if [[ ${s} == "shopt -"* ]] ; then
+		eval "${s}" || die "eshopts_pop: sanity: invalid shopt options: ${s}"
+	else
+		set +$-     || die "eshopts_pop: sanity: invalid shell settings: $-"
+		set -${s}   || die "eshopts_pop: sanity: unable to restore saved shell settings: ${s}"
+	fi
+}
+
+# @VARIABLE: EPATCH_SOURCE
+# @DESCRIPTION:
+# Default directory to search for patches.
+EPATCH_SOURCE="${WORKDIR}/patch"
+# @VARIABLE: EPATCH_SUFFIX
+# @DESCRIPTION:
+# Default extension for patches (do not prefix the period yourself).
+EPATCH_SUFFIX="patch.bz2"
+# @VARIABLE: EPATCH_OPTS
+# @DESCRIPTION:
+# Default options for patch:
+# @CODE
+#	-g0 - keep RCS, ClearCase, Perforce and SCCS happy #24571
+#	--no-backup-if-mismatch - do not leave .orig files behind
+#	-E - automatically remove empty files
+# @CODE
+EPATCH_OPTS="-g0 -E --no-backup-if-mismatch"
+# @VARIABLE: EPATCH_EXCLUDE
+# @DESCRIPTION:
+# List of patches not to apply.	 Note this is only file names,
+# and not the full path.  Globs accepted.
+EPATCH_EXCLUDE=""
+# @VARIABLE: EPATCH_SINGLE_MSG
+# @DESCRIPTION:
+# Change the printed message for a single patch.
+EPATCH_SINGLE_MSG=""
+# @VARIABLE: EPATCH_MULTI_MSG
+# @DESCRIPTION:
+# Change the printed message for multiple patches.
+EPATCH_MULTI_MSG="Applying various patches (bugfixes/updates) ..."
+# @VARIABLE: EPATCH_FORCE
+# @DESCRIPTION:
+# Only require patches to match EPATCH_SUFFIX rather than the extended
+# arch naming style.
+EPATCH_FORCE="no"
+
+# @FUNCTION: epatch
+# @USAGE: [patches] [dirs of patches]
+# @DESCRIPTION:
+# epatch is designed to greatly simplify the application of patches.  It can
+# process patch files directly, or directories of patches.  The patches may be
+# compressed (bzip/gzip/etc...) or plain text.  You generally need not specify
+# the -p option as epatch will automatically attempt -p0 to -p5 until things
+# apply successfully.
+#
+# If you do not specify any options, then epatch will default to the directory
+# specified by EPATCH_SOURCE.
+#
+# When processing directories, epatch will apply all patches that match:
+# @CODE
+#	if ${EPATCH_FORCE} != "yes"
+#		??_${ARCH}_foo.${EPATCH_SUFFIX}
+#	else
+#		*.${EPATCH_SUFFIX}
+# @CODE
+# The leading ?? are typically numbers used to force consistent patch ordering.
+# The arch field is used to apply patches only for the host architecture with
+# the special value of "all" means apply for everyone.  Note that using values
+# other than "all" is highly discouraged -- you should apply patches all the
+# time and let architecture details be detected at configure/compile time.
+#
+# If EPATCH_SUFFIX is empty, then no period before it is implied when searching
+# for patches to apply.
+#
+# Refer to the other EPATCH_xxx variables for more customization of behavior.
+epatch() {
+	_epatch_draw_line() {
+		# create a line of same length as input string
+		[[ -z $1 ]] && set "$(printf "%65s" '')"
+		echo "${1//?/=}"
+	}
+
+	unset P4CONFIG P4PORT P4USER # keep perforce at bay #56402
+
+	# Let the rest of the code process one user arg at a time --
+	# each arg may expand into multiple patches, and each arg may
+	# need to start off with the default global EPATCH_xxx values
+	if [[ $# -gt 1 ]] ; then
+		local m
+		for m in "$@" ; do
+			epatch "${m}"
+		done
+		return 0
+	fi
+
+	local SINGLE_PATCH="no"
+	# no args means process ${EPATCH_SOURCE}
+	[[ $# -eq 0 ]] && set -- "${EPATCH_SOURCE}"
+
+	if [[ -f $1 ]] ; then
+		SINGLE_PATCH="yes"
+		set -- "$1"
+		# Use the suffix from the single patch (localize it); the code
+		# below will find the suffix for us
+		local EPATCH_SUFFIX=$1
+
+	elif [[ -d $1 ]] ; then
+		# Some people like to make dirs of patches w/out suffixes (vim)
+		set -- "$1"/*${EPATCH_SUFFIX:+."${EPATCH_SUFFIX}"}
+
+	else
+		# sanity check ... if it isn't a dir or file, wtf man ?
+		[[ $# -ne 0 ]] && EPATCH_SOURCE=$1
+		echo
+		eerror "Cannot find \$EPATCH_SOURCE!  Value for \$EPATCH_SOURCE is:"
+		eerror
+		eerror "  ${EPATCH_SOURCE}"
+		eerror "  ( ${EPATCH_SOURCE##*/} )"
+		echo
+		die "Cannot find \$EPATCH_SOURCE!"
+	fi
+
+	local PIPE_CMD
+	case ${EPATCH_SUFFIX##*\.} in
+		xz)      PIPE_CMD="xz -dc"    ;;
+		lzma)    PIPE_CMD="lzma -dc"  ;;
+		bz2)     PIPE_CMD="bzip2 -dc" ;;
+		gz|Z|z)  PIPE_CMD="gzip -dc"  ;;
+		ZIP|zip) PIPE_CMD="unzip -p"  ;;
+		*)       ;;
+	esac
+
+	[[ ${SINGLE_PATCH} == "no" ]] && einfo "${EPATCH_MULTI_MSG}"
+
+	local x
+	for x in "$@" ; do
+		# If the patch dir given contains subdirs, or our EPATCH_SUFFIX
+		# didn't match anything, ignore continue on
+		[[ ! -f ${x} ]] && continue
+
+		local patchname=${x##*/}
+
+		# Apply single patches, or forced sets of patches, or
+		# patches with ARCH dependant names.
+		#	???_arch_foo.patch
+		# Else, skip this input altogether
+		local a=${patchname#*_} # strip the ???_
+		a=${a%%_*}              # strip the _foo.patch
+		if ! [[ ${SINGLE_PATCH} == "yes" || \
+				${EPATCH_FORCE} == "yes" || \
+				${a} == all     || \
+				${a} == ${ARCH} ]]
+		then
+			continue
+		fi
+
+		# Let people filter things dynamically
+		if [[ -n ${EPATCH_EXCLUDE} ]] ; then
+			# let people use globs in the exclude
+			eshopts_push -o noglob
+
+			local ex
+			for ex in ${EPATCH_EXCLUDE} ; do
+				if [[ ${patchname} == ${ex} ]] ; then
+					eshopts_pop
+					continue 2
+				fi
+			done
+
+			eshopts_pop
+		fi
+
+		if [[ ${SINGLE_PATCH} == "yes" ]] ; then
+			if [[ -n ${EPATCH_SINGLE_MSG} ]] ; then
+				einfo "${EPATCH_SINGLE_MSG}"
+			else
+				einfo "Applying ${patchname} ..."
+			fi
+		else
+			einfo "  ${patchname} ..."
+		fi
+
+		# most of the time, there will only be one run per unique name,
+		# but if there are more, make sure we get unique log filenames
+		local STDERR_TARGET="${T}/${patchname}.out"
+		if [[ -e ${STDERR_TARGET} ]] ; then
+			STDERR_TARGET="${T}/${patchname}-$$.out"
+		fi
+
+		printf "***** %s *****\n\n" "${patchname}" > "${STDERR_TARGET}"
+
+		# Decompress the patch if need be
+		local count=0
+		local PATCH_TARGET
+		if [[ -n ${PIPE_CMD} ]] ; then
+			PATCH_TARGET="${T}/$$.patch"
+			echo "PIPE_COMMAND:  ${PIPE_CMD} ${x} > ${PATCH_TARGET}" >> "${STDERR_TARGET}"
+
+			if ! (${PIPE_CMD} "${x}" > "${PATCH_TARGET}") >> "${STDERR_TARGET}" 2>&1 ; then
+				echo
+				eerror "Could not extract patch!"
+				#die "Could not extract patch!"
+				count=5
+				break
+			fi
+		else
+			PATCH_TARGET=${x}
+		fi
+
+		# Check for absolute paths in patches.  If sandbox is disabled,
+		# people could (accidently) patch files in the root filesystem.
+		# Or trigger other unpleasantries #237667.  So disallow -p0 on
+		# such patches.
+		local abs_paths=$(egrep -n '^[-+]{3} /' "${PATCH_TARGET}" | awk '$2 != "/dev/null" { print }')
+		if [[ -n ${abs_paths} ]] ; then
+			count=1
+			printf "NOTE: skipping -p0 due to absolute paths in patch:\n%s\n" "${abs_paths}" >> "${STDERR_TARGET}"
+		fi
+		# Similar reason, but with relative paths.
+		local rel_paths=$(egrep -n '^[-+]{3} [^	]*[.][.]/' "${PATCH_TARGET}")
+		if [[ -n ${rel_paths} ]] ; then
+			eqawarn "QA Notice: Your patch uses relative paths '../'."
+			eqawarn " In the future this will cause a failure."
+			eqawarn "${rel_paths}"
+		fi
+
+		# Dynamically detect the correct -p# ... i'm lazy, so shoot me :/
+		while [[ ${count} -lt 5 ]] ; do
+			# Generate some useful debug info ...
+			(
+			_epatch_draw_line "***** ${patchname} *****"
+			echo
+			echo "PATCH COMMAND:  patch -p${count} ${EPATCH_OPTS} < '${PATCH_TARGET}'"
+			echo
+			_epatch_draw_line "***** ${patchname} *****"
+			) >> "${STDERR_TARGET}"
+
+			if (patch -p${count} ${EPATCH_OPTS} --dry-run -f < "${PATCH_TARGET}") >> "${STDERR_TARGET}" 2>&1 ; then
+				(
+				_epatch_draw_line "***** ${patchname} *****"
+				echo
+				echo "ACTUALLY APPLYING ${patchname} ..."
+				echo
+				_epatch_draw_line "***** ${patchname} *****"
+				patch -p${count} ${EPATCH_OPTS} < "${PATCH_TARGET}" 2>&1
+				) >> "${STDERR_TARGET}"
+
+				if [ $? -ne 0 ] ; then
+					echo
+					eerror "A dry-run of patch command succeeded, but actually"
+					eerror "applying the patch failed!"
+					#die "Real world sux compared to the dreamworld!"
+					count=5
+				fi
+				break
+			fi
+
+			: $(( count++ ))
+		done
+
+		# if we had to decompress the patch, delete the temp one
+		if [[ -n ${PIPE_CMD} ]] ; then
+			rm -f "${PATCH_TARGET}"
+		fi
+
+		if [[ ${count} -ge 5 ]] ; then
+			echo
+			eerror "Failed Patch: ${patchname} !"
+			eerror " ( ${PATCH_TARGET} )"
+			eerror
+			eerror "Include in your bugreport the contents of:"
+			eerror
+			eerror "  ${STDERR_TARGET}"
+			echo
+			die "Failed Patch: ${patchname}!"
+		fi
+
+		# if everything worked, delete the patch log
+		rm -f "${STDERR_TARGET}"
+		eend 0
+	done
+
+	[[ ${SINGLE_PATCH} == "no" ]] && einfo "Done with patching"
+	: # everything worked
+}
+epatch_user() {
+	[[ $# -ne 0 ]] && die "epatch_user takes no options"
+
+	# don't clobber any EPATCH vars that the parent might want
+	local EPATCH_SOURCE check base=${PORTAGE_CONFIGROOT%/}/etc/portage/patches
+	for check in {${CATEGORY}/${PF},${CATEGORY}/${P},${CATEGORY}/${PN}}; do
+		EPATCH_SOURCE=${base}/${CTARGET}/${check}
+		[[ -r ${EPATCH_SOURCE} ]] || EPATCH_SOURCE=${base}/${CHOST}/${check}
+		[[ -r ${EPATCH_SOURCE} ]] || EPATCH_SOURCE=${base}/${check}
+		if [[ -d ${EPATCH_SOURCE} ]] ; then
+			EPATCH_SOURCE=${EPATCH_SOURCE} \
+			EPATCH_SUFFIX="patch" \
+			EPATCH_FORCE="yes" \
+			EPATCH_MULTI_MSG="Applying user patches from ${EPATCH_SOURCE} ..." \
+			epatch
+			return 0
+		fi
+	done
+	return 1
+}
+
+# @FUNCTION: emktemp
+# @USAGE: [temp dir]
+# @DESCRIPTION:
+# Cheap replacement for when debianutils (and thus mktemp)
+# does not exist on the users system.
+emktemp() {
+	local exe="touch"
+	[[ $1 == -d ]] && exe="mkdir" && shift
+	local topdir=$1
+
+	if [[ -z ${topdir} ]] ; then
+		[[ -z ${T} ]] \
+			&& topdir="/tmp" \
+			|| topdir=${T}
+	fi
+
+	if ! type -P mktemp > /dev/null ; then
+		# system lacks `mktemp` so we have to fake it
+		local tmp=/
+		while [[ -e ${tmp} ]] ; do
+			tmp=${topdir}/tmp.${RANDOM}.${RANDOM}.${RANDOM}
+		done
+		${exe} "${tmp}" || ${exe} -p "${tmp}"
+		echo "${tmp}"
+	else
+		# the args here will give slightly wierd names on BSD,
+		# but should produce a usable file on all userlands
+		if [[ ${exe} == "touch" ]] ; then
+			TMPDIR="${topdir}" mktemp -t tmp.XXXXXXXXXX
+		else
+			TMPDIR="${topdir}" mktemp -dt tmp.XXXXXXXXXX
+		fi
+	fi
+}
+
+# @FUNCTION: egetent
+# @USAGE: <database> <key>
+# @MAINTAINER:
+# base-system@gentoo.org (Linux)
+# Joe Jezak <josejx@gmail.com> (OS X)
+# usata@gentoo.org (OS X)
+# Aaron Walker <ka0ttic@gentoo.org> (FreeBSD)
+# @DESCRIPTION:
+# Small wrapper for getent (Linux),
+# nidump (< Mac OS X 10.5), dscl (Mac OS X 10.5),
+# and pw (FreeBSD) used in enewuser()/enewgroup()
+egetent() {
+	case ${CHOST} in
+	*-darwin[678])
+		case "$2" in
+		*[!0-9]*) # Non numeric
+			nidump $1 . | awk -F":" "{ if (\$1 ~ /^$2$/) {print \$0;exit;} }"
+			;;
+		*)	# Numeric
+			nidump $1 . | awk -F":" "{ if (\$3 == $2) {print \$0;exit;} }"
+			;;
+		esac
+		;;
+	*-darwin*)
+		local mytype=$1
+		[[ "passwd" == $mytype ]] && mytype="Users"
+		[[ "group" == $mytype ]] && mytype="Groups"
+		case "$2" in
+		*[!0-9]*) # Non numeric
+			dscl . -read /$mytype/$2 2>/dev/null |grep RecordName
+			;;
+		*)	# Numeric
+			local mykey="UniqueID"
+			[[ $mytype == "Groups" ]] && mykey="PrimaryGroupID"
+			dscl . -search /$mytype $mykey $2 2>/dev/null
+			;;
+		esac
+		;;
+	*-freebsd*|*-dragonfly*)
+		local opts action="user"
+		[[ $1 == "passwd" ]] || action="group"
+
+		# lookup by uid/gid
+		if [[ $2 == [[:digit:]]* ]] ; then
+			[[ ${action} == "user" ]] && opts="-u" || opts="-g"
+		fi
+
+		pw show ${action} ${opts} "$2" -q
+		;;
+	*-netbsd*|*-openbsd*)
+		grep "$2:\*:" /etc/$1
+		;;
+	*)
+		type -p nscd >& /dev/null && nscd -i "$1"
+		getent "$1" "$2"
+		;;
+	esac
+}
+
+# @FUNCTION: enewuser
+# @USAGE: <user> [uid] [shell] [homedir] [groups] [params]
+# @DESCRIPTION:
+# Same as enewgroup, you are not required to understand how to properly add
+# a user to the system.  The only required parameter is the username.
+# Default uid is (pass -1 for this) next available, default shell is
+# /bin/false, default homedir is /dev/null, there are no default groups,
+# and default params sets the comment as 'added by portage for ${PN}'.
+enewuser() {
+	# in Prefix Portage, we may be unprivileged, such that we can't handle this
+	# NOTE(review): 'print rootuid' is python-2 syntax; with a python3
+	# 'python' binary, rootuid comes back empty and we take the warning
+	# path below -- confirm that is the intended fallback.
+	local rootuid
+	rootuid=$(python -c 'from portage.const import rootuid; print rootuid')
+	if [[ ${rootuid} != 0 ]] ; then
+		ewarn "'enewuser()' disabled in Prefixed Portage with non root user"
+		return 0
+	fi
+
+	# user database changes must happen in pkg_* phases so they hit the live system
+	case ${EBUILD_PHASE} in
+		unpack|compile|test|install)
+		eerror "'enewuser()' called from '${EBUILD_PHASE}()' which is not a pkg_* function."
+		eerror "Package fails at QA and at life.  Please file a bug."
+		die "Bad package!  enewuser is only for use in pkg_* functions!"
+	esac
+
+	# get the username
+	local euser=$1; shift
+	if [[ -z ${euser} ]] ; then
+		eerror "No username specified !"
+		die "Cannot call enewuser without a username"
+	fi
+
+	# lets see if the username already exists
+	if [[ -n $(egetent passwd "${euser}") ]] ; then
+		return 0
+	fi
+	einfo "Adding user '${euser}' to your system ..."
+
+	# options to pass to useradd
+	local opts=
+
+	# handle uid: empty/-1 means "next available"; an already-taken uid
+	# also falls back to scanning the 101..999 system range for a free one
+	local euid=$1; shift
+	if [[ -n ${euid} && ${euid} != -1 ]] ; then
+		if [[ ${euid} -gt 0 ]] ; then
+			if [[ -n $(egetent passwd ${euid}) ]] ; then
+				euid="next"
+			fi
+		else
+			eerror "Userid given but is not greater than 0 !"
+			die "${euid} is not a valid UID"
+		fi
+	else
+		euid="next"
+	fi
+	if [[ ${euid} == "next" ]] ; then
+		for ((euid = 101; euid <= 999; euid++)); do
+			[[ -z $(egetent passwd ${euid}) ]] && break
+		done
+	fi
+	opts="${opts} -u ${euid}"
+	einfo " - Userid: ${euid}"
+
+	# handle shell: empty/-1 picks the first available nologin-style shell
+	local eshell=$1; shift
+	if [[ ! -z ${eshell} ]] && [[ ${eshell} != "-1" ]] ; then
+		if [[ ! -e ${EROOT}${eshell} ]] ; then
+			eerror "A shell was specified but it does not exist !"
+			die "${eshell} does not exist in ${EROOT}"
+		fi
+		if [[ ${eshell} == */false || ${eshell} == */nologin ]] ; then
+			eerror "Do not specify ${eshell} yourself, use -1"
+			die "Pass '-1' as the shell parameter"
+		fi
+	else
+		for shell in /sbin/nologin /usr/sbin/nologin /bin/false /usr/bin/false /dev/null ; do
+			[[ -x ${ROOT}${shell} ]] && break
+		done
+
+		if [[ ${shell} == "/dev/null" ]] ; then
+			eerror "Unable to identify the shell to use, proceeding with userland default."
+			case ${USERLAND} in
+				GNU) shell="/bin/false" ;;
+				BSD) shell="/sbin/nologin" ;;
+				Darwin) shell="/usr/sbin/nologin" ;;
+				*) die "Unable to identify the default shell for userland ${USERLAND}"
+			esac
+		fi
+
+		eshell=${shell}
+	fi
+	einfo " - Shell: ${eshell}"
+	opts="${opts} -s ${eshell}"
+
+	# handle homedir
+	local ehome=$1; shift
+	if [[ -z ${ehome} ]] || [[ ${ehome} == "-1" ]] ; then
+		ehome="/dev/null"
+	fi
+	einfo " - Home: ${ehome}"
+	opts="${opts} -d ${ehome}"
+
+	# handle groups: first listed group becomes primary (-g), rest extra (-G)
+	local egroups=$1; shift
+	if [[ ! -z ${egroups} ]] ; then
+		local oldifs=${IFS}
+		local defgroup="" exgroups=""
+
+		export IFS=","
+		for g in ${egroups} ; do
+			export IFS=${oldifs}
+			if [[ -z $(egetent group "${g}") ]] ; then
+				eerror "You must add group ${g} to the system first"
+				die "${g} is not a valid GID"
+			fi
+			if [[ -z ${defgroup} ]] ; then
+				defgroup=${g}
+			else
+				exgroups="${exgroups},${g}"
+			fi
+			export IFS=","
+		done
+		export IFS=${oldifs}
+
+		opts="${opts} -g ${defgroup}"
+		if [[ ! -z ${exgroups} ]] ; then
+			# ${exgroups:1} strips the leading comma accumulated above
+			opts="${opts} -G ${exgroups:1}"
+		fi
+	else
+		egroups="(none)"
+	fi
+	einfo " - Groups: ${egroups}"
+
+	# handle extra and add the user; the sandbox must be off so the user
+	# database outside of ${D} can be written to
+	local oldsandbox=${SANDBOX_ON}
+	export SANDBOX_ON="0"
+	case ${CHOST} in
+	*-darwin*)
+		### Make the user
+		if [[ -z $@ ]] ; then
+			dscl . create /users/${euser} uid ${euid}
+			dscl . create /users/${euser} shell ${eshell}
+			dscl . create /users/${euser} home ${ehome}
+			dscl . create /users/${euser} realname "added by portage for ${PN}"
+			### Add the user to the groups specified
+			local oldifs=${IFS}
+			export IFS=","
+			for g in ${egroups} ; do
+				dscl . merge /groups/${g} users ${euser}
+			done
+			export IFS=${oldifs}
+		else
+			einfo "Extra options are not supported on Darwin yet"
+			einfo "Please report the ebuild along with the info below"
+			einfo "eextra: $@"
+			die "Required function missing"
+		fi
+		;;
+	*-freebsd*|*-dragonfly*)
+		if [[ -z $@ ]] ; then
+			# FIX: 'die' must be the failure handler, not a further argument
+			# to pw(8) -- the old trailing backslash passed it to useradd.
+			pw useradd ${euser} ${opts} \
+				-c "added by portage for ${PN}" || die "enewuser failed"
+		else
+			einfo " - Extra: $@"
+			pw useradd ${euser} ${opts} \
+				"$@" || die "enewuser failed"
+		fi
+		;;
+
+	*-netbsd*)
+		if [[ -z $@ ]] ; then
+			useradd ${opts} ${euser} || die "enewuser failed"
+		else
+			einfo " - Extra: $@"
+			useradd ${opts} ${euser} "$@" || die "enewuser failed"
+		fi
+		;;
+
+	*-openbsd*)
+		# NOTE(review): with no groups given, egroups is "(none)" here and is
+		# passed to -g verbatim -- confirm callers always supply a group on OpenBSD.
+		if [[ -z $@ ]] ; then
+			useradd -u ${euid} -s ${eshell} \
+				-d ${ehome} -c "Added by portage for ${PN}" \
+				-g ${egroups} ${euser} || die "enewuser failed"
+		else
+			einfo " - Extra: $@"
+			useradd -u ${euid} -s ${eshell} \
+				-d ${ehome} -c "Added by portage for ${PN}" \
+				-g ${egroups} ${euser} "$@" || die "enewuser failed"
+		fi
+		;;
+
+	*)
+		if [[ -z $@ ]] ; then
+			useradd -r ${opts} \
+				-c "added by portage for ${PN}" \
+				${euser} \
+				|| die "enewuser failed"
+		else
+			einfo " - Extra: $@"
+			useradd -r ${opts} "$@" \
+				${euser} \
+				|| die "enewuser failed"
+		fi
+		;;
+	esac
+
+	if [[ ! -e ${EROOT}/${ehome} ]] ; then
+		einfo " - Creating ${ehome} in ${EROOT}"
+		mkdir -p "${EROOT}/${ehome}"
+		chown ${euser} "${EROOT}/${ehome}"
+		chmod 755 "${EROOT}/${ehome}"
+	fi
+
+	export SANDBOX_ON=${oldsandbox}
+}
+
+# @FUNCTION: enewgroup
+# @USAGE: <group> [gid]
+# @DESCRIPTION:
+# This function does not require you to understand how to properly add a
+# group to the system.  Just give it a group name to add and enewgroup will
+# do the rest.  You may specify the gid for the group or allow the group to
+# allocate the next available one.
+enewgroup() {
+	# in Prefix Portage, we may be unprivileged, such that we can't handle this
+	# NOTE(review): 'print rootuid' is python-2 syntax; with a python3
+	# 'python' this yields an empty rootuid and we bail out below -- confirm.
+	rootuid=$(python -c 'from portage.const import rootuid; print rootuid')
+	if [[ ${rootuid} != 0 ]] ; then
+		ewarn "'enewgroup()' disabled in Prefixed Portage with non root user"
+		return 0
+	fi
+
+	# group database changes must happen in pkg_* phases so they hit the live system
+	case ${EBUILD_PHASE} in
+		unpack|compile|test|install)
+		eerror "'enewgroup()' called from '${EBUILD_PHASE}()' which is not a pkg_* function."
+		eerror "Package fails at QA and at life.  Please file a bug."
+		die "Bad package!  enewgroup is only for use in pkg_* functions!"
+	esac
+
+	# get the group
+	local egroup="$1"; shift
+	if [ -z "${egroup}" ]
+	then
+		eerror "No group specified !"
+		die "Cannot call enewgroup without a group"
+	fi
+
+	# see if group already exists
+	if [[ -n $(egetent group "${egroup}") ]]; then
+		return 0
+	fi
+	einfo "Adding group '${egroup}' to your system ..."
+
+	# options to pass to groupadd (pw/dscl on the BSDs/Darwin)
+	local opts=
+
+	# handle gid: use the requested gid only if valid and still free;
+	# otherwise fall through to "next available"
+	local egid="$1"; shift
+	if [ ! -z "${egid}" ]
+	then
+		if [ "${egid}" -gt 0 ]
+		then
+			if [ -z "`egetent group ${egid}`" ]
+			then
+				if [[ "${CHOST}" == *-darwin* ]]; then
+					opts="${opts} ${egid}"
+				else
+					opts="${opts} -g ${egid}"
+				fi
+			else
+				egid="next available; requested gid taken"
+			fi
+		else
+			eerror "Groupid given but is not greater than 0 !"
+			die "${egid} is not a valid GID"
+		fi
+	else
+		egid="next available"
+	fi
+	einfo " - Groupid: ${egid}"
+
+	# handle extra
+	local eextra="$@"
+	opts="${opts} ${eextra}"
+
+	# add the group; sandbox must be off so the system group database is writable
+	local oldsandbox="${SANDBOX_ON}"
+	export SANDBOX_ON="0"
+	case ${CHOST} in
+	*-darwin*)
+		if [ ! -z "${eextra}" ];
+		then
+			einfo "Extra options are not supported on Darwin/OS X yet"
+			einfo "Please report the ebuild along with the info below"
+			einfo "eextra: ${eextra}"
+			die "Required function missing"
+		fi
+
+		# If we need the next available
+		# (a non-numeric egid means "pick one from the 101..999 system range")
+		case ${egid} in
+		*[!0-9]*) # Non numeric
+			for ((egid = 101; egid <= 999; egid++)); do
+				[[ -z $(egetent group ${egid}) ]] && break
+			done
+		esac
+		dscl . create /groups/${egroup} gid ${egid}
+		dscl . create /groups/${egroup} passwd '*'
+		;;
+
+	*-freebsd*|*-dragonfly*)
+		case ${egid} in
+			*[!0-9]*) # Non numeric
+				for ((egid = 101; egid <= 999; egid++)); do
+					[[ -z $(egetent group ${egid}) ]] && break
+				done
+		esac
+		pw groupadd ${egroup} -g ${egid} || die "enewgroup failed"
+		;;
+
+	*-netbsd*)
+		case ${egid} in
+		*[!0-9]*) # Non numeric
+			for ((egid = 101; egid <= 999; egid++)); do
+				[[ -z $(egetent group ${egid}) ]] && break
+			done
+		esac
+		groupadd -g ${egid} ${egroup} || die "enewgroup failed"
+		;;
+
+	*)
+		# We specify -r so that we get a GID in the system range from login.defs
+		groupadd -r ${opts} ${egroup} || die "enewgroup failed"
+		;;
+	esac
+	export SANDBOX_ON="${oldsandbox}"
+}
+
+# @FUNCTION: edos2unix
+# @USAGE: <file> [more files ...]
+# @DESCRIPTION:
+# A handy replacement for dos2unix, recode, fixdos, etc...  This allows you
+# to remove all of these text utilities from DEPEND variables because this
+# is a script based solution.  Just give it a list of files to convert and
+# they will all be changed from the DOS CRLF format to the UNIX LF format.
+edos2unix() {
+	# Strip the trailing CR from every line of each file, in place.
+	# Pass the file list straight to sed instead of via `echo | xargs`
+	# so filenames containing whitespace survive intact and a failing
+	# sed is reported through the function's exit status.
+	sed -i 's/\r$//' -- "$@"
+}
+
+# Make a desktop file !
+# Great for making those icons in kde/gnome startmenu !
+# Amaze your friends !	Get the women !	 Join today !
+#
+# make_desktop_entry(<command>, [name], [icon], [type], [fields])
+#
+# binary:	what command does the app run with ?
+# name:		the name that will show up in the menu
+# icon:		give your little like a pretty little icon ...
+#			this can be relative (to /usr/share/pixmaps) or
+#			a full path to an icon
+# type:		what kind of application is this ?	for categories:
+#			http://standards.freedesktop.org/menu-spec/latest/apa.html
+# fields:	extra fields to append to the desktop file; a printf string
+make_desktop_entry() {
+	[[ -z $1 ]] && die "make_desktop_entry: You must specify the executable"
+
+	local exec=${1}
+	local name=${2:-${PN}}
+	local icon=${3:-${PN}}
+	local type=${4}
+	local fields=${5}
+
+	# no explicit type given: derive the freedesktop Categories value from
+	# the package's CATEGORY (major/minor split, e.g. app-editors -> app/editors)
+	if [[ -z ${type} ]] ; then
+		local catmaj=${CATEGORY%%-*}
+		local catmin=${CATEGORY##*-}
+		case ${catmaj} in
+			app)
+				case ${catmin} in
+					accessibility) type=Accessibility;;
+					admin)         type=System;;
+					antivirus)     type=System;;
+					arch)          type=Archiving;;
+					backup)        type=Archiving;;
+					cdr)           type=DiscBurning;;
+					dicts)         type=Dictionary;;
+					doc)           type=Documentation;;
+					editors)       type=TextEditor;;
+					emacs)         type=TextEditor;;
+					emulation)     type=Emulator;;
+					laptop)        type=HardwareSettings;;
+					office)        type=Office;;
+					pda)           type=PDA;;
+					vim)           type=TextEditor;;
+					xemacs)        type=TextEditor;;
+				esac
+				;;
+
+			dev)
+				type="Development"
+				;;
+
+			games)
+				case ${catmin} in
+					action|fps) type=ActionGame;;
+					arcade)     type=ArcadeGame;;
+					board)      type=BoardGame;;
+					emulation)  type=Emulator;;
+					kids)       type=KidsGame;;
+					puzzle)     type=LogicGame;;
+					roguelike)  type=RolePlaying;;
+					rpg)        type=RolePlaying;;
+					simulation) type=Simulation;;
+					sports)     type=SportsGame;;
+					strategy)   type=StrategyGame;;
+				esac
+				type="Game;${type}"
+				;;
+
+			gnome)
+				type="Gnome;GTK"
+				;;
+
+			kde)
+				type="KDE;Qt"
+				;;
+
+			mail)
+				type="Network;Email"
+				;;
+
+			media)
+				case ${catmin} in
+					gfx)
+						type=Graphics
+						;;
+					*)
+						case ${catmin} in
+							radio) type=Tuner;;
+							sound) type=Audio;;
+							tv)    type=TV;;
+							video) type=Video;;
+						esac
+						type="AudioVideo;${type}"
+						;;
+				esac
+				;;
+
+			net)
+				case ${catmin} in
+					dialup) type=Dialup;;
+					ftp)    type=FileTransfer;;
+					im)     type=InstantMessaging;;
+					irc)    type=IRCClient;;
+					mail)   type=Email;;
+					news)   type=News;;
+					nntp)   type=News;;
+					p2p)    type=FileTransfer;;
+					voip)   type=Telephony;;
+				esac
+				type="Network;${type}"
+				;;
+
+			sci)
+				case ${catmin} in
+					astro*)  type=Astronomy;;
+					bio*)    type=Biology;;
+					calc*)   type=Calculator;;
+					chem*)   type=Chemistry;;
+					elec*)   type=Electronics;;
+					geo*)    type=Geology;;
+					math*)   type=Math;;
+					physics) type=Physics;;
+					visual*) type=DataVisualization;;
+				esac
+				type="Education;Science;${type}"
+				;;
+
+			sys)
+				type="System"
+				;;
+
+			www)
+				case ${catmin} in
+					client) type=WebBrowser;;
+				esac
+				type="Network;${type}"
+				;;
+
+			*)
+				type=
+				;;
+		esac
+	fi
+	# keep .desktop filenames unique across slotted installs
+	if [ "${SLOT}" == "0" ] ; then
+		local desktop_name="${PN}"
+	else
+		local desktop_name="${PN}-${SLOT}"
+	fi
+	# sanitize the exec command (whitespace, slashes, colons -> _) for the filename
+	local desktop="${T}/$(echo ${exec} | sed 's:[[:space:]/:]:_:g')-${desktop_name}.desktop"
+	#local desktop=${T}/${exec%% *:-${desktop_name}}.desktop
+
+	# Don't append another ";" when a valid category value is provided.
+	type=${type%;}${type:+;}
+
+	# extglob is needed for the %.@(xpm|png|svg) suffix strip below
+	eshopts_push -s extglob
+	if [[ -n ${icon} && ${icon} != /* ]] && [[ ${icon} == *.xpm || ${icon} == *.png || ${icon} == *.svg ]]; then
+		ewarn "As described in the Icon Theme Specification, icon file extensions are not"
+		ewarn "allowed in .desktop files if the value is not an absolute path."
+		icon=${icon%.@(xpm|png|svg)}
+	fi
+	eshopts_pop
+
+	cat <<-EOF > "${desktop}"
+	[Desktop Entry]
+	Name=${name}
+	Type=Application
+	Comment=${DESCRIPTION}
+	Exec=${exec}
+	TryExec=${exec%% *}
+	Icon=${icon}
+	Categories=${type}
+	EOF
+
+	if [[ ${fields:-=} != *=* ]] ; then
+		# 5th arg used to be value to Path=
+		ewarn "make_desktop_entry: update your 5th arg to read Path=${fields}"
+		fields="Path=${fields}"
+	fi
+	[[ -n ${fields} ]] && printf '%b\n' "${fields}" >> "${desktop}"
+
+	(
+		# wrap the env here so that the 'insinto' call
+		# doesn't corrupt the env of the caller
+		insinto /usr/share/applications
+		doins "${desktop}"
+	) || die "installing desktop file failed"
+}
+
+# @FUNCTION: validate_desktop_entries
+# @USAGE: [directories]
+# @MAINTAINER:
+# Carsten Lohrke <carlo@gentoo.org>
+# @DESCRIPTION:
+# Validate desktop entries using desktop-file-utils
+validate_desktop_entries() {
+	if [[ -x "${EPREFIX}"/usr/bin/desktop-file-validate ]] ; then
+		einfo "Checking desktop entry validity"
+		local directories=""
+		# only scan directories that actually exist in the installed image
+		for d in /usr/share/applications $@ ; do
+			[[ -d ${ED}${d} ]] && directories="${directories} ${ED}${d}"
+		done
+		if [[ -n ${directories} ]] ; then
+			for FILE in $(find ${directories} -name "*\.desktop" \
+							-not -path '*.hidden*' | sort -u 2>/dev/null)
+			do
+				# keep only errors; rewrite "<file>:" to "--" so the path can
+				# be re-substituted relative to ${ED} in the elog line below
+				local temp=$(desktop-file-validate ${FILE} | grep -v "warning:" | \
+								sed -e "s|error: ||" -e "s|${FILE}:|--|g" )
+				[[ -n $temp ]] && elog ${temp/--/${FILE/${ED}/}:}
+			done
+		fi
+		echo ""
+	else
+		einfo "Passing desktop entry validity check. Install dev-util/desktop-file-utils, if you want to help to improve Gentoo."
+	fi
+}
+
+# @FUNCTION: make_session_desktop
+# @USAGE: <title> <command> [command args...]
+# @DESCRIPTION:
+# Make a GDM/KDM Session file.  The title is the file to execute to start the
+# Window Manager.  The command is the name of the Window Manager.
+#
+# You can set the name of the file via the ${wm} variable.
+make_session_desktop() {
+	[[ -z $1 ]] && eerror "$0: You must specify the title" && return 1
+	[[ -z $2 ]] && eerror "$0: You must specify the command" && return 1
+
+	local title=$1
+	local command=$2
+	# ${wm}, if exported by the caller, overrides the session file's basename
+	local desktop=${T}/${wm:-${PN}}.desktop
+	shift 2
+
+	# <<- strips the leading tabs of the here-doc body; remaining args
+	# (after the shift) become extra arguments on the Exec= line
+	cat <<-EOF > "${desktop}"
+	[Desktop Entry]
+	Name=${title}
+	Comment=This session logs you into ${title}
+	Exec=${command} $*
+	TryExec=${command}
+	Type=XSession
+	EOF
+
+	(
+	# wrap the env here so that the 'insinto' call
+	# doesn't corrupt the env of the caller
+	insinto /usr/share/xsessions
+	doins "${desktop}"
+	)
+}
+
+# @FUNCTION: domenu
+# @USAGE: <menus>
+# @DESCRIPTION:
+# Install the list of .desktop menu files into the appropriate directory
+# (/usr/share/applications).
+domenu() {
+	(
+	# wrap the env here so that the 'insinto' call
+	# doesn't corrupt the env of the caller
+	local i j ret=0
+	insinto /usr/share/applications
+	for i in "$@" ; do
+		if [[ -f ${i} ]] ; then
+			doins "${i}"
+			((ret+=$?))
+		elif [[ -d ${i} ]] ; then
+			# a directory argument installs every .desktop file directly inside it
+			for j in "${i}"/*.desktop ; do
+				doins "${j}"
+				((ret+=$?))
+			done
+		else
+			((++ret))
+		fi
+	done
+	# the subshell's exit status (accumulated failure count) becomes the
+	# function's return value
+	exit ${ret}
+	)
+}
+
+# @FUNCTION: newmenu
+# @USAGE: <menu> <newname>
+# @DESCRIPTION:
+# Like all other new* functions, install the specified menu as newname.
+newmenu() {
+	# a subshell keeps insinto's install-path change out of the caller's env
+	(
+		insinto /usr/share/applications
+		newins "$@"
+	)
+}
+
+# @FUNCTION: doicon
+# @USAGE: <list of icons>
+# @DESCRIPTION:
+# Install the list of icons into the icon directory (/usr/share/pixmaps).
+# This is useful in conjunction with creating desktop/menu files.
+doicon() {
+	(
+	# wrap the env here so that the 'insinto' call
+	# doesn't corrupt the env of the caller
+	# FIX: initialize ret (mirrors domenu); previously an argument-less call
+	# left ret unset and 'exit ${ret}' expanded to a bare 'exit'
+	local i j ret=0
+	insinto /usr/share/pixmaps
+	for i in "$@" ; do
+		if [[ -f ${i} ]] ; then
+			doins "${i}"
+			((ret+=$?))
+		elif [[ -d ${i} ]] ; then
+			# a directory argument installs every .png directly inside it
+			for j in "${i}"/*.png ; do
+				doins "${j}"
+				((ret+=$?))
+			done
+		else
+			((++ret))
+		fi
+	done
+	# subshell exit status = accumulated failure count
+	exit ${ret}
+	)
+}
+
+# @FUNCTION: newicon
+# @USAGE: <icon> <newname>
+# @DESCRIPTION:
+# Like all other new* functions, install the specified icon as newname.
+newicon() {
+	# a subshell keeps insinto's install-path change out of the caller's env
+	(
+		insinto /usr/share/pixmaps
+		newins "$@"
+	)
+}
+
+# for internal use only (unpack_pdv and unpack_makeself)
+find_unpackable_file() {
+	# Resolve $1 to an existing path: no argument means ${DISTDIR}/${A},
+	# otherwise try ${DISTDIR} first, then ${PWD}, then the path as given.
+	# Prints the resolved path on stdout; returns 1 if nothing exists.
+	local src=$1
+	if [[ -z ${src} ]] ; then
+		src=${DISTDIR}/${A}
+	elif [[ -e ${DISTDIR}/${src} ]] ; then
+		src=${DISTDIR}/${src}
+	elif [[ -e ${PWD}/${src} ]] ; then
+		src=${PWD}/${src}
+	fi
+	[[ -e ${src} ]] || return 1
+	echo "${src}"
+}
+
+# @FUNCTION: unpack_pdv
+# @USAGE: <file to unpack> <size of off_t>
+# @DESCRIPTION:
+# Unpack those pesky pdv generated files ...
+# They're self-unpacking programs with the binary package stuffed in
+# the middle of the archive.  Valve seems to use it a lot ... too bad
+# it seems to like to segfault a lot :(.  So lets take it apart ourselves.
+#
+# You have to specify the off_t size ... I have no idea how to extract that
+# information out of the binary executable myself.  Basically you pass in
+# the size of the off_t type (in bytes) on the machine that built the pdv
+# archive.
+#
+# One way to determine this is by running the following commands:
+#
+# @CODE
+# 	strings <pdv archive> | grep lseek
+# 	strace -elseek <pdv archive>
+# @CODE
+#
+# Basically look for the first lseek command (we do the strings/grep because
+# sometimes the function call is _llseek or something) and steal the 2nd
+# parameter.  Here is an example:
+#
+# @CODE
+# 	vapier@vapier 0 pdv_unpack # strings hldsupdatetool.bin | grep lseek
+# 	lseek
+# 	vapier@vapier 0 pdv_unpack # strace -elseek ./hldsupdatetool.bin
+# 	lseek(3, -4, SEEK_END)					= 2981250
+# @CODE
+#
+# Thus we would pass in the value of '4' as the second parameter.
+unpack_pdv() {
+	local src=$(find_unpackable_file "$1")
+	local sizeoff_t=$2
+
+	[[ -z ${src} ]] && die "Could not locate source for '$1'"
+	[[ -z ${sizeoff_t} ]] && die "No idea what off_t size was used for this pdv :("
+
+	local shrtsrc=$(basename "${src}")
+	echo ">>> Unpacking ${shrtsrc} to ${PWD}"
+	# the archive's last 2*off_t bytes hold two raw offsets; hexdump turns
+	# them into ints: metaskip = metadata start (last), tailskip = data start
+	local metaskip=$(tail -c ${sizeoff_t} "${src}" | hexdump -e \"%i\")
+	local tailskip=$(tail -c $((${sizeoff_t}*2)) "${src}" | head -c ${sizeoff_t} | hexdump -e \"%i\")
+
+	# grab metadata for debug reasons
+	local metafile=$(emktemp)
+	tail -c +$((${metaskip}+1)) "${src}" > "${metafile}"
+
+	# rip out the final file name from the metadata
+	local datafile=$(tail -c +$((${metaskip}+1)) "${src}" | strings | head -n 1)
+	datafile=$(basename "${datafile}")
+
+	# now lets uncompress/untar the file if need be
+	# (sample the payload's first 512 bytes to sniff its format with file(1))
+	local tmpfile=$(emktemp)
+	tail -c +$((${tailskip}+1)) ${src} 2>/dev/null | head -c 512 > ${tmpfile}
+
+	local iscompressed=$(file -b "${tmpfile}")
+	if [[ ${iscompressed:0:8} == "compress" ]] ; then
+		iscompressed=1
+		# rename the sample to *.Z so gunzip recognizes it and recreates
+		# ${tmpfile} uncompressed for the tar check below
+		mv ${tmpfile}{,.Z}
+		gunzip ${tmpfile}
+	else
+		iscompressed=0
+	fi
+	local istar=$(file -b "${tmpfile}")
+	if [[ ${istar:0:9} == "POSIX tar" ]] ; then
+		istar=1
+	else
+		istar=0
+	fi
+
+	#for some reason gzip dies with this ... dd cant provide buffer fast enough ?
+	#dd if=${src} ibs=${metaskip} count=1 \
+	#	| dd ibs=${tailskip} skip=1 \
+	#	| gzip -dc \
+	#	> ${datafile}
+	# the payload lives between tailskip and metaskip; dispatch on the
+	# (compressed?, tar?) combination sniffed above
+	if [ ${iscompressed} -eq 1 ] ; then
+		if [ ${istar} -eq 1 ] ; then
+			tail -c +$((${tailskip}+1)) ${src} 2>/dev/null \
+				| head -c $((${metaskip}-${tailskip})) \
+				| tar -xzf -
+		else
+			tail -c +$((${tailskip}+1)) ${src} 2>/dev/null \
+				| head -c $((${metaskip}-${tailskip})) \
+				| gzip -dc \
+				> ${datafile}
+		fi
+	else
+		if [ ${istar} -eq 1 ] ; then
+			tail -c +$((${tailskip}+1)) ${src} 2>/dev/null \
+				| head -c $((${metaskip}-${tailskip})) \
+				| tar --no-same-owner -xf -
+		else
+			tail -c +$((${tailskip}+1)) ${src} 2>/dev/null \
+				| head -c $((${metaskip}-${tailskip})) \
+				> ${datafile}
+		fi
+	fi
+	# always report success; the sanity checks below were disabled upstream
+	true
+	#[ -s "${datafile}" ] || die "failure unpacking pdv ('${metaskip}' '${tailskip}' '${datafile}')"
+	#assert "failure unpacking pdv ('${metaskip}' '${tailskip}' '${datafile}')"
+}
+
+# @FUNCTION: unpack_makeself
+# @USAGE: [file to unpack] [offset] [tail|dd]
+# @DESCRIPTION:
+# Unpack those pesky makeself generated files ...
+# They're shell scripts with the binary package tagged onto
+# the end of the archive.  Loki utilized the format as does
+# many other game companies.
+#
+# If the file is not specified, then ${A} is used.  If the
+# offset is not specified then we will attempt to extract
+# the proper offset from the script itself.
+unpack_makeself() {
+	local src_input=${1:-${A}}
+	local src=$(find_unpackable_file "${src_input}")
+	local skip=$2
+	local exe=$3
+
+	[[ -z ${src} ]] && die "Could not locate source for '${src_input}'"
+
+	local shrtsrc=$(basename "${src}")
+	echo ">>> Unpacking ${shrtsrc} to ${PWD}"
+	if [[ -z ${skip} ]] ; then
+		# no offset given: detect the makeself version from the stub script
+		# and pull the payload offset out of it (each release stores it differently)
+		local ver=$(grep -m1 -a '#.*Makeself' "${src}" | awk '{print $NF}')
+		local skip=0
+		exe=tail
+		case ${ver} in
+			1.5.*|1.6.0-nv)	# tested 1.5.{3,4,5} ... guessing 1.5.x series is same
+				skip=$(grep -a ^skip= "${src}" | cut -d= -f2)
+				;;
+			2.0|2.0.1)
+				skip=$(grep -a ^$'\t'tail "${src}" | awk '{print $2}' | cut -b2-)
+				;;
+			2.1.1)
+				skip=$(grep -a ^offset= "${src}" | awk '{print $2}' | cut -b2-)
+				(( skip++ ))
+				;;
+			2.1.2)
+				skip=$(grep -a ^offset= "${src}" | awk '{print $3}' | head -n 1)
+				(( skip++ ))
+				;;
+			2.1.3)
+				skip=`grep -a ^offset= "${src}" | awk '{print $3}'`
+				(( skip++ ))
+				;;
+			2.1.4|2.1.5)
+				skip=$(grep -a offset=.*head.*wc "${src}" | awk '{print $3}' | head -n 1)
+				skip=$(head -n ${skip} "${src}" | wc -c)
+				exe="dd"
+				;;
+			*)
+				eerror "I'm sorry, but I was unable to support the Makeself file."
+				eerror "The version I detected was '${ver}'."
+				eerror "Please file a bug about the file ${shrtsrc} at"
+				eerror "http://bugs.gentoo.org/ so that support can be added."
+				die "makeself version '${ver}' not supported"
+				;;
+		esac
+		debug-print "Detected Makeself version ${ver} ... using ${skip} as offset"
+	fi
+	# build the extraction command as a string; it is eval'd below
+	# (tail skips lines, dd skips bytes -- depends on how the offset is stored)
+	case ${exe} in
+		tail)	exe="tail -n +${skip} '${src}'";;
+		dd)		exe="dd ibs=${skip} skip=1 if='${src}'";;
+		*)		die "makeself cant handle exe '${exe}'"
+	esac
+
+	# lets grab the first few bytes of the file to figure out what kind of archive it is
+	local tmpfile=$(emktemp)
+	eval ${exe} 2>/dev/null | head -c 512 > "${tmpfile}"
+	local filetype=$(file -b "${tmpfile}")
+	case ${filetype} in
+		*tar\ archive*)
+			eval ${exe} | tar --no-same-owner -xf -
+			;;
+		bzip2*)
+			eval ${exe} | bzip2 -dc | tar --no-same-owner -xf -
+			;;
+		gzip*)
+			eval ${exe} | tar --no-same-owner -xzf -
+			;;
+		compress*)
+			eval ${exe} | gunzip | tar --no-same-owner -xf -
+			;;
+		*)
+			eerror "Unknown filetype \"${filetype}\" ?"
+			false
+			;;
+	esac
+	assert "failure unpacking (${filetype}) makeself ${shrtsrc} ('${ver}' +${skip})"
+}
+
+# @FUNCTION: check_license
+# @USAGE: [license]
+# @DESCRIPTION:
+# Display a license for user to accept.  If no license is
+# specified, then ${LICENSE} is used.
+check_license() {
+	local lic=$1
+	if [ -z "${lic}" ] ; then
+		lic="${PORTDIR}/licenses/${LICENSE}"
+	else
+		# resolve a relative license name against the tree first, then ${PWD}
+		if [ -e "${PORTDIR}/licenses/${lic}" ] ; then
+			lic="${PORTDIR}/licenses/${lic}"
+		elif [ -e "${PWD}/${lic}" ] ; then
+			lic="${PWD}/${lic}"
+		elif [ -e "${lic}" ] ; then
+			lic="${lic}"
+		fi
+	fi
+	local l="`basename ${lic}`"
+
+	# here is where we check for the licenses the user already
+	# accepted ... if we don't find a match, we make the user accept
+	local alic
+	eshopts_push -o noglob # so that bash doesn't expand "*"
+	for alic in ${ACCEPT_LICENSE} ; do
+		# NOTE(review): inside [[ ]] the RHS (${l}) is the pattern, so a "*"
+		# entry in ACCEPT_LICENSE is compared as a literal LHS string here --
+		# confirm this matches the intended accept-all semantics
+		if [[ ${alic} == ${l} ]]; then
+			eshopts_pop
+			return 0
+		fi
+	done
+	eshopts_pop
+	[ ! -f "${lic}" ] && die "Could not find requested license ${lic}"
+
+	# show the license through a pager and require an explicit "yes"
+	local licmsg=$(emktemp)
+	cat <<-EOF > ${licmsg}
+	**********************************************************
+	The following license outlines the terms of use of this
+	package.  You MUST accept this license for installation to
+	continue.  When you are done viewing, hit 'q'.	If you
+	CTRL+C out of this, the install will not run!
+	**********************************************************
+
+	EOF
+	cat ${lic} >> ${licmsg}
+	${PAGER:-less} ${licmsg} || die "Could not execute pager (${PAGER}) to accept ${lic}"
+	einfon "Do you accept the terms of this license (${l})? [yes/no] "
+	read alic
+	case ${alic} in
+		yes|Yes|y|Y)
+			return 0
+			;;
+		*)
+			echo;echo;echo
+			eerror "You MUST accept the license to continue!  Exiting!"
+			die "Failed to accept license"
+			;;
+	esac
+}
+
+# @FUNCTION: cdrom_get_cds
+# @USAGE: <file on cd1> [file on cd2] [file on cd3] [...]
+# @DESCRIPTION:
+# Aquire cd(s) for those lovely cd-based emerges.  Yes, this violates
+# the whole 'non-interactive' policy, but damnit I want CD support !
+#
+# With these cdrom functions we handle all the user interaction and
+# standardize everything.  All you have to do is call cdrom_get_cds()
+# and when the function returns, you can assume that the cd has been
+# found at CDROM_ROOT.
+#
+# The function will attempt to locate a cd based upon a file that is on
+# the cd.  The more files you give this function, the more cds
+# the cdrom functions will handle.
+#
+# Normally the cdrom functions will refer to the cds as 'cd #1', 'cd #2',
+# etc...  If you want to give the cds better names, then just export
+# the appropriate CDROM_NAME variable before calling cdrom_get_cds().
+# Use CDROM_NAME for one cd, or CDROM_NAME_# for multiple cds.  You can
+# also use the CDROM_NAME_SET bash array.
+#
+# For those multi cd ebuilds, see the cdrom_load_next_cd() function.
+cdrom_get_cds() {
+	# first we figure out how many cds we're dealing with by
+	# the # of files they gave us
+	local cdcnt=0
+	local f=
+	for f in "$@" ; do
+		((++cdcnt))
+		export CDROM_CHECK_${cdcnt}="$f"
+	done
+	export CDROM_TOTAL_CDS=${cdcnt}
+	export CDROM_CURRENT_CD=1
+
+	# now we see if the user gave use CD_ROOT ...
+	# if they did, let's just believe them that it's correct
+	if [[ -n ${CD_ROOT}${CD_ROOT_1} ]] ; then
+		local var=
+		cdcnt=0
+		while [[ ${cdcnt} -lt ${CDROM_TOTAL_CDS} ]] ; do
+			((++cdcnt))
+			var="CD_ROOT_${cdcnt}"
+			[[ -z ${!var} ]] && var="CD_ROOT"
+			if [[ -z ${!var} ]] ; then
+				eerror "You must either use just the CD_ROOT"
+				eerror "or specify ALL the CD_ROOT_X variables."
+				eerror "In this case, you will need ${CDROM_TOTAL_CDS} CD_ROOT_X variables."
+				die "could not locate CD_ROOT_${cdcnt}"
+			fi
+		done
+		export CDROM_ROOT=${CD_ROOT_1:-${CD_ROOT}}
+		einfo "Found CD #${CDROM_CURRENT_CD} root at ${CDROM_ROOT}"
+		# record which of the colon-separated candidate files matched;
+		# CDROM_SET ends up as that candidate's index
+		export CDROM_SET=-1
+		for f in ${CDROM_CHECK_1//:/ } ; do
+			((++CDROM_SET))
+			[[ -e ${CDROM_ROOT}/${f} ]] && break
+		done
+		export CDROM_MATCH=${f}
+		return
+	fi
+
+	# User didn't help us out so lets make sure they know they can
+	# simplify the whole process ...
+	if [[ ${CDROM_TOTAL_CDS} -eq 1 ]] ; then
+		einfo "This ebuild will need the ${CDROM_NAME:-cdrom for ${PN}}"
+		echo
+		einfo "If you do not have the CD, but have the data files"
+		einfo "mounted somewhere on your filesystem, just export"
+		einfo "the variable CD_ROOT so that it points to the"
+		einfo "directory containing the files."
+		echo
+		einfo "For example:"
+		einfo "export CD_ROOT=/mnt/cdrom"
+		echo
+	else
+		if [[ -n ${CDROM_NAME_SET} ]] ; then
+			# Translate the CDROM_NAME_SET array into CDROM_NAME_#
+			cdcnt=0
+			while [[ ${cdcnt} -lt ${CDROM_TOTAL_CDS} ]] ; do
+				((++cdcnt))
+				export CDROM_NAME_${cdcnt}="${CDROM_NAME_SET[$((${cdcnt}-1))]}"
+			done
+		fi
+
+		einfo "This package will need access to ${CDROM_TOTAL_CDS} cds."
+		cdcnt=0
+		while [[ ${cdcnt} -lt ${CDROM_TOTAL_CDS} ]] ; do
+			((++cdcnt))
+			var="CDROM_NAME_${cdcnt}"
+			[[ ! -z ${!var} ]] && einfo " CD ${cdcnt}: ${!var}"
+		done
+		echo
+		einfo "If you do not have the CDs, but have the data files"
+		einfo "mounted somewhere on your filesystem, just export"
+		einfo "the following variables so they point to the right place:"
+		einfon ""
+		cdcnt=0
+		while [[ ${cdcnt} -lt ${CDROM_TOTAL_CDS} ]] ; do
+			((++cdcnt))
+			echo -n " CD_ROOT_${cdcnt}"
+		done
+		echo
+		einfo "Or, if you have all the files in the same place, or"
+		einfo "you only have one cdrom, you can export CD_ROOT"
+		einfo "and that place will be used as the same data source"
+		einfo "for all the CDs."
+		echo
+		einfo "For example:"
+		einfo "export CD_ROOT_1=/mnt/cdrom"
+		echo
+	fi
+
+	# no CD_ROOT override: start the interactive scan with cd #1
+	export CDROM_SET=""
+	export CDROM_CURRENT_CD=0
+	cdrom_load_next_cd
+}
+
+# @FUNCTION: cdrom_load_next_cd
+# @DESCRIPTION:
+# Some packages are so big they come on multiple CDs.  When you're done reading
+# files off a CD and want access to the next one, just call this function.
+# Again, all the messy details of user interaction are taken care of for you.
+# Once this returns, just read the variable CDROM_ROOT for the location of the
+# mounted CD.  Note that you can only go forward in the CD list, so make sure
+# you only call this function when you're done using the current CD.
+cdrom_load_next_cd() {
+	local var
+	((++CDROM_CURRENT_CD))
+
+	unset CDROM_ROOT
+	# a user-provided CD_ROOT_<n> (or plain CD_ROOT) wins; otherwise scan
+	# the mounted filesystems for the cd's check file
+	var=CD_ROOT_${CDROM_CURRENT_CD}
+	[[ -z ${!var} ]] && var="CD_ROOT"
+	if [[ -z ${!var} ]] ; then
+		var="CDROM_CHECK_${CDROM_CURRENT_CD}"
+		_cdrom_locate_file_on_cd ${!var}
+	else
+		export CDROM_ROOT=${!var}
+	fi
+
+	einfo "Found CD #${CDROM_CURRENT_CD} root at ${CDROM_ROOT}"
+}
+
+# this is used internally by the cdrom_get_cds() and cdrom_load_next_cd()
+# functions.  this should *never* be called from an ebuild.
+# all it does is try to locate a give file on a cd ... if the cd isn't
+# found, then a message asking for the user to insert the cdrom will be
+# displayed and we'll hang out here until:
+# (1) the file is found on a mounted cdrom
+# (2) the user hits CTRL+C
+_cdrom_locate_file_on_cd() {
+	local mline=""
+	local showedmsg=0 showjolietmsg=0
+
+	while [[ -z ${CDROM_ROOT} ]] ; do
+		local i=0
+		# split the colon-separated candidate list for this cd into an array
+		local -a cdset=(${*//:/ })
+		if [[ -n ${CDROM_SET} ]] ; then
+			cdset=(${cdset[${CDROM_SET}]})
+		fi
+
+		while [[ -n ${cdset[${i}]} ]] ; do
+			local dir=$(dirname ${cdset[${i}]})
+			local file=$(basename ${cdset[${i}]})
+
+			# get_mounts emits "point node fs opts"; accept only cd media filesystems
+			local point= node= fs= foo=
+			while read point node fs foo ; do
+				# NOTE(review): ${opts} is never assigned in this function, so
+				# the subfs/cdfss condition can only match via an inherited
+				# variable -- verify against the callers
+				[[ " cd9660 iso9660 udf " != *" ${fs} "* ]] && \
+					! [[ ${fs} == "subfs" && ",${opts}," == *",fs=cdfss,"* ]] \
+					&& continue
+				# mount points encode spaces as the octal escape \040
+				point=${point//\040/ }
+				[[ ! -d ${point}/${dir} ]] && continue
+				[[ -z $(find "${point}/${dir}" -maxdepth 1 -iname "${file}") ]] && continue
+				export CDROM_ROOT=${point}
+				export CDROM_SET=${i}
+				export CDROM_MATCH=${cdset[${i}]}
+				return
+			done <<< "$(get_mounts)"
+
+			((++i))
+		done
+
+		echo
+		if [[ ${showedmsg} -eq 0 ]] ; then
+			if [[ ${CDROM_TOTAL_CDS} -eq 1 ]] ; then
+				if [[ -z ${CDROM_NAME} ]] ; then
+					einfo "Please insert+mount the cdrom for ${PN} now !"
+				else
+					einfo "Please insert+mount the ${CDROM_NAME} cdrom now !"
+				fi
+			else
+				if [[ -z ${CDROM_NAME_1} ]] ; then
+					einfo "Please insert+mount cd #${CDROM_CURRENT_CD} for ${PN} now !"
+				else
+					local var="CDROM_NAME_${CDROM_CURRENT_CD}"
+					einfo "Please insert+mount the ${!var} cdrom now !"
+				fi
+			fi
+			showedmsg=1
+		fi
+		einfo "Press return to scan for the cd again"
+		einfo "or hit CTRL+C to abort the emerge."
+		echo
+		# only nag about Joliet support from the second failed scan onwards
+		if [[ ${showjolietmsg} -eq 0 ]] ; then
+			showjolietmsg=1
+		else
+			ewarn "If you are having trouble with the detection"
+			ewarn "of your CD, it is possible that you do not have"
+			ewarn "Joliet support enabled in your kernel.  Please"
+			ewarn "check that CONFIG_JOLIET is enabled in your kernel."
+			ebeep 5
+		fi
+		read || die "something is screwed with your system"
+	done
+}
+
+# @FUNCTION: strip-linguas
+# @USAGE: [<allow LINGUAS>|<-i|-u> <directories of .po files>]
+# @DESCRIPTION:
+# Make sure that LINGUAS only contains languages that
+# a package can support.  The first form allows you to
+# specify a list of LINGUAS.  The -i builds a list of po
+# files found in all the directories and uses the
+# intersection of the lists.  The -u builds a list of po
+# files found in all the directories and uses the union
+# of the lists.
+strip-linguas() {
+	local ls newls nols
+	if [[ $1 == "-i" ]] || [[ $1 == "-u" ]] ; then
+		# -i: intersect the .po basenames found in all dirs; -u: union them
+		local op=$1; shift
+		ls=$(find "$1" -name '*.po' -exec basename {} .po ';'); shift
+		local d f
+		for d in "$@" ; do
+			if [[ ${op} == "-u" ]] ; then
+				newls=${ls}
+			else
+				newls=""
+			fi
+			for f in $(find "$d" -name '*.po' -exec basename {} .po ';') ; do
+				if [[ ${op} == "-i" ]] ; then
+					hasq ${f} ${ls} && newls="${newls} ${f}"
+				else
+					hasq ${f} ${ls} || newls="${newls} ${f}"
+				fi
+			done
+			ls=${newls}
+		done
+	else
+		# first form: the allowed LINGUAS were passed directly as arguments
+		ls="$@"
+	fi
+
+	# keep only the supported languages from LINGUAS; warn about the rest
+	nols=""
+	newls=""
+	for f in ${LINGUAS} ; do
+		if hasq ${f} ${ls} ; then
+			newls="${newls} ${f}"
+		else
+			nols="${nols} ${f}"
+		fi
+	done
+	[[ -n ${nols} ]] \
+		&& ewarn "Sorry, but ${PN} does not support the LINGUAS:" ${nols}
+	# ${newls:1} drops the leading space accumulated above
+	export LINGUAS=${newls:1}
+}
+
# @FUNCTION: preserve_old_lib
# @USAGE: <libs to preserve> [more libs]
# @DESCRIPTION:
# These functions are useful when a lib in your package changes ABI SONAME.
# An example might be from libogg.so.0 to libogg.so.1.  Removing libogg.so.0
# would break packages that link against it.  Most people get around this
# by using the portage SLOT mechanism, but that is not always a relevant
# solution, so instead you can call this from pkg_preinst.  See also the
# preserve_old_lib_notify function.
preserve_old_lib() {
	# Must run in pkg_preinst: the old lib is copied from the live root
	# (${EROOT}) into the new image (${ED}) before portage merges it.
	if [[ ${EBUILD_PHASE} != "preinst" ]] ; then
		eerror "preserve_old_lib() must be called from pkg_preinst() only"
		die "Invalid preserve_old_lib() usage"
	fi
	[[ -z $1 ]] && die "Usage: preserve_old_lib <library to preserve> [more libraries to preserve]"

	# let portage worry about it
	has preserve-libs ${FEATURES} && return 0

	# cannot preserve-libs on aix yet.
	[[ ${CHOST} == *-aix* ]] && {
		einfo "Not preserving libs on AIX (yet), not implemented."
		return 0
	}

	local lib dir
	for lib in "$@" ; do
		# Skip libs that are not actually installed on the live system.
		[[ -e ${EROOT}/${lib} ]] || continue
		dir=${lib%/*}
		dodir ${dir} || die "dodir ${dir} failed"
		# Copy (not move) the old lib into the image and freshen its
		# timestamp so it survives the merge alongside the new version.
		cp "${EROOT}"/${lib} "${ED}"/${lib} || die "cp ${lib} failed"
		touch "${ED}"/${lib}
	done
}
+
# @FUNCTION: preserve_old_lib_notify
# @USAGE: <libs to notify> [more libs]
# @DESCRIPTION:
# Spit helpful messages about the libraries preserved by preserve_old_lib.
preserve_old_lib_notify() {
	if [[ ${EBUILD_PHASE} != "postinst" ]] ; then
		eerror "preserve_old_lib_notify() must be called from pkg_postinst() only"
		die "Invalid preserve_old_lib_notify() usage"
	fi

	# let portage worry about it
	has preserve-libs ${FEATURES} && return 0

	# Print the explanatory header only once, before the first
	# still-installed library.
	local lib notice=0
	for lib in "$@" ; do
		[[ -e ${EROOT}/${lib} ]] || continue
		if [[ ${notice} -eq 0 ]] ; then
			notice=1
			ewarn "Old versions of installed libraries were detected on your system."
			ewarn "In order to avoid breaking packages that depend on these old libs,"
			ewarn "the libraries are not being removed.  You need to run revdep-rebuild"
			ewarn "in order to remove these old dependencies.  If you do not have this"
			ewarn "helper program, simply emerge the 'gentoolkit' package."
			ewarn
		fi
		# temp hack for #348634 #357225
		[[ ${PN} == "mpfr" ]] && lib=${lib##*/}
		ewarn "  # revdep-rebuild --library '${lib}'"
	done
	if [[ ${notice} -eq 1 ]] ; then
		ewarn
		ewarn "Once you've finished running revdep-rebuild, it should be safe to"
		ewarn "delete the old libraries.  Here is a copy & paste for the lazy:"
		for lib in "$@" ; do
			ewarn "  # rm '${lib}'"
		done
	fi
}
+
# @FUNCTION: built_with_use
# @USAGE: [--hidden] [--missing <action>] [-a|-o] <DEPEND ATOM> <List of USE flags>
# @DESCRIPTION:
#
# Deprecated: Use EAPI 2 use deps in DEPEND|RDEPEND and with has_version calls.
#
# A temporary hack until portage properly supports DEPENDing on USE
# flags being enabled in packages.  This will check to see if the specified
# DEPEND atom was built with the specified list of USE flags.  The
# --missing option controls the behavior if called on a package that does
# not actually support the defined USE flags (aka listed in IUSE).
# The default is to abort (call die).  The -a and -o flags control
# the requirements of the USE flags.  They correspond to "and" and "or"
# logic.  So the -a flag means all listed USE flags must be enabled
# while the -o flag means at least one of the listed IUSE flags must be
# enabled.  The --hidden option is really for internal use only as it
# means the USE flag we're checking is hidden expanded, so it won't be found
# in IUSE like normal USE flags.
#
# Remember that this function isn't terribly intelligent so order of optional
# flags matter.
built_with_use() {
	# --hidden: flag is USE_EXPAND-hidden and will not show up in IUSE.
	local hidden="no"
	if [[ $1 == "--hidden" ]] ; then
		hidden="yes"
		shift
	fi

	# --missing <action>: behavior when the package does not list the flag
	# in IUSE; one of true (treat as enabled), false, or die (the default).
	local missing_action="die"
	if [[ $1 == "--missing" ]] ; then
		missing_action=$2
		shift ; shift
		case ${missing_action} in
			true|false|die) ;;
			*) die "unknown action '${missing_action}'";;
		esac
	fi

	# -a (default): all listed flags must be enabled; -o: at least one.
	local opt=$1
	[[ ${opt:0:1} = "-" ]] && shift || opt="-a"

	local PKG=$(best_version $1)
	[[ -z ${PKG} ]] && die "Unable to resolve $1 to an installed package"
	shift

	# VDB records of the package's USE/IUSE at the time it was built.
	local USEFILE="${EROOT}"/var/db/pkg/${PKG}/USE
	local IUSEFILE="${EROOT}"/var/db/pkg/${PKG}/IUSE

	# if the IUSE file doesn't exist, the read will error out, we need to handle
	# this gracefully
	if [[ ! -e ${USEFILE} ]] || [[ ! -e ${IUSEFILE} && ${hidden} == "no" ]] ; then
		case ${missing_action} in
			true)	return 0;;
			false)	return 1;;
			die)	die "Unable to determine what USE flags $PKG was built with";;
		esac
	fi

	if [[ ${hidden} == "no" ]] ; then
		local IUSE_BUILT=( $(<"${IUSEFILE}") )
		# Don't check USE_EXPAND #147237
		local expand
		for expand in $(echo ${USE_EXPAND} | tr '[:upper:]' '[:lower:]') ; do
			if [[ $1 == ${expand}_* ]] ; then
				expand=""
				break
			fi
		done
		# Only enforce IUSE membership for plain flags; the [-+] strip
		# drops the default-enabled/-disabled markers from IUSE entries.
		if [[ -n ${expand} ]] ; then
			if ! has $1 ${IUSE_BUILT[@]#[-+]} ; then
				case ${missing_action} in
					true)  return 0;;
					false) return 1;;
					die)   die "$PKG does not actually support the $1 USE flag!";;
				esac
			fi
		fi
	fi

	local USE_BUILT=$(<${USEFILE})
	# -o: succeed on the first enabled flag; -a: fail on the first disabled.
	while [[ $# -gt 0 ]] ; do
		if [[ ${opt} = "-o" ]] ; then
			has $1 ${USE_BUILT} && return 0
		else
			has $1 ${USE_BUILT} || return 1
		fi
		shift
	done
	# For -a every flag was enabled; for -o none was.
	[[ ${opt} = "-a" ]]
}
+
# @FUNCTION: epunt_cxx
# @USAGE: [dir to scan]
# @DESCRIPTION:
# Many configure scripts wrongly bail when a C++ compiler could not be
# detected.  If dir is not specified, then it defaults to ${S}.
#
# http://bugs.gentoo.org/73450
epunt_cxx() {
	local dir=$1
	[[ -z ${dir} ]] && dir=${S}
	ebegin "Removing useless C++ checks"
	local f
	# Apply the nocxx hack to every configure script below ${dir}.
	# NOTE(review): patch failures are silently ignored and eend is always
	# given 0, so this is best-effort by design.  The loop body runs in a
	# pipeline subshell, so ${f} does not leak past the loop.
	find "${dir}" -name configure | while read f ; do
		patch --no-backup-if-mismatch -p0 "${f}" "${PORTDIR}/eclass/ELT-patches/nocxx/nocxx.patch" > /dev/null
	done
	eend 0
}
+
# @FUNCTION: make_wrapper
# @USAGE: <wrapper> <target> [chdir] [libpaths] [installpath]
# @DESCRIPTION:
# Create a shell wrapper script named wrapper in installpath
# (defaults to the bindir) to execute target (default of wrapper) by
# first optionally setting LD_LIBRARY_PATH to the colon-delimited
# libpaths followed by optionally changing directory to chdir.
make_wrapper() {
	local wrapper=$1 bin=$2 chdir=$3 libdir=$4 path=$5
	local tmpwrapper=$(emktemp)
	# We don't want to quote ${bin} so that people can pass complex
	# things as $bin ... "./someprog --args"
	cat << EOF > "${tmpwrapper}"
#!${EPREFIX}/bin/sh
cd "${chdir:-.}"
if [ -n "${libdir}" ] ; then
	if [ "\${LD_LIBRARY_PATH+set}" = "set" ] ; then
		export LD_LIBRARY_PATH="\${LD_LIBRARY_PATH}:${EPREFIX}${libdir}"
	else
		export LD_LIBRARY_PATH="${EPREFIX}${libdir}"
	fi
	# ultra-dirty, just do the same for Darwin
	if [ "\${DYLD_LIBRARY_PATH+set}" = "set" ] ; then
		export DYLD_LIBRARY_PATH="\${DYLD_LIBRARY_PATH}:${EPREFIX}${libdir}"
	else
		export DYLD_LIBRARY_PATH="${EPREFIX}${libdir}"
	fi
fi
exec "${EPREFIX}"${bin} "\$@"
EOF
	# Fixed: the DYLD_LIBRARY_PATH append branch used the misspelled
	# ${EPERFIX}, which expanded empty and produced a wrong search path.
	chmod go+rx "${tmpwrapper}"
	if [[ -n ${path} ]] ; then
		(
		exeinto "${path}"
		newexe "${tmpwrapper}" "${wrapper}"
		) || die
	else
		newbin "${tmpwrapper}" "${wrapper}" || die
	fi
}
+
# @FUNCTION: path_exists
# @USAGE: [-a|-o] <paths>
# @DESCRIPTION:
# Check if the specified paths exist.  Works for all types of paths
# (files/dirs/etc...).  The -a and -o flags control the requirements
# of the paths.  They correspond to "and" and "or" logic.  So the -a
# flag means all the paths must exist while the -o flag means at least
# one of the paths must exist.  The default behavior is "and".  If no
# paths are specified, then the return value is "false".
path_exists() {
	local mode=-a
	case $1 in
		-a|-o) mode=$1 ; shift ;;
	esac

	# With no paths to check, mirror the behavior of [[ -e "" ]]: false.
	[[ $# -eq 0 ]] && return 1

	# Count how many of the given paths are missing.
	local p missing=0
	for p in "$@" ; do
		[[ -e ${p} ]] || missing=$(( missing + 1 ))
	done

	case ${mode} in
		-a) [[ ${missing} -eq 0 ]] ;;   # all must exist
		-o) [[ ${missing} -lt $# ]] ;;  # at least one must exist
	esac
}

diff --git a/eclass/flag-o-matic.eclass b/eclass/flag-o-matic.eclass
new file mode 100644
index 0000000..60223b8
--- /dev/null
+++ b/eclass/flag-o-matic.eclass
@@ -0,0 +1,780 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/eclass/flag-o-matic.eclass,v 1.150 2011/02/25 10:51:44 dirtyepic Exp $
+
+# @ECLASS: flag-o-matic.eclass
+# @MAINTAINER:
+# toolchain@gentoo.org
+# @BLURB: common functions to manipulate and query toolchain flags
+# @DESCRIPTION:
+# This eclass contains a suite of functions to help developers sanely
+# and safely manage toolchain flags in their builds.
+
+inherit eutils toolchain-funcs multilib
+
+################ DEPRECATED functions ################
+# The following are still present to avoid breaking existing
+# code more than necessary; however they are deprecated. Please
+# use gcc-specs-* from toolchain-funcs.eclass instead, if you
+# need to know which hardened techs are active in the compiler.
+# See bug #100974
+#
+# has_hardened
+# has_pie
+# has_pic
+# has_ssp_all
+# has_ssp
+
+
# {C,CXX,F,FC}FLAGS that we allow in strip-flags
# Note: shell globs and character lists are allowed
setup-allowed-flags() {
	# Seed the base whitelist only if the user/ebuild has not set one.
	if [[ -z ${ALLOWED_FLAGS} ]] ; then
		local base="-pipe"
		base+=" -O -O0 -O1 -O2 -mcpu -march -mtune"
		base+=" -fstack-protector -fstack-protector-all"
		base+=" -fbounds-checking -fno-strict-overflow"
		base+=" -fno-PIE -fno-pie -fno-unit-at-a-time"
		base+=" -g -g[0-9] -ggdb -ggdb[0-9] -gstabs -gstabs+"
		base+=" -fno-ident -fpermissive"
		base+=" -W* -w"
		export ALLOWED_FLAGS=${base}
	fi

	# Always allow flags that negate features / control the ABI.
	ALLOWED_FLAGS+=" -fno-stack-protector -fno-stack-protector-all"
	ALLOWED_FLAGS+=" -fno-strict-aliasing -fno-bounds-checking -fstrict-overflow -fno-omit-frame-pointer"
	ALLOWED_FLAGS+=" -mregparm -mno-app-regs -mapp-regs"
	ALLOWED_FLAGS+=" -mno-mmx -mno-sse -mno-sse2 -mno-sse3 -mno-ssse3 -mno-sse4 -mno-sse4.1"
	ALLOWED_FLAGS+=" -mno-sse4.2 -mno-avx -mno-aes -mno-pclmul -mno-sse4a -mno-3dnow"
	ALLOWED_FLAGS+=" -mno-popcnt -mno-abm"
	ALLOWED_FLAGS+=" -mips1 -mips2 -mips3 -mips4 -mips32 -mips64 -mips16 -mplt"
	ALLOWED_FLAGS+=" -msoft-float -mno-soft-float -mhard-float -mno-hard-float -mfpu"
	ALLOWED_FLAGS+=" -mieee -mieee-with-inexact -mschedule -mfloat-gprs -mspe -mno-spe"
	ALLOWED_FLAGS+=" -mtls-direct-seg-refs -mno-tls-direct-seg-refs"
	ALLOWED_FLAGS+=" -mflat -mno-flat -mno-faster-structs -mfaster-structs"
	ALLOWED_FLAGS+=" -m32 -m64 -mabi -mlittle-endian -mbig-endian -EL -EB -fPIC"
	ALLOWED_FLAGS+=" -mlive-g0 -mcmodel -mstack-bias -mno-stack-bias"
	ALLOWED_FLAGS+=" -msecure-plt -m*-toc -D* -U*"

	# gcc 4.5
	ALLOWED_FLAGS+=" -mno-fma4 -mno-movbe -mno-xop -mno-lwp"
	# gcc 4.6
	ALLOWED_FLAGS+=" -mno-fsgsbase -mno-rdrnd -mno-f16c -mno-bmi -mno-tbm"

	# {C,CXX,F,FC}FLAGS that we think are ok, but still need testing.
	# NOTE:  currently -Os have issues with gcc3 and K6* arch's
	export UNSTABLE_FLAGS="-Os -O3 -freorder-blocks"
	return 0
}
+
# inverted filters for hardened compiler.  This is trying to unpick
# the hardened compiler defaults.
# For each flag being filtered, append the inverse flag when the compiler
# enables the corresponding feature by default (per its specs).
_filter-hardened() {
	local f
	for f in "$@" ; do
		case "${f}" in
			# Ideally we should only concern ourselves with PIE flags,
			# not -fPIC or -fpic, but too many places filter -fPIC without
			# thinking about -fPIE.
			-fPIC|-fpic|-fPIE|-fpie|-Wl,pie|-pie)
				# Only counter PIE if the specs actually default to it.
				gcc-specs-pie || continue
				is-flagq -nopie || append-flags -nopie;;
			-fstack-protector)
				gcc-specs-ssp || continue
				is-flagq -fno-stack-protector || append-flags $(test-flags -fno-stack-protector);;
			-fstack-protector-all)
				gcc-specs-ssp-to-all || continue
				is-flagq -fno-stack-protector-all || append-flags $(test-flags -fno-stack-protector-all);;
			-fno-strict-overflow)
				gcc-specs-nostrict || continue
				is-flagq -fstrict-overflow || append-flags $(test-flags -fstrict-overflow);;
		esac
	done
}
+
# Remove occurrences of strings from variable given in $1
# Strings removed are matched as globs, so for example
# '-O*' would remove -O1, -O2 etc.
_filter-var() {
	local varname=$1 word pat
	shift

	# Rebuild the variable keeping only words that match none of the globs.
	local keep=()
	for word in ${!varname} ; do
		for pat in "$@" ; do
			[[ ${word} == ${pat} ]] && continue 2
		done
		keep+=( "${word}" )
	done
	export ${varname}="${keep[*]}"
}
+
# @FUNCTION: filter-flags
# @USAGE: <flags>
# @DESCRIPTION:
# Remove particular <flags> from {C,CPP,CXX,F,FC}FLAGS.  Accepts shell globs.
filter-flags() {
	# Undo hardened-compiler defaults for any flag being filtered ...
	_filter-hardened "$@"
	# ... then scrub the flags from every user flag variable.
	local var
	for var in CFLAGS CPPFLAGS CXXFLAGS FFLAGS FCFLAGS ; do
		_filter-var ${var} "$@"
	done
	return 0
}
+
# @FUNCTION: filter-lfs-flags
# @DESCRIPTION:
# Remove flags that enable Large File Support.
filter-lfs-flags() {
	[[ -n $* ]] && die "filter-lfs-flags takes no arguments"
	filter-flags \
		-D_FILE_OFFSET_BITS=64 \
		-D_LARGEFILE_SOURCE \
		-D_LARGEFILE64_SOURCE \
		-D_LARGE_FILES \
		-D_LARGE_FILE_API
}
+
# @FUNCTION: append-cppflags
# @USAGE: <flags>
# @DESCRIPTION:
# Add extra <flags> to the current CPPFLAGS.
append-cppflags() {
	# No-op when called with nothing to add.
	[[ -n $* ]] || return 0
	export CPPFLAGS="${CPPFLAGS} $*"
	return 0
}
+
# @FUNCTION: append-cflags
# @USAGE: <flags>
# @DESCRIPTION:
# Add extra <flags> to the current CFLAGS.
append-cflags() {
	# No-op when called with nothing to add.
	[[ -n $* ]] || return 0
	export CFLAGS="${CFLAGS} $*"
	return 0
}
+
# @FUNCTION: append-cxxflags
# @USAGE: <flags>
# @DESCRIPTION:
# Add extra <flags> to the current CXXFLAGS.
append-cxxflags() {
	# No-op when called with nothing to add.
	[[ -n $* ]] || return 0
	export CXXFLAGS="${CXXFLAGS} $*"
	return 0
}
+
# @FUNCTION: append-fflags
# @USAGE: <flags>
# @DESCRIPTION:
# Add extra <flags> to the current {F,FC}FLAGS.
append-fflags() {
	# No-op when called with nothing to add.
	[[ -n $* ]] || return 0
	# Both the Fortran 77 and Fortran 90 flag variables get the flags.
	export FFLAGS="${FFLAGS} $*"
	export FCFLAGS="${FCFLAGS} $*"
	return 0
}
+
# @FUNCTION: append-lfs-flags
# @DESCRIPTION:
# Add flags that enable Large File Support.
append-lfs-flags() {
	[[ -n $* ]] && die "append-lfs-flags takes no arguments"
	# AIX spells Large File Support differently from everyone else.
	case ${CHOST} in
		*-aix*) append-cppflags -D_LARGE_FILES -D_LARGE_FILE_API ;;
		*)      append-cppflags -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE ;;
	esac
}
+
# @FUNCTION: append-flags
# @USAGE: <flags>
# @DESCRIPTION:
# Add extra <flags> to your current {C,CXX,F,FC}FLAGS.
append-flags() {
	[[ -n $* ]] || return 0
	# Fan the flags out to the C, C++ and Fortran flag variables.
	append-cflags "$@"
	append-cxxflags "$@"
	append-fflags "$@"
	return 0
}
+
# @FUNCTION: replace-flags
# @USAGE: <old> <new>
# @DESCRIPTION:
# Replace the <old> flag with <new>.  Accepts shell globs for <old>.
replace-flags() {
	if [[ $# != 2 ]] ; then
		echo
		eerror "Usage: replace-flags <old flag> <new flag>"
		die "replace-flags takes 2 arguments, not $#"
	fi

	local word varname
	local -a rebuilt
	for varname in CFLAGS CXXFLAGS FFLAGS FCFLAGS ; do
		# Walk flag-by-flag so a glob in $1 can only ever match a single
		# flag atom and cannot wipe out the rest of the list.
		rebuilt=()
		for word in ${!varname} ; do
			[[ ${word} == ${1} ]] && word=${2}
			rebuilt+=( "${word}" )
		done
		export ${varname}="${rebuilt[*]}"
	done

	return 0
}
+
# @FUNCTION: replace-cpu-flags
# @USAGE: <old> <new>
# @DESCRIPTION:
# Replace cpu flags (like -march/-mcpu/-mtune) that select the <old> cpu
# with flags that select the <new> cpu.  Accepts shell globs for <old>.
replace-cpu-flags() {
	# The last argument is the cpu to switch to.
	local newcpu="$#" ; newcpu="${!newcpu}"
	local oldcpu
	while [ $# -gt 1 ] ; do
		oldcpu=$1
		# quote to make sure that no globbing is done (particularly on
		# the old cpu name) prior to calling replace-flags
		replace-flags "-march=${oldcpu}" "-march=${newcpu}"
		replace-flags "-mcpu=${oldcpu}" "-mcpu=${newcpu}"
		replace-flags "-mtune=${oldcpu}" "-mtune=${newcpu}"
		shift
	done
	return 0
}
+
# Return shell true if the variable named $1 contains a word matching
# the glob given in $2.
_is_flagq() {
	local word
	for word in ${!1} ; do
		if [[ ${word} == $2 ]] ; then
			return 0
		fi
	done
	return 1
}
+
# @FUNCTION: is-flagq
# @USAGE: <flag>
# @DESCRIPTION:
# Returns shell true if <flag> is in {C,CXX,F,FC}FLAGS, else returns shell false.  Accepts shell globs.
is-flagq() {
	[[ -n $2 ]] && die "Usage: is-flag <flag>"
	# Succeed on the first flag variable that contains a match.
	local var
	for var in CFLAGS CXXFLAGS FFLAGS FCFLAGS ; do
		_is_flagq ${var} $1 && return 0
	done
	return 1
}
+
# @FUNCTION: is-flag
# @USAGE: <flag>
# @DESCRIPTION:
# Echo's "true" if flag is set in {C,CXX,F,FC}FLAGS.  Accepts shell globs.
is-flag() {
	is-flagq "$@" || return 1
	echo true
}
+
# @FUNCTION: is-ldflagq
# @USAGE: <flag>
# @DESCRIPTION:
# Returns shell true if <flag> is in LDFLAGS, else returns shell false.  Accepts shell globs.
is-ldflagq() {
	if [[ -n $2 ]] ; then
		die "Usage: is-ldflag <flag>"
	fi
	_is_flagq LDFLAGS $1
}
+
# @FUNCTION: is-ldflag
# @USAGE: <flag>
# @DESCRIPTION:
# Echo's "true" if flag is set in LDFLAGS.  Accepts shell globs.
is-ldflag() {
	is-ldflagq "$@" || return 1
	echo true
}
+
# @FUNCTION: filter-mfpmath
# @USAGE: <math types>
# @DESCRIPTION:
# Remove specified math types from the fpmath flag.  For example, if the user
# has -mfpmath=sse,386, running `filter-mfpmath sse` will leave the user with
# -mfpmath=386.
filter-mfpmath() {
	local orig_mfpmath new_math prune_math

	# save the original -mfpmath flag
	orig_mfpmath=$(get-flag -mfpmath)
	# get the value of the current -mfpmath flag
	new_math=$(get-flag mfpmath)
	# Space-pad the comma-separated list so each type can be matched whole.
	new_math=" ${new_math//,/ } "
	# figure out which math values are to be removed
	prune_math=""
	for prune_math in "$@" ; do
		new_math=${new_math/ ${prune_math} / }
	done
	# Collapse whitespace (unquoted echo) and restore the comma form.
	new_math=$(echo ${new_math})
	new_math=${new_math// /,}

	if [[ -z ${new_math} ]] ; then
		# if we're removing all user specified math values are
		# slated for removal, then we just filter the flag
		filter-flags ${orig_mfpmath}
	else
		# if we only want to filter some of the user specified
		# math values, then we replace the current flag
		replace-flags ${orig_mfpmath} -mfpmath=${new_math}
	fi
	return 0
}
+
# @FUNCTION: strip-flags
# @DESCRIPTION:
# Strip C[XX]FLAGS of everything except known good/safe flags.
strip-flags() {
	setup-allowed-flags

	# Allow unstable C[XX]FLAGS if we are using an unstable profile.
	#
	# In Gentoo Prefix, this is useless. Not a problem, but on aix6 it causes
	# bash to hang and I can't figure it out. So it is disabled for now.
	# --darkside@g.o (14 Jan 2009)
	if use !prefix; then
		if has "~$(tc-arch)" ${ACCEPT_KEYWORDS} ; then
			ALLOWED_FLAGS="${ALLOWED_FLAGS} ${UNSTABLE_FLAGS}"
		fi
	fi

	set -f	# disable pathname expansion

	# The original open-coded the same filter loop four times; it now
	# lives once in _strip-flags-var.
	local var
	for var in CFLAGS CXXFLAGS FFLAGS FCFLAGS ; do
		_strip-flags-var ${var}
	done

	set +f	# re-enable pathname expansion

	return 0
}

# Internal helper for strip-flags: reduce the flag variable named $1 to
# the flags whitelisted in ${ALLOWED_FLAGS} and export the result.
_strip-flags-var() {
	local varname=$1 x y flag new=""

	for x in ${!varname}; do
		for y in ${ALLOWED_FLAGS}; do
			# Compare only the flag name (anything before '='), and
			# keep the flag if the whitelist glob covers it entirely.
			flag=${x%%=*}
			if [ "${flag%%${y}}" = "" ] ; then
				new="${new} ${x}"
				break
			fi
		done
	done

	# In case we filtered out all optimization flags fallback to -O2
	if [ "${!varname/-O}" != "${!varname}" -a "${new/-O}" = "${new}" ]; then
		new="${new} -O2"
	fi

	export ${varname}="${new}"
}
+
# Internal: test whether the compiler selected by tc-get$1 accepts flag $2.
test-flag-PROG() {
	local comp=$1 flags=$2

	# Nothing to do without a compiler variable name and a flag to test.
	if [[ -z ${comp} || -z ${flags} ]] ; then
		return 1
	fi

	# Compile an empty translation unit with the flag; the compiler's
	# exit status tells us whether it was accepted.
	local prog=$(tc-get${comp})
	${prog} ${flags} -S -o /dev/null -xc /dev/null > /dev/null 2>&1
}
+
# @FUNCTION: test-flag-CC
# @USAGE: <flag>
# @DESCRIPTION:
# Returns shell true if <flag> is supported by the C compiler, else returns shell false.
test-flag-CC() { test-flag-PROG CC "$1" ; }

# @FUNCTION: test-flag-CXX
# @USAGE: <flag>
# @DESCRIPTION:
# Returns shell true if <flag> is supported by the C++ compiler, else returns shell false.
test-flag-CXX() { test-flag-PROG CXX "$1" ; }

# @FUNCTION: test-flag-F77
# @USAGE: <flag>
# @DESCRIPTION:
# Returns shell true if <flag> is supported by the Fortran 77 compiler, else returns shell false.
test-flag-F77() { test-flag-PROG F77 "$1" ; }

# @FUNCTION: test-flag-FC
# @USAGE: <flag>
# @DESCRIPTION:
# Returns shell true if <flag> is supported by the Fortran 90 compiler, else returns shell false.
test-flag-FC() { test-flag-PROG FC "$1" ; }
+
# Internal: echo the subset of the given flags accepted by compiler $1,
# warning about each rejected flag.  Fails if no flag was accepted.
test-flags-PROG() {
	local comp=$1
	shift

	[[ -z ${comp} ]] && return 1

	local flag accepted=""
	for flag in "$@" ; do
		if test-flag-${comp} "${flag}" ; then
			accepted="${accepted}${accepted:+ }${flag}"
		else
			ewarn "removing ${flag} because ${comp} rejected it"
		fi
	done

	echo "${accepted}"

	# Just bail if we dont have any flags
	[[ -n ${accepted} ]]
}
+
# @FUNCTION: test-flags-CC
# @USAGE: <flags>
# @DESCRIPTION:
# Returns shell true if <flags> are supported by the C compiler, else returns shell false.
test-flags-CC() { test-flags-PROG CC "$@" ; }

# @FUNCTION: test-flags-CXX
# @USAGE: <flags>
# @DESCRIPTION:
# Returns shell true if <flags> are supported by the C++ compiler, else returns shell false.
test-flags-CXX() { test-flags-PROG CXX "$@" ; }

# @FUNCTION: test-flags-F77
# @USAGE: <flags>
# @DESCRIPTION:
# Returns shell true if <flags> are supported by the Fortran 77 compiler, else returns shell false.
test-flags-F77() { test-flags-PROG F77 "$@" ; }

# @FUNCTION: test-flags-FC
# @USAGE: <flags>
# @DESCRIPTION:
# Returns shell true if <flags> are supported by the Fortran 90 compiler, else returns shell false.
test-flags-FC() { test-flags-PROG FC "$@" ; }
+
# @FUNCTION: test-flags
# @USAGE: <flags>
# @DESCRIPTION:
# Short-hand that should hopefully work for both C and C++ compiler, but
# it's really only present due to the append-flags() abomination.
test-flags() { test-flags-CC "$@"; }
+
# @FUNCTION: test_flag
# @DESCRIPTION:
# DEPRECATED, use test-flags()
test_flag() {
	ewarn "test_flag: deprecated, please use test-flags()!" >&2

	test-flags-CC "$@"
}
+
# @FUNCTION: test_version_info
# @USAGE: <version>
# @DESCRIPTION:
# Returns shell true if the current C compiler version matches <version>, else returns shell false.
# Accepts shell globs.
test_version_info() {
	# Match <version> as a glob anywhere in the `$(tc-getCC) --version` output.
	[[ $($(tc-getCC) --version 2>&1) == *$1* ]]
}
+
# @FUNCTION: strip-unsupported-flags
# @DESCRIPTION:
# Strip {C,CXX,F,FC}FLAGS of any flags not supported by the active toolchain.
strip-unsupported-flags() {
	# Map each flag variable to the test-flags-* helper for its compiler.
	local pair var
	for pair in CFLAGS:CC CXXFLAGS:CXX FFLAGS:F77 FCFLAGS:FC ; do
		var=${pair%:*}
		export ${var}="$(test-flags-${pair#*:} ${!var})"
	done
}
+
# @FUNCTION: get-flag
# @USAGE: <flag>
# @DESCRIPTION:
# Find and echo the value for a particular flag.  Accepts shell globs.
get-flag() {
	local needle=$1 tok

	# Passing "-march" echoes the whole flag ("-march=i686") while
	# passing "march" echoes only its value ("i686").
	for tok in ${CFLAGS} ${CXXFLAGS} ${FFLAGS} ${FCFLAGS} ; do
		if [ "${tok/${needle}}" != "${tok}" ] ; then
			printf "%s\n" "${tok/-${needle}=}"
			return 0
		fi
	done
	return 1
}
+
# @FUNCTION: has_hardened
# @DESCRIPTION:
# DEPRECATED - use gcc-specs-relro or gcc-specs-now from toolchain-funcs
has_hardened() {
	ewarn "has_hardened: deprecated, please use gcc-specs-{relro,now}()!" >&2

	# A hardened gcc advertises "Hardened" in its --version output.
	test_version_info Hardened && return 0
	# The specs file wont exist unless gcc has GCC_SPECS support
	[[ -f ${GCC_SPECS} && ${GCC_SPECS} != ${GCC_SPECS/hardened/} ]]
}
+
# @FUNCTION: has_pic
# @DESCRIPTION:
# DEPRECATED - use gcc-specs-pie from toolchain-funcs
# indicate whether PIC is set
has_pic() {
	ewarn "has_pic: deprecated, please use gcc-specs-pie()!" >&2

	# True if the user forces PIC via CFLAGS ...
	if [[ ${CFLAGS/-fPIC} != ${CFLAGS} || ${CFLAGS/-fpic} != ${CFLAGS} ]] ; then
		return 0
	fi
	# ... otherwise defer to the compiler specs.
	gcc-specs-pie
}
+
# @FUNCTION: has_pie
# @DESCRIPTION:
# DEPRECATED - use gcc-specs-pie from toolchain-funcs
# indicate whether PIE is set
has_pie() {
	ewarn "has_pie: deprecated, please use gcc-specs-pie()!" >&2

	# True if the user forces PIE via CFLAGS ...
	if [[ ${CFLAGS/-fPIE} != ${CFLAGS} || ${CFLAGS/-fpie} != ${CFLAGS} ]] ; then
		return 0
	fi
	# ... otherwise defer to the compiler specs.
	gcc-specs-pie
}
+
# @FUNCTION: has_ssp_all
# @DESCRIPTION:
# DEPRECATED - use gcc-specs-ssp from toolchain-funcs
# indicate whether code for SSP is being generated for all functions
has_ssp_all() {
	ewarn "has_ssp_all: deprecated, please use gcc-specs-ssp()!" >&2

	# note; this matches only -fstack-protector-all
	# True if CFLAGS request it, the compiler predefines __SSP_ALL__ under
	# the current CFLAGS, or the specs protect all functions by default.
	[[ ${CFLAGS/-fstack-protector-all} != ${CFLAGS} || \
	   -n $(echo | $(tc-getCC) ${CFLAGS} -E -dM - | grep __SSP_ALL__) ]] || \
	gcc-specs-ssp-to-all
}
+
# @FUNCTION: has_ssp
# @DESCRIPTION:
# DEPRECATED - use gcc-specs-ssp from toolchain-funcs
# indicate whether code for SSP is being generated
has_ssp() {
	ewarn "has_ssp: deprecated, please use gcc-specs-ssp()!" >&2

	# note; this matches both -fstack-protector and -fstack-protector-all
	# True if CFLAGS request SSP, the compiler predefines __SSP__ under the
	# current CFLAGS, or the specs enable it by default.
	[[ ${CFLAGS/-fstack-protector} != ${CFLAGS} || \
	   -n $(echo | $(tc-getCC) ${CFLAGS} -E -dM - | grep __SSP__) ]] || \
	gcc-specs-ssp
}
+
# @FUNCTION: has_m64
# @DESCRIPTION:
# This doesn't test if the flag is accepted, it tests if the flag actually
# WORKS. Non-multilib gcc will take both -m32 and -m64. If the flag works
# return code is 0, else the return code is 1.
has_m64() {
	# this doesnt test if the flag is accepted, it tests if the flag
	# actually -WORKS-. non-multilib gcc will take both -m32 and -m64!
	# please dont replace this function with test_flag in some future
	# clean-up!

	local temp="$(emktemp)"
	echo "int main() { return(0); }" > "${temp}".c
	# NOTE(review): MY_CC is not declared local, so it leaks into the
	# caller's environment.  The "${MY_CC/ .*/}" substitution presumably
	# intends to strip arguments off ${CC}, but the glob only matches a
	# literal " ." sequence -- confirm intent.
	MY_CC=$(tc-getCC)
	# NOTE(review): the "$(emktemp)" output file is created and never
	# removed; only the .c source is cleaned up below.
	${MY_CC/ .*/} -m64 -o "$(emktemp)" "${temp}".c > /dev/null 2>&1
	local ret=$?
	rm -f "${temp}".c
	# Any exit status other than 1 is treated as "the flag works".
	[[ ${ret} != 1 ]] && return 0
	return 1
}
+
# @FUNCTION: has_m32
# @DESCRIPTION:
# This doesn't test if the flag is accepted, it tests if the flag actually
# WORKS. Non-multilib gcc will take both -m32 and -m64. If the flag works return
# code is 0, else return code is 1.
has_m32() {
	# this doesnt test if the flag is accepted, it tests if the flag
	# actually -WORKS-. non-multilib gcc will take both -m32 and -m64!
	# please dont replace this function with test_flag in some future
	# clean-up!

	# On amd64 multilib profiles, -m32 is assumed to work without testing.
	[ "$(tc-arch)" = "amd64" ] && has_multilib_profile && return 0

	local temp=$(emktemp)
	echo "int main() { return(0); }" > "${temp}".c
	# NOTE(review): same caveats as has_m64 -- MY_CC leaks (not local),
	# the " .*" substitution is dubious, and the emktemp output file is
	# never removed.
	MY_CC=$(tc-getCC)
	${MY_CC/ .*/} -m32 -o "$(emktemp)" "${temp}".c > /dev/null 2>&1
	local ret=$?
	rm -f "${temp}".c
	# Any exit status other than 1 is treated as "the flag works".
	[[ ${ret} != 1 ]] && return 0
	return 1
}
+
# @FUNCTION: replace-sparc64-flags
# @DESCRIPTION:
# Sets mcpu to v8 and uses the original value as mtune if none specified.
replace-sparc64-flags() {
	# Fixed: ${x} was never declared local and leaked into the caller's
	# environment.
	local x SPARC64_CPUS="ultrasparc3 ultrasparc v9"

	# If the user already picked an -mtune, just downgrade -mcpu to v8;
	# otherwise preserve the old cpu choice as the -mtune value.
	if [ "${CFLAGS/mtune}" != "${CFLAGS}" ]; then
		for x in ${SPARC64_CPUS}; do
			CFLAGS="${CFLAGS/-mcpu=${x}/-mcpu=v8}"
		done
	else
		for x in ${SPARC64_CPUS}; do
			CFLAGS="${CFLAGS/-mcpu=${x}/-mcpu=v8 -mtune=${x}}"
		done
	fi

	if [ "${CXXFLAGS/mtune}" != "${CXXFLAGS}" ]; then
		for x in ${SPARC64_CPUS}; do
			CXXFLAGS="${CXXFLAGS/-mcpu=${x}/-mcpu=v8}"
		done
	else
		for x in ${SPARC64_CPUS}; do
			CXXFLAGS="${CXXFLAGS/-mcpu=${x}/-mcpu=v8 -mtune=${x}}"
		done
	fi

	export CFLAGS CXXFLAGS
}
+
# @FUNCTION: append-libs
# @USAGE: <libs>
# @DESCRIPTION:
# Add extra <libs> to the current LIBS.
append-libs() {
	[[ -n $* ]] || return 0
	# Accept both "foo" and "-lfoo" spellings; always store as "-lfoo".
	local lib
	for lib in "$@"; do
		export LIBS="${LIBS} -l${lib#-l}"
	done

	return 0
}
+
# @FUNCTION: append-ldflags
# @USAGE: <flags>
# @DESCRIPTION:
# Add extra <flags> to the current LDFLAGS.
append-ldflags() {
	[[ -n $* ]] || return 0
	# Libraries belong in LIBS, not LDFLAGS; warn but keep going.
	local flag
	for flag in "$@"; do
		[[ ${flag} == -l* ]] && \
			ewarn "Appending a library link instruction (${flag}); libraries to link to should not be passed through LDFLAGS"
	done

	# Only append the flags the toolchain actually accepts.
	export LDFLAGS="${LDFLAGS} $(test-flags "$@")"
	return 0
}
+
# @FUNCTION: filter-ldflags
# @USAGE: <flags>
# @DESCRIPTION:
# Remove particular <flags> from LDFLAGS.  Accepts shell globs.
filter-ldflags() {
	# Thin wrapper: _filter-var does the glob matching and re-export.
	_filter-var LDFLAGS "$@"
	return 0
}
+
# @FUNCTION: raw-ldflags
# @USAGE: <flags>
# @DESCRIPTION:
# Turn C style ldflags (-Wl,-foo) into straight ldflags - the results
# are suitable for passing directly to 'ld'; note LDFLAGS is usually passed
# to gcc where it needs the '-Wl,'.
raw-ldflags() {
	local flag src="$*"
	local -a out=()
	[[ -z ${src} ]] && src=${LDFLAGS}

	for flag in ${src} ; do
		# Drop the -Wl, prefix and split comma-separated operands.
		flag=${flag#-Wl,}
		out+=( ${flag//,/ } )
	done
	echo "${out[@]}"
}
+
# @FUNCTION: no-as-needed
# @RETURN: Flag to disable asneeded behavior for use with append-ldflags.
no-as-needed() {
	# Only emit the flag when the active linker is GNU ld.
	if [[ $($(tc-getLD) -v 2>&1 </dev/null) == *GNU* ]] ; then
		echo "-Wl,--no-as-needed"
	fi
}
+
+# Some tests for when we screw with things and want to make
+# sure we didn't break anything
+#TESTS() {
+#	CFLAGS="-a -b -c=1"
+#	CXXFLAGS="-x -y -z=2"
+#	LDFLAGS="-l -m -n=3"
+#
+#	die() { exit 1; }
+#	(is-flag 1 2 3) && die
+#	(is-ldflag 1 2 3) && die
+#
+#	is-flagq -l && die
+#	is-ldflagq -a && die
+#	is-flagq -a || die
+#	is-flagq -x || die
+#	is-ldflagq -n=* || die
+#	is-ldflagq -n && die
+#
+#	strip-unsupported-flags
+#	[[ ${CFLAGS} == "-c=1" ]] || die
+#	[[ ${CXXFLAGS} == "-y -z=2" ]] || die
+#
+#	echo "All tests pass"
+#}
+#TESTS

diff --git a/eclass/gnuconfig.eclass b/eclass/gnuconfig.eclass
new file mode 100644
index 0000000..c681cdf
--- /dev/null
+++ b/eclass/gnuconfig.eclass
@@ -0,0 +1,103 @@
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/eclass/gnuconfig.eclass,v 1.33 2006/06/21 19:35:28 vapier Exp $
+#
+# THIS ECLASS IS DEAD: It has been integrated into portage
+#
+# Author: Will Woods <wwoods@gentoo.org>
+#
+# This eclass is used to automatically update files that typically come with
+# automake to the newest version available on the system. The most common use
+# of this is to update config.guess and config.sub when configure dies from
+# misguessing your canonical system name (CHOST). It can also be used to update
+# other files that come with automake, e.g. depcomp, mkinstalldirs, etc.
+#
+# usage: gnuconfig_update [file1 file2 ...]
+# if called without arguments, config.guess and config.sub will be updated.
+# All files in the source tree ($S) with the given name(s) will be replaced
+# with the newest available versions chosen from the list of locations in
+# gnuconfig_findnewest(), below.
+#
+# gnuconfig_update should generally be called from src_unpack()
+
+
+DEPEND="sys-devel/gnuconfig"
+
+DESCRIPTION="Based on the ${ECLASS} eclass"
+
# Wrapper function for gnuconfig_do_update. If no arguments are given, update
# config.sub and config.guess (old default behavior), otherwise update the
# named files.  An absolute path as the first argument overrides ${S} as the
# directory to search.
gnuconfig_update() {

# hmm some packages (like binutils gcc glibc) still use this ...
#	echo
#	ewarn "QA Notice: Please stop using me, portage updates files for you."
#	echo

	# Declared here, read by gnuconfig_do_update via dynamic scoping.
	local startdir

	case $1 in
		/*) startdir=$1 ; shift ;;
		*)  startdir=${S} ;;
	esac

	if [[ $# -eq 0 ]] ; then
		gnuconfig_do_update config.sub config.guess
	else
		gnuconfig_do_update "$@"
	fi
}
+
+# Copy the newest available version of specified files over any old ones in the
+# source dir. This function shouldn't be called directly - use gnuconfig_update
+#
+# Note that since bash uses dynamic scoping, startdir is available here from
+# the gnuconfig_update function
+gnuconfig_do_update() {
+	local configsubs_dir target targetlist file
+
+	[[ $# -eq 0 ]] && die "do not call gnuconfig_do_update; use gnuconfig_update"
+
+	# Directory holding the newest system copies of config.sub/config.guess.
+	configsubs_dir=$(gnuconfig_findnewest)
+	einfo "Using GNU config files from ${configsubs_dir}"
+	for file in "$@" ; do
+		if [[ ! -r ${configsubs_dir}/${file} ]] ; then
+			eerror "Can't read ${configsubs_dir}/${file}, skipping.."
+			continue
+		fi
+		# startdir comes from gnuconfig_update via bash dynamic scoping.
+		# NOTE(review): unquoted iteration over targetlist assumes no
+		# whitespace in paths below ${startdir}.
+		targetlist=$(find "${startdir}" -name "${file}")
+		if [[ -n ${targetlist} ]] ; then
+			for target in ${targetlist} ; do
+				# Replace symlinks with real files before copying over them.
+				[[ -L ${target} ]] && rm -f "${target}"
+				einfo "  Updating ${target/$startdir\//}"
+				cp -f "${configsubs_dir}/${file}" "${target}"
+				eend $?
+			done
+		else
+			ewarn "  No ${file} found in ${startdir}, skipping ..."
+		fi
+	done
+
+	return 0
+}
+
# this searches the standard locations for the newest config.{sub|guess}, and
# returns the directory where they can be found.
gnuconfig_findnewest() {
	# Candidate copies shipped by gnuconfig, automake and libtool.
	local -a candidates=(
		"${EPREFIX}"/usr/share/gnuconfig/config.sub
		"${EPREFIX}"/usr/share/automake-1.9/config.sub
		"${EPREFIX}"/usr/share/automake-1.8/config.sub
		"${EPREFIX}"/usr/share/automake-1.7/config.sub
		"${EPREFIX}"/usr/share/automake-1.6/config.sub
		"${EPREFIX}"/usr/share/automake-1.5/config.sub
		"${EPREFIX}"/usr/share/automake-1.4/config.sub
		"${EPREFIX}"/usr/share/libtool/config.sub
	)
	# Pick the file with the newest embedded timestamp='YYYY-MM-DD' line
	# and strip the trailing /config.sub:... to leave just the directory.
	grep -s '^timestamp' "${candidates[@]}" \
		| sort -n -t\' -k2 \
		| tail -n 1 \
		| sed 's,/config.sub:.*$,,'
}

diff --git a/eclass/libtool.eclass b/eclass/libtool.eclass
new file mode 100644
index 0000000..f248ac6
--- /dev/null
+++ b/eclass/libtool.eclass
@@ -0,0 +1,466 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/eclass/libtool.eclass,v 1.90 2010/08/22 22:44:45 vapier Exp $
+
+# @ECLASS: libtool.eclass
+# @MAINTAINER:
+# base-system@gentoo.org
+# @BLURB: quickly update bundled libtool code
+# @DESCRIPTION:
+# This eclass patches ltmain.sh distributed with libtoolized packages with the
+# relink and portage patch among others
+#
+# Note, this eclass does not require libtool as it only applies patches to
+# generated libtool files.  We do not run the libtoolize program because that
+# requires a regeneration of the main autotool files in order to work properly.
+
+DESCRIPTION="Based on the ${ECLASS} eclass"
+
+inherit toolchain-funcs
+
+ELT_PATCH_DIR="${ECLASSDIR}/ELT-patches"
+
#
# See if we can apply $2 on $1, and if so, do it
#
ELT_try_and_apply_patch() {
	local file=$1 patch=$2

	# We only support patchlevel of 0 - why worry if its static patches?
	# Bail out early when a dry run shows the patch will not apply.
	if ! patch -p0 --dry-run "${file}" "${patch}" &> "${T}/elibtool.log" ; then
		return 1
	fi

	einfo "  Applying $(basename "$(dirname "${patch}")")-${patch##*/}.patch ..."
	patch -p0 -g0 --no-backup-if-mismatch "${file}" "${patch}" \
		&> "${T}/elibtool.log"
	local ret=$?
	# Record the attempt even if the real run failed, matching the
	# bookkeeping elibtoolize relies on.
	export ELT_APPLIED_PATCHES="${ELT_APPLIED_PATCHES} ${patch##*/}"
	return "${ret}"
}
+
#
# Get string version of ltmain.sh or ltconfig (passed as $1)
#
ELT_libtool_version() {
	# Evaluate the VERSION= assignment in a subshell so the caller's
	# environment is never polluted; default to "0" when none is found.
	(
		unset VERSION
		eval "$(grep -e '^[[:space:]]*VERSION=' "$1")"
		echo "${VERSION:-0}"
	)
}
+
+#
+# Run through the patches in $2 and see if any
+# apply to $1 ...
+#
+# $1 - file to patch (ltmain.sh / configure / ltconfig)
+# $2 - patch set name (a subdirectory of ELT_PATCH_DIR)
+# $3 - optional string substituted for @REM_INT_DEP@ inside each patch
+# Returns 0 as soon as one patch in the set applies, 1 otherwise.
+#
+ELT_walk_patches() {
+	local patch
+	local ret=1
+	local file=$1
+	local patch_set=$2
+	local patch_dir="${ELT_PATCH_DIR}/${patch_set}"
+	local rem_int_dep=$3
+
+	[[ -z ${patch_set} ]] && return 1
+	[[ ! -d ${patch_dir} ]] && return 1
+
+	pushd "${ELT_PATCH_DIR}" >/dev/null
+
+	# Go through the patches in reverse order (newer version to older)
+	for patch in $(find "${patch_set}" -maxdepth 1 -type f | LC_ALL=C sort -r) ; do
+		# For --remove-internal-dep ...
+		if [[ -n ${rem_int_dep} ]] ; then
+			# For replace @REM_INT_DEP@ with what was passed
+			# to --remove-internal-dep
+			local tmp="${T}/$$.rem_int_deps.patch"
+			sed -e "s|@REM_INT_DEP@|${rem_int_dep}|g" "${patch}" > "${tmp}"
+			patch=${tmp}
+		fi
+
+		if ELT_try_and_apply_patch "${file}" "${patch}" ; then
+			# Break to unwind w/popd rather than return directly
+			ret=0
+			break
+		fi
+	done
+
+	popd >/dev/null
+	return ${ret}
+}
+
+# @FUNCTION: elibtoolize
+# @USAGE: [dirs] [--portage] [--reverse-deps] [--patch-only] [--remove-internal-dep=xxx] [--shallow] [--no-uclibc]
+# @DESCRIPTION:
+# Apply a smorgasbord of patches to bundled libtool files.  This function
+# should always be safe to run.  If no directories are specified, then
+# ${S} will be searched for appropriate files.
+#
+# If the --shallow option is used, then only ${S}/ltmain.sh will be patched.
+#
+# The other options should be avoided in general unless you know what's going on.
+elibtoolize() {
+	local x
+	local do_portage="no"
+	local do_reversedeps="no"
+	local do_only_patches="no"
+	local do_uclibc="yes"
+	local do_force="no"
+	local deptoremove=
+	local do_shallow="no"
+	local elt_patches="install-sh ltmain portage relink max_cmd_len sed test tmp cross as-needed"
+
+	# Parse option flags; any unrecognized argument is a fatal usage error.
+	for x in "$@" ; do
+		case ${x} in
+			--portage)
+				# Only apply portage patch, and don't
+				# 'libtoolize --copy --force' if all patches fail.
+				do_portage="yes"
+				;;
+			--reverse-deps)
+				# Apply the reverse-deps patch
+				# http://bugzilla.gnome.org/show_bug.cgi?id=75635
+				do_reversedeps="yes"
+				elt_patches+=" fix-relink"
+				;;
+			--patch-only)
+				# Do not run libtoolize if none of the patches apply ..
+				do_only_patches="yes"
+				;;
+			--remove-internal-dep=*)
+				# We will replace @REM_INT_DEP@ with what is needed
+				# in ELT_walk_patches() ...
+				deptoremove=${x#--remove-internal-dep=}
+
+				# Add the patch for this ...
+				[[ -n ${deptoremove} ]] && elt_patches+=" rem-int-dep"
+				;;
+			--shallow)
+				# Only patch the ltmain.sh in ${S}
+				do_shallow="yes"
+				;;
+			--no-uclibc)
+				do_uclibc="no"
+				;;
+			"--force")
+				do_force="yes"
+				;;
+			*)
+				eerror "Invalid elibtoolize option: ${x}"
+				die "elibtoolize called with ${x} ??"
+		esac
+	done
+
+	[[ ${do_uclibc} == "yes" ]] && elt_patches+=" uclibc-conf uclibc-ltconf"
+
+	# Add host-specific patch sets on top of the generic ones.
+	case ${CHOST} in
+		*-aix*)     elt_patches+=" hardcode aixrtl aix-noundef" ;; #213277
+		*-darwin*)  elt_patches+=" darwin-ltconf darwin-ltmain darwin-conf" ;;
+		*-freebsd*) elt_patches+=" fbsd-conf fbsd-ltconf" ;;
+		*-hpux*)    elt_patches+=" hpux-conf deplibs hc-flag-ld hardcode hardcode-relink relink-prog no-lc" ;;
+		*-irix*)    elt_patches+=" irix-ltmain" ;;
+		*-mint*)    elt_patches+=" mint-conf" ;;
+	esac
+
+	# Reuse "$@" for dirs to patch
+	set --
+	if [[ ${do_shallow} == "yes" ]] ; then
+		[[ -f ${S}/ltmain.sh ]] && set -- "${S}"
+	else
+		set -- $(find "${S}" -name ltmain.sh -printf '%h ')
+	fi
+
+	local d p
+	for d in "$@" ; do
+		export ELT_APPLIED_PATCHES=
+
+		# Skip dirs already stamped as processed, unless --force was given.
+		[[ ${do_force} == no && -f ${d}/.elibtoolized ]] && continue
+
+		einfo "Running elibtoolize in: ${d#${WORKDIR}/}/"
+
+		# Try each patch set; most target ltmain.sh by default, others
+		# carry their own applicability checks against configure/ltconfig.
+		for p in ${elt_patches} ; do
+			local ret=0
+
+			case ${p} in
+				portage)
+					# Stupid test to see if its already applied ...
+					if ! grep -qs 'We do not want portage' "${d}/ltmain.sh" ; then
+						ELT_walk_patches "${d}/ltmain.sh" "${p}"
+						ret=$?
+					fi
+					;;
+				rem-int-dep)
+					ELT_walk_patches "${d}/ltmain.sh" "${p}" "${deptoremove}"
+					ret=$?
+					;;
+				fix-relink)
+					# Do not apply if we do not have the relink patch applied ...
+					if grep -qs 'inst_prefix_dir' "${d}/ltmain.sh" ; then
+						ELT_walk_patches "${d}/ltmain.sh" "${p}"
+						ret=$?
+					fi
+					;;
+				max_cmd_len)
+					# Do not apply if $max_cmd_len is not used ...
+					if grep -qs 'max_cmd_len' "${d}/ltmain.sh" ; then
+						ELT_walk_patches "${d}/ltmain.sh" "${p}"
+						ret=$?
+					fi
+					;;
+				as-needed)
+					ELT_walk_patches "${d}/ltmain.sh" "${p}"
+					ret=$?
+					;;
+				uclibc-conf)
+					if grep -qs 'Transform linux' "${d}/configure" ; then
+						ELT_walk_patches "${d}/configure" "${p}"
+						ret=$?
+					# ltmain.sh and co might be in a subdirectory ...
+					elif [[ ! -e ${d}/configure ]] && \
+					     grep -qs 'Transform linux' "${d}/../configure" ; then
+						ELT_walk_patches "${d}/../configure" "${p}"
+						ret=$?
+					fi
+					;;
+				uclibc-ltconf)
+					# Newer libtoolize clears ltconfig, as not used anymore
+					if [[ -s ${d}/ltconfig ]] ; then
+						ELT_walk_patches "${d}/ltconfig" "${p}"
+						ret=$?
+					fi
+					;;
+				fbsd-conf)
+					if grep -qs 'version_type=freebsd-' "${d}/configure" ; then
+						ELT_walk_patches "${d}/configure" "${p}"
+						ret=$?
+					# ltmain.sh and co might be in a subdirectory ...
+					elif [[ ! -e ${d}/configure ]] && \
+					     grep -qs 'version_type=freebsd-' "${d}/../configure" ; then
+						ELT_walk_patches "${d}/../configure" "${p}"
+						ret=$?
+					fi
+					;;
+				fbsd-ltconf)
+					if [[ -s ${d}/ltconfig ]] ; then
+						ELT_walk_patches "${d}/ltconfig" "${p}"
+						ret=$?
+					fi
+					;;
+				darwin-conf)
+					if grep -qs '&& echo \.so ||' "${d}/configure" ; then
+						ELT_walk_patches "${d}/configure" "${p}"
+						ret=$?
+					# ltmain.sh and co might be in a subdirectory ...
+					elif [[ ! -e ${d}/configure ]] && \
+					     grep -qs '&& echo \.so ||' "${d}/../configure" ; then
+						ELT_walk_patches "${d}/../configure" "${p}"
+						ret=$?
+					fi
+					;;
+				darwin-ltconf)
+					# Newer libtoolize clears ltconfig, as not used anymore
+					if [[ -s ${d}/ltconfig ]] ; then
+						ELT_walk_patches "${d}/ltconfig" "${p}"
+						ret=$?
+					fi
+					;;
+				darwin-ltmain)
+					# special case to avoid false positives (failing to apply
+					# ltmain.sh path message), newer libtools have this patch
+					# built in, so not much to patch around then
+					if [[ -e ${d}/ltmain.sh ]] && \
+					   ! grep -qs 'verstring="-compatibility_version' "${d}/ltmain.sh" ; then
+						ELT_walk_patches "${d}/ltmain.sh" "${p}"
+						ret=$?
+					fi
+					;;
+				aixrtl|hpux-conf)
+					ret=1
+					local subret=0
+					# apply multiple patches as often as they match
+					while [[ $subret -eq 0 ]]; do
+						subret=1
+						if [[ -e ${d}/configure ]]; then
+							ELT_walk_patches "${d}/configure" "${p}"
+							subret=$?
+						# ltmain.sh and co might be in a subdirectory ...
+						elif [[ ! -e ${d}/configure && -e ${d}/../configure ]] ; then
+							ELT_walk_patches "${d}/../configure" "${p}"
+							subret=$?
+						fi
+						if [[ $subret -eq 0 ]]; then
+							# have at least one patch succeeded.
+							ret=0
+						fi
+					done
+					;;
+				mint-conf)
+					ret=1
+					local subret=1
+					if [[ -e ${d}/configure ]]; then
+						ELT_walk_patches "${d}/configure" "${p}"
+						subret=$?
+					# ltmain.sh and co might be in a subdirectory ...
+					elif [[ ! -e ${d}/configure && -e ${d}/../configure ]] ; then
+						ELT_walk_patches "${d}/../configure" "${p}"
+						subret=$?
+					fi
+					if [[ $subret -eq 0 ]]; then
+						# have at least one patch succeeded.
+						ret=0
+					fi
+					;;
+				install-sh)
+					ELT_walk_patches "${d}/install-sh" "${p}"
+					ret=$?
+					;;
+				cross)
+					if tc-is-cross-compiler ; then
+						ELT_walk_patches "${d}/ltmain.sh" "${p}"
+						ret=$?
+					fi
+					;;
+				*)
+					ELT_walk_patches "${d}/ltmain.sh" "${p}"
+					ret=$?
+					;;
+			esac
+
+			# Patch set did not apply; some sets matter enough to warn/die.
+			if [[ ${ret} -ne 0 ]] ; then
+				case ${p} in
+					relink)
+						local version=$(ELT_libtool_version "${d}/ltmain.sh")
+						# Critical patch, but could be applied ...
+						# FIXME:  Still need a patch for ltmain.sh > 1.4.0
+						if ! grep -qs 'inst_prefix_dir' "${d}/ltmain.sh" && \
+						   [[ $(VER_to_int "${version}") -ge $(VER_to_int "1.4.0") ]] ; then
+							ewarn "  Could not apply relink.patch!"
+						fi
+						;;
+					portage)
+						# Critical patch - for this one we abort, as it can really
+						# cause breakage without it applied!
+						if [[ ${do_portage} == "yes" ]] ; then
+							# Stupid test to see if its already applied ...
+							if ! grep -qs 'We do not want portage' "${d}/ltmain.sh" ; then
+								echo
+								eerror "Portage patch requested, but failed to apply!"
+								eerror "Please file a bug report to add a proper patch."
+								die "Portage patch requested, but failed to apply!"
+							fi
+						else
+							if grep -qs 'We do not want portage' "${d}/ltmain.sh" ; then
+							#	ewarn "  Portage patch seems to be already applied."
+							#	ewarn "  Please verify that it is not needed."
+								:
+							else
+							    local version=$(ELT_libtool_version "${d}"/ltmain.sh)
+								echo
+								eerror "Portage patch failed to apply (ltmain.sh version ${version})!"
+								eerror "Please file a bug report to add a proper patch."
+								die "Portage patch failed to apply!"
+							fi
+							# We do not want to run libtoolize ...
+							ELT_APPLIED_PATCHES="portage"
+						fi
+						;;
+					uclibc-*)
+						[[ ${CHOST} == *-uclibc ]] && ewarn "  uClibc patch set '${p}' failed to apply!"
+						;;
+					fbsd-*)
+						if [[ ${CHOST} == *-freebsd* ]] ; then
+							if [[ -z $(grep 'Handle Gentoo/FreeBSD as it was Linux' \
+								"${d}/configure" "${d}/../configure" 2>/dev/null) ]]; then
+								eerror "  FreeBSD patch set '${p}' failed to apply!"
+								die "FreeBSD patch set '${p}' failed to apply!"
+							fi
+						fi
+						;;
+					darwin-*)
+						[[ ${CHOST} == *"-darwin"* ]] && ewarn "  Darwin patch set '${p}' failed to apply!"
+						;;
+				esac
+			fi
+		done
+
+		if [[ -z ${ELT_APPLIED_PATCHES} ]] ; then
+			if [[ ${do_portage} == "no" && \
+				  ${do_reversedeps} == "no" && \
+				  ${do_only_patches} == "no" && \
+				  ${deptoremove} == "" ]]
+			then
+				ewarn "Cannot apply any patches, please file a bug about this"
+				die
+			fi
+		fi
+
+		# Drop any generated libtool script; presumably configure will
+		# regenerate it from the patched inputs -- TODO confirm.
+		rm -f "${d}/libtool"
+
+		# Stamp the dir so the do_force check above skips it next run.
+		> "${d}/.elibtoolized"
+	done
+}
+
# Historical entry points; their functionality lives in elibtoolize now.
uclibctoolize() {
	die "Use elibtoolize"
}
darwintoolize() {
	die "Use elibtoolize"
}
+
# char *VER_major(string)
#
#    Return the Major (X of X.Y.Z) version component.
#    Returns 1 (failure) when called with an empty/missing argument.
#
VER_major() {
	[[ -z $1 ]] && return 1

	# $* instead of a scalar assignment of $@ (SC2124).
	local ver=$*
	# Keep the leading run of digits only.
	echo "${ver%%[^[:digit:]]*}"
}
+
# char *VER_minor(string)
#
#    Return the Minor (Y of X.Y.Z) version component.
#    Returns 1 (failure) when called with an empty/missing argument.
#
VER_minor() {
	[[ -z $1 ]] && return 1

	# $* instead of a scalar assignment of $@ (SC2124).
	local ver=$*
	# Drop everything through the first dot, keep the next digit run.
	ver=${ver#*.}
	echo "${ver%%[^[:digit:]]*}"
}
+
# char *VER_micro(string)
#
#    Return the Micro (Z of X.Y.Z) version component.
#    Returns 1 (failure) when called with an empty/missing argument.
#
VER_micro() {
	[[ -z $1 ]] && return 1

	# $* instead of a scalar assignment of $@ (SC2124).
	local ver=$*
	# Drop everything through the second dot, keep the next digit run.
	ver=${ver#*.*.}
	echo "${ver%%[^[:digit:]]*}"
}
+
+# int VER_to_int(string)
+#
+#    Convert a string type version (2.4.0) to an int (132096)
+#    for easy comparing of versions ...
+#
VER_to_int() {
	[[ -z $1 ]] && return 1

	local major minor micro result
	major=$(VER_major "$1")
	minor=$(VER_minor "$1")
	micro=$(VER_micro "$1")
	# Pack X.Y.Z into a single integer: X*65536 + Y*256 + Z.
	result=$(( major * 65536 + minor * 256 + micro ))

	# We make version 1.0.0 the minimum version we will handle as
	# a sanity check ... if its less, we fail ...
	if (( result >= 65536 )) ; then
		echo "${result}"
		return 0
	fi

	echo 1
	return 1
}

diff --git a/eclass/multilib.eclass b/eclass/multilib.eclass
new file mode 100644
index 0000000..3a5ab6f
--- /dev/null
+++ b/eclass/multilib.eclass
@@ -0,0 +1,455 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/eclass/multilib.eclass,v 1.90 2011/03/18 22:03:25 vapier Exp $
+
+# @ECLASS: multilib.eclass
+# @MAINTAINER:
+# amd64@gentoo.org
+# toolchain@gentoo.org
+# @BLURB: This eclass is for all functions pertaining to handling multilib configurations.
+# @DESCRIPTION:
+# This eclass is for all functions pertaining to handling multilib configurations.
+
+___ECLASS_RECUR_MULTILIB="yes"
+[[ -z ${___ECLASS_RECUR_TOOLCHAIN_FUNCS} ]] && inherit toolchain-funcs
+
+# Defaults:
+export MULTILIB_ABIS=${MULTILIB_ABIS:-"default"}
+export DEFAULT_ABI=${DEFAULT_ABI:-"default"}
+export CFLAGS_default
+export LDFLAGS_default
+export CHOST_default=${CHOST_default:-${CHOST}}
+export CTARGET_default=${CTARGET_default:-${CTARGET:-${CHOST_default}}}
+export LIBDIR_default=${CONF_LIBDIR:-"lib"}
+export KERNEL_ABI=${KERNEL_ABI:-${DEFAULT_ABI}}
+
+# @FUNCTION: has_multilib_profile
+# @DESCRIPTION:
+# Return true if the current profile is a multilib profile and lists more than
+# one abi in ${MULTILIB_ABIS}.  When has_multilib_profile returns true, that
+# profile should enable the 'multilib' use flag. This is so you can DEPEND on
+# a package only for multilib or not multilib.
has_multilib_profile() {
	# True when MULTILIB_ABIS is non-empty and contains a space, i.e.
	# lists at least two ABIs.  [[ ... && ... ]] replaces the deprecated
	# and ambiguous '[ ... -a ... ]' form.
	[[ -n ${MULTILIB_ABIS} && ${MULTILIB_ABIS} != "${MULTILIB_ABIS/ /}" ]]
}
+
+# @FUNCTION: get_libdir
+# @RETURN: the libdir for the selected ABI
+# @DESCRIPTION:
+# This function simply returns the desired lib directory. With portage
+# 2.0.51, we now have support for installing libraries to lib32/lib64
+# to accommodate the needs of multilib systems. It's no longer a good idea
+# to assume all libraries will end up in lib. Replace any (sane) instances
+# where lib is named directly with $(get_libdir) if possible.
+#
+# Jeremy Huddleston <eradicator@gentoo.org> (23 Dec 2004):
+#   Added support for ${ABI} and ${DEFAULT_ABI}.  If they're both not set,
+#   fall back on old behavior.  Any profile that has these set should also
+#   depend on a newer version of portage (not yet released) which uses these
+#   over CONF_LIBDIR in econf, dolib, etc...
get_libdir() {
	local CONF_LIBDIR
	if [ -n "${CONF_LIBDIR_OVERRIDE}" ] ; then
		# An explicit override always wins over the ABI lookup.
		echo ${CONF_LIBDIR_OVERRIDE}
		return
	fi
	# Otherwise resolve the libdir for the active ABI.
	get_abi_LIBDIR
}
+
# @FUNCTION: get_multilibdir
# @RETURN: Returns the multilibdir
get_multilibdir() {
	# This helper predates the multilib-profile machinery; refuse to run there.
	if has_multilib_profile ; then
		eerror "get_multilibdir called, but it shouldn't be needed with the new multilib approach.  Please file a bug at http://bugs.gentoo.org and assign it to eradicator@gentoo.org"
		exit 1
	fi
	echo ${CONF_MULTILIBDIR:=lib32}
}
+
# @FUNCTION: get_libdir_override
# @DESCRIPTION:
# Sometimes you need to override the value returned by get_libdir. A good
# example of this is xorg-x11, where lib32 isn't a supported configuration,
# and where lib64 -must- be used on amd64 (for applications that need lib
# to be 32bit, such as adobe acrobat). Note that this override also bypasses
# portage version sanity checking.
# get_libdir_override expects one argument, the result get_libdir should
# return:
#
#   get_libdir_override lib64
get_libdir_override() {
	# Not meaningful (and dangerous) under real multilib profiles.
	if has_multilib_profile ; then
		eerror "get_libdir_override called, but it shouldn't be needed with the new multilib approach.  Please file a bug at http://bugs.gentoo.org and assign it to eradicator@gentoo.org"
		exit 1
	fi
	local dir=$1
	CONF_LIBDIR=${dir}
	CONF_LIBDIR_OVERRIDE=${dir}
	LIBDIR_default=${dir}
}
+
+# @FUNCTION: get_abi_var
+# @USAGE: <VAR> [ABI]
+# @RETURN: returns the value of ${<VAR>_<ABI>} which should be set in make.defaults
+# @DESCRIPTION:
+# ex:
+# CFLAGS=$(get_abi_var CFLAGS sparc32) # CFLAGS=-m32
+#
+# Note that the preferred method is to set CC="$(tc-getCC) $(get_abi_CFLAGS)"
+# This will hopefully be added to portage soon...
+#
+# If <ABI> is not specified, ${ABI} is used.
+# If <ABI> is not specified and ${ABI} is not defined, ${DEFAULT_ABI} is used.
+# If <ABI> is not specified and ${ABI} and ${DEFAULT_ABI} are not defined, we return an empty string.
get_abi_var() {
	local flag=$1 abi

	# Explicit ABI argument wins; then ${ABI}; then ${DEFAULT_ABI};
	# finally the literal "default".
	if [[ $# -gt 1 ]] ; then
		abi=$2
	else
		abi=${ABI:-${DEFAULT_ABI:-default}}
	fi

	# Indirect lookup of e.g. CFLAGS_amd64.
	local var="${flag}_${abi}"
	echo ${!var}
}
+
# @FUNCTION: get_abi_CFLAGS
# @USAGE: [ABI]
# @DESCRIPTION:
# Shorthand for 'get_abi_var CFLAGS'
get_abi_CFLAGS() {
	get_abi_var CFLAGS "$@"
}

# @FUNCTION: get_abi_ASFLAGS
# @USAGE: [ABI]
# @DESCRIPTION:
# Shorthand for 'get_abi_var ASFLAGS'
get_abi_ASFLAGS() {
	get_abi_var ASFLAGS "$@"
}

# @FUNCTION: get_abi_LDFLAGS
# @USAGE: [ABI]
# @DESCRIPTION:
# Shorthand for 'get_abi_var LDFLAGS'
get_abi_LDFLAGS() {
	get_abi_var LDFLAGS "$@"
}

# @FUNCTION: get_abi_CHOST
# @USAGE: [ABI]
# @DESCRIPTION:
# Shorthand for 'get_abi_var CHOST'
get_abi_CHOST() {
	get_abi_var CHOST "$@"
}

# @FUNCTION: get_abi_CTARGET
# @USAGE: [ABI]
# @DESCRIPTION:
# Shorthand for 'get_abi_var CTARGET'
get_abi_CTARGET() {
	get_abi_var CTARGET "$@"
}

# @FUNCTION: get_abi_FAKE_TARGETS
# @USAGE: [ABI]
# @DESCRIPTION:
# Shorthand for 'get_abi_var FAKE_TARGETS'
get_abi_FAKE_TARGETS() {
	get_abi_var FAKE_TARGETS "$@"
}

# @FUNCTION: get_abi_LIBDIR
# @USAGE: [ABI]
# @DESCRIPTION:
# Shorthand for 'get_abi_var LIBDIR'
get_abi_LIBDIR() {
	get_abi_var LIBDIR "$@"
}
+
# @FUNCTION: get_install_abis
# @DESCRIPTION:
# Return a list of the ABIs we want to install for with
# the last one in the list being the default.
get_install_abis() {
	local order=""

	if [[ -z ${MULTILIB_ABIS} ]] ; then
		echo "default"
		return 0
	fi

	if [[ ${EMULTILIB_PKG} == "true" ]] ; then
		# Non-default ABIs first, default ABI last, minus any in ABI_DENY.
		for x in ${MULTILIB_ABIS} ; do
			[[ ${x} == "${DEFAULT_ABI}" ]] && continue
			hasq ${x} ${ABI_DENY} || order="${order} ${x}"
		done
		hasq ${DEFAULT_ABI} ${ABI_DENY} || order="${order} ${DEFAULT_ABI}"

		# When ABI_ALLOW is set, restrict the list to it.
		if [[ -n ${ABI_ALLOW} ]] ; then
			local allowed=""
			for x in ${order} ; do
				hasq ${x} ${ABI_ALLOW} && allowed="${allowed} ${x}"
			done
			order=${allowed}
		fi
	else
		order=${DEFAULT_ABI}
	fi

	[[ -n ${order} ]] || die "The ABI list is empty.  Are you using a proper multilib profile?  Perhaps your USE flags or MULTILIB_ABIS are too restrictive for this package."

	echo ${order}
	return 0
}
+
# @FUNCTION: get_all_abis
# @DESCRIPTION:
# Return a list of the ABIs supported by this profile,
# the last one in the list being the default.
get_all_abis() {
	# 'x' is now local: the original leaked the loop variable globally.
	local x order=""

	if [[ -z ${MULTILIB_ABIS} ]] ; then
		echo "default"
		return 0
	fi

	# Every ABI except the default one, in profile order ...
	for x in ${MULTILIB_ABIS}; do
		if [[ ${x} != "${DEFAULT_ABI}" ]] ; then
			order="${order:+${order} }${x}"
		fi
	done
	# ... then the default ABI last.
	order="${order:+${order} }${DEFAULT_ABI}"

	echo ${order}
	return 0
}
+
# @FUNCTION: get_all_libdirs
# @DESCRIPTION:
# Returns a list of all the libdirs used by this profile.  This includes
# those that might not be touched by the current ebuild and always includes
# "lib".
get_all_libdirs() {
	local abi libdirs

	for abi in ${MULTILIB_ABIS}; do
		libdirs="${libdirs} $(get_abi_LIBDIR ${abi})"
	done

	# "lib" is always part of the returned set.
	case " ${libdirs} " in
		*" lib "*) ;;
		*) libdirs="${libdirs} lib" ;;
	esac

	echo "${libdirs}"
}
+
# @FUNCTION: is_final_abi
# @DESCRIPTION:
# Return true if ${ABI} is the last ABI on our list (or if we're not
# using the new multilib configuration.  This can be used to determine
# if we're in the last (or only) run through src_{unpack,compile,install}
is_final_abi() {
	has_multilib_profile || return 0
	local -a abis=( $(get_install_abis) )
	# Compare against the final (default) entry in the install list.
	[[ ${abis[${#abis[@]}-1]} == ${ABI} ]]
}
+
# @FUNCTION: number_abis
# @DESCRIPTION:
# echo the number of ABIs we will be installing for
number_abis() {
	# $( ) instead of deprecated backticks; word-splitting is intended.
	set -- $(get_install_abis)
	echo $#
}
+
# @FUNCTION: get_libname
# @USAGE: [version]
# @DESCRIPTION:
# Returns libname with proper suffix {.so,.dylib,.dll,etc} and optionally
# supplied version for the current platform identified by CHOST.
#
# Example:
#     get_libname ${PV}
#     Returns: .so.${PV} (ELF) || .${PV}.dylib (MACH) || ...
get_libname() {
	# Removed the dead 'local ver=$1' assignment: the value was never
	# read before the loop below overwrote it.
	local libname ver

	# Shared-library suffix for this platform.
	case ${CHOST} in
		*-cygwin|mingw*|*-mingw*) libname="dll";;
		*-darwin*)                libname="dylib";;
		*-mint*)                  libname="irrelevant";;
		hppa*-hpux*)              libname="sl";;
		*)                        libname="so";;
	esac

	if [[ -z $* ]] ; then
		# No version requested: just the bare suffix.
		echo ".${libname}"
	else
		# One line of output per requested version; Darwin puts the
		# version before the suffix, MiNT has no versioned names.
		for ver in "$@" ; do
			case ${CHOST} in
				*-darwin*) echo ".${ver}.${libname}";;
				*-mint*)   echo ".${libname}";;
				*)         echo ".${libname}.${ver}";;
			esac
		done
	fi
}
+
# @FUNCTION: get_modname
# @USAGE:
# @DESCRIPTION:
# Returns modulename with proper suffix {.so,.bundle,etc} for the current
# platform identified by CHOST.
#
# Example:
#     libfoo$(get_modname)
#     Returns: libfoo.so (ELF) || libfoo.bundle (MACH) || ...
get_modname() {
	# Removed the unused 'local ver=$1': this function takes no arguments.
	local modname

	case ${CHOST} in
		*-darwin*)                modname="bundle";;
		*)                        modname="so";;
	esac

	echo ".${modname}"
}
+
+# This is for the toolchain to setup profile variables when pulling in
+# a crosscompiler (and thus they aren't set in the profile)
+#
+# multilib_env [CTARGET]
+# For each ABI of the (cross-)target, export the per-ABI CFLAGS_*,
+# CHOST_*, CTARGET_* and LIBDIR_* variables, plus MULTILIB_ABIS and
+# DEFAULT_ABI (existing values of the latter two are preserved).
+multilib_env() {
+	local CTARGET=${1:-${CTARGET}}
+
+	case ${CTARGET} in
+		x86_64*)
+			export CFLAGS_x86=${CFLAGS_x86--m32}
+			export CHOST_x86=${CTARGET/x86_64/i686}
+			export CTARGET_x86=${CHOST_x86}
+			if [[ ${SYMLINK_LIB} == "yes" ]] ; then
+				export LIBDIR_x86="lib32"
+			else
+				export LIBDIR_x86="lib"
+			fi
+
+			export CFLAGS_amd64=${CFLAGS_amd64--m64}
+			export CHOST_amd64=${CTARGET}
+			export CTARGET_amd64=${CHOST_amd64}
+			export LIBDIR_amd64="lib64"
+
+			# NOTE(review): x32 variables are exported here, but x32 is
+			# not part of the default MULTILIB_ABIS list below.
+			export CFLAGS_x32=${CFLAGS_x32--mx32}
+			export CHOST_x32=${CTARGET}
+			export CTARGET_x32=${CHOST_x32}
+			export LIBDIR_x32="libx32"
+
+			: ${MULTILIB_ABIS=amd64 x86}
+			: ${DEFAULT_ABI=amd64}
+		;;
+		mips64*)
+			export CFLAGS_o32=${CFLAGS_o32--mabi=32}
+			export CHOST_o32=${CTARGET/mips64/mips}
+			export CTARGET_o32=${CHOST_o32}
+			export LIBDIR_o32="lib"
+
+			export CFLAGS_n32=${CFLAGS_n32--mabi=n32}
+			export CHOST_n32=${CTARGET}
+			export CTARGET_n32=${CHOST_n32}
+			export LIBDIR_n32="lib32"
+
+			export CFLAGS_n64=${CFLAGS_n64--mabi=64}
+			export CHOST_n64=${CTARGET}
+			export CTARGET_n64=${CHOST_n64}
+			export LIBDIR_n64="lib64"
+
+			: ${MULTILIB_ABIS=n64 n32 o32}
+			: ${DEFAULT_ABI=n32}
+		;;
+		powerpc64*)
+			export CFLAGS_ppc=${CFLAGS_ppc--m32}
+			export CHOST_ppc=${CTARGET/powerpc64/powerpc}
+			export CTARGET_ppc=${CHOST_ppc}
+			export LIBDIR_ppc="lib"
+
+			export CFLAGS_ppc64=${CFLAGS_ppc64--m64}
+			export CHOST_ppc64=${CTARGET}
+			export CTARGET_ppc64=${CHOST_ppc64}
+			export LIBDIR_ppc64="lib64"
+
+			: ${MULTILIB_ABIS=ppc64 ppc}
+			: ${DEFAULT_ABI=ppc64}
+		;;
+		s390x*)
+			export CFLAGS_s390=${CFLAGS_s390--m31} # the 31 is not a typo
+			export CHOST_s390=${CTARGET/s390x/s390}
+			export CTARGET_s390=${CHOST_s390}
+			export LIBDIR_s390="lib"
+
+			export CFLAGS_s390x=${CFLAGS_s390x--m64}
+			export CHOST_s390x=${CTARGET}
+			export CTARGET_s390x=${CHOST_s390x}
+			export LIBDIR_s390x="lib64"
+
+			: ${MULTILIB_ABIS=s390x s390}
+			: ${DEFAULT_ABI=s390x}
+		;;
+		sparc*)
+			export CFLAGS_sparc32=${CFLAGS_sparc32}
+			export CHOST_sparc32=${CTARGET/sparc64/sparc}
+			export CTARGET_sparc32=${CHOST_sparc32}
+			export LIBDIR_sparc32="lib"
+
+			export CFLAGS_sparc64=${CFLAGS_sparc64--m64}
+			export CHOST_sparc64=${CTARGET}
+			export CTARGET_sparc64=${CHOST_sparc64}
+			export LIBDIR_sparc64="lib64"
+
+			: ${MULTILIB_ABIS=sparc64 sparc32}
+			: ${DEFAULT_ABI=sparc64}
+		;;
+		*)
+			# Anything else is treated as single-ABI.
+			: ${MULTILIB_ABIS=default}
+			: ${DEFAULT_ABI=default}
+		;;
+	esac
+
+	export MULTILIB_ABIS DEFAULT_ABI
+}
+
+# @FUNCTION: multilib_toolchain_setup
+# @USAGE: <ABI>
+# @DESCRIPTION:
+# Hide multilib details here for packages which are forced to be compiled for a
+# specific ABI when run on another ABI (like x86-specific packages on amd64)
+multilib_toolchain_setup() {
+	local v vv
+
+	export ABI=$1
+
+	# First restore any saved state we have laying around.
+	if [[ ${__DEFAULT_ABI_SAVED} == "true" ]] ; then
+		for v in CHOST CBUILD AS CC CXX LD ; do
+			vv="__abi_saved_${v}"
+			export ${v}="${!vv}"
+			unset ${vv}
+		done
+		unset __DEFAULT_ABI_SAVED
+	fi
+
+	# We want to avoid the behind-the-back magic of gcc-config as it
+	# screws up ccache and distcc.  See #196243 for more info.
+	if [[ ${ABI} != ${DEFAULT_ABI} ]] ; then
+		# Back that multilib-ass up so we can restore it later
+		# (stashed in __abi_saved_* and restored by the branch above).
+		for v in CHOST CBUILD AS CC CXX LD ; do
+			export __abi_saved_${v}="${!v}"
+		done
+		export __DEFAULT_ABI_SAVED="true"
+
+		# Set the CHOST native first so that we pick up the native
+		# toolchain and not a cross-compiler by accident #202811.
+		export CHOST=$(get_abi_CHOST ${DEFAULT_ABI})
+		export AS="$(tc-getAS) $(get_abi_ASFLAGS)"
+		export CC="$(tc-getCC) $(get_abi_CFLAGS)"
+		export CXX="$(tc-getCXX) $(get_abi_CFLAGS)"
+		export LD="$(tc-getLD) $(get_abi_LDFLAGS)"
+		# Only after the tools are resolved do we switch CHOST/CBUILD
+		# to the requested ABI.
+		export CHOST=$(get_abi_CHOST $1)
+		export CBUILD=$(get_abi_CHOST $1)
+	fi
+}

diff --git a/eclass/portability.eclass b/eclass/portability.eclass
new file mode 100644
index 0000000..3269a3c
--- /dev/null
+++ b/eclass/portability.eclass
@@ -0,0 +1,183 @@
+# Copyright 1999-2005 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/eclass/portability.eclass,v 1.17 2010/09/24 14:29:49 vapier Exp $
+#
+# Author: Diego Pettenò <flameeyes@gentoo.org>
+#
+# This eclass is created to avoid using non-portable GNUisms inside ebuilds
+#
+# NB:  If you add anything, please comment it!
+
# treecopy orig1 orig2 orig3 .... dest
#
# mimic `cp --parents`, but working on BSD userland as well.
# The last argument is the destination directory; every other argument
# is copied into it with its leading directory structure recreated.
# Returns non-zero as soon as any mkdir/cp fails.
treecopy() {
	# fixes over the original: no leaked globals (dest/files_count,
	# the latter was never used), quoted expansions, error checking
	local dest=${!#}
	local f dirstruct

	while (( $# > 1 )); do
		f=$1
		dirstruct=$(dirname "${f}")
		mkdir -p "${dest}/${dirstruct}" || return 1
		cp -pPR "${f}" "${dest}/${dirstruct}" || return 1
		shift
	done
}
+
# seq min max
#
# compatibility function that mimics the seq(1) command when it is not
# available.  Accepts 1 (max), 2 (min max) or 3 (min step max) args,
# matching seq's calling convention.
seq() {
	# First try the real `seq` (type -P only searches PATH, so this
	# never recurses into this shell function)
	local p
	p=$(type -P seq)
	if [[ -n ${p} ]] ; then
		"${p}" "$@"
		return $?
	fi

	local min max step
	case $# in
		1) min=1  max=$1 step=1  ;;
		2) min=$1 max=$2 step=1  ;;
		3) min=$1 max=$3 step=$2 ;;
		*) die "seq called with wrong number of arguments" ;;
	esac

	# Then try `jot` (BSD userland)
	p=$(type -P jot)
	if [[ -n ${p} ]] ; then
		local reps
		if [[ ${step} != 0 ]] ; then
			reps=$(( (max - min) / step + 1 ))
		else
			reps=0
		fi

		jot $reps $min $max $step
		return $?
	fi

	# Screw it, do the output ourselves.  Use arithmetic comparison:
	# the original `[[ $max < $min ]]` compared lexically, which breaks
	# for multi-digit numbers (e.g. "10" sorts before "9") and for
	# negative steps.
	while :; do
		(( step > 0 && min > max )) && break
		(( step < 0 && min < max )) && break
		echo ${min}
		: $(( min += step ))
	done
	return 0
}
+
# Prints the linker flag needed to reach dlopen() on the current CHOST.
# Solaris, Darwin and the BSDs keep it in libc, so they need nothing;
# glibc/uclibc Linux and Interix need an explicit -ldl.
dlopen_lib() {
	case "${CHOST}" in
		*-linux-gnu*|*-linux-uclibc|*-interix*) echo "-ldl" ;;
		*) ;;
	esac
}
+
# Gets the home directory for the specified user.
# Thin wrapper over egetent: the home-directory field's position in the
# passwd entry differs per OS, so pick the matching cut(1) field.
#
# To use that, inherit eutils, not portability!
egethome() {
	local pos
	ent=$(egetent passwd $1)

	case ${CHOST} in
	*-darwin*|*-freebsd*|*-dragonfly*)
		# Darwin, OSX, FreeBSD and DragonFly store the homedir in field 9
		pos=9 ;;
	*)
		# Linux, NetBSD and OpenBSD use field 6 instead
		pos=6 ;;
	esac

	echo ${ent} | cut -d: -f${pos}
}
+
# Gets the login shell for the specified user.
# Thin wrapper over egetent: the shell field's position in the passwd
# entry differs per OS, so pick the matching cut(1) field.
#
# To use that, inherit eutils, not portability!
egetshell() {
	ent=$(egetent passwd "$1")

	case ${CHOST} in
	*-darwin*|*-freebsd*|*-dragonfly*)
		# Darwin, OSX, FreeBSD and DragonFly keep the shell in field 10
		echo ${ent} | cut -d: -f10
		;;
	*)
		# Linux, NetBSD and OpenBSD use field 7 instead
		# (bug fix: the original was missing the pipe before cut, so it
		# echoed the whole entry followed by the literal words
		# "cut -d: -f7" instead of the shell)
		echo ${ent} | cut -d: -f7
		;;
	esac
}
+
# Returns true (0) when the specified user's shell is one of the
# conventional "no login" shells on any supported operating system.
is-login-disabled() {
	shell=$(egetshell "$1")

	case ${shell} in
		/bin/false|/usr/bin/false|/sbin/nologin|/usr/sbin/nologin)
			return 0 ;;
	esac
	return 1
}
+
# Prints the name of the BSD-ish make command (pmake from NetBSD).
#
# BSD userlands ship it as the system `make`; Darwin userlands call it
# `bsdmake` (matching the MacOSX default name); everywhere else the
# sys-devel/pmake package installs it as `pmake`.
get_bmake() {
	case ${USERLAND} in
		*BSD)   echo make    ;;
		Darwin) echo bsdmake ;;
		*)      echo pmake   ;;
	esac
}
+
# Portable method of getting mount names and points.
# Returns as "point node fs options"
# Remember to convert 040 back to a space.
get_mounts() {
	local point= node= fs= opts= foo=

	# Linux has /proc/mounts which should always exist
	if [[ $(uname -s) == "Linux" ]] ; then
		while read node point fs opts foo ; do
			echo "${point} ${node} ${fs} ${opts}"
		done < /proc/mounts
		return
	fi

	# OK, pray we have a -p option that outputs mounts in fstab format
	# using tabs as the separator.
	# Then pray that there are no tabs in the output either.
	# Currently only FreeBSD supports this and the other BSDs will
	# have to be patched.
	# Although the BSDs may support /proc, they do NOT put \040 in place
	# of the spaces and we should not force a /proc either.
	local IFS=$'\t'
	# ${fs} initially holds "fs options" (one tab-separated field); it is
	# split into the two pieces below.  NOTE(review): ${fs%% *} is applied
	# a second time in the echo even though ${fs} is already stripped --
	# harmless, but redundant.
	LC_ALL=C mount -p | while read node point fs foo ; do
		opts=${fs#* }
		fs=${fs%% *}
		echo "${point// /\040} ${node// /\040} ${fs%% *} ${opts// /\040}"
	done
}
+

diff --git a/eclass/prefix.eclass b/eclass/prefix.eclass
new file mode 100644
index 0000000..a347df7
--- /dev/null
+++ b/eclass/prefix.eclass
@@ -0,0 +1,52 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Id: prefix.eclass,v 1.3 2009/11/16 21:39:00 grobian Exp $
+
+# @ECLASS: prefix.eclass
+# @MAINTAINER:
+# Feel free to contact the Prefix team through <prefix@gentoo.org> if
+# you have problems, suggestions or questions.
+# @BLURB: Eclass to provide Prefix functionality
+# @DESCRIPTION:
+# Gentoo Prefix allows users to install into a self defined offset
+# located somewhere in the filesystem.  Prefix ebuilds require
+# additional functions and variables which are defined by this eclass.
+
+# @ECLASS-VARIABLE: EPREFIX
+# @DESCRIPTION:
+# The offset prefix of a Gentoo Prefix installation.  When Gentoo Prefix
+# is not used, ${EPREFIX} should be "".  Prefix Portage sets EPREFIX,
+# hence this eclass has nothing to do here in that case.
+# Note that setting EPREFIX in the environment with Prefix Portage sets
+# Portage into cross-prefix mode.
# Ensure EPREFIX is always set (empty on non-Prefix systems) so later
# expansions never see it unset; Prefix Portage provides it itself.
[[ ${EPREFIX+set} ]] || export EPREFIX=''
+
+
# @FUNCTION: eprefixify
# @USAGE: <list of to be eprefixified files>
# @DESCRIPTION:
# replaces @GENTOO_PORTAGE_EPREFIX@ with ${EPREFIX} for the given files,
# dies if no arguments are given, a file does not exist, or changing a
# file failed.
eprefixify() {
	[[ $# -lt 1 ]] && die "at least one argument required"

	einfo "Adjusting to prefix ${EPREFIX:-/}"
	local f
	for f in "$@" ; do
		[[ -e ${f} ]] || die "${f} does not exist"
		ebegin "  ${f##*/}"
		sed -i -e "s|@GENTOO_PORTAGE_EPREFIX@|${EPREFIX}|g" "${f}"
		eend $? || die "failed to eprefixify ${f}"
	done

	return 0
}
+
+
+# vim: tw=72:

diff --git a/eclass/toolchain-binutils.eclass b/eclass/toolchain-binutils.eclass
new file mode 100644
index 0000000..361f24c
--- /dev/null
+++ b/eclass/toolchain-binutils.eclass
@@ -0,0 +1,418 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/eclass/toolchain-binutils.eclass,v 1.98 2011/03/18 19:51:55 vapier Exp $
+#
+# Maintainer: Toolchain Ninjas <toolchain@gentoo.org>
+#
+# We install binutils into CTARGET-VERSION specific directories.  This lets
+# us easily merge multiple versions for multiple targets (if we wish) and
+# then switch the versions on the fly (with `binutils-config`).
+#
+# binutils-9999           -> live cvs
+# binutils-9999_preYYMMDD -> nightly snapshot date YYMMDD
+# binutils-#              -> normal release
+
# Decide how this binutils is being built -- live cvs checkout, dated
# snapshot, or normal release -- and derive BVER, the version string
# used throughout the install paths below.
extra_eclass=""
if [[ -n ${BINUTILS_TYPE} ]] ; then
	BTYPE=${BINUTILS_TYPE}
else
	case ${PV} in
	9999)      BTYPE="cvs";;
	9999_pre*) BTYPE="snap";;
	*)         BTYPE="rel";;
	esac
fi

case ${BTYPE} in
cvs)
	extra_eclass="cvs"
	ECVS_SERVER="sourceware.org:/cvs/src"
	ECVS_MODULE="binutils"
	ECVS_USER="anoncvs"
	ECVS_PASS="anoncvs"
	BVER="cvs"
	;;
snap) BVER=${PV/9999_pre} ;;
rel)  BVER=${PV} ;;
*)    BVER=${BINUTILS_VER} ;;
esac
+
+inherit eutils libtool flag-o-matic gnuconfig multilib versionator ${extra_eclass}
+EXPORT_FUNCTIONS src_unpack src_compile src_test src_install pkg_postinst pkg_postrm
+
# CTARGET defaults to CHOST; for cross-* categories the target tuple
# comes from the category name (e.g. cross-arm-linux-gnu/binutils).
export CTARGET=${CTARGET:-${CHOST}}
if [[ ${CTARGET} == ${CHOST} && ${CATEGORY/cross-} != ${CATEGORY} ]] ; then
	export CTARGET=${CATEGORY/cross-}
fi
# true when building a cross toolchain
is_cross() { [[ ${CHOST} != ${CTARGET} ]] ; }

# TPREFIX is the prefix of the CTARGET installation, not used during
# compilation, but stored in the env.d file such that the wrapper can use it
export TPREFIX=${TPREFIX:-${EPREFIX}}
+
DESCRIPTION="Tools necessary to build programs"
HOMEPAGE="http://sources.redhat.com/binutils/"

# Pick SRC_URI to match the release type computed above.
case ${BTYPE} in
	cvs)  SRC_URI="";;
	snap) SRC_URI="ftp://gcc.gnu.org/pub/binutils/snapshots/binutils-${BVER}.tar.bz2";;
	rel)
		SRC_URI="mirror://kernel/linux/devel/binutils/binutils-${PV}.tar.bz2
			mirror://kernel/linux/devel/binutils/test/binutils-${PV}.tar.bz2
			mirror://gnu/binutils/binutils-${PV}.tar.bz2"
esac
# add_src_uri <tarball> <patchset version>
# Appends <tarball> (from the gentoo mirrors + vapier's devspace) to
# SRC_URI, but only when the patchset version argument is non-empty.
add_src_uri() {
	[[ -z $2 ]] && return
	local a=$1
	set -- mirror://gentoo http://dev.gentoo.org/~vapier/dist
	# ${@/%//${a}} appends "/<tarball>" to every mirror in $@
	SRC_URI="${SRC_URI} ${@/%//${a}}"
}
add_src_uri binutils-${PV}-patches-${PATCHVER}.tar.bz2 ${PATCHVER}
add_src_uri binutils-${PV}-uclibc-patches-${UCLIBC_PATCHVER}.tar.bz2 ${UCLIBC_PATCHVER}
add_src_uri elf2flt-${ELF2FLT_VER}.tar.bz2 ${ELF2FLT_VER}

# binutils switched to GPL-3/LGPL-3 with the 2.18 release
if version_is_at_least 2.18 ; then
	LICENSE="|| ( GPL-3 LGPL-3 )"
else
	LICENSE="|| ( GPL-2 LGPL-2 )"
fi
IUSE="nls multitarget multislot p_bootstrap test vanilla"
# multislot allows several versions per CTARGET side by side
if use multislot ; then
	SLOT="${CTARGET}-${BVER}"
elif is_cross ; then
	SLOT="${CTARGET}"
else
	SLOT="0"
fi

RDEPEND=">=sys-devel/binutils-config-1.9"
DEPEND="${RDEPEND}
	test? ( dev-util/dejagnu )
	nls? ( sys-devel/gettext )
	sys-devel/flex"

S=${WORKDIR}/binutils
[[ ${BVER} != "cvs" ]] && S=${S}-${BVER}

# Version/target specific install locations, switched by binutils-config.
LIBPATH=/usr/$(get_libdir)/binutils/${CTARGET}/${BVER}
INCPATH=${LIBPATH}/include
DATAPATH=/usr/share/binutils-data/${CTARGET}/${BVER}
MY_BUILDDIR=${WORKDIR}/build
if is_cross ; then
	BINPATH=/usr/${CHOST}/${CTARGET}/binutils-bin/${BVER}
else
	BINPATH=/usr/${CTARGET}/binutils-bin/${BVER}
fi
+
# Unpack all source tarballs and prepare the out-of-tree build dir.
tc-binutils_unpack() {
	unpack ${A}
	mkdir -p "${MY_BUILDDIR}"
	# patchsets honor a patch/skip subdir; create it so patches can be skipped
	[[ -d ${WORKDIR}/patch ]] && mkdir "${WORKDIR}"/patch/skip
}
+
# Apply the Gentoo patchset, uClibc patchset and user patches (unless
# USE=vanilla), then fix up generated files before building.
tc-binutils_apply_patches() {
	cd "${S}"

	if ! use vanilla ; then
		EPATCH_EXCLUDE=
		# 32bit-path patch only applies when lib->lib64 symlinking is used
		[[ ${SYMLINK_LIB} != "yes" ]] && EPATCH_EXCLUDE+=" 65_all_binutils-*-amd64-32bit-path.patch"
		if [[ -n ${PATCHVER} ]] ; then
			EPATCH_SOURCE=${WORKDIR}/patch
			if [[ ${CTARGET} == mips* ]] ; then
				# remove gnu-hash for mips (bug #233233)
				EPATCH_EXCLUDE+=" 77_all_generate-gnu-hash.patch"
			fi
			[[ -n $(ls "${EPATCH_SOURCE}"/*.bz2 2>/dev/null) ]] \
				&& EPATCH_SUFFIX="patch.bz2" \
				|| EPATCH_SUFFIX="patch"
			epatch
		fi
		if [[ -n ${UCLIBC_PATCHVER} ]] ; then
			EPATCH_SOURCE=${WORKDIR}/uclibc-patches
			[[ -n $(ls "${EPATCH_SOURCE}"/*.bz2 2>/dev/null) ]] \
				&& EPATCH_SUFFIX="patch.bz2" \
				|| EPATCH_SUFFIX="patch"
			EPATCH_MULTI_MSG="Applying uClibc fixes ..." \
			epatch
		elif [[ ${CTARGET} == *-uclibc* ]] ; then
			# starting with binutils-2.17.50.0.17, we no longer need
			# uClibc patchsets :D
			if grep -qs 'linux-gnu' "${S}"/ltconfig ; then
				die "sorry, but this binutils doesn't yet support uClibc :("
			fi
		fi
		epatch_user
	fi

	# fix locale issues if possible #122216
	if [[ -e ${FILESDIR}/binutils-configure-LANG.patch ]] ; then
		einfo "Fixing misc issues in configure files"
		for f in $(grep -l 'autoconf version 2.13' $(find "${S}" -name configure)) ; do
			ebegin "  Updating ${f/${S}\/}"
			patch "${f}" "${FILESDIR}"/binutils-configure-LANG.patch >& "${T}"/configure-patch.log \
				|| eerror "Please file a bug about this"
			eend $?
		done
	fi
	# fix conflicts with newer glibc #272594
	if [[ -e libiberty/testsuite/test-demangle.c ]] ; then
		sed -i 's:\<getline\>:get_line:g' libiberty/testsuite/test-demangle.c
	fi

	# Fix po Makefile generators
	sed -i \
		-e '/^datadir = /s:$(prefix)/@DATADIRNAME@:@datadir@:' \
		-e '/^gnulocaledir = /s:$(prefix)/share:$(datadir):' \
		*/po/Make-in || die "sed po's failed"

	# Run misc portage update scripts
	gnuconfig_update
	elibtoolize --portage --no-uclibc
}
+
# Default src_unpack: unpack everything, then apply the patch stack.
toolchain-binutils_src_unpack() {
	tc-binutils_unpack
	tc-binutils_apply_patches
}
+
# Configure binutils out-of-tree in ${MY_BUILDDIR} and build it, plus
# elf2flt for the targets that support it.
toolchain-binutils_src_compile() {
	# prevent makeinfo from running in releases.  it may not always be
	# installed, and older binutils may fail with newer texinfo.
	# besides, we never patch the doc files anyways, so regenerating
	# in the first place is useless. #193364
	find . '(' -name '*.info' -o -name '*.texi' ')' -print0 | xargs -0 touch -r .

	# make sure we filter $LINGUAS so that only ones that
	# actually work make it through #42033
	strip-linguas -u */po

	# keep things sane
	strip-flags

	local x
	echo
	for x in CATEGORY CBUILD CHOST CTARGET CFLAGS LDFLAGS ; do
		einfo "$(printf '%10s' ${x}:) ${!x}"
	done
	echo

	# configure arguments are accumulated positionally via `set --`
	cd "${MY_BUILDDIR}"
	set --
	# enable gold if available (installed as ld.gold)
	if grep -q 'enable-gold=default' "${S}"/configure ; then
		set -- "$@" --enable-gold
	# old ways - remove when 2.21 is stable
	elif grep -q 'enable-gold=both/ld' "${S}"/configure ; then
		set -- "$@" --enable-gold=both/ld
	elif grep -q 'enable-gold=both/bfd' "${S}"/configure ; then
		set -- "$@" --enable-gold=both/bfd
	fi
	if [[ ${CHOST} != *"-mint"* ]]; then  #353410
		if grep -q -e '--enable-plugins' "${S}"/ld/configure ; then
			set -- "$@" --enable-plugins
		fi
	fi
	use nls \
		&& set -- "$@" --without-included-gettext \
		|| set -- "$@" --disable-nls
	[[ ${CHOST} == *"-solaris"* ]] && use nls && append-libs -lintl
	use multitarget && set -- "$@" --enable-targets=all
	[[ -n ${CBUILD} ]] && set -- "$@" --build=${CBUILD}
	is_cross && set -- "$@" --with-sysroot=${EPREFIX}/usr/${CTARGET}
	# glibc-2.3.6 lacks support for this ... so rather than force glibc-2.5+
	# on everyone in alpha (for now), we'll just enable it when possible
	# NOTE(review): the next two has_version checks look redundant --
	# presumably one covers cross-*/glibc and one the native glibc;
	# confirm whether both are intended.
	has_version ">=${CATEGORY}/glibc-2.5" && set -- "$@" --enable-secureplt
	has_version ">=sys-libs/glibc-2.5" && set -- "$@" --enable-secureplt
	set -- "$@" \
		--prefix=${EPREFIX}/usr \
		--host=${CHOST} \
		--target=${CTARGET} \
		--datadir=${EPREFIX}${DATAPATH} \
		--infodir=${EPREFIX}${DATAPATH}/info \
		--mandir=${EPREFIX}${DATAPATH}/man \
		--bindir=${EPREFIX}${BINPATH} \
		--libdir=${EPREFIX}${LIBPATH} \
		--libexecdir=${EPREFIX}${LIBPATH} \
		--includedir=${EPREFIX}${INCPATH} \
		--enable-64-bit-bfd \
		--disable-werror \
		${EXTRA_ECONF}
	use p_bootstrap || set -- "$@" --enable-shared
	echo ./configure "$@"
	"${S}"/configure "$@" || die

	emake all || die "emake failed"

	# only build info pages when makeinfo is actually around
	# (may not exist when we bootstrap)
	if type -p makeinfo > /dev/null ; then
		emake info || die "make info failed"
	fi
	# we nuke the manpages when we're left with junk
	# (like when we bootstrap, no perl -> no manpages)
	find . -name '*.1' -a -size 0 | xargs rm -f

	# elf2flt only works on some arches / targets
	if [[ -n ${ELF2FLT_VER} ]] && [[ ${CTARGET} == *linux* || ${CTARGET} == *-elf* ]] ; then
		cd "${WORKDIR}"/elf2flt-${ELF2FLT_VER}

		local x supported_arches=$(sed -n '/defined(TARGET_/{s:^.*TARGET_::;s:)::;p}' elf2flt.c | sort -u)
		for x in ${supported_arches} UNSUPPORTED ; do
			[[ ${CTARGET} == ${x}* ]] && break
		done

		if [[ ${x} != "UNSUPPORTED" ]] ; then
			append-flags -I"${S}"/include
			# NOTE(review): "$@" still holds all the binutils configure
			# arguments here, so they get passed to elf2flt's configure
			# too -- verify this is intentional.
			set -- "$@" \
				--with-bfd-include-dir=${MY_BUILDDIR}/bfd \
				--with-libbfd=${MY_BUILDDIR}/bfd/libbfd.a \
				--with-libiberty=${MY_BUILDDIR}/libiberty/libiberty.a \
				--with-binutils-ldscript-dir=${EPREFIX}/${LIBPATH}/ldscripts
			echo ./configure "$@"
			./configure "$@" || die
			emake || die "make elf2flt failed"
		fi
	fi
}
+
# Run the binutils testsuite; -k gathers all failures instead of
# stopping at the first one.
toolchain-binutils_src_test() {
	cd "${MY_BUILDDIR}"
	emake -k check || die "check failed :("
}
+
# Install into ${D}, collapse the version/target directories into the
# SLOTted layout binutils-config expects, and generate the env.d file.
toolchain-binutils_src_install() {
	local x d

	cd "${MY_BUILDDIR}"
	emake DESTDIR="${D}" tooldir="${EPREFIX}/${LIBPATH}" install || die
	rm -rf "${ED}"/${LIBPATH}/bin

	# Newer versions of binutils get fancy with ${LIBPATH} #171905
	cd "${ED}"/${LIBPATH}
	for d in ../* ; do
		[[ ${d} == ../${BVER} ]] && continue
		mv ${d}/* . || die
		rmdir ${d} || die
	done

	# Now we collect everything into the proper SLOT-ed dirs
	# When something is built to cross-compile, it installs into
	# /usr/$CHOST/ by default ... we have to 'fix' that :)
	if is_cross ; then
		cd "${ED}"/${BINPATH}
		for x in * ; do
			# drop the ${CTARGET}- prefix from the tool names
			mv ${x} ${x/${CTARGET}-}
		done

		if [[ -d ${ED}/usr/${CHOST}/${CTARGET} ]] ; then
			mv "${ED}"/usr/${CHOST}/${CTARGET}/include "${ED}"/${INCPATH}
			mv "${ED}"/usr/${CHOST}/${CTARGET}/lib/* "${ED}"/${LIBPATH}/
			rm -r "${ED}"/usr/${CHOST}/{include,lib}
		fi
	fi
	insinto ${INCPATH}
	doins "${S}/include/libiberty.h"
	if [[ -d ${ED}/${LIBPATH}/lib ]] ; then
		mv "${ED}"/${LIBPATH}/lib/* "${ED}"/${LIBPATH}/
		rm -r "${ED}"/${LIBPATH}/lib
	fi

	# Insert elf2flt where appropriate
	if [[ -x ${WORKDIR}/elf2flt-${ELF2FLT_VER}/elf2flt ]] ; then
		cd "${WORKDIR}"/elf2flt-${ELF2FLT_VER}
		insinto ${LIBPATH}/ldscripts
		doins elf2flt.ld || die "doins elf2flt.ld failed"
		exeinto ${BINPATH}
		doexe elf2flt flthdr || die "doexe elf2flt flthdr failed"
		# the ld-elf2flt wrapper takes over as `ld`; keep the real one around
		mv "${ED}"/${BINPATH}/{ld,ld.real} || die
		newexe ld-elf2flt ld || die "doexe ld-elf2flt failed"
		newdoc README README.elf2flt
	fi

	# Now, some binutils are tricky and actually provide
	# for multiple TARGETS.  Really, we're talking just
	# 32bit/64bit support (like mips/ppc/sparc).  Here
	# we want to tell binutils-config that it's cool if
	# it generates multiple sets of binutil symlinks.
	# e.g. sparc gets {sparc,sparc64}-unknown-linux-gnu
	local targ=${CTARGET/-*} src="" dst=""
	local FAKE_TARGETS=${CTARGET}
	case ${targ} in
		mips*)    src="mips"    dst="mips64";;
		powerpc*) src="powerpc" dst="powerpc64";;
		s390*)    src="s390"    dst="s390x";;
		sparc*)   src="sparc"   dst="sparc64";;
	esac
	case ${targ} in
		mips64*|powerpc64*|s390x*|sparc64*) targ=${src} src=${dst} dst=${targ};;
	esac
	[[ -n ${src}${dst} ]] && FAKE_TARGETS="${FAKE_TARGETS} ${CTARGET/${src}/${dst}}"

	# Generate an env.d entry for this binutils
	cd "${S}"
	insinto /etc/env.d/binutils
	cat <<-EOF > env.d
		TARGET="${CTARGET}"
		VER="${BVER}"
		LIBPATH="${EPREFIX}/${LIBPATH}"
		FAKE_TARGETS="${FAKE_TARGETS}"
		TPREFIX="${TPREFIX}"
	EOF
	newins env.d ${CTARGET}-${BVER}

	# Handle documentation
	if ! is_cross ; then
		cd "${S}"
		dodoc README
		docinto bfd
		dodoc bfd/ChangeLog* bfd/README bfd/PORTING bfd/TODO
		docinto binutils
		dodoc binutils/ChangeLog binutils/NEWS binutils/README
		docinto gas
		dodoc gas/ChangeLog* gas/CONTRIBUTORS gas/NEWS gas/README*
		docinto gprof
		dodoc gprof/ChangeLog* gprof/TEST gprof/TODO gprof/bbconv.pl
		docinto ld
		dodoc ld/ChangeLog* ld/README ld/NEWS ld/TODO
		docinto libiberty
		dodoc libiberty/ChangeLog* libiberty/README
		docinto opcodes
		dodoc opcodes/ChangeLog*
	fi
	# Remove shared info pages
	rm -f "${ED}"/${DATAPATH}/info/{dir,configure.info,standards.info}
	# Trim all empty dirs
	find "${ED}" -type d | xargs rmdir >& /dev/null
}
+
# Select a binutils profile for ${CTARGET} after merge, unless the user
# already has one configured.
toolchain-binutils_pkg_postinst() {
	# Make sure this ${CTARGET} has a binutils version selected
	[[ -e ${EROOT}/etc/env.d/binutils/config-${CTARGET} ]] && return 0
	binutils-config ${CTARGET}-${BVER}
}
+
# After unmerge (or remerge): deselect or switch the binutils-config
# profile so the symlinks never point at a removed version.
toolchain-binutils_pkg_postrm() {
	local current_profile=$(binutils-config -c ${CTARGET})

	# If no other versions exist, then uninstall for this
	# target ... otherwise, switch to the newest version
	# Note: only do this if this version is unmerged.  We
	#       rerun binutils-config if this is a remerge, as
	#       we want the mtimes on the symlinks updated (if
	#       it is the same as the current selected profile)
	if [[ ! -e ${EPREFIX}${BINPATH}/ld ]] && [[ ${current_profile} == ${CTARGET}-${BVER} ]] ; then
		# collapse the listing to one line, then keep only the last
		# (newest) remaining profile for this target
		local choice=$(binutils-config -l | grep ${CTARGET} | awk '{print $2}')
		choice=${choice//$'\n'/ }
		choice=${choice/* }
		if [[ -z ${choice} ]] ; then
			env -i ROOT="${ROOT}" "${EPREFIX}"/usr/bin/binutils-config -u ${CTARGET}
		else
			binutils-config ${choice}
		fi
	elif [[ $(CHOST=${CTARGET} binutils-config -c) == ${CTARGET}-${BVER} ]] ; then
		binutils-config ${CTARGET}-${BVER}
	fi
}

diff --git a/eclass/toolchain-funcs.eclass b/eclass/toolchain-funcs.eclass
new file mode 100644
index 0000000..1907607
--- /dev/null
+++ b/eclass/toolchain-funcs.eclass
@@ -0,0 +1,694 @@
+# Copyright 1999-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/eclass/toolchain-funcs.eclass,v 1.103 2010/10/28 04:16:27 vapier Exp $
+
+# @ECLASS: toolchain-funcs.eclass
+# @MAINTAINER:
+# Toolchain Ninjas <toolchain@gentoo.org>
+# @BLURB: functions to query common info about the toolchain
+# @DESCRIPTION:
+# The toolchain-funcs aims to provide a complete suite of functions
+# for gleaning useful information about the toolchain and to simplify
+# ugly things like cross-compiling and multilib.  All of this is done
+# in such a way that you can rely on the function always returning
+# something sane.
+
+___ECLASS_RECUR_TOOLCHAIN_FUNCS="yes"
+[[ -z ${___ECLASS_RECUR_MULTILIB} ]] && inherit multilib
+inherit prefix
+
+DESCRIPTION="Based on the ${ECLASS} eclass"
+
# tc-getPROG <env var> <default prog> [tuple prefix]
# Prints the tool to use and caches it in the named variable: an
# already-set ${var} wins, then a $3-prefixed tool found in PATH, then a
# ${CHOST}-prefixed one, and finally the bare default name.
tc-getPROG() {
	local var=$1 prog=$2 found=

	if [[ -n ${!var} ]] ; then
		echo "${!var}"
		return 0
	fi

	[[ -n $3 ]] && found=$(type -p "$3-${prog}")
	if [[ -z ${found} && -n ${CHOST} ]] ; then
		found=$(type -p "${CHOST}-${prog}")
	fi
	[[ -n ${found} ]] && prog=${found##*/}

	export ${var}=${prog}
	echo "${!var}"
}
+
+# @FUNCTION: tc-getAR
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the archiver
+tc-getAR() { tc-getPROG AR ar "$@"; }
+# @FUNCTION: tc-getAS
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the assembler
+tc-getAS() { tc-getPROG AS as "$@"; }
+# @FUNCTION: tc-getCC
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the C compiler
+tc-getCC() { tc-getPROG CC gcc "$@"; }
+# @FUNCTION: tc-getCPP
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the C preprocessor
+tc-getCPP() { tc-getPROG CPP cpp "$@"; }
+# @FUNCTION: tc-getCXX
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the C++ compiler
+tc-getCXX() { tc-getPROG CXX g++ "$@"; }
+# @FUNCTION: tc-getLD
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the linker
+tc-getLD() { tc-getPROG LD ld "$@"; }
+# @FUNCTION: tc-getSTRIP
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the strip program
+tc-getSTRIP() { tc-getPROG STRIP strip "$@"; }
+# @FUNCTION: tc-getNM
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the symbol/object thingy
+tc-getNM() { tc-getPROG NM nm "$@"; }
+# @FUNCTION: tc-getRANLIB
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the archiver indexer
+tc-getRANLIB() { tc-getPROG RANLIB ranlib "$@"; }
+# @FUNCTION: tc-getOBJCOPY
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the object copier
+tc-getOBJCOPY() { tc-getPROG OBJCOPY objcopy "$@"; }
+# @FUNCTION: tc-getF77
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the Fortran 77 compiler
+tc-getF77() { tc-getPROG F77 gfortran "$@"; }
+# @FUNCTION: tc-getFC
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the Fortran 90 compiler
+tc-getFC() { tc-getPROG FC gfortran "$@"; }
+# @FUNCTION: tc-getGCJ
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the java compiler
+tc-getGCJ() { tc-getPROG GCJ gcj "$@"; }
+# @FUNCTION: tc-getPKG_CONFIG
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the pkg-config tool
+tc-getPKG_CONFIG() { tc-getPROG PKG_CONFIG pkg-config "$@"; }
+# @FUNCTION: tc-getRC
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the Windows resource compiler
+tc-getRC() { tc-getPROG RC windres "$@"; }
+# @FUNCTION: tc-getDLLWRAP
+# @USAGE: [toolchain prefix]
+# @RETURN: name of the Windows dllwrap utility
+tc-getDLLWRAP() { tc-getPROG DLLWRAP dllwrap "$@"; }
+
# @FUNCTION: tc-getBUILD_CC
# @USAGE: [toolchain prefix]
# @RETURN: name of the C compiler for building binaries to run on the build machine
tc-getBUILD_CC() {
	# Honor an explicit choice from the environment; first match wins.
	local candidate
	for candidate in CC_FOR_BUILD BUILD_CC HOSTCC ; do
		if [[ -n ${!candidate} ]] ; then
			export BUILD_CC=${!candidate}
			echo "${!candidate}"
			return 0
		fi
	done

	# Otherwise prefer a CBUILD-prefixed gcc when one is installed,
	# falling back to plain "gcc".
	local cc=
	if [[ -n ${CBUILD} ]] ; then
		cc=$(type -p ${CBUILD}-gcc)
		cc=${cc##*/}
	fi
	cc=${cc:-gcc}

	export BUILD_CC=${cc}
	echo "${cc}"
}
+
# @FUNCTION: tc-export
# @USAGE: <list of toolchain variables>
# @DESCRIPTION:
# Quick way to export a bunch of compiler vars at once.
tc-export() {
	local name
	for name in "$@" ; do
		if [[ $(type -t tc-get${name}) != "function" ]] ; then
			die "tc-export: invalid export variable '${name}'"
		fi
		# calling the getter has the side effect of exporting the var
		eval tc-get${name} > /dev/null
	done
}
+
# @FUNCTION: tc-is-cross-compiler
# @RETURN: Shell true if we are using a cross-compiler, shell false otherwise
tc-is-cross-compiler() {
	# Direct test instead of the old `return $([[ ... ]])` idiom, which
	# forked a useless subshell and only worked by accident (the command
	# substitution output is empty, so `return` fell back to $?).
	[[ ${CBUILD:-${CHOST}} != ${CHOST} ]]
}
+
# @FUNCTION: tc-is-softfloat
# @DESCRIPTION:
# See if this toolchain is a softfloat based one.
# @CODE
# The possible return values:
#  - only: the target is always softfloat (never had fpu)
#  - yes:  the target should support softfloat
#  - no:   the target doesn't support softfloat
# @CODE
# This allows us to react differently where packages accept
# softfloat flags in the case where support is optional, but
# rejects softfloat flags where the target always lacks an fpu.
tc-is-softfloat() {
	case ${CTARGET} in
		bfin*|h8300*)
			# these arches never had an FPU at all
			echo "only"
			;;
		*)
			if [[ ${CTARGET//_/-} == *-softfloat-* ]] ; then
				echo "yes"
			else
				echo "no"
			fi
			;;
	esac
}
+
# @FUNCTION: tc-is-hardfloat
# @DESCRIPTION:
# See if this toolchain is a hardfloat based one.
# @CODE
# The possible return values:
#  - yes:  the target should support hardfloat
#  - no:   the target doesn't support hardfloat
tc-is-hardfloat() {
	if [[ ${CTARGET//_/-} == *-hardfloat-* ]] ; then
		echo "yes"
	else
		echo "no"
	fi
}
+
# @FUNCTION: tc-is-static-only
# @DESCRIPTION:
# Return shell true if the target does not support shared libs, shell false
# otherwise.
tc-is-static-only() {
	local host=${CTARGET:-${CHOST}}

	# *MiNT doesn't have shared libraries, only platform so far.
	# (Dropped the old `return $([[ ... ]])` construct: it forked a
	# subshell for nothing; [[ ]] itself already is the test.)
	[[ ${host} == *-mint* ]]
}
+
# @FUNCTION: tc-has-openmp
# @USAGE: [toolchain prefix]
# @DESCRIPTION:
# See if the toolchain supports OpenMP: compile a minimal OpenMP program
# with -fopenmp and report the compiler's exit status.
tc-has-openmp() {
	local src="${T}/test-tc-openmp"
	cat <<-EOF > "${src}.c"
	#include <omp.h>
	int main() {
		int nthreads, tid, ret = 0;
		#pragma omp parallel private(nthreads, tid)
		{
		tid = omp_get_thread_num();
		nthreads = omp_get_num_threads(); ret += tid + nthreads;
		}
		return ret;
	}
	EOF
	$(tc-getCC "$@") -fopenmp "${src}.c" -o "${src}" >&/dev/null
	local status=$?
	rm -f "${src}"*
	return ${status}
}
+
# @FUNCTION: tc-has-tls
# @USAGE: [-s|-c|-l] [toolchain prefix]
# @DESCRIPTION:
# See if the toolchain supports thread local storage (TLS).  Use -s to test the
# compiler, -c to also test the assembler, and -l to also test the C library
# (the default).
tc-has-tls() {
	local src="${T}/test-tc-tls"
	cat <<-EOF > "${src}.c"
	int foo(int *i) {
		static __thread int j = 0;
		return *i ? j : *i;
	}
	EOF

	local flags
	case $1 in
		-s) flags="-S" ;;
		-c) flags="-c" ;;
		-l) ;;
		-*) die "Usage: tc-has-tls [-c|-l] [toolchain prefix]" ;;
	esac
	# default (-l): build a shared object with no undefined symbols,
	# which exercises compiler + assembler + libc TLS support
	: ${flags:=-fPIC -shared -Wl,-z,defs}
	[[ $1 == -* ]] && shift

	$(tc-getCC "$@") ${flags} "${src}.c" -o "${src}" >&/dev/null
	local status=$?
	rm -f "${src}"*
	return ${status}
}
+
+
# Parse information from CBUILD/CHOST/CTARGET rather than
# use external variables from the profile.
# $1 is the query type ("kern" or "portage"), $2 an optional tuple
# (defaults to CTARGET/CHOST).
tc-ninja_magic_to_arch() {
# helper: print $1 for kernel queries, $2 for portage queries
ninj() { [[ ${type} == "kern" ]] && echo $1 || echo $2 ; }

	local type=$1
	local host=$2
	[[ -z ${host} ]] && host=${CTARGET:-${CHOST}}

	case ${host} in
		powerpc-apple-darwin*)    echo ppc-macos;;
		powerpc64-apple-darwin*)  echo ppc64-macos;;
		i?86-apple-darwin*)       echo x86-macos;;
		x86_64-apple-darwin*)     echo x64-macos;;
		sparc-sun-solaris*)       echo sparc-solaris;;
		sparcv9-sun-solaris*)     echo sparc64-solaris;;
		i?86-pc-solaris*)         echo x86-solaris;;
		x86_64-pc-solaris*)       echo x64-solaris;;
		powerpc-ibm-aix*)         echo ppc-aix;;
		mips-sgi-irix*)           echo mips-irix;;
		ia64w-hp-hpux*)           echo ia64w-hpux;;
		ia64-hp-hpux*)            echo ia64-hpux;;
		hppa*64*-hp-hpux*)        echo hppa64-hpux;;
		hppa*-hp-hpux*)           echo hppa-hpux;;
		i?86-pc-freebsd*)         echo x86-freebsd;;
		x86_64-pc-freebsd*)       echo x64-freebsd;;
		powerpc-unknown-openbsd*) echo ppc-openbsd;;
		i?86-pc-openbsd*)         echo x86-openbsd;;
		x86_64-pc-openbsd*)       echo x64-openbsd;;
		i?86-pc-netbsd*)          echo x86-netbsd;;
		i?86-pc-interix*)         echo x86-interix;;
		i?86-pc-winnt*)           echo x86-winnt;;

		alpha*)		echo alpha;;
		arm*)		echo arm;;
		avr*)		ninj avr32 avr;;
		bfin*)		ninj blackfin bfin;;
		cris*)		echo cris;;
		hppa*)		ninj parisc hppa;;
		i?86*)
			# Starting with linux-2.6.24, the 'x86_64' and 'i386'
			# trees have been unified into 'x86'.
			# FreeBSD still uses i386
			if [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -lt $(KV_to_int 2.6.24) || ${host} == *freebsd* ]] ; then
				echo i386
			else
				echo x86
			fi
			;;
		ia64*)		echo ia64;;
		m68*)		echo m68k;;
		mips*)		echo mips;;
		nios2*)		echo nios2;;
		nios*)		echo nios;;
		powerpc*)
			# Starting with linux-2.6.15, the 'ppc' and 'ppc64' trees
			# have been unified into simply 'powerpc', but until 2.6.16,
			# ppc32 is still using ARCH="ppc" as default
			if [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -ge $(KV_to_int 2.6.16) ]] ; then
				echo powerpc
			elif [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -eq $(KV_to_int 2.6.15) ]] ; then
				if [[ ${host} == powerpc64* ]] || [[ ${PROFILE_ARCH} == "ppc64" ]] ; then
					echo powerpc
				else
					echo ppc
				fi
			elif [[ ${host} == powerpc64* ]] ; then
				echo ppc64
			elif [[ ${PROFILE_ARCH} == "ppc64" ]] ; then
				ninj ppc64 ppc
			else
				echo ppc
			fi
			;;
		s390*)		echo s390;;
		sh64*)		ninj sh64 sh;;
		sh*)		echo sh;;
		sparc64*)	ninj sparc64 sparc;;
		sparc*)		[[ ${PROFILE_ARCH} == "sparc64" ]] \
						&& ninj sparc64 sparc \
						|| echo sparc
					;;
		vax*)		echo vax;;
		x86_64*)
			# Starting with linux-2.6.24, the 'x86_64' and 'i386'
			# trees have been unified into 'x86'.
			if [[ ${type} == "kern" ]] && [[ $(KV_to_int ${KV}) -ge $(KV_to_int 2.6.24) ]] ; then
				echo x86
			else
				ninj x86_64 amd64
			fi
			;;

		# since our usage of tc-arch is largely concerned with
		# normalizing inputs for testing ${CTARGET}, let's filter
		# other cross targets (mingw and such) into the unknown.
		*)			echo unknown;;
	esac
}
# @FUNCTION: tc-arch-kernel
# @USAGE: [toolchain prefix]
# @RETURN: name of the kernel arch according to the compiler target
tc-arch-kernel() {
	# thin wrapper; see tc-ninja_magic_to_arch above
	tc-ninja_magic_to_arch kern "$@"
}
# @FUNCTION: tc-arch
# @USAGE: [toolchain prefix]
# @RETURN: name of the portage arch according to the compiler target
tc-arch() {
	# thin wrapper; see tc-ninja_magic_to_arch above
	tc-ninja_magic_to_arch portage "$@"
}
+
# tc-endian [tuple]
# Prints "big" or "little" for the given (or current) target tuple,
# judged purely from the CPU portion of the tuple.
tc-endian() {
	local cpu=${1:-${CTARGET:-${CHOST}}}
	cpu=${cpu%%-*}

	case ${cpu} in
		alpha*)   echo big ;;
		arm*b*)   echo big ;;
		arm*)     echo little ;;
		cris*)    echo little ;;
		hppa*)    echo big ;;
		i?86*)    echo little ;;
		ia64*)    echo little ;;
		m68*)     echo big ;;
		mips*l*)  echo little ;;
		mips*)    echo big ;;
		powerpc*) echo big ;;
		s390*)    echo big ;;
		sh*b*)    echo big ;;
		sh*)      echo little ;;
		sparc*)   echo big ;;
		x86_64*)  echo little ;;
		*)        echo wtf ;;
	esac
}
+
# Internal func.  The first argument is the version info to expand.
# Query the preprocessor to improve compatibility across different
# compilers rather than maintaining a --version flag matrix. #335943
# The preprocessor output becomes $1/$2/$3 (major/minor/patchlevel),
# which the caller's format string selects from via eval.
_gcc_fullversion() {
	local ver="$1"; shift
	set -- $($(tc-getCPP "$@") -E -P - <<<"__GNUC__ __GNUC_MINOR__ __GNUC_PATCHLEVEL__")
	eval echo "$ver"
}
+
+# Thin wrappers around _gcc_fullversion; each passes a format string that
+# selects fields from the "major minor micro" triple.
+# @FUNCTION: gcc-fullversion
+# @RETURN: compiler version (major.minor.micro: [3.4.6])
+gcc-fullversion() {
+	_gcc_fullversion '$1.$2.$3' "$@"
+}
+# @FUNCTION: gcc-version
+# @RETURN: compiler version (major.minor: [3.4].6)
+gcc-version() {
+	_gcc_fullversion '$1.$2' "$@"
+}
+# @FUNCTION: gcc-major-version
+# @RETURN: major compiler version (major: [3].4.6)
+gcc-major-version() {
+	_gcc_fullversion '$1' "$@"
+}
+# @FUNCTION: gcc-minor-version
+# @RETURN: minor compiler version (minor: 3.[4].6)
+gcc-minor-version() {
+	_gcc_fullversion '$2' "$@"
+}
+# @FUNCTION: gcc-micro-version
+# @RETURN: micro compiler version (micro: 3.4.[6])
+gcc-micro-version() {
+	_gcc_fullversion '$3' "$@"
+}
+
+# Returns the installation directory - internal toolchain
+# function for use by _gcc-specs-exists (for flag-o-matic).
+# Parses the "install:" line of `gcc -print-search-dirs`.
+_gcc-install-dir() {
+	echo "$(LC_ALL=C $(tc-getCC) -print-search-dirs 2> /dev/null |\
+		awk '$1=="install:" {print $2}')"
+}
+# Returns true if the indicated specs file exists - internal toolchain
+# function for use by flag-o-matic.
+# $1 - specs file name, relative to the gcc install dir.
+_gcc-specs-exists() {
+	[[ -f $(_gcc-install-dir)/$1 ]]
+}
+
+# Returns requested gcc specs directive unprocessed - for used by
+# gcc-specs-directive()
+# Note; later specs normally overwrite earlier ones; however if a later
+# spec starts with '+' then it appends.
+# gcc -dumpspecs is parsed first, followed by files listed by "gcc -v"
+# as "Reading <file>", in order.  Strictly speaking, if there's a
+# $(gcc_install_dir)/specs, the built-in specs aren't read, however by
+# the same token anything from 'gcc -dumpspecs' is overridden by
+# the contents of $(gcc_install_dir)/specs so the result is the
+# same either way.
+# The awk program below scans "*name:" section headers, accumulating the
+# body of the requested directive; a body line starting with '+' appends
+# to the previously seen value (pspec) instead of replacing it.
+_gcc-specs-directive_raw() {
+	local cc=$(tc-getCC)
+	local specfiles=$(LC_ALL=C ${cc} -v 2>&1 | awk '$1=="Reading" {print $NF}')
+	${cc} -dumpspecs 2> /dev/null | cat - ${specfiles} | awk -v directive=$1 \
+'BEGIN	{ pspec=""; spec=""; outside=1 }
+$1=="*"directive":"  { pspec=spec; spec=""; outside=0; next }
+	outside || NF==0 || ( substr($1,1,1)=="*" && substr($1,length($1),1)==":" ) { outside=1; next }
+	spec=="" && substr($0,1,1)=="+" { spec=pspec " " substr($0,2); next }
+	{ spec=spec $0 }
+END	{ print spec }'
+	return 0
+}
+
+# Return the requested gcc specs directive, with all included
+# specs expanded.
+# Note, it does not check for inclusion loops, which cause it
+# to never finish - but such loops are invalid for gcc and we're
+# assuming gcc is operational.
+# Each %(name) occurrence in the raw directive is replaced by the raw
+# value of the "name" directive, repeatedly, until none remain.
+gcc-specs-directive() {
+	local directive subdname subdirective
+	directive="$(_gcc-specs-directive_raw $1)"
+	while [[ ${directive} == *%\(*\)* ]]; do
+		subdname=${directive/*%\(}
+		subdname=${subdname/\)*}
+		subdirective="$(_gcc-specs-directive_raw ${subdname})"
+		directive="${directive//\%(${subdname})/${subdirective}}"
+	done
+	echo "${directive}"
+	return 0
+}
+
+# The predicates below test whether a substring like "{!norelro:" appears in
+# the expanded spec: if removing it changes the string, the spec contains it.
+# NOTE(review): "return $([[ ... ]])" propagates the [[ ]] exit status through
+# an empty command substitution; a bare "[[ ... ]]" would be equivalent.
+# Returns true if gcc sets relro
+gcc-specs-relro() {
+	local directive
+	directive=$(gcc-specs-directive link_command)
+	return $([[ "${directive/\{!norelro:}" != "${directive}" ]])
+}
+# Returns true if gcc sets now
+gcc-specs-now() {
+	local directive
+	directive=$(gcc-specs-directive link_command)
+	return $([[ "${directive/\{!nonow:}" != "${directive}" ]])
+}
+# Returns true if gcc builds PIEs
+gcc-specs-pie() {
+	local directive
+	directive=$(gcc-specs-directive cc1)
+	return $([[ "${directive/\{!nopie:}" != "${directive}" ]])
+}
+# Returns true if gcc builds with the stack protector
+gcc-specs-ssp() {
+	local directive
+	directive=$(gcc-specs-directive cc1)
+	return $([[ "${directive/\{!fno-stack-protector:}" != "${directive}" ]])
+}
+# Returns true if gcc upgrades fstack-protector to fstack-protector-all
+gcc-specs-ssp-to-all() {
+	local directive
+	directive=$(gcc-specs-directive cc1)
+	return $([[ "${directive/\{!fno-stack-protector-all:}" != "${directive}" ]])
+}
+# Returns true if gcc builds with fno-strict-overflow
+gcc-specs-nostrict() {
+	local directive
+	directive=$(gcc-specs-directive cc1)
+	return $([[ "${directive/\{!fstrict-overflow:}" != "${directive}" ]])
+}
+
+
+# @FUNCTION: gen_usr_ldscript
+# @USAGE: [-a] <list of libs to create linker scripts for>
+# @DESCRIPTION:
+# This function generate linker scripts in /usr/lib for dynamic
+# libs in /lib.  This is to fix linking problems when you have
+# the .so in /lib, and the .a in /usr/lib.  What happens is that
+# in some cases when linking dynamic, the .a in /usr/lib is used
+# instead of the .so in /lib due to gcc/libtool tweaking ld's
+# library search path.  This causes many builds to fail.
+# See bug #4411 for more info.
+#
+# Note that you should in general use the unversioned name of
+# the library (libfoo.so), as ldconfig should usually update it
+# correctly to point to the latest version of the library present.
+#
+# With -a ("auto" mode), the bare lib argument is expanded to
+# lib<name>$(get_libname) and the installed files are moved from
+# /usr/${libdir} to /${libdir} before the script/symlink is created.
+gen_usr_ldscript() {
+	local lib libdir=$(get_libdir) output_format="" auto=false suffix=$(get_libname)
+	# Fall back to composing ED from D/EPREFIX on EAPIs without prefix
+	# support.  NOTE(review): the value ends with '/', and the code below
+	# mixes "${ED}/..." and "${ED}..." concatenation styles - it relies on
+	# extra slashes being harmless to the tools involved.
+	[[ -z ${ED+set} ]] && local ED=${D%/}${EPREFIX}/
+
+	tc-is-static-only && return
+
+	# Just make sure it exists
+	dodir /usr/${libdir}
+
+	if [[ $1 == "-a" ]] ; then
+		auto=true
+		shift
+		dodir /${libdir}
+	fi
+
+	# OUTPUT_FORMAT gives hints to the linker as to what binary format
+	# is referenced ... makes multilib saner
+	output_format=$($(tc-getCC) ${CFLAGS} ${LDFLAGS} -Wl,--verbose 2>&1 | sed -n 's/^OUTPUT_FORMAT("\([^"]*\)",.*/\1/p')
+	[[ -n ${output_format} ]] && output_format="OUTPUT_FORMAT ( ${output_format} )"
+
+	for lib in "$@" ; do
+		local tlib
+		if ${auto} ; then
+			lib="lib${lib}${suffix}"
+		else
+			# Ensure /lib/${lib} exists to avoid dangling scripts/symlinks.
+			# This especially is for AIX where $(get_libname) can return ".a",
+			# so /lib/${lib} might be moved to /usr/lib/${lib} (by accident).
+			[[ -r ${ED}/${libdir}/${lib} ]] || continue
+			#TODO: better die here?
+		fi
+
+		# Per-platform handling: most targets get a GNU ld linker script;
+		# platforms without GNU binutils get a relative symlink instead.
+		case ${CTARGET:-${CHOST}} in
+		*-darwin*)
+			# tlib = the library's install_name (Mach-O soname equivalent)
+			if ${auto} ; then
+				tlib=$(scanmacho -qF'%S#F' "${ED}"/usr/${libdir}/${lib})
+			else
+				tlib=$(scanmacho -qF'%S#F' "${ED}"/${libdir}/${lib})
+			fi
+			if [[ -z ${tlib} ]] ; then
+				ewarn "gen_usr_ldscript: unable to read install_name from ${lib}"
+				tlib=${lib}
+			fi
+			tlib=${tlib##*/}
+
+			if ${auto} ; then
+				mv "${ED}"/usr/${libdir}/${lib%${suffix}}.*${suffix#.} "${ED}"/${libdir}/ || die
+				# some install_names are funky: they encode a version
+				if [[ ${tlib} != ${lib%${suffix}}.*${suffix#.} ]] ; then
+					mv "${ED}"/usr/${libdir}/${tlib%${suffix}}.*${suffix#.} "${ED}"/${libdir}/ || die
+				fi
+				[[ ${tlib} != ${lib} ]] && rm -f "${ED}"/${libdir}/${lib}
+			fi
+
+			# Mach-O files have an id, which is like a soname, it tells how
+			# another object linking against this lib should reference it.
+			# Since we moved the lib from usr/lib into lib this reference is
+			# wrong.  Hence, we update it here.  We don't configure with
+			# libdir=/lib because that messes up libtool files.
+			# Make sure we don't lose the specific version, so just modify the
+			# existing install_name
+			if [[ ! -w "${ED}/${libdir}/${tlib}" ]] ; then
+				chmod u+w "${ED}${libdir}/${tlib}" # needed to write to it
+				local nowrite=yes
+			fi
+			install_name_tool \
+				-id "${EPREFIX}"/${libdir}/${tlib} \
+				"${ED}"/${libdir}/${tlib} || die "install_name_tool failed"
+			[[ -n ${nowrite} ]] && chmod u-w "${ED}${libdir}/${tlib}"
+			# Now as we don't use GNU binutils and our linker doesn't
+			# understand linker scripts, just create a symlink.
+			pushd "${ED}/usr/${libdir}" > /dev/null
+			ln -snf "../../${libdir}/${tlib}" "${lib}"
+			popd > /dev/null
+			;;
+		*-aix*|*-irix*|*64*-hpux*|*-interix*|*-winnt*)
+			if ${auto} ; then
+				mv "${ED}"/usr/${libdir}/${lib}* "${ED}"/${libdir}/ || die
+				# no way to retrieve soname on these platforms (?)
+				tlib=$(readlink "${ED}"/${libdir}/${lib})
+				tlib=${tlib##*/}
+				if [[ -z ${tlib} ]] ; then
+					# ok, apparently was not a symlink, don't remove it and
+					# just link to it
+					tlib=${lib}
+				else
+					rm -f "${ED}"/${libdir}/${lib}
+				fi
+			else
+				tlib=${lib}
+			fi
+
+			# we don't have GNU binutils on these platforms, so we symlink
+			# instead, which seems to work fine.  Keep it relative, otherwise
+			# we break some QA checks in Portage
+			# on interix, the linker scripts would work fine in _most_
+			# situations. if a library links to such a linker script the
+			# absolute path to the correct library is inserted into the binary,
+			# which is wrong, since anybody linking _without_ libtool will miss
+			# some dependencies, since the stupid linker cannot find libraries
+			# hardcoded with absolute paths (as opposed to the loader, which
+			# seems to be able to do this).
+			# this has been seen while building shared-mime-info which needs
+			# libxml2, but links without libtool (and does not add libz to the
+			# command line by itself).
+			pushd "${ED}/usr/${libdir}" > /dev/null
+			ln -snf "../../${libdir}/${tlib}" "${lib}"
+			popd > /dev/null
+			;;
+		hppa*-hpux*) # PA-RISC 32bit (SOM) only, others (ELF) match *64*-hpux* above.
+			if ${auto} ; then
+				tlib=$(chatr "${ED}"/usr/${libdir}/${lib} | sed -n '/internal name:/{n;s/^ *//;p;q}')
+				[[ -z ${tlib} ]] && tlib=${lib}
+				tlib=${tlib##*/} # 'internal name' can have a path component
+				mv "${ED}"/usr/${libdir}/${lib}* "${ED}"/${libdir}/ || die
+				# some SONAMEs are funky: they encode a version before the .so
+				if [[ ${tlib} != ${lib}* ]] ; then
+					mv "${ED}"/usr/${libdir}/${tlib}* "${ED}"/${libdir}/ || die
+				fi
+				[[ ${tlib} != ${lib} ]] &&
+				rm -f "${ED}"/${libdir}/${lib}
+			else
+				tlib=$(chatr "${ED}"/${libdir}/${lib} | sed -n '/internal name:/{n;s/^ *//;p;q}')
+				[[ -z ${tlib} ]] && tlib=${lib}
+				tlib=${tlib##*/} # 'internal name' can have a path component
+			fi
+			pushd "${ED}"/usr/${libdir} >/dev/null
+			ln -snf "../../${libdir}/${tlib}" "${lib}"
+			# need the internal name in usr/lib too, to be available at runtime
+			# when linked with /path/to/lib.sl (hardcode_direct_absolute=yes)
+			[[ ${tlib} != ${lib} ]] &&
+			ln -snf "../../${libdir}/${tlib}" "${tlib}"
+			popd >/dev/null
+			;;
+		*)
+			# Default (GNU binutils): write a linker script redirecting to
+			# the real lib; tlib = the ELF SONAME read via scanelf.
+			if ${auto} ; then
+				tlib=$(scanelf -qF'%S#F' "${ED}"/usr/${libdir}/${lib})
+				if [[ -z ${tlib} ]] ; then
+					ewarn "gen_usr_ldscript: unable to read SONAME from ${lib}"
+					tlib=${lib}
+				fi
+				mv "${ED}"/usr/${libdir}/${lib}* "${ED}"/${libdir}/ || die
+				# some SONAMEs are funky: they encode a version before the .so
+				if [[ ${tlib} != ${lib}* ]] ; then
+					mv "${ED}"/usr/${libdir}/${tlib}* "${ED}"/${libdir}/ || die
+				fi
+				[[ ${tlib} != ${lib} ]] && rm -f "${ED}"/${libdir}/${lib}
+			else
+				tlib=${lib}
+			fi
+			cat > "${ED}/usr/${libdir}/${lib}" <<-END_LDSCRIPT
+			/* GNU ld script
+			   Since Gentoo has critical dynamic libraries in /lib, and the static versions
+			   in /usr/lib, we need to have a "fake" dynamic lib in /usr/lib, otherwise we
+			   run into linking problems.  This "fake" dynamic lib is a linker script that
+			   redirects the linker to the real lib.  And yes, this works in the cross-
+			   compiling scenario as the sysroot-ed linker will prepend the real path.
+
+			   See bug http://bugs.gentoo.org/4411 for more info.
+			 */
+			${output_format}
+			GROUP ( ${EPREFIX}/${libdir}/${tlib} )
+			END_LDSCRIPT
+			;;
+		esac
+		fperms a+x "/usr/${libdir}/${lib}" || die "could not change perms on ${lib}"
+	done
+}

diff --git a/eclass/versionator.eclass b/eclass/versionator.eclass
new file mode 100644
index 0000000..05acf48
--- /dev/null
+++ b/eclass/versionator.eclass
@@ -0,0 +1,577 @@
+# Copyright 1999-2008 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/eclass/versionator.eclass,v 1.17 2010/07/19 20:17:35 vapier Exp $
+
+# @ECLASS: versionator.eclass
+# @MAINTAINER:
+# base-system@gentoo.org
+# @BLURB: functions which simplify manipulation of ${PV} and similar version strings
+# @DESCRIPTION:
+# This eclass provides functions which simplify manipulating $PV and similar
+# variables. Most functions default to working with $PV, although other
+# values can be used.
+# @EXAMPLE:
+# Simple Example 1: $PV is 1.2.3b, we want 1_2.3b:
+#     MY_PV=$(replace_version_separator 1 '_' )
+#
+# Simple Example 2: $PV is 1.4.5, we want 1:
+#     MY_MAJORV=$(get_major_version )
+#
+# Rather than being a number, the index parameter can be a separator character
+# such as '-', '.' or '_'. In this case, the first separator of this kind is
+# selected.
+#
+# There's also:
+#     version_is_at_least             want      have
+#  which may be buggy, so use with caution.
+
+inherit eutils
+
+# @FUNCTION: get_all_version_components
+# @USAGE: [version]
+# @DESCRIPTION:
+# Split up a version string into its component parts. If no parameter is
+# supplied, defaults to $PV.
+#     0.8.3       ->  0 . 8 . 3
+#     7c          ->  7 c
+#     3.0_p2      ->  3 . 0 _ p2
+#     20040905    ->  20040905
+#     3.0c-r1     ->  3 . 0 c - r1
+# Uses the exported globals VERSIONATOR_CACHE_VER_STR/VERSIONATOR_CACHE_RESULT
+# as a one-entry memoization cache.  Requires extglob for the +(...) patterns.
+get_all_version_components() {
+	eshopts_push -s extglob
+	local ver_str=${1:-${PV}} result result_idx=0
+	result=( )
+
+	# sneaky cache trick cache to avoid having to parse the same thing several
+	# times.
+	if [[ "${VERSIONATOR_CACHE_VER_STR}" == "${ver_str}" ]] ; then
+		echo ${VERSIONATOR_CACHE_RESULT}
+		eshopts_pop
+		return
+	fi
+	export VERSIONATOR_CACHE_VER_STR="${ver_str}"
+
+	# Consume the string left to right, one token per iteration, keyed off
+	# its first character.
+	while [[ -n "$ver_str" ]] ; do
+		case "${ver_str:0:1}" in
+			# number: parse whilst we have a number
+			[[:digit:]])
+				result[$result_idx]="${ver_str%%[^[:digit:]]*}"
+				ver_str="${ver_str##+([[:digit:]])}"
+				result_idx=$(($result_idx + 1))
+				;;
+
+			# separator: single character
+			[-_.])
+				result[$result_idx]="${ver_str:0:1}"
+				ver_str="${ver_str:1}"
+				result_idx=$(($result_idx + 1))
+				;;
+
+			# letter: grab the letters plus any following numbers
+			[[:alpha:]])
+				local not_match="${ver_str##+([[:alpha:]])*([[:digit:]])}"
+				result[$result_idx]=${ver_str:0:$((${#ver_str} - ${#not_match}))}
+				ver_str="${not_match}"
+				result_idx=$(($result_idx + 1))
+				;;
+
+			# huh?
+			*)
+				result[$result_idx]="${ver_str:0:1}"
+				ver_str="${ver_str:1}"
+				result_idx=$(($result_idx + 1))
+				;;
+		esac
+	done
+
+	export VERSIONATOR_CACHE_RESULT="${result[@]}"
+	echo ${result[@]}
+	eshopts_pop
+}
+
+# @FUNCTION: get_version_components
+# @USAGE: [version]
+# @DESCRIPTION:
+# Get the important version components, excluding '.', '-' and '_'. Defaults to
+# $PV if no parameter is supplied.
+#     0.8.3       ->  0 8 3
+#     7c          ->  7 c
+#     3.0_p2      ->  3 0 p2
+#     20040905    ->  20040905
+#     3.0c-r1     ->  3 0 c r1
+get_version_components() {
+	eshopts_push -s extglob
+	# c is assigned as a single string; the separator characters are then
+	# replaced with spaces and the unquoted expansion word-splits the result
+	# back into individual components.
+	local c="$(get_all_version_components "${1:-${PV}}")"
+	c=( ${c[@]//[-._]/ } )
+	echo ${c[@]}
+	eshopts_pop
+}
+
+# @FUNCTION: get_major_version
+# @USAGE: [version]
+# @DESCRIPTION:
+# Get the major version of a value. Defaults to $PV if no parameter is supplied.
+#     0.8.3       ->  0
+#     7c          ->  7
+#     3.0_p2      ->  3
+#     20040905    ->  20040905
+#     3.0c-r1     ->  3
+get_major_version() {
+	eshopts_push -s extglob
+	local c
+	# The first component returned by get_all_version_components is always
+	# the leading (major) token.
+	c=( $(get_all_version_components "${1:-${PV}}" ) )
+	echo ${c[0]}
+	eshopts_pop
+}
+
+# @FUNCTION: get_version_component_range
+# @USAGE: <range> [version]
+# @DESCRIPTION:
+# Get a particular component or range of components from the version. If no
+# version parameter is supplied, defaults to $PV.  Components are counted
+# from 1; separators between selected components are echoed as-is.
+#    1      1.2.3       -> 1
+#    1-2    1.2.3       -> 1.2
+#    2-     1.2.3       -> 2.3
+get_version_component_range() {
+	eshopts_push -s extglob
+	local c v="${2:-${PV}}" range="${1}" range_start range_end i=-1 j=0
+	c=( $(get_all_version_components ${v} ) )
+	# "N" alone means N-N; "N-" means from N to the last component.
+	range_start="${range%-*}" ; range_start="${range_start:-1}"
+	range_end="${range#*-}"   ; range_end="${range_end:-${#c[@]}}"
+
+	# Skip tokens until j reaches range_start; only non-separator tokens
+	# (those with chars left after stripping -._) advance the counter.
+	while (( j < ${range_start} )) ; do
+		i=$(($i + 1))
+		[[ $i -gt ${#c[@]} ]] && eshopts_pop && return
+		[[ -n "${c[${i}]//[-._]}" ]] && j=$(($j + 1))
+	done
+
+	# Emit tokens (components and interleaved separators) until range_end.
+	while (( j <= ${range_end} )) ; do
+		echo -n ${c[$i]}
+		[[ $i -gt ${#c[@]} ]] && eshopts_pop && return
+		[[ -n "${c[${i}]//[-._]}" ]] && j=$(($j + 1))
+		i=$(($i + 1))
+	done
+	eshopts_pop
+}
+
+# @FUNCTION: get_after_major_version
+# @USAGE: [version]
+# @DESCRIPTION:
+# Get everything after the major version and its separator (if present) of a
+# value. Defaults to $PV if no parameter is supplied.
+#     0.8.3       ->  8.3
+#     7c          ->  c
+#     3.0_p2      ->  0_p2
+#     20040905    ->  (empty string)
+#     3.0c-r1     ->  0c-r1
+# Simply delegates to get_version_component_range with the open range "2-".
+get_after_major_version() {
+	eshopts_push -s extglob
+	echo $(get_version_component_range 2- "${1:-${PV}}" )
+	eshopts_pop
+}
+
+# @FUNCTION: replace_version_separator
+# @USAGE: <search> <replacement> [subject]
+# @DESCRIPTION:
+# Replace the $1th separator with $2 in $3 (defaults to $PV if $3 is not
+# supplied). If there are fewer than $1 separators, don't change anything.
+#     1 '_' 1.2.3       -> 1_2.3
+#     2 '_' 1.2.3       -> 1.2_3
+#     1 '_' 1b-2.3      -> 1b_2.3
+# Rather than being a number, $1 can be a separator character such as '-', '.'
+# or '_'. In this case, the first separator of this kind is selected.
+replace_version_separator() {
+	eshopts_push -s extglob
+	local w i c found=0 v="${3:-${PV}}"
+	w=${1:-1}
+	c=( $(get_all_version_components ${v} ) )
+	# A $1 with no digits at all is treated as a literal separator char;
+	# otherwise it is a 1-based separator index.
+	if [[ "${w//[[:digit:]]/}" == "${w}" ]] ; then
+		# it's a character, not an index
+		for (( i = 0 ; i < ${#c[@]} ; i = $i + 1 )) ; do
+			if [[ "${c[${i}]}" == "${w}" ]] ; then
+				c[${i}]="${2}"
+				break
+			fi
+		done
+	else
+		# count separator tokens (those consisting only of -._) until we
+		# reach the requested index
+		for (( i = 0 ; i < ${#c[@]} ; i = $i + 1 )) ; do
+			if [[ -n "${c[${i}]//[^-._]}" ]] ; then
+				found=$(($found + 1))
+				if [[ "$found" == "${w}" ]] ; then
+					c[${i}]="${2}"
+					break
+				fi
+			fi
+		done
+	fi
+	# Flatten the array to a string and strip the spaces introduced by "$@"
+	# style joining, yielding the rebuilt version string.
+	c=${c[@]}
+	echo ${c// }
+	eshopts_pop
+}
+
+# @FUNCTION: replace_all_version_separators
+# @USAGE: <replacement> [subject]
+# @DESCRIPTION:
+# Replace all version separators in $2 (defaults to $PV) with $1.
+#     '_' 1b.2.3        -> 1b_2_3
+replace_all_version_separators() {
+	eshopts_push -s extglob
+	local c
+	c=( $(get_all_version_components "${2:-${PV}}" ) )
+	# Substitute every -._ token, flatten, then strip the joining spaces.
+	c="${c[@]//[-._]/$1}"
+	echo ${c// }
+	eshopts_pop
+}
+
+# @FUNCTION: delete_version_separator
+# @USAGE: <search> [subject]
+# @DESCRIPTION:
+# Delete the $1th separator in $2 (defaults to $PV if $2 is not supplied). If
+# there are fewer than $1 separators, don't change anything.
+#     1 1.2.3       -> 12.3
+#     2 1.2.3       -> 1.23
+#     1 1b-2.3      -> 1b2.3
+# Rather than being a number, $1 can be a separator character such as '-', '.'
+# or '_'. In this case, the first separator of this kind is deleted.
+# Implemented as replace_version_separator with an empty replacement.
+delete_version_separator() {
+	eshopts_push -s extglob
+	replace_version_separator "${1}" "" "${2}"
+	eshopts_pop
+}
+
+# @FUNCTION: delete_all_version_separators
+# @USAGE: [subject]
+# @DESCRIPTION:
+# Delete all version separators in $1 (defaults to $PV).
+#     1b.2.3        -> 1b23
+# Implemented as replace_all_version_separators with an empty replacement.
+delete_all_version_separators() {
+	eshopts_push -s extglob
+	replace_all_version_separators "" "${1}"
+	eshopts_pop
+}
+
+# @FUNCTION: get_version_component_count
+# @USAGE: [version]
+# @DESCRIPTION:
+# How many version components are there in $1 (defaults to $PV)?
+#     1.0.1       ->  3
+#     3.0c-r1     ->  4
+get_version_component_count() {
+	eshopts_push -s extglob
+	local a
+	a=( $(get_version_components "${1:-${PV}}" ) )
+	echo ${#a[@]}
+	eshopts_pop
+}
+
+# @FUNCTION: get_last_version_component_index
+# @USAGE: [version]
+# @DESCRIPTION:
+# What is the index of the last version component in $1 (defaults to $PV)?
+# Equivalent to get_version_component_count - 1.
+#     1.0.1       ->  2
+#     3.0c-r1     ->  3
+get_last_version_component_index() {
+	eshopts_push -s extglob
+	echo $(( $(get_version_component_count "${1:-${PV}}" ) - 1 ))
+	eshopts_pop
+}
+
+# @FUNCTION: version_is_at_least
+# @USAGE: <want> [have]
+# @DESCRIPTION:
+# Is $2 (defaults to $PVR) at least version $1? Intended for use in eclasses
+# only. May not be reliable, be sure to do very careful testing before actually
+# using this.
+# Maps version_compare results: 1 (want < have) and 2 (equal) -> success,
+# 3 (want > have) -> failure; anything else is an internal error.
+version_is_at_least() {
+	eshopts_push -s extglob
+	local want_s="$1" have_s="${2:-${PVR}}" r
+	version_compare "${want_s}" "${have_s}"
+	r=$?
+	case $r in
+		1|2)
+			eshopts_pop
+			return 0
+			;;
+		3)
+			eshopts_pop
+			return 1
+			;;
+		*)
+			eshopts_pop
+			die "versionator compare bug [atleast, ${want_s}, ${have_s}, ${r}]"
+			;;
+	esac
+	eshopts_pop
+}
+
+# @FUNCTION: version_compare
+# @USAGE: <A> <B>
+# @DESCRIPTION:
+# Takes two parameters (A, B) which are versions. If A is an earlier version
+# than B, returns 1. If A is identical to B, return 2. If A is later than B,
+# return 3. You probably want version_is_at_least rather than this function.
+# May not be very reliable. Test carefully before using this.
+# Comparison proceeds in three stages: numeric dot-components, then a single
+# optional trailing letter, then known suffixes (_alpha/_beta/_pre/_rc/_p/-r).
+version_compare() {
+	eshopts_push -s extglob
+	local ver_a=${1} ver_b=${2} parts_a parts_b cur_idx_a=0 cur_idx_b=0
+	parts_a=( $(get_all_version_components "${ver_a}" ) )
+	parts_b=( $(get_all_version_components "${ver_b}" ) )
+
+	### compare number parts.
+	local inf_loop=0
+	while true ; do
+		inf_loop=$(( ${inf_loop} + 1 ))
+		[[ ${inf_loop} -gt 20 ]] && \
+			die "versionator compare bug [numbers, ${ver_a}, ${ver_b}]"
+
+		# grab the current number components
+		local cur_tok_a=${parts_a[${cur_idx_a}]}
+		local cur_tok_b=${parts_b[${cur_idx_b}]}
+
+		# number?  If so, advance past it and any following '.' separator;
+		# otherwise blank the token so this side is treated as exhausted.
+		if [[ -n ${cur_tok_a} ]] && [[ -z ${cur_tok_a//[[:digit:]]} ]] ; then
+			cur_idx_a=$(( ${cur_idx_a} + 1 ))
+			[[ ${parts_a[${cur_idx_a}]} == "." ]] \
+				&& cur_idx_a=$(( ${cur_idx_a} + 1 ))
+		else
+			cur_tok_a=""
+		fi
+
+		if [[ -n ${cur_tok_b} ]] && [[ -z ${cur_tok_b//[[:digit:]]} ]] ; then
+			cur_idx_b=$(( ${cur_idx_b} + 1 ))
+			[[ ${parts_b[${cur_idx_b}]} == "." ]] \
+				&& cur_idx_b=$(( ${cur_idx_b} + 1 ))
+		else
+			cur_tok_b=""
+		fi
+
+		# done with number components?
+		[[ -z ${cur_tok_a} ]] && [[ -z ${cur_tok_b} ]] && break
+
+		# to avoid going into octal mode, strip any leading zeros. otherwise
+		# bash will throw a hissy fit on versions like 6.3.068.
+		cur_tok_a=${cur_tok_a##+(0)}
+		cur_tok_b=${cur_tok_b##+(0)}
+
+		# if a component is blank, make it zero.
+		[[ -z ${cur_tok_a} ]] && cur_tok_a=0
+		[[ -z ${cur_tok_b} ]] && cur_tok_b=0
+
+		# compare
+		[[ ${cur_tok_a} -lt ${cur_tok_b} ]] && eshopts_pop && return 1
+		[[ ${cur_tok_a} -gt ${cur_tok_b} ]] && eshopts_pop && return 3
+	done
+
+	### number parts equal. compare letter parts.
+	# A missing letter sorts as "@", which precedes 'a'..'z', so "1.2" < "1.2a".
+	local letter_a=
+	letter_a=${parts_a[${cur_idx_a}]}
+	if [[ ${#letter_a} -eq 1 ]] && [[ -z ${letter_a/[a-z]} ]] ; then
+		cur_idx_a=$(( ${cur_idx_a} + 1 ))
+	else
+		letter_a="@"
+	fi
+
+	local letter_b=
+	letter_b=${parts_b[${cur_idx_b}]}
+	if [[ ${#letter_b} -eq 1 ]] && [[ -z ${letter_b/[a-z]} ]] ; then
+		cur_idx_b=$(( ${cur_idx_b} + 1 ))
+	else
+		letter_b="@"
+	fi
+
+	# compare
+	[[ ${letter_a} < ${letter_b} ]] && eshopts_pop && return 1
+	[[ ${letter_a} > ${letter_b} ]] && eshopts_pop && return 3
+
+	### letter parts equal. compare suffixes in order.
+	# rule "name=1" means having that suffix sorts earlier than not having
+	# it (alpha/beta/pre/rc); "name=3" means it sorts later (p/r).
+	local suffix rule part r_lt r_gt
+	for rule in "alpha=1" "beta=1" "pre=1" "rc=1" "p=3" "r=3" ; do
+		suffix=${rule%%=*}
+		r_lt=${rule##*=}
+		[[ ${r_lt} -eq 1 ]] && r_gt=3 || r_gt=1
+
+		# extract "<suffix><digits>" tokens; the trailing 0 appended below
+		# makes a bare suffix compare as a number.
+		local suffix_a=
+		for part in ${parts_a[@]} ; do
+			[[ ${part#${suffix}} != ${part} ]] && \
+				[[ -z ${part##${suffix}*([[:digit:]])} ]] && \
+				suffix_a=${part#${suffix}}0
+		done
+
+		local suffix_b=
+		for part in ${parts_b[@]} ; do
+			[[ ${part#${suffix}} != ${part} ]] && \
+				[[ -z ${part##${suffix}*([[:digit:]])} ]] && \
+				suffix_b=${part#${suffix}}0
+		done
+
+		[[ -z ${suffix_a} ]] && [[ -z ${suffix_b} ]] && continue
+
+		[[ -z ${suffix_a} ]] && eshopts_pop && return ${r_gt}
+		[[ -z ${suffix_b} ]] && eshopts_pop && return ${r_lt}
+
+		# avoid octal problems
+		suffix_a=${suffix_a##+(0)} ; suffix_a=${suffix_a:-0}
+		suffix_b=${suffix_b##+(0)} ; suffix_b=${suffix_b:-0}
+
+		[[ ${suffix_a} -lt ${suffix_b} ]] && eshopts_pop && return 1
+		[[ ${suffix_a} -gt ${suffix_b} ]] && eshopts_pop && return 3
+	done
+
+	### no differences.
+	eshopts_pop
+	return 2
+}
+
+# @FUNCTION: version_sort
+# @USAGE: <version> [more versions...]
+# @DESCRIPTION:
+# Returns its parameters sorted, highest version last. We're using a quadratic
+# algorithm for simplicity, so don't call it with more than a few dozen items.
+# Uses version_compare, so be careful.
+# This is a straightforward selection sort: on each pass the smallest
+# remaining item is swapped into position 'left'.
+version_sort() {
+	eshopts_push -s extglob
+	local items= left=0
+	items=( $@ )
+	while [[ ${left} -lt ${#items[@]} ]] ; do
+		local lowest_idx=${left}
+		local idx=$(( ${lowest_idx} + 1 ))
+		while [[ ${idx} -lt ${#items[@]} ]] ; do
+			# return code 3 means items[lowest_idx] > items[idx]
+			version_compare "${items[${lowest_idx}]}" "${items[${idx}]}"
+			[[ $? -eq 3 ]] && lowest_idx=${idx}
+			idx=$(( ${idx} + 1 ))
+		done
+		local tmp=${items[${lowest_idx}]}
+		items[${lowest_idx}]=${items[${left}]}
+		items[${left}]=${tmp}
+		left=$(( ${left} + 1 ))
+	done
+	echo ${items[@]}
+	eshopts_pop
+}
+
+# @FUNCTION: version_format_string
+# @USAGE: <format> [version]
+# @DESCRIPTION:
+# Reformat complicated version strings.  The first argument is the string
+# to reformat with while the rest of the args are passed on to the
+# get_version_components function.  You should make sure to single quote
+# the first argument since it'll have variables that get delayed expansion.
+# @EXAMPLE:
+# P="cow-hat-1.2.3_p4"
+# MY_P=$(version_format_string '${PN}_source_$1_$2-$3_$4')
+# Now MY_P will be: cow-hat_source_1_2-3_p4
+version_format_string() {
+	local fstr=$1
+	shift
+	# Make each version component a positional parameter, then let eval
+	# expand the $1/$2/... references embedded in the format string.
+	set -- $(get_version_components "$@")
+	eval echo "${fstr}"
+}
+
+# Self-test for version_compare: feeds a table of "A expected B" triples
+# plus generated suffix/revision cases through a small assertion helper,
+# printing a FAIL line for every mismatch.  Not called automatically.
+__versionator__test_version_compare() {
+	eshopts_push -s extglob
+	local lt=1 eq=2 gt=3 p q
+
+	# $1=version A, $2=expected return code, $3=version B
+	__versionator__test_version_compare_t() {
+		version_compare "${1}" "${3}"
+		local r=$?
+		[[ ${r} -eq ${2} ]] || echo "FAIL: ${@} (got ${r} exp ${2})"
+	}
+
+	echo "
+		0             $lt 1
+		1             $lt 2
+		2             $gt 1
+		2             $eq 2
+		0             $eq 0
+		10            $lt 20
+		68            $eq 068
+		068           $gt 67
+		068           $lt 69
+
+		1.0           $lt 2.0
+		2.0           $eq 2.0
+		2.0           $gt 1.0
+
+		1.0           $gt 0.0
+		0.0           $eq 0.0
+		0.0           $lt 1.0
+
+		0.1           $lt 0.2
+		0.2           $eq 0.2
+		0.3           $gt 0.2
+
+		1.2           $lt 2.1
+		2.1           $gt 1.2
+
+		1.2.3         $lt 1.2.4
+		1.2.4         $gt 1.2.3
+
+		1.2.0         $eq 1.2
+		1.2.1         $gt 1.2
+		1.2           $lt 1.2.1
+
+		1.2b          $eq 1.2b
+		1.2b          $lt 1.2c
+		1.2b          $gt 1.2a
+		1.2b          $gt 1.2
+		1.2           $lt 1.2a
+
+		1.3           $gt 1.2a
+		1.3           $lt 1.3a
+
+		1.0_alpha7    $lt 1.0_beta7
+		1.0_beta      $lt 1.0_pre
+		1.0_pre5      $lt 1.0_rc2
+		1.0_rc2       $lt 1.0
+
+		1.0_p1        $gt 1.0
+		1.0_p1-r1     $gt 1.0_p1
+
+		1.0_alpha6-r1 $gt 1.0_alpha6
+		1.0_beta6-r1  $gt 1.0_alpha6-r2
+
+		1.0_pre1      $lt 1.0-p1
+
+		1.0p          $gt 1.0_p1
+		1.0r          $gt 1.0-r1
+		1.6.15        $gt 1.6.10-r2
+		1.6.10-r2     $lt 1.6.15
+
+	" | while read a b c ; do
+		[[ -z "${a}${b}${c}" ]] && continue;
+		__versionator__test_version_compare_t "${a}" "${b}" "${c}"
+	done
+
+
+	# Generated cases: alpha/beta/pre/rc suffixes sort before the bare
+	# version; p/r suffixes sort after it.
+	for q in "alpha beta pre rc=${lt};${gt}" "p r=${gt};${lt}" ; do
+		for p in ${q%%=*} ; do
+			local c=${q##*=}
+			local alt=${c%%;*} agt=${c##*;}
+			__versionator__test_version_compare_t "1.0" $agt "1.0_${p}"
+			__versionator__test_version_compare_t "1.0" $agt "1.0_${p}1"
+			__versionator__test_version_compare_t "1.0" $agt "1.0_${p}068"
+
+			__versionator__test_version_compare_t "2.0_${p}"    $alt "2.0"
+			__versionator__test_version_compare_t "2.0_${p}1"   $alt "2.0"
+			__versionator__test_version_compare_t "2.0_${p}068" $alt "2.0"
+
+			__versionator__test_version_compare_t "1.0_${p}"  $eq "1.0_${p}"
+			__versionator__test_version_compare_t "0.0_${p}"  $lt "0.0_${p}1"
+			__versionator__test_version_compare_t "666_${p}3" $gt "666_${p}"
+
+			__versionator__test_version_compare_t "1_${p}7"  $lt "1_${p}8"
+			__versionator__test_version_compare_t "1_${p}7"  $eq "1_${p}7"
+			__versionator__test_version_compare_t "1_${p}7"  $gt "1_${p}6"
+			__versionator__test_version_compare_t "1_${p}09" $eq "1_${p}9"
+		done
+	done
+
+	# Revision (-r) and patch (_p) numbers compare numerically.
+	for p in "-r" "_p" ; do
+		__versionator__test_version_compare_t "7.2${p}1" $lt "7.2${p}2"
+		__versionator__test_version_compare_t "7.2${p}2" $gt "7.2${p}1"
+		__versionator__test_version_compare_t "7.2${p}3" $gt "7.2${p}2"
+		__versionator__test_version_compare_t "7.2${p}2" $lt "7.2${p}3"
+	done
+	eshopts_pop
+}



^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2011-04-25 13:46 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-04-25 13:46 [gentoo-commits] proj/gentoo-openbsd:master commit in: eclass/ Maxim Koltsov

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox