public inbox for gentoo-commits@lists.gentoo.org
 help / color / mirror / Atom feed
From: "Magnus Granberg" <zorry@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] dev/zorry:master commit in: gobs/pym/
Date: Fri, 22 Mar 2013 19:05:55 +0000 (UTC)	[thread overview]
Message-ID: <1363979088.594c1bbdd9c78b67804891c58f2a0194c679e59c.zorry@gentoo> (raw)

commit:     594c1bbdd9c78b67804891c58f2a0194c679e59c
Author:     Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Fri Mar 22 19:04:48 2013 +0000
Commit:     Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Fri Mar 22 19:04:48 2013 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=594c1bbd

update portage files and highlight log

---
 gobs/pym/Scheduler.py    |   73 ++++++----
 gobs/pym/actions.py      |  379 ++++++++++++++++++++++++++++------------------
 gobs/pym/build_log.py    |  172 +++++++++++----------
 gobs/pym/main.py         |   33 ++++-
 gobs/pym/mysql_querys.py |   19 +++-
 5 files changed, 414 insertions(+), 262 deletions(-)

diff --git a/gobs/pym/Scheduler.py b/gobs/pym/Scheduler.py
index 6c446cb..3aaf147 100644
--- a/gobs/pym/Scheduler.py
+++ b/gobs/pym/Scheduler.py
@@ -1,7 +1,7 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
 
 from collections import deque
 import gc
@@ -18,7 +18,7 @@ import zlib
 import portage
 from portage import os
 from portage import _encodings
-from portage import _unicode_decode, _unicode_encode
+from portage import _unicode_encode
 from portage.cache.mappings import slot_dict_class
 from portage.elog.messages import eerror
 from portage.localization import _
@@ -411,7 +411,7 @@ class Scheduler(PollScheduler):
 			if not (isinstance(task, Package) and \
 				task.operation == "merge"):
 				continue
-			if 'interactive' in task.metadata.properties:
+			if 'interactive' in task.properties:
 				interactive_tasks.append(task)
 		return interactive_tasks
 
@@ -720,7 +720,6 @@ class Scheduler(PollScheduler):
 			return
 
 		if self._parallel_fetch:
-			self._status_msg("Starting parallel fetch")
 
 			prefetchers = self._prefetchers
 
@@ -785,10 +784,10 @@ class Scheduler(PollScheduler):
 			if x.operation == "uninstall":
 				continue
 
-			if x.metadata["EAPI"] in ("0", "1", "2", "3"):
+			if x.eapi in ("0", "1", "2", "3"):
 				continue
 
-			if "pretend" not in x.metadata.defined_phases:
+			if "pretend" not in x.defined_phases:
 				continue
 
 			out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
@@ -807,7 +806,7 @@ class Scheduler(PollScheduler):
 			build_dir_path = os.path.join(
 				os.path.realpath(settings["PORTAGE_TMPDIR"]),
 				"portage", x.category, x.pf)
-			existing_buildir = os.path.isdir(build_dir_path)
+			existing_builddir = os.path.isdir(build_dir_path)
 			settings["PORTAGE_BUILDDIR"] = build_dir_path
 			build_dir = EbuildBuildDir(scheduler=sched_iface,
 				settings=settings)
@@ -818,7 +817,7 @@ class Scheduler(PollScheduler):
 
 				# Clean up the existing build dir, in case pkg_pretend
 				# checks for available space (bug #390711).
-				if existing_buildir:
+				if existing_builddir:
 					if x.built:
 						tree = "bintree"
 						infloc = os.path.join(build_dir_path, "build-info")
@@ -907,13 +906,18 @@ class Scheduler(PollScheduler):
 					failures += 1
 				portage.elog.elog_process(x.cpv, settings)
 			finally:
-				if current_task is not None and current_task.isAlive():
-					current_task.cancel()
-					current_task.wait()
-				clean_phase = EbuildPhase(background=False,
-					phase='clean', scheduler=sched_iface, settings=settings)
-				clean_phase.start()
-				clean_phase.wait()
+
+				if current_task is not None:
+					if current_task.isAlive():
+						current_task.cancel()
+						current_task.wait()
+					if current_task.returncode == os.EX_OK:
+						clean_phase = EbuildPhase(background=False,
+							phase='clean', scheduler=sched_iface,
+							settings=settings)
+						clean_phase.start()
+						clean_phase.wait()
+
 				build_dir.unlock()
 
 		if failures:
@@ -1062,7 +1066,8 @@ class Scheduler(PollScheduler):
 		printer = portage.output.EOutput()
 		background = self._background
 		failure_log_shown = False
-		if background and len(self._failed_pkgs_all) == 1:
+		if background and len(self._failed_pkgs_all) == 1 and \
+			self.myopts.get('--quiet-fail', 'n') != 'y':
 			# If only one package failed then just show it's
 			# whole log for easy viewing.
 			failed_pkg = self._failed_pkgs_all[-1]
@@ -1141,9 +1146,9 @@ class Scheduler(PollScheduler):
 				printer.eerror(line)
 			printer.eerror("")
 			for failed_pkg in self._failed_pkgs_all:
-				# Use _unicode_decode() to force unicode format string so
+				# Use unicode_literals to force unicode format string so
 				# that Package.__unicode__() is called in python2.
-				msg = _unicode_decode(" %s") % (failed_pkg.pkg,)
+				msg = " %s" % (failed_pkg.pkg,)
 				log_path = self._locate_failure_log(failed_pkg)
 				if log_path is not None:
 					msg += ", Log file:"
@@ -1534,7 +1539,7 @@ class Scheduler(PollScheduler):
 		self._config_pool[settings['EROOT']].append(settings)
 
 	def _keep_scheduling(self):
-		return bool(not self._terminated_tasks and self._pkg_queue and \
+		return bool(not self._terminated.is_set() and self._pkg_queue and \
 			not (self._failed_pkgs and not self._build_opts.fetchonly))
 
 	def _is_work_scheduled(self):
@@ -1794,7 +1799,7 @@ class Scheduler(PollScheduler):
 			#              scope
 			e = exc
 			mydepgraph = e.depgraph
-			dropped_tasks = set()
+			dropped_tasks = {}
 
 		if e is not None:
 			def unsatisfied_resume_dep_msg():
@@ -1844,7 +1849,7 @@ class Scheduler(PollScheduler):
 		self._init_graph(mydepgraph.schedulerGraph())
 
 		msg_width = 75
-		for task in dropped_tasks:
+		for task, atoms in dropped_tasks.items():
 			if not (isinstance(task, Package) and task.operation == "merge"):
 				continue
 			pkg = task
@@ -1852,7 +1857,10 @@ class Scheduler(PollScheduler):
 				" %s" % (pkg.cpv,)
 			if pkg.root_config.settings["ROOT"] != "/":
 				msg += " for %s" % (pkg.root,)
-			msg += " dropped due to unsatisfied dependency."
+			if not atoms:
+				msg += " dropped because it is masked or unavailable"
+			else:
+				msg += " dropped because it requires %s" % ", ".join(atoms)
 			for line in textwrap.wrap(msg, msg_width):
 				eerror(line, phase="other", key=pkg.cpv)
 			settings = self.pkgsettings[pkg.root]
@@ -1897,11 +1905,21 @@ class Scheduler(PollScheduler):
 		root_config = pkg.root_config
 		world_set = root_config.sets["selected"]
 		world_locked = False
-		if hasattr(world_set, "lock"):
-			world_set.lock()
-			world_locked = True
+		atom = None
+
+		if pkg.operation != "uninstall":
+			# Do this before acquiring the lock, since it queries the
+			# portdbapi which can call the global event loop, triggering
+			# a concurrent call to this method or something else that
+			# needs an exclusive (non-reentrant) lock on the world file.
+			atom = create_world_atom(pkg, args_set, root_config)
 
 		try:
+
+			if hasattr(world_set, "lock"):
+				world_set.lock()
+				world_locked = True
+
 			if hasattr(world_set, "load"):
 				world_set.load() # maybe it's changed on disk
 
@@ -1913,8 +1931,7 @@ class Scheduler(PollScheduler):
 					for s in pkg.root_config.setconfig.active:
 						world_set.remove(SETPREFIX+s)
 			else:
-				atom = create_world_atom(pkg, args_set, root_config)
-				if atom:
+				if atom is not None:
 					if hasattr(world_set, "add"):
 						self._status_msg(('Recording %s in "world" ' + \
 							'favorites file...') % atom)

diff --git a/gobs/pym/actions.py b/gobs/pym/actions.py
index 3b50187..e29d8e0 100644
--- a/gobs/pym/actions.py
+++ b/gobs/pym/actions.py
@@ -1,7 +1,7 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
-from __future__ import print_function
+from __future__ import print_function, unicode_literals
 
 import errno
 import logging
@@ -22,8 +22,10 @@ from itertools import chain
 
 import portage
 portage.proxy.lazyimport.lazyimport(globals(),
+	'portage.dbapi._similar_name_search:similar_name_search',
 	'portage.debug',
 	'portage.news:count_unread_news,display_news_notifications',
+	'portage.util._get_vm_info:get_vm_info',
 	'_emerge.chk_updated_cfg_files:chk_updated_cfg_files',
 	'_emerge.help:help@emerge_help',
 	'_emerge.post_emerge:display_news_notification,post_emerge',
@@ -35,8 +37,7 @@ from portage import os
 from portage import shutil
 from portage import eapi_is_supported, _encodings, _unicode_decode
 from portage.cache.cache_errors import CacheError
-from portage.const import GLOBAL_CONFIG_PATH
-from portage.const import _DEPCLEAN_LIB_CHECK_DEFAULT
+from portage.const import GLOBAL_CONFIG_PATH, VCS_DIRS, _DEPCLEAN_LIB_CHECK_DEFAULT
 from portage.dbapi.dep_expand import dep_expand
 from portage.dbapi._expand_new_virt import expand_new_virt
 from portage.dep import Atom
@@ -54,6 +55,7 @@ from portage._sets.base import InternalPackageSet
 from portage.util import cmp_sort_key, writemsg, varexpand, \
 	writemsg_level, writemsg_stdout
 from portage.util.digraph import digraph
+from portage.util._async.run_main_scheduler import run_main_scheduler
 from portage.util._async.SchedulerInterface import SchedulerInterface
 from portage.util._eventloop.global_event_loop import global_event_loop
 from portage._global_updates import _global_updates
@@ -286,8 +288,14 @@ def action_build(settings, trees, mtimedb,
 					"dropped due to\n" + \
 					"!!! masking or unsatisfied dependencies:\n\n",
 					noiselevel=-1)
-				for task in dropped_tasks:
-					portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
+				for task, atoms in dropped_tasks.items():
+					if not atoms:
+						writemsg("  %s is masked or unavailable\n" %
+							(task,), noiselevel=-1)
+					else:
+						writemsg("  %s requires %s\n" %
+							(task, ", ".join(atoms)), noiselevel=-1)
+
 				portage.writemsg("\n", noiselevel=-1)
 			del dropped_tasks
 		else:
@@ -312,6 +320,7 @@ def action_build(settings, trees, mtimedb,
 		if not success:
 			return 1
 
+	mergecount = None
 	if "--pretend" not in myopts and \
 		("--ask" in myopts or "--tree" in myopts or \
 		"--verbose" in myopts) and \
@@ -343,6 +352,7 @@ def action_build(settings, trees, mtimedb,
 				if isinstance(x, Package) and x.operation == "merge":
 					mergecount += 1
 
+			prompt = None
 			if mergecount==0:
 				sets = trees[settings['EROOT']]['root_config'].sets
 				world_candidates = None
@@ -355,12 +365,11 @@ def action_build(settings, trees, mtimedb,
 					world_candidates = [x for x in favorites \
 						if not (x.startswith(SETPREFIX) and \
 						not sets[x[1:]].world_candidate)]
+
 				if "selective" in myparams and \
 					not oneshot and world_candidates:
-					print()
-					for x in world_candidates:
-						print(" %s %s" % (good("*"), x))
-					prompt="Would you like to add these packages to your world favorites?"
+					# Prompt later, inside saveNomergeFavorites.
+					prompt = None
 				elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
 					prompt="Nothing to merge; would you like to auto-clean packages?"
 				else:
@@ -373,13 +382,15 @@ def action_build(settings, trees, mtimedb,
 			else:
 				prompt="Would you like to merge these packages?"
 		print()
-		if "--ask" in myopts and userquery(prompt, enter_invalid) == "No":
+		if prompt is not None and "--ask" in myopts and \
+			userquery(prompt, enter_invalid) == "No":
 			print()
 			print("Quitting.")
 			print()
 			return 128 + signal.SIGINT
 		# Don't ask again (e.g. when auto-cleaning packages after merge)
-		myopts.pop("--ask", None)
+		if mergecount != 0:
+			myopts.pop("--ask", None)
 
 	if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
 		if ("--resume" in myopts):
@@ -449,25 +460,29 @@ def action_build(settings, trees, mtimedb,
 
 			mydepgraph.saveNomergeFavorites()
 
-		mergetask = Scheduler(settings, trees, mtimedb, myopts,
-			spinner, favorites=favorites,
-			graph_config=mydepgraph.schedulerGraph())
-
-		del mydepgraph
-		clear_caches(trees)
-
-		retval = mergetask.merge()
-
-		if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
-			if "yes" == settings.get("AUTOCLEAN"):
-				portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
-				unmerge(trees[settings['EROOT']]['root_config'],
-					myopts, "clean", [],
-					ldpath_mtimes, autoclean=1)
-			else:
-				portage.writemsg_stdout(colorize("WARN", "WARNING:")
-					+ " AUTOCLEAN is disabled.  This can cause serious"
-					+ " problems due to overlapping packages.\n")
+		if mergecount == 0:
+			retval = os.EX_OK
+		else:
+			mergetask = Scheduler(settings, trees, mtimedb, myopts,
+				spinner, favorites=favorites,
+				graph_config=mydepgraph.schedulerGraph())
+
+			del mydepgraph
+			clear_caches(trees)
+
+			retval = mergetask.merge()
+
+			if retval == os.EX_OK and \
+				not (buildpkgonly or fetchonly or pretend):
+				if "yes" == settings.get("AUTOCLEAN"):
+					portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
+					unmerge(trees[settings['EROOT']]['root_config'],
+						myopts, "clean", [],
+						ldpath_mtimes, autoclean=1)
+				else:
+					portage.writemsg_stdout(colorize("WARN", "WARNING:")
+						+ " AUTOCLEAN is disabled.  This can cause serious"
+						+ " problems due to overlapping packages.\n")
 
 		return retval
 
@@ -614,11 +629,17 @@ def action_depclean(settings, trees, ldpath_mtimes,
 	if not cleanlist and "--quiet" in myopts:
 		return rval
 
+	set_atoms = {}
+	for k in ("system", "selected"):
+		try:
+			set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+		except portage.exception.PackageSetNotFound:
+			# A nested set could not be resolved, so ignore nested sets.
+			set_atoms[k] = root_config.sets[k].getAtoms()
+
 	print("Packages installed:   " + str(len(vardb.cpv_all())))
-	print("Packages in world:    " + \
-		str(len(root_config.sets["selected"].getAtoms())))
-	print("Packages in system:   " + \
-		str(len(root_config.sets["system"].getAtoms())))
+	print("Packages in world:    %d" % len(set_atoms["selected"]))
+	print("Packages in system:   %d" % len(set_atoms["system"]))
 	print("Required packages:    "+str(req_pkg_count))
 	if "--pretend" in myopts:
 		print("Number to remove:     "+str(len(cleanlist)))
@@ -651,13 +672,21 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 	required_sets[protected_set_name] = protected_set
 	system_set = psets["system"]
 
-	if not system_set or not selected_set:
+	set_atoms = {}
+	for k in ("system", "selected"):
+		try:
+			set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+		except portage.exception.PackageSetNotFound:
+			# A nested set could not be resolved, so ignore nested sets.
+			set_atoms[k] = root_config.sets[k].getAtoms()
+
+	if not set_atoms["system"] or not set_atoms["selected"]:
 
-		if not system_set:
+		if not set_atoms["system"]:
 			writemsg_level("!!! You have no system list.\n",
 				level=logging.ERROR, noiselevel=-1)
 
-		if not selected_set:
+		if not set_atoms["selected"]:
 			writemsg_level("!!! You have no world file.\n",
 					level=logging.WARNING, noiselevel=-1)
 
@@ -701,7 +730,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 						continue
 				except portage.exception.InvalidDependString as e:
 					show_invalid_depstring_notice(pkg,
-						pkg.metadata["PROVIDE"], str(e))
+						pkg._metadata["PROVIDE"], _unicode(e))
 					del e
 					protected_set.add("=" + pkg.cpv)
 					continue
@@ -755,7 +784,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 					continue
 			except portage.exception.InvalidDependString as e:
 				show_invalid_depstring_notice(pkg,
-					pkg.metadata["PROVIDE"], str(e))
+					pkg._metadata["PROVIDE"], _unicode(e))
 				del e
 				protected_set.add("=" + pkg.cpv)
 				continue
@@ -773,7 +802,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 					required_sets['__excluded__'].add("=" + pkg.cpv)
 			except portage.exception.InvalidDependString as e:
 				show_invalid_depstring_notice(pkg,
-					pkg.metadata["PROVIDE"], str(e))
+					pkg._metadata["PROVIDE"], _unicode(e))
 				del e
 				required_sets['__excluded__'].add("=" + pkg.cpv)
 
@@ -809,7 +838,12 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 			msg.append("the following required packages not being installed:")
 			msg.append("")
 			for atom, parent in unresolvable:
-				msg.append("  %s pulled in by:" % (atom,))
+				if atom != atom.unevaluated_atom and \
+					vardb.match(_unicode(atom)):
+					msg.append("  %s (%s) pulled in by:" %
+						(atom.unevaluated_atom, atom))
+				else:
+					msg.append("  %s pulled in by:" % (atom,))
 				msg.append("    %s" % (parent,))
 				msg.append("")
 			msg.extend(textwrap.wrap(
@@ -852,15 +886,27 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 			required_pkgs_total += 1
 
 	def show_parents(child_node):
-		parent_nodes = graph.parent_nodes(child_node)
-		if not parent_nodes:
+		parent_atoms = \
+			resolver._dynamic_config._parent_atoms.get(child_node, [])
+
+		# Never display the special internal protected_set.
+		parent_atoms = [parent_atom for parent_atom in parent_atoms
+			if not (isinstance(parent_atom[0], SetArg) and
+			parent_atom[0].name == protected_set_name)]
+
+		if not parent_atoms:
 			# With --prune, the highest version can be pulled in without any
 			# real parent since all installed packages are pulled in.  In that
 			# case there's nothing to show here.
 			return
+		parent_atom_dict = {}
+		for parent, atom in parent_atoms:
+			parent_atom_dict.setdefault(parent, []).append(atom)
+
 		parent_strs = []
-		for node in parent_nodes:
-			parent_strs.append(str(getattr(node, "cpv", node)))
+		for parent, atoms in parent_atom_dict.items():
+			parent_strs.append("%s requires %s" %
+				(getattr(parent, "cpv", parent), ", ".join(atoms)))
 		parent_strs.sort()
 		msg = []
 		msg.append("  %s pulled in by:\n" % (child_node.cpv,))
@@ -885,12 +931,6 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 			graph.debug_print()
 			writemsg("\n", noiselevel=-1)
 
-		# Never display the special internal protected_set.
-		for node in graph:
-			if isinstance(node, SetArg) and node.name == protected_set_name:
-				graph.remove(node)
-				break
-
 		pkgs_to_remove = []
 
 		if action == "depclean":
@@ -1163,17 +1203,17 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 		for node in clean_set:
 			graph.add(node, None)
 			for dep_type in Package._dep_keys:
-				depstr = node.metadata[dep_type]
+				depstr = node._metadata[dep_type]
 				if not depstr:
 					continue
 				priority = priority_map[dep_type]
 
 				if debug:
-					writemsg_level(_unicode_decode("\nParent:    %s\n") \
+					writemsg_level("\nParent:    %s\n"
 						% (node,), noiselevel=-1, level=logging.DEBUG)
-					writemsg_level(_unicode_decode(  "Depstring: %s\n") \
+					writemsg_level(  "Depstring: %s\n"
 						% (depstr,), noiselevel=-1, level=logging.DEBUG)
-					writemsg_level(_unicode_decode(  "Priority:  %s\n") \
+					writemsg_level(  "Priority:  %s\n"
 						% (priority,), noiselevel=-1, level=logging.DEBUG)
 
 				try:
@@ -1187,7 +1227,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
 
 				if debug:
 					writemsg_level("Candidates: [%s]\n" % \
-						', '.join(_unicode_decode("'%s'") % (x,) for x in atoms),
+						', '.join("'%s'" % (x,) for x in atoms),
 						noiselevel=-1, level=logging.DEBUG)
 
 				for atom in atoms:
@@ -1353,6 +1393,86 @@ class _info_pkgs_ver(object):
 
 def action_info(settings, trees, myopts, myfiles):
 
+	# See if we can find any packages installed matching the strings
+	# passed on the command line
+	mypkgs = []
+	eroot = settings['EROOT']
+	vardb = trees[eroot]["vartree"].dbapi
+	portdb = trees[eroot]['porttree'].dbapi
+	bindb = trees[eroot]["bintree"].dbapi
+	for x in myfiles:
+		any_match = False
+		cp_exists = bool(vardb.match(x.cp))
+		installed_match = vardb.match(x)
+		for installed in installed_match:
+			mypkgs.append((installed, "installed"))
+			any_match = True
+
+		if any_match:
+			continue
+
+		for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
+			if pkg_type == "binary" and "--usepkg" not in myopts:
+				continue
+
+			# Use match instead of cp_list, to account for old-style virtuals.
+			if not cp_exists and db.match(x.cp):
+				cp_exists = True
+			# Search for masked packages too.
+			if not cp_exists and hasattr(db, "xmatch") and \
+				db.xmatch("match-all", x.cp):
+				cp_exists = True
+
+			matches = db.match(x)
+			matches.reverse()
+			for match in matches:
+				if pkg_type == "binary":
+					if db.bintree.isremote(match):
+						continue
+				auxkeys = ["EAPI", "DEFINED_PHASES"]
+				metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
+				if metadata["EAPI"] not in ("0", "1", "2", "3") and \
+					"info" in metadata["DEFINED_PHASES"].split():
+					mypkgs.append((match, pkg_type))
+					break
+
+		if not cp_exists:
+			xinfo = '"%s"' % x.unevaluated_atom
+			# Discard null/ from failed cpv_expand category expansion.
+			xinfo = xinfo.replace("null/", "")
+			if settings["ROOT"] != "/":
+				xinfo = "%s for %s" % (xinfo, eroot)
+			writemsg("\nemerge: there are no ebuilds to satisfy %s.\n" %
+				colorize("INFORM", xinfo), noiselevel=-1)
+
+			if myopts.get("--misspell-suggestions", "y") != "n":
+
+				writemsg("\nemerge: searching for similar names..."
+					, noiselevel=-1)
+
+				dbs = [vardb]
+				#if "--usepkgonly" not in myopts:
+				dbs.append(portdb)
+				if "--usepkg" in myopts:
+					dbs.append(bindb)
+
+				matches = similar_name_search(dbs, x)
+
+				if len(matches) == 1:
+					writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
+						, noiselevel=-1)
+				elif len(matches) > 1:
+					writemsg(
+						"\nemerge: Maybe you meant any of these: %s?\n" % \
+						(", ".join(matches),), noiselevel=-1)
+				else:
+					# Generally, this would only happen if
+					# all dbapis are empty.
+					writemsg(" nothing similar found.\n"
+						, noiselevel=-1)
+
+			return 1
+
 	output_buffer = []
 	append = output_buffer.append
 	root_config = trees[settings['EROOT']]['root_config']
@@ -1371,6 +1491,18 @@ def action_info(settings, trees, myopts, myfiles):
 	append(header_width * "=")
 	append("System uname: %s" % (platform.platform(aliased=1),))
 
+	vm_info = get_vm_info()
+	if "ram.total" in vm_info:
+		line = "%-9s %10d total" % ("KiB Mem:", vm_info["ram.total"] / 1024)
+		if "ram.free" in vm_info:
+			line += ",%10d free" % (vm_info["ram.free"] / 1024,)
+		append(line)
+	if "swap.total" in vm_info:
+		line = "%-9s %10d total" % ("KiB Swap:", vm_info["swap.total"] / 1024)
+		if "swap.free" in vm_info:
+			line += ",%10d free" % (vm_info["swap.free"] / 1024,)
+		append(line)
+
 	lastSync = portage.grabfile(os.path.join(
 		settings["PORTDIR"], "metadata", "timestamp.chk"))
 	if lastSync:
@@ -1559,40 +1691,6 @@ def action_info(settings, trees, myopts, myfiles):
 	writemsg_stdout("\n".join(output_buffer),
 		noiselevel=-1)
 
-	# See if we can find any packages installed matching the strings
-	# passed on the command line
-	mypkgs = []
-	eroot = settings['EROOT']
-	vardb = trees[eroot]["vartree"].dbapi
-	portdb = trees[eroot]['porttree'].dbapi
-	bindb = trees[eroot]["bintree"].dbapi
-	for x in myfiles:
-		match_found = False
-		installed_match = vardb.match(x)
-		for installed in installed_match:
-			mypkgs.append((installed, "installed"))
-			match_found = True
-
-		if match_found:
-			continue
-
-		for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
-			if pkg_type == "binary" and "--usepkg" not in myopts:
-				continue
-
-			matches = db.match(x)
-			matches.reverse()
-			for match in matches:
-				if pkg_type == "binary":
-					if db.bintree.isremote(match):
-						continue
-				auxkeys = ["EAPI", "DEFINED_PHASES"]
-				metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
-				if metadata["EAPI"] not in ("0", "1", "2", "3") and \
-					"info" in metadata["DEFINED_PHASES"].split():
-					mypkgs.append((match, pkg_type))
-					break
-
 	# If some packages were found...
 	if mypkgs:
 		# Get our global settings (we only print stuff if it varies from
@@ -1886,35 +1984,10 @@ def action_regen(settings, portdb, max_jobs, max_load):
 
 	regen = MetadataRegen(portdb, max_jobs=max_jobs,
 		max_load=max_load, main=True)
-	received_signal = []
-
-	def emergeexitsig(signum, frame):
-		signal.signal(signal.SIGINT, signal.SIG_IGN)
-		signal.signal(signal.SIGTERM, signal.SIG_IGN)
-		portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
-			{"signal":signum})
-		regen.terminate()
-		received_signal.append(128 + signum)
-
-	earlier_sigint_handler = signal.signal(signal.SIGINT, emergeexitsig)
-	earlier_sigterm_handler = signal.signal(signal.SIGTERM, emergeexitsig)
 
-	try:
-		regen.start()
-		regen.wait()
-	finally:
-		# Restore previous handlers
-		if earlier_sigint_handler is not None:
-			signal.signal(signal.SIGINT, earlier_sigint_handler)
-		else:
-			signal.signal(signal.SIGINT, signal.SIG_DFL)
-		if earlier_sigterm_handler is not None:
-			signal.signal(signal.SIGTERM, earlier_sigterm_handler)
-		else:
-			signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
-	if received_signal:
-		sys.exit(received_signal[0])
+	signum = run_main_scheduler(regen)
+	if signum is not None:
+		sys.exit(128 + signum)
 
 	portage.writemsg_stdout("done!\n")
 	return regen.returncode
@@ -2005,7 +2078,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 			noiselevel=-1, level=logging.ERROR)
 		return 1
 
-	vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
+	vcs_dirs = frozenset(VCS_DIRS)
 	vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
 
 	os.umask(0o022)
@@ -2031,7 +2104,8 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 		emergelog(xterm_titles, msg )
 		writemsg_level(msg + "\n")
 		exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
-			(portage._shell_quote(myportdir),), **spawn_kwargs)
+			(portage._shell_quote(myportdir),),
+			**portage._native_kwargs(spawn_kwargs))
 		if exitcode != os.EX_OK:
 			msg = "!!! git pull error in %s." % myportdir
 			emergelog(xterm_titles, msg)
@@ -2047,7 +2121,8 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 				"control (contains %s).\n!!! Aborting rsync sync.\n") % \
 				(myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
 			return 1
-		if not os.path.exists("/usr/bin/rsync"):
+		rsync_binary = portage.process.find_binary("rsync")
+		if rsync_binary is None:
 			print("!!! /usr/bin/rsync does not exist, so rsync support is disabled.")
 			print("!!! Type \"emerge net-misc/rsync\" to enable rsync support.")
 			sys.exit(1)
@@ -2273,7 +2348,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 			if mytimestamp != 0 and "--quiet" not in myopts:
 				print(">>> Checking server timestamp ...")
 
-			rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
+			rsynccommand = [rsync_binary] + rsync_opts + extra_rsync_opts
 
 			if "--debug" in myopts:
 				print(rsynccommand)
@@ -2319,7 +2394,8 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 								rsync_initial_timeout)
 
 						mypids.extend(portage.process.spawn(
-							mycommand, returnpid=True, **spawn_kwargs))
+							mycommand, returnpid=True,
+							**portage._native_kwargs(spawn_kwargs)))
 						exitcode = os.waitpid(mypids[0], 0)[1]
 						if usersync_uid is not None:
 							portage.util.apply_permissions(tmpservertimestampfile,
@@ -2385,7 +2461,8 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 				elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
 					# actual sync
 					mycommand = rsynccommand + [dosyncuri+"/", myportdir]
-					exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
+					exitcode = portage.process.spawn(mycommand,
+						**portage._native_kwargs(spawn_kwargs))
 					if exitcode in [0,1,3,4,11,14,20,21]:
 						break
 			elif exitcode in [1,3,4,11,14,20,21]:
@@ -2463,7 +2540,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 			if portage.process.spawn_bash(
 					"cd %s; exec cvs -z0 -d %s co -P gentoo-x86" % \
 					(portage._shell_quote(cvsdir), portage._shell_quote(cvsroot)),
-					**spawn_kwargs) != os.EX_OK:
+					**portage._native_kwargs(spawn_kwargs)) != os.EX_OK:
 				print("!!! cvs checkout error; exiting.")
 				sys.exit(1)
 			os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
@@ -2472,7 +2549,8 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 			print(">>> Starting cvs update with "+syncuri+"...")
 			retval = portage.process.spawn_bash(
 				"cd %s; exec cvs -z0 -q update -dP" % \
-				(portage._shell_quote(myportdir),), **spawn_kwargs)
+				(portage._shell_quote(myportdir),),
+				**portage._native_kwargs(spawn_kwargs))
 			if retval != os.EX_OK:
 				writemsg_level("!!! cvs update error; exiting.\n",
 					noiselevel=-1, level=logging.ERROR)
@@ -2544,7 +2622,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
 		print(warn(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
 		print(warn(" * ")+"that you update portage now, before any other packages are updated.")
 		print()
-		print(warn(" * ")+"To update portage, run 'emerge portage' now.")
+		print(warn(" * ")+"To update portage, run 'emerge --oneshot portage' now.")
 		print()
 
 	display_news_notification(root_config, myopts)
@@ -3054,7 +3132,7 @@ def load_emerge_config(trees=None):
 		v = os.environ.get(envvar, None)
 		if v and v.strip():
 			kwargs[k] = v
-	trees = portage.create_trees(trees=trees, **kwargs)
+	trees = portage.create_trees(trees=trees, **portage._native_kwargs(kwargs))
 
 	for root_trees in trees.values():
 		settings = root_trees["vartree"].settings
@@ -3258,7 +3336,7 @@ def expand_set_arguments(myfiles, myaction, root_config):
 	# world file, the depgraph performs set expansion later. It will get
 	# confused about where the atoms came from if it's not allowed to
 	# expand them itself.
-	do_not_expand = (None, )
+	do_not_expand = myaction is None
 	newargs = []
 	for a in myfiles:
 		if a in ("system", "world"):
@@ -3324,6 +3402,14 @@ def expand_set_arguments(myfiles, myaction, root_config):
 					for line in textwrap.wrap(msg, 50):
 						out.ewarn(line)
 				setconfig.active.append(s)
+
+				if do_not_expand:
+					# Loading sets can be slow, so skip it here, in order
+					# to allow the depgraph to indicate progress with the
+					# spinner while sets are loading (bug #461412).
+					newargs.append(a)
+					continue
+
 				try:
 					set_atoms = setconfig.getSetAtoms(s)
 				except portage.exception.PackageSetNotFound as e:
@@ -3339,17 +3425,18 @@ def expand_set_arguments(myfiles, myaction, root_config):
 					return (None, 1)
 				if myaction in unmerge_actions and \
 						not sets[s].supportsOperation("unmerge"):
-					sys.stderr.write("emerge: the given set '%s' does " % s + \
-						"not support unmerge operations\n")
+					writemsg_level("emerge: the given set '%s' does " % s + \
+						"not support unmerge operations\n",
+						level=logging.ERROR, noiselevel=-1)
 					retval = 1
 				elif not set_atoms:
-					print("emerge: '%s' is an empty set" % s)
-				elif myaction not in do_not_expand:
-					newargs.extend(set_atoms)
+					writemsg_level("emerge: '%s' is an empty set\n" % s,
+						level=logging.INFO, noiselevel=-1)
 				else:
-					newargs.append(SETPREFIX+s)
-				for e in sets[s].errors:
-					print(e)
+					newargs.extend(set_atoms)
+				for error_msg in sets[s].errors:
+					writemsg_level("%s\n" % (error_msg,),
+						level=logging.ERROR, noiselevel=-1)
 		else:
 			newargs.append(a)
 	return (newargs, retval)
@@ -3514,8 +3601,7 @@ def run_action(settings, trees, mtimedb, myaction, myopts, myfiles, build_dict,
 	del mytrees, mydb
 
 	for x in myfiles:
-		ext = os.path.splitext(x)[1]
-		if (ext == ".ebuild" or ext == ".tbz2") and \
+		if x.endswith((".ebuild", ".tbz2")) and \
 			os.path.exists(os.path.abspath(x)):
 			print(colorize("BAD", "\n*** emerging by path is broken "
 				"and may not always work!!!\n"))
@@ -3678,10 +3764,15 @@ def run_action(settings, trees, mtimedb, myaction, myopts, myfiles, build_dict,
 			portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
 
 	if not "--pretend" in myopts:
-		emergelog(xterm_titles, "Started emerge on: "+\
-			_unicode_decode(
-				time.strftime("%b %d, %Y %H:%M:%S", time.localtime()),
-				encoding=_encodings['content'], errors='replace'))
+		time_fmt = "%b %d, %Y %H:%M:%S"
+		if sys.hexversion < 0x3000000:
+			time_fmt = portage._unicode_encode(time_fmt)
+		time_str = time.strftime(time_fmt, time.localtime(time.time()))
+		# Avoid potential UnicodeDecodeError in Python 2, since strftime
+		# returns bytes in Python 2, and %b may contain non-ascii chars.
+		time_str = _unicode_decode(time_str,
+			encoding=_encodings['content'], errors='replace')
+		emergelog(xterm_titles, "Started emerge on: %s" % time_str)
 		myelogstr=""
 		if myopts:
 			opt_list = []

diff --git a/gobs/pym/build_log.py b/gobs/pym/build_log.py
index 36f1c7a..8558cf3 100644
--- a/gobs/pym/build_log.py
+++ b/gobs/pym/build_log.py
@@ -3,8 +3,6 @@ import re
 import os
 import platform
 import hashlib
-from multiprocessing import Process
-
 
 from portage.versions import catpkgsplit, cpv_getversion
 import portage
@@ -21,8 +19,8 @@ from gobs.flags import gobs_use_flags
 from gobs.ConnectionManager import connectionManager
 from gobs.mysql_querys import add_gobs_logs, get_config_id, get_ebuild_id_db_checksum, add_new_buildlog, \
 	update_manifest_sql, get_package_id, get_build_job_id, get_use_id, get_fail_querue_dict, \
-	add_fail_querue_dict, update_fail_times, get_config
-	
+	add_fail_querue_dict, update_fail_times, get_config, get_hilight_info
+
 def get_build_dict_db(conn, config_id, settings, pkg):
 	myportdb = portage.portdbapi(mysettings=settings)
 	cpvr_list = catpkgsplit(pkg.cpv, silent=1)
@@ -83,88 +81,99 @@ def get_build_dict_db(conn, config_id, settings, pkg):
 		build_dict['build_job_id'] = build_job_id
 	return build_dict
 
-def search_info(textline, error_log_list):
-	if re.search(" * Package:", textline):
-		error_log_list.append(textline + '\n')
-	if re.search(" * Repository:", textline):
-		error_log_list.append(textline + '\n')
-	if re.search(" * Maintainer:", textline):
-		error_log_list.append(textline + '\n')
-	if re.search(" * USE:", textline):
-		error_log_list.append(textline + '\n')
-	if re.search(" * FEATURES:", textline):
-		error_log_list.append(textline + '\n')
-	return error_log_list
-
-def search_error(logfile_text, textline, error_log_list, sum_build_log_list, i):
-	if re.search("Error 1", textline):
-		x = i - 20
-		endline = True
-		error_log_list.append(".....\n")
-		while x != i + 3 and endline:
-			try:
-				error_log_list.append(logfile_text[x] + '\n')
-			except:
-				endline = False
-			else:
-				x = x +1
-	if re.search(" * ERROR:", textline):
-		x = i
-		endline= True
-		field = textline.split(" ")
-		sum_build_log_list.append("True")
-		error_log_list.append(".....\n")
-		while x != i + 10 and endline:
-			try:
-				error_log_list.append(logfile_text[x] + '\n')
-			except:
-				endline = False
-			else:
-				x = x +1
-	if re.search("configure: error:", textline):
-		x = i - 4
-		endline = True
-		error_log_list.append(".....\n")
-		while x != i + 3 and endline:
-			try:
-				error_log_list.append(logfile_text[x] + '\n')
-			except:
-				endline = False
-			else:
-				x = x +1
-	return error_log_list, sum_build_log_list
-
-def search_qa(logfile_text, textline, qa_error_list, error_log_list,i):
-	if re.search(" * QA Notice:", textline):
-		x = i
-		qa_error_list.append(logfile_text[x] + '\n')
-		endline= True
-		error_log_list.append(".....\n")
-		while x != i + 3 and endline:
-			try:
-				error_log_list.append(logfile_text[x] + '\n')
-			except:
-				endline = False
+def search_buildlog(conn, logfile_text):
+	log_search_list = get_hilight_info(conn)
+	index = 0
+	hilight_list = []
+	for textline in logfile_text:
+		index = index + 1
+		for search_pattern in log_search_list:
+			if re.search(search_pattern['hilight_search'], textline):
+				hilight_tmp = {}
+				hilight_tmp['startline'] = index - search_pattern['hilight_start']
+				hilight_tmp['hilight'] = search_pattern['hilight_css']
+				if search_pattern['hilight_search_end'] is None:
+					hilight_tmp['endline'] = index + search_pattern['hilight_end']
+				else:
+					hilight_tmp['endline'] = None
+					i = index + 1
+					while hilight_tmp['endline'] == None:
+						if re.search(search_pattern['hilight_search_end'], logfile_text[i -1]):
+							if re.search(search_pattern['hilight_search_end'], logfile_text[i]):
+								i = i + 1
+							else:
+								hilight_tmp['endline'] = i
+						else:
+							i = i +1
+				hilight_list.append(hilight_tmp)
+	new_hilight_dict = {}
+	for hilight_tmp in hilight_list:
+		add_new_hilight = True
+		add_new_hilight_middel = None
+		for k, v in sorted(new_hilight_dict.iteritems()):
+			if hilight_tmp['startline'] == hilight_tmp['endline']:
+				if v['endline'] == hilight_tmp['startline'] or v['startline'] == hilight_tmp['startline']:
+					add_new_hilight = False
+				if hilight_tmp['startline'] > v['startline'] and hilight_tmp['startline'] < v['endline']:
+					add_new_hilight = False
+					add_new_hilight_middel = k
 			else:
-				x = x +1
-	return qa_error_list, error_log_list
+				if v['endline'] == hilight_tmp['startline'] or v['startline'] == hilight_tmp['startline']:
+					add_new_hilight = False
+				if hilight_tmp['startline'] > v['startline'] and hilight_tmp['startline'] < v['endline']:
+					add_new_hilight = False
+		if add_new_hilight is True:
+			adict = {}
+			adict['startline'] = hilight_tmp['startline']
+			adict['hilight'] = hilight_tmp['hilight']
+			adict['endline'] = hilight_tmp['endline']
+			new_hilight_dict[hilight_tmp['startline']] = adict
+		if not add_new_hilight_middel is None:
+			adict1 = {}
+			adict2 = {}
+			adict3 = {}
+			adict1['startline'] = new_hilight_dict[add_new_hilight_middel]['startline']
+			adict1['endline'] = hilight_tmp['startline'] -1
+			adict1['hilight'] = new_hilight_dict[add_new_hilight_middel]['hilight']
+			adict2['startline'] = hilight_tmp['startline']
+			adict2['hilight'] = hilight_tmp['hilight']
+			adict2['endline'] = hilight_tmp['endline']
+			adict3['startline'] = hilight_tmp['endline'] + 1
+			adict3['hilight'] = new_hilight_dict[add_new_hilight_middel]['hilight']
+			adict3['endline'] = new_hilight_dict[add_new_hilight_middel]['endline']	
+			del new_hilight_dict[add_new_hilight_middel]
+			new_hilight_dict[adict1['startline']] = adict1
+			new_hilight_dict[adict2['startline']] = adict2
+			new_hilight_dict[adict3['startline']] = adict3
+	return new_hilight_dict
 
-def get_buildlog_info(settings, pkg, build_dict):
+def get_buildlog_info(conn, settings, pkg, build_dict):
 	myportdb = portage.portdbapi(mysettings=settings)
 	init_repoman = gobs_repoman(settings, myportdb)
 	logfile_text = get_log_text_list(settings.get("PORTAGE_LOG_FILE"))
-	# FIXME to support more errors and stuff
-	i = 0
+	hilight_dict = search_buildlog(conn, logfile_text)
 	build_log_dict = {}
 	error_log_list = []
 	qa_error_list = []
 	repoman_error_list = []
 	sum_build_log_list = []
-	for textline in logfile_text:
-		error_log_list = search_info(textline, error_log_list)
-		error_log_list, sum_build_log_list = search_error(logfile_text, textline, error_log_list, sum_build_log_list, i)
-		qa_error_list, error_log_list = search_qa(logfile_text, textline, qa_error_list, error_log_list, i)
-		i = i +1
+	
+	for k, v in sorted(hilight_dict.iteritems()):
+		if v['startline'] == v['endline']:
+			error_log_list.append(logfile_text[k -1])
+			if v['hilight'] == "qa":
+				qa_error_list.append(logfile_text[k -1])
+		else:
+			i = k
+			while i != (v['endline'] + 1):
+				error_log_list.append(logfile_text[i -1])
+				if v['hilight'] == "qa":
+					qa_error_list.append(logfile_text[i -1])
+				i = i +1
+			error_log_list.append(logfile_text[i -1])
+			if v['hilight'] == "qa":
+				qa_error_list.append(logfile_text[i -1])
+
 	# Run repoman check_repoman()
 	repoman_error_list = init_repoman.check_repoman(build_dict['cpv'], pkg.repo)
 	if repoman_error_list != []:
@@ -175,6 +184,7 @@ def get_buildlog_info(settings, pkg, build_dict):
 	build_log_dict['qa_error_list'] = qa_error_list
 	build_log_dict['error_log_list'] = error_log_list
 	build_log_dict['summary_error_list'] = sum_build_log_list
+	build_log_dict['hilight_dict'] = hilight_dict
 	return build_log_dict
 
 def write_msg_file(msg, log_path):
@@ -209,7 +219,7 @@ def write_msg_file(msg, log_path):
 				if f_real is not f:
 					f_real.close()
 
-def add_buildlog_process(settings, pkg):
+def add_buildlog_main(settings, pkg):
 	CM = connectionManager()
 	conn = CM.newConnection()
 	if not conn.is_connected() is True:
@@ -227,7 +237,7 @@ def add_buildlog_process(settings, pkg):
 		conn.close
 		return
 	build_log_dict = {}
-	build_log_dict = get_buildlog_info(settings, pkg, build_dict)
+	build_log_dict = get_buildlog_info(conn, settings, pkg, build_dict)
 	error_log_list = build_log_dict['error_log_list']
 	build_error = ""
 	log_hash = hashlib.sha256()
@@ -259,12 +269,6 @@ def add_buildlog_process(settings, pkg):
 		print(">>> Logging %s:%s" % (pkg.cpv, pkg.repo,))
 	conn.close
 
-def add_buildlog_main(settings, pkg):
-	#Run it in a process so we don't kill portage
-	p = Process(target=add_buildlog_process, args=(settings, pkg,))
-	p.start()
-	p.join()
-
 def log_fail_queru(conn, build_dict, settings):
 	config_id = build_dict['config_id']
 	print('build_dict', build_dict)

diff --git a/gobs/pym/main.py b/gobs/pym/main.py
index 4bc45ee..921267a 100644
--- a/gobs/pym/main.py
+++ b/gobs/pym/main.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2013 Gentoo Foundation
 # Distributed under the terms of the GNU General Public License v2
 
 from __future__ import print_function
@@ -44,7 +44,6 @@ options=[
 "--tree",
 "--unordered-display",
 "--update",
-"--verbose",
 "--verbose-main-repo-display",
 ]
 
@@ -65,7 +64,7 @@ shortmapping={
 "s":"--search",    "S":"--searchdesc",
 "t":"--tree",
 "u":"--update",
-"v":"--verbose",   "V":"--version"
+"V":"--version"
 }
 
 COWSAY_MOO = """
@@ -139,6 +138,7 @@ def insert_optional_args(args):
 		'--package-moves'        : y_or_n,
 		'--quiet'                : y_or_n,
 		'--quiet-build'          : y_or_n,
+		'--quiet-fail'           : y_or_n,
 		'--rebuild-if-new-slot': y_or_n,
 		'--rebuild-if-new-rev'   : y_or_n,
 		'--rebuild-if-new-ver'   : y_or_n,
@@ -150,6 +150,7 @@ def insert_optional_args(args):
 		"--use-ebuild-visibility": y_or_n,
 		'--usepkg'               : y_or_n,
 		'--usepkgonly'           : y_or_n,
+		'--verbose'              : y_or_n,
 	}
 
 	short_arg_opts = {
@@ -167,6 +168,8 @@ def insert_optional_args(args):
 		'k' : y_or_n,
 		'K' : y_or_n,
 		'q' : y_or_n,
+		'v' : y_or_n,
+		'w' : y_or_n,
 	}
 
 	arg_stack = args[:]
@@ -541,6 +544,12 @@ def parse_opts(tmpcmdline, silent=False):
 			"choices"  : true_y_or_n,
 		},
 
+		"--quiet-fail": {
+			"help"     : "suppresses display of the build log on stdout",
+			"type"     : "choice",
+			"choices"  : true_y_or_n,
+		},
+
 		"--rebuild-if-new-slot": {
 			"help"     : ("Automatically rebuild or reinstall packages when slot/sub-slot := "
 				"operator dependencies can be satisfied by a newer slot, so that "
@@ -600,6 +609,7 @@ def parse_opts(tmpcmdline, silent=False):
 		},
 
 		"--select": {
+			"shortopt" : "-w",
 			"help"    : "add specified packages to the world set " + \
 			            "(inverse of --oneshot)",
 			"type"    : "choice",
@@ -638,6 +648,13 @@ def parse_opts(tmpcmdline, silent=False):
 			"type"     : "choice",
 			"choices"  : true_y_or_n
 		},
+
+		"--verbose": {
+			"shortopt" : "-v",
+			"help"     : "verbose output",
+			"type"     : "choice",
+			"choices"  : true_y_or_n
+		},
 	}
 
 	from optparse import OptionParser
@@ -782,6 +799,9 @@ def parse_opts(tmpcmdline, silent=False):
 	if myoptions.quiet_build in true_y:
 		myoptions.quiet_build = 'y'
 
+	if myoptions.quiet_fail in true_y:
+		myoptions.quiet_fail = 'y'
+
 	if myoptions.rebuild_if_new_slot in true_y:
 		myoptions.rebuild_if_new_slot = 'y'
 
@@ -917,6 +937,11 @@ def parse_opts(tmpcmdline, silent=False):
 	else:
 		myoptions.usepkgonly = None
 
+	if myoptions.verbose in true_y:
+		myoptions.verbose = True
+	else:
+		myoptions.verbose = None
+
 	for myopt in options:
 		v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
 		if v:
@@ -979,8 +1004,6 @@ def emerge_main(args=None, build_dict=None):
 	if build_dict is None:
 		build_dict = {}
 
-	portage._disable_legacy_globals()
-	portage._internal_warnings = True
 	# Disable color until we're sure that it should be enabled (after
 	# EMERGE_DEFAULT_OPTS has been parsed).
 	portage.output.havecolor = 0

diff --git a/gobs/pym/mysql_querys.py b/gobs/pym/mysql_querys.py
index 8b8bacd..cd94a76 100644
--- a/gobs/pym/mysql_querys.py
+++ b/gobs/pym/mysql_querys.py
@@ -86,7 +86,7 @@ def update_make_conf(connection, configsDict):
 
 def get_default_config(connection):
 	cursor = connection.cursor()
-	sqlQ = "SELECT host, config FROM configs WHERE default_config = 'True'"
+	sqlQ = "SELECT hostname, config FROM configs WHERE default_config = 'True'"
 	cursor.execute(sqlQ)
 	hostname, config = cursor.fetchone()
 	cursor.close()
@@ -523,6 +523,23 @@ def get_build_job_id(connection, build_dict):
 			return build_job_id[0]
 	cursor.close()
 
+def get_hilight_info(connection):
+	cursor = connection.cursor()
+	sqlQ = 'SELECT hilight_search, hilight_search_end, hilight_css, hilight_start, hilight_end FROM hilight'
+	hilight = []
+	cursor.execute(sqlQ)
+	entries = cursor.fetchall()
+	cursor.close()
+	for i in entries:
+		aadict = {}
+		aadict['hilight_search'] = i[0]
+		aadict['hilight_search_end'] = i[1]
+		aadict['hilight_css'] = i[2]
+		aadict['hilight_start'] = i[3]
+		aadict['hilight_end'] = i[4]
+		hilight.append(aadict)
+	return hilight
+
 def add_new_buildlog(connection, build_dict, build_log_dict):
 	cursor = connection.cursor()
 	sqlQ1 = 'SELECT build_log_id FROM build_logs WHERE ebuild_id = %s'


             reply	other threads:[~2013-03-22 19:06 UTC|newest]

Thread overview: 174+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2013-03-22 19:05 Magnus Granberg [this message]
  -- strict thread matches above, loose matches on Subject: below --
2013-04-25  0:34 [gentoo-commits] dev/zorry:master commit in: gobs/pym/ Magnus Granberg
2013-04-24  0:37 Magnus Granberg
2013-04-24  0:11 Magnus Granberg
2013-01-27 12:03 Magnus Granberg
2013-01-26 22:23 Magnus Granberg
2013-01-22 21:06 Magnus Granberg
2013-01-22 20:59 Magnus Granberg
2013-01-22 20:56 Magnus Granberg
2012-12-29 12:12 Magnus Granberg
2012-12-27 23:52 Magnus Granberg
2012-12-27 23:09 Magnus Granberg
2012-12-22 11:45 Magnus Granberg
2012-12-21 23:50 Magnus Granberg
2012-12-21 23:31 Magnus Granberg
2012-12-21 23:23 Magnus Granberg
2012-12-21 20:41 Magnus Granberg
2012-12-21 20:31 Magnus Granberg
2012-12-21 17:33 Magnus Granberg
2012-12-21  2:24 Magnus Granberg
2012-12-21  2:11 Magnus Granberg
2012-12-21  1:50 Magnus Granberg
2012-12-21  1:49 Magnus Granberg
2012-12-21  1:44 Magnus Granberg
2012-12-19  2:17 Magnus Granberg
2012-12-17  1:18 Magnus Granberg
2012-12-17  0:33 Magnus Granberg
2012-12-16 20:50 Magnus Granberg
2012-12-16 20:45 Magnus Granberg
2012-12-15 16:14 Magnus Granberg
2012-12-15  0:31 Magnus Granberg
2012-12-14 14:17 Magnus Granberg
2012-12-13 22:57 Magnus Granberg
2012-12-13 15:18 Magnus Granberg
2012-12-13 15:15 Magnus Granberg
2012-12-13 15:09 Magnus Granberg
2012-12-12  0:29 Magnus Granberg
2012-12-12  0:14 Magnus Granberg
2012-12-12  0:11 Magnus Granberg
2012-12-12  0:09 Magnus Granberg
2012-12-12  0:04 Magnus Granberg
2012-12-12  0:00 Magnus Granberg
2012-12-11 23:52 Magnus Granberg
2012-12-11 23:48 Magnus Granberg
2012-12-11 23:38 Magnus Granberg
2012-12-07 14:58 Magnus Granberg
2012-12-07 14:33 Magnus Granberg
2012-12-07 14:29 Magnus Granberg
2012-12-07 14:22 Magnus Granberg
2012-12-07  0:07 Magnus Granberg
2012-12-07  0:02 Magnus Granberg
2012-12-06 23:56 Magnus Granberg
2012-12-06 23:52 Magnus Granberg
2012-12-06  2:51 Magnus Granberg
2012-12-06  2:41 Magnus Granberg
2012-12-06  2:34 Magnus Granberg
2012-12-06  2:22 Magnus Granberg
2012-12-06  2:18 Magnus Granberg
2012-12-06  0:11 Magnus Granberg
2012-12-06  0:08 Magnus Granberg
2012-12-06  0:04 Magnus Granberg
2012-12-02 11:53 Magnus Granberg
2012-12-02 11:49 Magnus Granberg
2012-12-02  0:06 Magnus Granberg
2012-12-02  0:05 Magnus Granberg
2012-12-01 23:58 Magnus Granberg
2012-12-01 23:35 Magnus Granberg
2012-12-01 23:33 Magnus Granberg
2012-12-01 23:28 Magnus Granberg
2012-12-01 23:24 Magnus Granberg
2012-12-01 23:12 Magnus Granberg
2012-12-01 23:03 Magnus Granberg
2012-12-01 22:58 Magnus Granberg
2012-12-01 11:31 Magnus Granberg
2012-12-01 11:26 Magnus Granberg
2012-07-18  0:10 Magnus Granberg
2012-07-17 15:02 Magnus Granberg
2012-07-17 13:00 Magnus Granberg
2012-07-17  1:07 Magnus Granberg
2012-07-17  0:38 Magnus Granberg
2012-07-17  0:18 Magnus Granberg
2012-06-27 15:26 Magnus Granberg
2012-06-27 15:15 Magnus Granberg
2012-06-27 14:57 Magnus Granberg
2012-06-27 14:43 Magnus Granberg
2012-06-27 14:39 Magnus Granberg
2012-06-27 14:24 Magnus Granberg
2012-06-27 14:19 Magnus Granberg
2012-06-27 14:14 Magnus Granberg
2012-06-27 14:11 Magnus Granberg
2012-06-27 14:07 Magnus Granberg
2012-06-04 23:45 Magnus Granberg
2012-06-03 22:18 Magnus Granberg
2012-05-25  0:15 Magnus Granberg
2012-05-20 14:33 Magnus Granberg
2012-05-20 14:29 Magnus Granberg
2012-05-09 23:12 Magnus Granberg
2012-05-07 23:44 Magnus Granberg
2012-05-07 23:39 Magnus Granberg
2012-05-07 23:35 Magnus Granberg
2012-05-07 23:31 Magnus Granberg
2012-05-07 23:25 Magnus Granberg
2012-05-06 10:47 Magnus Granberg
2012-05-02 14:33 Magnus Granberg
2012-05-01 10:00 Magnus Granberg
2012-05-01  0:15 Magnus Granberg
2012-05-01  0:02 Magnus Granberg
2012-04-30 16:45 Magnus Granberg
2012-04-30 14:33 Magnus Granberg
2012-04-30 14:17 Magnus Granberg
2012-04-30 14:15 Magnus Granberg
2012-04-30 13:13 Magnus Granberg
2012-04-30 13:12 Magnus Granberg
2012-04-29 15:56 Magnus Granberg
2012-04-29 13:24 Magnus Granberg
2012-04-29 13:17 Magnus Granberg
2012-04-28 19:29 Magnus Granberg
2012-04-28 17:24 Magnus Granberg
2012-04-28 17:03 Magnus Granberg
2012-04-28 16:09 Magnus Granberg
2012-04-28 16:07 Magnus Granberg
2012-04-28 16:05 Magnus Granberg
2012-04-28 14:29 Magnus Granberg
2012-04-28 14:20 Magnus Granberg
2012-04-28 14:01 Magnus Granberg
2012-04-28 12:37 Magnus Granberg
2012-04-28  1:53 Magnus Granberg
2012-04-28  1:25 Magnus Granberg
2012-04-28  0:51 Magnus Granberg
2012-04-27 21:03 Magnus Granberg
2012-04-27 20:42 Magnus Granberg
2012-04-27 20:33 Magnus Granberg
2012-04-27 18:27 Magnus Granberg
2012-04-27 18:23 Magnus Granberg
2011-10-31 21:32 Magnus Granberg
2011-10-29 22:48 Magnus Granberg
2011-10-29 22:38 Magnus Granberg
2011-10-29 22:28 Magnus Granberg
2011-10-29 22:24 Magnus Granberg
2011-10-29  0:21 Magnus Granberg
2011-10-29  0:19 Magnus Granberg
2011-10-19 21:31 Magnus Granberg
2011-10-19 21:28 Magnus Granberg
2011-10-19 20:20 Magnus Granberg
2011-10-13 10:41 Magnus Granberg
2011-10-12 10:33 Magnus Granberg
2011-10-12 10:26 Magnus Granberg
2011-10-11 23:51 Magnus Granberg
2011-10-11 23:32 Magnus Granberg
2011-10-11 11:20 Magnus Granberg
2011-10-10 23:57 Magnus Granberg
2011-10-10 23:49 Magnus Granberg
2011-10-10 23:46 Magnus Granberg
2011-10-10 23:43 Magnus Granberg
2011-10-10 23:30 Magnus Granberg
2011-10-09 21:49 Magnus Granberg
2011-09-30 13:38 Magnus Granberg
2011-09-30 13:33 Magnus Granberg
2011-09-30 13:17 Magnus Granberg
2011-09-28  1:41 Magnus Granberg
2011-09-28  1:39 Magnus Granberg
2011-09-28  1:04 Magnus Granberg
2011-09-27 23:43 Magnus Granberg
2011-09-27 11:05 Magnus Granberg
2011-09-13 23:06 Magnus Granberg
2011-09-13  1:02 Magnus Granberg
2011-09-01 23:34 Magnus Granberg
2011-08-31 23:31 Magnus Granberg
2011-08-31  2:05 Magnus Granberg
2011-08-30 23:41 Magnus Granberg
2011-07-29 15:31 Magnus Granberg
2011-04-24 22:21 Magnus Granberg
2011-04-23 15:26 Magnus Granberg
2011-04-23 14:28 Magnus Granberg

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1363979088.594c1bbdd9c78b67804891c58f2a0194c679e59c.zorry@gentoo \
    --to=zorry@gentoo.org \
    --cc=gentoo-commits@lists.gentoo.org \
    --cc=gentoo-dev@lists.gentoo.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox