From mboxrd@z Thu Jan  1 00:00:00 1970
Received: from pigeon.gentoo.org ([208.92.234.80] helo=lists.gentoo.org)
	by finch.gentoo.org with esmtp (Exim 4.77)
	(envelope-from ) id 1SoI5q-0007AX-IV
	for garchives@archives.gentoo.org; Mon, 09 Jul 2012 17:49:54 +0000
Received: from pigeon.gentoo.org (localhost [127.0.0.1])
	by pigeon.gentoo.org (Postfix) with SMTP id 23409E0653;
	Mon,  9 Jul 2012 17:49:42 +0000 (UTC)
Received: from smtp.gentoo.org (smtp.gentoo.org [140.211.166.183])
	by pigeon.gentoo.org (Postfix) with ESMTP id C44BDE0653
	for ; Mon,  9 Jul 2012 17:49:41 +0000 (UTC)
Received: from hornbill.gentoo.org (hornbill.gentoo.org [94.100.119.163])
	(using TLSv1 with cipher AECDH-AES256-SHA (256/256 bits))
	(No client certificate requested)
	by smtp.gentoo.org (Postfix) with ESMTPS id 6FCD51B4006
	for ; Mon,  9 Jul 2012 17:49:40 +0000 (UTC)
Received: from localhost.localdomain (localhost [127.0.0.1])
	by hornbill.gentoo.org (Postfix) with ESMTP id 3B862E5433
	for ; Mon,  9 Jul 2012 17:49:39 +0000 (UTC)
From: "Jorge Manuel B. S. Vicetto" 
To: gentoo-commits@lists.gentoo.org
Content-type: text/plain; charset=UTF-8
Reply-To: gentoo-dev@lists.gentoo.org, "Jorge Manuel B. S. Vicetto" 
Message-ID: <1341856124.498f935c119b4b8093b2abeb1f29916aab67232f.jmbsvicetto@gentoo>
Subject: [gentoo-commits] proj/releng:master commit in: scripts/
X-VCS-Repository: proj/releng
X-VCS-Files: scripts/backup_snapshot_repo scripts/cache-tools.py scripts/copy_buildsync.sh scripts/run_catalyst scripts/run_official scripts/run_snapshot scripts/stage_build.sh scripts/sudo_catalyst scripts/sudo_official scripts/sudo_snapshot scripts/update_auto_tree scripts/update_official_tree scripts/update_snapshot_tree
X-VCS-Directories: scripts/
X-VCS-Committer: jmbsvicetto
X-VCS-Committer-Name: Jorge Manuel B. S. Vicetto
X-VCS-Revision: 498f935c119b4b8093b2abeb1f29916aab67232f
X-VCS-Branch: master
Date: Mon, 9 Jul 2012 17:49:39 +0000 (UTC)
Precedence: bulk
List-Post: 
List-Help: 
List-Unsubscribe: 
List-Subscribe: 
List-Id: Gentoo Linux mail
X-BeenThere: gentoo-commits@lists.gentoo.org
Content-Transfer-Encoding: quoted-printable
X-Archives-Salt: 23712234-f83a-4205-9b13-e7e777a9eb39
X-Archives-Hash: e7d9a1f5ab64e2fc246aa582b16c6ef2

commit:     498f935c119b4b8093b2abeb1f29916aab67232f
Author:     Jorge Manuel B. S. Vicetto (jmbsvicetto) <jmbsvicetto <AT> gentoo <DOT> org>
AuthorDate: Mon Jul 9 17:47:42 2012 +0000
Commit:     Jorge Manuel B. S. Vicetto <jmbsvicetto <AT> gentoo <DOT> org>
CommitDate: Mon Jul 9 17:48:44 2012 +0000
URL:        http://git.overlays.gentoo.org/gitweb/?p=proj/releng.git;a=commit;h=498f935c

Add releng scripts to the repository.

---
 scripts/backup_snapshot_repo |   10 +
 scripts/cache-tools.py       |  700 ++++++++++++++++++++++++++++++++++++++++++
 scripts/copy_buildsync.sh    |  127 ++++++++
 scripts/run_catalyst         |    2 +
 scripts/run_official         |   39 +++
 scripts/run_snapshot         |    2 +
 scripts/stage_build.sh       |  162 ++++++++++
 scripts/sudo_catalyst        |   28 ++
 scripts/sudo_official        |   46 +++
 scripts/sudo_snapshot        |   20 ++
 scripts/update_auto_tree     |    2 +
 scripts/update_official_tree |    2 +
 scripts/update_snapshot_tree |    2 +
 13 files changed, 1142 insertions(+), 0 deletions(-)

diff --git a/scripts/backup_snapshot_repo b/scripts/backup_snapshot_repo
new file mode 100755
index 0000000..94b2828
--- /dev/null
+++ b/scripts/backup_snapshot_repo
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# Start our rsyncs
+RSYNC_OPTS="--archive --delete --sparse --whole-file"
+
+if [ -e /release/repos/snapshot-tree/hooks/post-commit ]
+then
+  echo "$(date): Starting rsync of trees from tmpfs to disk..." >> /var/log/snapshot-tree-backup.log
+  rsync ${RSYNC_OPTS} /release/repos/snapshot-tree/* /release/repos/snapshot-tree-disk >> /var/log/snapshot-tree-backup.log 2>&1 || echo "$(date): rsync failed!" >> /var/log/snapshot-tree-backup.log
+fi
diff --git a/scripts/cache-tools.py b/scripts/cache-tools.py
new file mode 100755
index 0000000..0364450
--- /dev/null
+++ b/scripts/cache-tools.py
@@ -0,0 +1,700 @@
+#!/usr/bin/env python
+# Copyright 1999-2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# $Header: $
+#
+# Zac Medico
+#
+
+import errno, fpformat, os, sys, time
+
+if not hasattr(__builtins__, "set"):
+    from sets import Set as set
+from itertools import chain
+
+def create_syncronized_func(myfunc, mylock):
+    def newfunc(*pargs, **kwargs):
+        mylock.acquire()
+        try:
+            myfunc(*pargs, **kwargs)
+        finally:
+            mylock.release()
+    return newfunc
+
+class ConsoleUpdate(object):
+
+    _synchronized_methods = ["append", "carriageReturn",
+        "newLine", "reset", "update"]
+
+    def __init__(self):
+        self.offset = 0
+        import sys
+        self.stream = sys.stdout
+        self.quiet = False
+        import threading
+        self._lock = threading.RLock()
+        for method_name in self._synchronized_methods:
+            myfunc = create_syncronized_func(
+                getattr(self, method_name), self._lock)
+            setattr(self, method_name, myfunc)
+        # ANSI code that clears from the cursor to the end of the line
+        self._CLEAR_EOL = None
+        try:
+            import curses
+            try:
+                curses.setupterm()
+                self._CLEAR_EOL = curses.tigetstr('el')
+            except curses.error:
+                pass
+        except ImportError:
+            pass
+        if not self._CLEAR_EOL:
+            self._CLEAR_EOL = '\x1b[K'
+
+    def acquire(self, **kwargs):
+        return self._lock.acquire(**kwargs)
+
+    def release(self):
+        self._lock.release()
+
+    def reset(self):
+        self.offset = 0
+
+    def carriageReturn(self):
+        if not self.quiet:
+            self.stream.write("\r")
+            self.stream.write(self._CLEAR_EOL)
+            self.offset = 0
+
+    def newLine(self):
+        if not self.quiet:
+            self.stream.write("\n")
+            self.stream.flush()
+            self.reset()
+
+    def update(self, msg):
+        if not self.quiet:
+            self.carriageReturn()
+            self.append(msg)
+
+    def append(self, msg):
+        if not self.quiet:
+            self.offset += len(msg)
+            self.stream.write(msg)
+            self.stream.flush()
+
+class ProgressCounter(object):
+    def __init__(self):
+        self.total = 0
+        self.current = 0
+
+class ProgressAnalyzer(ProgressCounter):
+    def __init__(self):
+        self.start_time = time.time()
+        self.currentTime = self.start_time
+        self._samples = []
+        self.sampleCount = 20
+    def percentage(self, digs=0):
+        if self.total > 0:
+            float_percent = 100 * float(self.current) / float(self.total)
+        else:
+            float_percent = 0.0
+        return fpformat.fix(float_percent, digs)
+    def totalTime(self):
+        self._samples.append((self.currentTime, self.current))
+        while len(self._samples) > self.sampleCount:
+            self._samples.pop(0)
+        prev_time, prev_count = self._samples[0]
+        time_delta = self.currentTime - prev_time
+        if time_delta > 0:
+            rate = (self.current - prev_count) / time_delta
+            if rate > 0:
+                return self.total / rate
+        return 0
+    def remaining_time(self):
+        return self.totalTime() - self.elapsed_time()
+    def elapsed_time(self):
+        return self.currentTime - self.start_time
+
+class ConsoleProgress(object):
+    def __init__(self, name="Progress", console=None):
+        self.name = name
+        self.analyzer = ProgressAnalyzer()
+        if console is None:
+            self.console = ConsoleUpdate()
+        else:
+            self.console = console
+        self.time_format = "%H:%M:%S"
+        self.quiet = False
+        self.lastUpdate = 0
+        self.latency = 0.5
+
+    def formatTime(self, t):
+        return time.strftime(self.time_format, time.gmtime(t))
+
+    def displayProgress(self, current, total):
+        if self.quiet:
+            return
+
+        self.analyzer.currentTime = time.time()
+        if self.analyzer.currentTime - self.lastUpdate < self.latency:
+            return
+        self.lastUpdate = self.analyzer.currentTime
+        self.analyzer.current = current
+        self.analyzer.total = total
+
+        output = ((self.name, self.analyzer.percentage(1).rjust(4) + "%"),
+            ("Elapsed", self.formatTime(self.analyzer.elapsed_time())),
+            ("Remaining", self.formatTime(self.analyzer.remaining_time())),
+            ("Total", self.formatTime(self.analyzer.totalTime())))
+        self.console.update(" ".join([ x[0] + ": " + x[1] for x in output ]))
+
+class ProgressHandler(object):
+    def __init__(self):
+        self.curval = 0
+        self.maxval = 0
+        self.last_update = 0
+        self.min_display_latency = 0.2
+
+    def onProgress(self, maxval, curval):
+        self.maxval = maxval
+        self.curval = curval
+        cur_time = time.time()
+        if cur_time - self.last_update >= self.min_display_latency:
+            self.last_update = cur_time
+            self.display()
+
+    def display(self):
+        raise NotImplementedError(self)
+
+def open_file(filename=None):
+    if filename is None:
+        f = sys.stderr
+    elif filename == "-":
+        f = sys.stdout
+    else:
+        try:
+            filename = os.path.expanduser(filename)
+            f = open(filename, "a")
+        except (IOError, OSError), e:
+            sys.stderr.write("%s\n" % e)
+            sys.exit(e.errno)
+    return f
+
+def create_log(name="", logfile=None, loglevel=0):
+    import logging
+    log = logging.getLogger(name)
+    log.setLevel(loglevel)
+    handler = logging.StreamHandler(open_file(logfile))
+    handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
+    log.addHandler(handler)
+    return log
+
+def is_interrupt(e):
+    if isinstance(e, (SystemExit, KeyboardInterrupt)):
+        return True
+    return hasattr(e, "errno") and e.errno == errno.EINTR
+
+def mirror_cache(valid_nodes_iterable, src_cache, trg_cache, log,
+    eclass_cache, cleanse_on_transfer_failure):
+
+    cleanse_candidates = set(trg_cache.iterkeys())
+    update_count = 0
+
+    # Since the loop below is mission critical, we continue after *any*
+    # exception that is not an interrupt.
+
+    for x in valid_nodes_iterable:
+        log.debug("%s mirroring" % x)
+        if not cleanse_on_transfer_failure:
+            cleanse_candidates.discard(x)
+
+        try:
+            entry = copy_dict(src_cache[x])
+        except KeyError, e:
+            log.error("%s missing source: %s" % (x, str(e)))
+            del e
+            continue
+        except Exception, e:
+            if is_interrupt(e):
+                raise
+            log.error("%s reading source: %s" % (x, str(e)))
+            del e
+            continue
+
+        write_it = True
+        trg = None
+
+        try:
+            trg = copy_dict(trg_cache[x])
+            if long(trg["_mtime_"]) == long(entry["_mtime_"]) and \
+                eclass_cache.is_eclass_data_valid(trg["_eclasses_"]) and \
+                set(trg["_eclasses_"]) == set(entry["_eclasses_"]):
+                write_it = False
+        except KeyError:
+            pass
+        except Exception, e:
+            if is_interrupt(e):
+                raise
+            log.error("%s reading target: %s" % (x, str(e)))
+            del e
+
+        if trg and not write_it:
+            """ We don't want to skip the write unless we're really sure that
+            the existing cache is identical, so don't trust _mtime_ and
+            _eclasses_ alone."""
+            for d in (entry, trg):
+                if "EAPI" in d and d["EAPI"] in ("", "0"):
+                    del d["EAPI"]
+            for k in set(chain(entry, trg)).difference(
+                ("_mtime_", "_eclasses_")):
+                if trg.get(k, "") != entry.get(k, ""):
+                    write_it = True
+                    break
+
+        if write_it:
+            update_count += 1
+            log.info("%s transferring" % x)
+            inherited = entry.get("INHERITED", None)
+            if inherited:
+                if src_cache.complete_eclass_entries:
+                    if not "_eclasses_" in entry:
+                        log.error("%s missing _eclasses_" % x)
+                        continue
+                    if not eclass_cache.is_eclass_data_valid(entry["_eclasses_"]):
+                        log.error("%s stale _eclasses_" % x)
+                        continue
+                else:
+                    entry["_eclasses_"] = eclass_cache.get_eclass_data(entry["INHERITED"].split(), \
+                        from_master_only=True)
+                    if not entry["_eclasses_"]:
+                        log.error("%s stale _eclasses_" % x)
+                        continue
+            try:
+                trg_cache[x] = entry
+                cleanse_candidates.discard(x)
+            except Exception, e:
+                if is_interrupt(e):
+                    raise
+                log.error("%s writing target: %s" % (x, str(e)))
+                del e
+        else:
+            cleanse_candidates.discard(x)
+
+    if not trg_cache.autocommits:
+        try:
+            trg_cache.commit()
+        except Exception, e:
+            if is_interrupt(e):
+                raise
+            log.error("committing target: %s" % str(e))
+            del e
+
+    return update_count, cleanse_candidates
+
+def copy_dict(src, dest=None):
+    """Some cache implementations throw cache errors when accessing the values.
+    We grab all the values at once here so that we don't have to be concerned
+    about exceptions later."""
+    if dest is None:
+        dest = {}
+    for k, v in src.iteritems():
+        dest[k] = v
+    return dest
+
+class ListPackages(object):
+    def __init__(self, portdb, log, shuffle=False):
+        self._portdb = portdb
+        self._log = log
+        self._shuffle = shuffle
+
+    def run(self):
+        log = self._log
+        cp_list = self._portdb.cp_list
+        cp_all = self._portdb.cp_all()
+        if self._shuffle:
+            from random import shuffle
+            shuffle(cp_all)
+        else:
+            cp_all.sort()
+        cpv_all = []
+        # Since the loop below is mission critical, we continue after *any*
+        # exception that is not an interrupt.
+        for cp in cp_all:
+            log.debug("%s cp_list" % cp)
+            try:
+                cpv_all.extend(cp_list(cp))
+            except Exception, e:
+                if is_interrupt(e):
+                    raise
+                self._log.error("%s cp_list: %s" % (cp, str(e)))
+
+        self.cpv_all = cpv_all
+
+class MetadataGenerate(object):
+    """When cache generation fails for some reason, cleanse the stale cache
+    entry if it exists. This prevents the master mirror from distributing
+    stale cache, and will allow clients to safely assume that all cache is
+    valid. The mtime requirement is especially annoying due to bug #139134
+    (timestamps of cache entries don't change when an eclass changes) and the
+    interaction of timestamps with rsync."""
+    def __init__(self, portdb, cpv_all, log):
+        self._portdb = portdb
+        self._cpv_all = cpv_all
+        self._log = log
+
+    def run(self, onProgress=None):
+        log = self._log
+        portdb = self._portdb
+        cpv_all = self._cpv_all
+        auxdb = portdb.auxdb[portdb.porttree_root]
+        cleanse_candidates = set(auxdb.iterkeys())
+
+        # Since the loop below is mission critical, we continue after *any*
+        # exception that is not an interrupt.
+        maxval = len(cpv_all)
+        curval = 0
+        if onProgress:
+            onProgress(maxval, curval)
+        while cpv_all:
+            cpv = cpv_all.pop(0)
+            log.debug("%s generating" % cpv)
+            try:
+                portdb.aux_get(cpv, ["EAPI"])
+                # Cleanse if the above doesn't succeed (prevent clients from
+                # receiving stale cache, and let them assume it is valid).
+                cleanse_candidates.discard(cpv)
+            except Exception, e:
+                if is_interrupt(e):
+                    raise
+                log.error("%s generating: %s" % (cpv, str(e)))
+                del e
+            curval += 1
+            if onProgress:
+                onProgress(maxval, curval)
+
+        self.target_cache = auxdb
+        self.dead_nodes = cleanse_candidates
+
+class MetadataTransfer(object):
+    def __init__(self, portdb, cpv_all, forward, cleanse_on_transfer_failure,
+        log):
+        self._portdb = portdb
+        self._cpv_all = cpv_all
+        self._log = log
+        self._forward = forward
+        self._cleanse_on_transfer_failure = cleanse_on_transfer_failure
+
+    def run(self, onProgress=None):
+        log = self._log
+        portdb = self._portdb
+        cpv_all = self._cpv_all
+        aux_cache = portdb.auxdb[portdb.porttree_root]
+        import portage
+        auxdbkeys = portage.auxdbkeys[:]
+        metadbmodule = portdb.mysettings.load_best_module("portdbapi.metadbmodule")
+        portdir_cache = metadbmodule(portdb.porttree_root, "metadata/cache",
+            auxdbkeys)
+
+        maxval = len(cpv_all)
+        curval = 0
+        if onProgress:
+            onProgress(maxval, curval)
+        class pkg_iter(object):
+            def __init__(self, pkg_list, onProgress=None):
+                self.pkg_list = pkg_list
+                self.maxval = len(pkg_list)
+                self.curval = 0
+                self.onProgress = onProgress
+            def __iter__(self):
+                while self.pkg_list:
+                    yield self.pkg_list.pop()
+                    self.curval += 1
+                    if self.onProgress:
+                        self.onProgress(self.maxval, self.curval)
+
+        if self._forward:
+            src_cache = portdir_cache
+            trg_cache = aux_cache
+        else:
+            src_cache = aux_cache
+            trg_cache = portdir_cache
+
+        """ This encapsulates validation of eclass timestamps and also fills in
+        missing data (mtimes and/or paths) as necessary for the given cache
+        format."""
+        eclass_cache = portage.eclass_cache.cache(portdb.porttree_root)
+
+        if not trg_cache.autocommits:
+            trg_cache.sync(100)
+
+        self.target_cache = trg_cache
+        self.update_count, self.dead_nodes = mirror_cache(
+            pkg_iter(cpv_all, onProgress=onProgress),
+            src_cache, trg_cache, log, eclass_cache,
+            self._cleanse_on_transfer_failure)
+
+class CacheCleanse(object):
+    def __init__(self, auxdb, dead_nodes, log):
+        self._auxdb = auxdb
+        self._dead_nodes = dead_nodes
+        self._log = log
+    def run(self):
+        auxdb = self._auxdb
+        log = self._log
+        for cpv in self._dead_nodes:
+            try:
+                log.info("%s cleansing" % cpv)
+                del auxdb[cpv]
+            except Exception, e:
+                if is_interrupt(e):
+                    raise
+                log.error("%s cleansing: %s" % (cpv, str(e)))
+                del e
+
+def import_portage():
+    try:
+        from portage import data as portage_data
+    except ImportError:
+        import portage_data
+    # If we're not already root or in the portage group, we make the gid of the
+    # current process become portage_gid.
+    if os.getgid() != 0 and portage_data.portage_gid not in os.getgroups():
+        portage_data.portage_gid = os.getgid()
+        portage_data.secpass = 1
+
+    os.environ["PORTAGE_LEGACY_GLOBALS"] = "false"
+    import portage
+    del os.environ["PORTAGE_LEGACY_GLOBALS"]
+    return portage
+
+def create_portdb(portdir=None, cachedir=None, config_root=None,
+    target_root=None, profile=None, **kwargs):
+
+    if cachedir is not None:
+        os.environ["PORTAGE_DEPCACHEDIR"] = cachedir
+    if config_root is None:
+        config_root = os.environ.get("PORTAGE_CONFIGROOT", "/")
+    if target_root is None:
+        target_root = os.environ.get("ROOT", "/")
+    if profile is None:
+        profile = ""
+
+    portage = import_portage()
+    try:
+        from portage import const as portage_const
+    except ImportError:
+        import portage_const
+
+    # Disable overlays because we only generate metadata for the main repo.
+    os.environ["PORTDIR_OVERLAY"] = ""
+    conf = portage.config(config_profile_path=profile,
+        config_incrementals=portage_const.INCREMENTALS,
+        target_root=target_root,
+        config_root=config_root)
+
+    if portdir is None:
+        portdir = conf["PORTDIR"]
+
+    # The canonical path is the key for portdb.auxdb.
+    portdir = os.path.realpath(portdir)
+    conf["PORTDIR"] = portdir
+    conf.backup_changes("PORTDIR")
+
+    portdb = portage.portdbapi(portdir,
+        mysettings=conf)
+
+    return portdb
+
+def parse_args(myargv):
+    description = "This program will ensure that the metadata cache is up to date for the entire portage tree."
+    usage = "usage: cache-tools [options] --generate || --transfer"
+    from optparse import OptionParser
+    parser = OptionParser(description=description, usage=usage)
+    parser.add_option("--portdir",
+        help="location of the portage tree",
+        dest="portdir")
+    parser.add_option("--cachedir",
+        help="location of the metadata cache",
+        dest="cachedir")
+    parser.add_option("--profile",
+        help="location of the profile",
+        dest="profile")
+    parser.add_option("--generate",
+        help="generate metadata as necessary to ensure that the cache is fully populated",
+        action="store_true", dest="generate", default=False)
+    parser.add_option("--shuffle",
+        help="generate cache in random rather than sorted order (useful to prevent two separate instances from competing to generate metadata for the same packages simultaneously)",
+        action="store_true", dest="shuffle", default=False)
+    parser.add_option("--transfer",
+        help="transfer metadata from portdir to cachedir or vice versa",
+        action="store_true", dest="transfer", default=False)
+    parser.add_option("--cleanse-on-transfer-failure",
+        help="cleanse target cache when transfer fails for any reason (such as the source being unavailable)",
+        action="store_true", dest="cleanse_on_transfer_failure", default=False)
+    parser.add_option("--forward",
+        help="forward metadata transfer flows from portdir to cachedir (default)",
+        action="store_true", dest="forward", default=True)
+    parser.add_option("--reverse",
+        help="reverse metadata transfer flows from cachedir to portdir",
+        action="store_false", dest="forward", default=True)
+    parser.add_option("--logfile",
+        help="send status messages to a file (default is stderr)",
+        dest="logfile", default=None)
+    parser.add_option("--loglevel",
+        help="numeric log level (defaults to 0 and may range from 0 to 50 corresponding to the default levels of the python logging module)",
module)", + dest=3D"loglevel", default=3D"0") + parser.add_option("--reportfile", + help=3D"send a report to a file", + dest=3D"reportfile", default=3DNone) + parser.add_option("--spawn-outfile", + help=3D"redirect ouput of spawned processes to a file instead of stdou= t/stderr", + dest=3D"spawn_outfile", default=3DNone) + parser.add_option("--no-progress", + action=3D"store_false", dest=3D"progress", default=3DTrue, + help=3D"disable progress output to tty") + options, args =3D parser.parse_args(args=3Dmyargv) + + # Conversion to dict allows us to use **opts as function args later on. + opts =3D {} + all_options =3D ("portdir", "cachedir", "profile", "progress", "logfile= ", + "loglevel", "generate", "transfer", "forward", "shuffle", + "spawn_outfile", "reportfile", "cleanse_on_transfer_failure") + for opt_name in all_options: + v =3D getattr(options, opt_name) + opts[opt_name] =3D v + return opts + +def run_command(args): + opts =3D parse_args(sys.argv[1:]) + + if opts["spawn_outfile"]: + fd =3D os.dup(1) + sys.stdout =3D os.fdopen(fd, 'w') + fd =3D os.dup(2) + sys.stderr =3D os.fdopen(fd, 'w') + f =3D open_file(opts["spawn_outfile"]) + os.dup2(f.fileno(), 1) + os.dup2(f.fileno(), 2) + del fd, f + + console =3D ConsoleUpdate() + if not opts["progress"] or not sys.stdout.isatty(): + console.quiet =3D True + job =3D None + import signal, thread, threading + shutdown_initiated =3D threading.Event() + shutdown_complete =3D threading.Event() + def shutdown_console(): + console.acquire() + try: + console.update("Interrupted.") + console.newLine() + console.quiet =3D True + shutdown_complete.set() + # Kill the main thread if necessary. + # This causes the SIGINT signal handler to be invoked in the + # main thread. The signal handler needs to be an actual + # callable object (rather than something like signal.SIG_DFL) + # in order to avoid TypeError: 'int' object is not callable. + thread.interrupt_main() + thread.exit() + finally: + console.release() + + def handle_interrupt(*args): + if shutdown_complete.isSet(): + sys.exit(1) + # Lock the console from a new thread so that the main thread is allowe= d + # to cleanly complete any console interaction that may have been in + # progress when this interrupt arrived. + if not shutdown_initiated.isSet(): + thread.start_new_thread(shutdown_console, ()) + shutdown_initiated.set() + + signal.signal(signal.SIGINT, handle_interrupt) + signal.signal(signal.SIGTERM, handle_interrupt) + + try: + import datetime + datestamp =3D str(datetime.datetime.now()) + time_begin =3D time.time() + log =3D create_log(name=3D"MetadataGenerate", + logfile=3Dopts["logfile"], loglevel=3Dint(opts["loglevel"])) + if opts["reportfile"]: + reportfile =3D open_file(opts["reportfile"]) + portdb =3D create_portdb(**opts) + try: + os.nice(int(portdb.mysettings.get("PORTAGE_NICENESS", "0"))) + except (OSError, ValueError), e: + log.error("PORTAGE_NICENESS failed: '%s'" % str(e)) + del e + + job =3D ListPackages(portdb, log, shuffle=3Dopts["shuffle"]) + console.update("Listing packages in repository...") + job.run() + cpv_all =3D job.cpv_all + total_count =3D len(cpv_all) + if opts["generate"]: + job =3D MetadataGenerate(portdb, cpv_all, log) + name =3D "Cache generation" + complete_msg =3D "Metadata generation is complete." + elif opts["transfer"]: + job =3D MetadataTransfer(portdb, cpv_all, opts["forward"], + opts["cleanse_on_transfer_failure"], log) + if opts["forward"]: + name =3D "Forward transfer" + complete_msg =3D "Forward metadata transfer is complete." 
diff --git a/scripts/copy_buildsync.sh b/scripts/copy_buildsync.sh
new file mode 100755
index 0000000..729ff38
--- /dev/null
+++ b/scripts/copy_buildsync.sh
@@ -0,0 +1,127 @@
+#!/bin/bash
+
+ARCHES="alpha amd64 arm hppa ia64 ppc sparc x86 sh s390"
+  #alpha amd64 arm hppa ia64 mips ppc s390 sh sparc x86
+#ARCHES="s390"
+RSYNC_OPTS="-aO --delay-updates"
+DEBUG=
+VERBOSE=
+
+OUT_STAGE3="latest-stage3.txt"
+OUT_ISO="latest-iso.txt"
+
+# Nothing to edit beyond this point
+
+DEBUGP=
+VERBOSEP=
+[ -n "$DEBUG" ] && DEBUGP=echo
+[ -n "$DEBUG" ] && RSYNC_OPTS="${RSYNC_OPTS} -n"
+[ -n "$VERBOSE" ] && RSYNC_OPTS="${RSYNC_OPTS} -v"
+[ -n "$VERBOSE" ] && VERBOSEP="-v"
+
+for ARCH in $ARCHES; do
+  rc=0
+  fail=0
+
+  indir=/home/buildsync/builds/${ARCH}
+  outdir=/release/weekly/${ARCH}
+  tmpdir=/release/tmp/buildsync/partial/${ARCH}
+
+  mkdir -p ${tmpdir} 2>/dev/null
+  # Copying
+  if [ -d "${indir}" ]; then
+    for i in $(find ${indir} -type f | grep -- '-20[0123][0-9]\{5\}' | sed -e 's:^.*-\(20[^.]\+\).*$:\1:' | sort -ur); do
+      #echo "Doing $i"
+      t="${outdir}/${i}"
+      mkdir -p ${t} 2>/dev/null
+      rsync ${RSYNC_OPTS} --temp-dir=${tmpdir} --partial-dir=${tmpdir} ${indir}/ --filter "S *${i}*" --filter 'S **/' --filter 'H *' ${t}
+      rc=$?
+      if [ $rc -eq 0 ]; then
+        find ${indir} -type f -name "*${i}*" -print0 | xargs -0 --no-run-if-empty $DEBUGP rm $VERBOSEP -f
+      else
+        echo "Not deleting ${indir}/*${i}*, rsync failed!" 1>&2
+        fail=1
+      fi
+    done
+    find ${outdir} -mindepth 1 -type d \
+      | egrep -v current \
+      | sort -r \
+      | tr '\n' '\0' \
+      | xargs -0 --no-run-if-empty rmdir --ignore-fail-on-non-empty
+  fi
+
+  # ========================================================================
+  # Build data for revealing latest:
+  # *.iso
+  # stage3*bz2
+  cd "${outdir}"
+  # %T@
+
+  iso_list="$(find 20* -name '*.iso' -printf '%h %f %h/%f\n' | grep -v hardened | sort -n)"
+  stage3_list="$(find 20* -name 'stage3*bz2' -printf '%h %f %h/%f\n' | grep -v hardened | sort -n)"
+  latest_iso_date="$(echo -e "${iso_list}" | awk '{print $1}' | cut -d/ -f1 | tail -n1)"
+  latest_stage3_date="$(echo -e "${stage3_list}" | awk '{print $1}' | cut -d/ -f1 | tail -n1)"
+  header="$(echo -e "# Latest as of $(date -uR)\n# ts=$(date -u +%s)")"
+
+  # Do not remove this
+  [ -z "${latest_iso_date}" ] && latest_iso_date="NONE-FOUND"
+  [ -z "${latest_stage3_date}" ] && latest_stage3_date="NONE-FOUND"
+
+  if [ -n "${iso_list}" ]; then
+    echo -e "${header}" >"${OUT_ISO}"
+    echo -e "${iso_list}" | awk '{print $3}' | grep "$latest_iso_date" >>${OUT_ISO}
+    rm -f current-iso
+    ln -sf "$latest_iso_date" current-iso
+  fi
+  if [ -n "${stage3_list}" ]; then
+    echo -e "${header}" >"${OUT_STAGE3}"
+    echo -e "${stage3_list}" | awk '{print $3}' | grep "$latest_stage3_date" >>${OUT_STAGE3}
+    rm -f current-stage3
+    # The "latest stage3" concept doesn't apply to the arm variants
+    # that are pushed on different days of the week.
+    if [[ ! $(echo ${outdir} | grep arm) ]]; then
+      ln -sf "$latest_stage3_date" current-stage3
+    fi
+  fi
+
+  # new variant preserve code
+  variants="$(find 20* \( -iname '*.iso' -o -iname '*.tar.bz2' \) -printf '%f\n' | sed -e 's,-20[012][0-9]\{5\}.*,,g' -r | sort | uniq)"
+  echo -n '' >"${tmpdir}"/.keep.${ARCH}.txt
+  for v in $variants ; do
+    #date_variant=$(find 20* -iname "${v}*" \( -name '*.tar.bz2' -o -iname '*.iso' \) -printf '%h\n' | sed -e "s,.*/$a/autobuilds/,,g" -e 's,/.*,,g' | sort -n | tail -n1 )
+    variant_path=$(find 20* -iname "${v}*" \( -name '*.tar.bz2' -o -iname '*.iso' \) -print | sed -e "s,.*/$a/autobuilds/,,g" | sort -k1,1 -t/ | tail -n1 )
+    f="latest-${v}.txt"
+    echo -e "${header}" >"${f}"
+    echo -e "${variant_path}" >>${f}
+    rm -f "current-$v"
+    ln -sf "${variant_path%/*}" "current-$v"
+    echo "${variant_path}" | sed -e 's,/.*,,g' -e 's,^,/,g' -e 's,$,$,g' >>"${tmpdir}"/.keep.${ARCH}.txt
+  done
+  #echo "$date_variant" \
+  #| sort | uniq | sed -e 's,^,/,g' -e 's,$,$,g' >"${tmpdir}"/.keep.${ARCH}.txt
+
+  # ========================================================================
+  # Cleanup
+  if [ $fail -eq 0 ]; then
+    # Clean up all but latest 4 from mirror dir
+    cd "${outdir}"
+    #echo regex "/${latest_iso_date}\$|/${latest_stage3_date}\$"
+    for i in $(find -regextype posix-basic -mindepth 1 -maxdepth 1 -type d -regex '.*20[012][0-9]\{5\}.*' \
+      | sed -e 's:^.*-\(20[^.]\+\).*$:\1:' \
+      | sort -ur \
+      | egrep -v "/${latest_iso_date}\$|/${latest_stage3_date}\$" \
+      | egrep -v -f "${tmpdir}"/.keep.${ARCH}.txt \
+      | tail -n +5); do
+
+      $DEBUGP rm $VERBOSEP -rf $(pwd)/${i}
+    done
+
+    $DEBUGP rm $VERBOSEP -rf ${tmpdir}
+
+  else
+    echo "There was some failure for $ARCH during the weekly sync. Not doing cleanup for fear of data loss." 1>&2
+  fi
+
+done
+
+# vim:ts=2 sw=2 noet ft=sh:
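
The latest-*.txt files written above share a two-line header followed by one
path per matching file. A hypothetical latest-stage3.txt, with a made-up date
and filename purely for illustration, would look like:

    # Latest as of Mon, 09 Jul 2012 17:49:39 +0000
    # ts=1341856179
    20120705/stage3-amd64-20120705.tar.bz2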
diff --git a/scripts/run_catalyst b/scripts/run_catalyst
new file mode 100755
index 0000000..997f652
--- /dev/null
+++ b/scripts/run_catalyst
@@ -0,0 +1,2 @@
+#!/bin/bash
+sudo /release/bin/sudo_catalyst "$@"
diff --git a/scripts/run_official b/scripts/run_official
new file mode 100755
index 0000000..dfb29f2
--- /dev/null
+++ b/scripts/run_official
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+email_from="auto"
+email_to="releng@gentoo.org"
+url="https://poseidon.amd64.dev.gentoo.org/snapshots"
+snapshot_uri="/release/webroot/snapshots"
+svn_repo="/release/repos/snapshot-tree"
+
+send_email() {
+  subject="[Snapshot] ${1}"
+
+  echo -e "From: ${email_from}\r\nTo: ${email_to}\r\nSubject: ${subject}\r\n\r\nA new snapshot has been built from revision `svnlook history ${svn_repo} | head -n 3 | tail -n 1 | sed -e 's:^ *::' -e 's: .*$::'` of ${svn_repo}. You can find it at ${url}.\r\n\r\n$(cat /release/snapshots/portage-${1}.tar.bz2.DIGESTS)\r\n" | /usr/sbin/sendmail -f ${email_from} ${email_to}
+}
+
+if [ "${email_from}" == "auto" ]
+then
+  username="$(whoami)"
+  if [ "${username}" == "root" ]
+  then
+    email_from="catalyst@poseidon.amd64.dev.gentoo.org"
+  else
+    email_from="${username}@gentoo.org"
+  fi
+fi
+
+sudo /release/bin/sudo_official "$@" && \
+echo "Starting rsync from /release/snapshots/portage-${1}.tar.bz2* to ${snapshot_uri}" && \
+rsync --archive --stats --progress /release/snapshots/portage-${1}.tar.bz2* \
+  ${snapshot_uri}
+ret=$?
+
+if [ "${email_from}" == "none" ]
+then
+  echo "Skipping email step as configured..."
+else
+  [ $ret -eq 0 ] && send_email ${1}
+fi
+
+exit $ret
diff --git a/scripts/run_snapshot b/scripts/run_snapshot
new file mode 100755
index 0000000..20cc460
--- /dev/null
+++ b/scripts/run_snapshot
@@ -0,0 +1,2 @@
+#!/bin/bash
+sudo /release/bin/sudo_snapshot "$@"
diff --git a/scripts/stage_build.sh b/scripts/stage_build.sh
new file mode 100755
index 0000000..0dd89a9
--- /dev/null
+++ b/scripts/stage_build.sh
@@ -0,0 +1,162 @@
+#!/bin/bash
+
+PID=$$
+
+profile=
+version_stamp=
+subarch=
+stage1_seed=
+snapshot=
+config=/etc/catalyst/catalyst.conf
+email_from="catalyst@localhost"
+email_to="root@localhost"
+verbose=0
+
+usage() {
+  msg=$1
+
+  if [ -n "${msg}" ]; then
+    echo -e "${msg}\n";
+  fi
+
+  cat << EOH
+Usage:
+  stage_build [-p|--profile <profile>] [-v|--version-stamp <stamp>]
+    [-a|--arch <arch>] [-s|--stage1-seed <seed>] [--verbose]
+    [-f|--email-from <from>] [-t|--email-to <to>] [-h|--help]
+
+Options:
+  -p|--profile        Sets the portage profile (required)
+  -v|--version-stamp  Sets the version stamp (required)
+  -a|--arch           Sets the 'subarch' in the spec (required)
+  -s|--stage1-seed    Sets the seed for the stage1 (required)
+  -S|--snapshot       Sets the snapshot name (if not given defaults to today's
+                      date)
+  -c|--config         catalyst config to use, defaults to catalyst default
+  --verbose           Send output of commands to console as well as log
+  -f|--email-from     Sets the 'From' on emails sent from this script (defaults
+                      to catalyst@localhost)
+  -t|--email-to       Sets the 'To' on emails sent from this script (defaults
+                      to root@localhost)
+  -h|--help           Show this message and quit
+
+Example:
+  stage_build -p default-linux/x86/2006.1 -v 2007.0_pre -a i686 -s default/stage3-i686-2006.1
+EOH
+}
+
+send_email() {
+  subject="[${subarch}] $1"
+  body=$2
+
+  echo -e "From: ${email_from}\r\nTo: ${email_to}\r\nSubject: ${subject}\r\n\r\nArch: ${subarch}\r\nProfile: ${profile}\r\nVersion stamp: ${version_stamp}\r\nStage1 seed: ${stage1_seed}\r\nSnapshot: ${snapshot}\r\n\r\n${body}\r\n" | /usr/sbin/sendmail -f ${email_from} ${email_to}
+}
+
+run_cmd() {
+  cmd=$1
+  logfile=$2
+
+  if [ $verbose = 1 ]; then
+    ${cmd} 2>&1 | tee ${logfile}
+  else
+    ${cmd} &> ${logfile}
+  fi
+}
+
+# Parse args
+params=${#}
+while [ ${#} -gt 0 ]
+do
+  a=${1}
+  shift
+  case "${a}" in
+    -h|--help)
+      usage
+      exit 0
+      ;;
+    -p|--profile)
+      profile=$1
+      shift
+      ;;
+    -v|--version-stamp)
+      version_stamp=$1
+      shift
+      ;;
+    -a|--arch)
+      subarch=$1
+      shift
+      ;;
+    -f|--email-from)
+      email_from=$1
+      shift
+      ;;
+    -t|--email-to)
+      email_to=$1
+      shift
+      ;;
+    -s|--stage1-seed)
+      stage1_seed=$1
+      shift
+      ;;
+    -S|--snapshot)
+      snapshot=$1
+      shift
+      ;;
+    -c|--config)
+      config=$1
+      shift
+      ;;
+    --verbose)
+      verbose=1
+      ;;
+    -*)
+      echo "You have specified an invalid option: ${a}"
+      usage
+      exit 1
+      ;;
+  esac
+done
+
+# Make sure all required values were specified
+if [ -z "${profile}" ]; then
+  usage "You must specify a profile."
+  exit 1
+fi
+if [ -z "${version_stamp}" ]; then
+  usage "You must specify a version stamp."
+  exit 1
+fi
+if [ -z "${subarch}" ]; then
+  usage "You must specify an arch."
+  exit 1
+fi
+if [ -z "${stage1_seed}" ]; then
+  usage "You must specify a stage1 seed."
+  exit 1
+fi
+cd /tmp
+
+if [ -z "${snapshot}" ]; then
+  snapshot=`date +%Y%m%d`
+  run_cmd "catalyst -c ${config} -s '${snapshot}'" "/tmp/catalyst_build_snapshot.${PID}.log"
+  if [ $? != 0 ]; then
+    send_email "Catalyst build error - snapshot" "$(tail -n 200 /tmp/catalyst_build_snapshot.${PID}.log)\r\n\r\nFull build log at /tmp/catalyst_build_snapshot.${PID}.log"
+    exit 1
+  fi
+fi
+
+for i in 1 2 3; do
+  echo "subarch: ${subarch}" > stage${i}.spec
+  echo "target: stage${i}" >> stage${i}.spec
+  echo "version_stamp: ${version_stamp}" >> stage${i}.spec
+  echo "rel_type: default" >> stage${i}.spec
+  echo "profile: ${profile}" >> stage${i}.spec
+  echo "snapshot: ${snapshot}" >> stage${i}.spec
+  if [ ${i} = 1 ]; then
+    echo "source_subpath: ${stage1_seed}" >> stage${i}.spec
+  else
+    echo "source_subpath: default/stage$(expr ${i} - 1)-${subarch}-${version_stamp}" >> stage${i}.spec
+  fi
+  run_cmd "catalyst -a -p -c ${config} -f stage${i}.spec" "/tmp/catalyst_build_stage${i}.${PID}.log"
+  if [ $? != 0 ]; then
+    send_email "Catalyst build error - stage${i}" "$(tail -n 200 /tmp/catalyst_build_stage${i}.${PID}.log)\r\n\r\nFull build log at /tmp/catalyst_build_stage${i}.${PID}.log"
+    exit 1
+  fi
+done
+
+send_email "Catalyst build success" "Everything finished successfully."
diff --git a/scripts/sudo_catalyst b/scripts/sudo_catalyst
new file mode 100755
index 0000000..19ecc90
--- /dev/null
+++ b/scripts/sudo_catalyst
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+usage() {
+  echo "Usage: $(basename ${0}) <arch> <target> <spec>"
+  echo "Where arch is either amd64 or x86, target is default, dev, hardened,"
+  echo "or uclibc, and spec is your spec file."
+  echo
+}
+
+if [ -z "$1" -o -z "$2" -o -z "$3" ]
+then
+  usage
+else
+  target="$(grep target ${3} | cut -d' ' -f2)"
+  /usr/bin/catalyst -c /etc/catalyst/${1}-${2}.conf -f ${3} ${4} ${5}
+#  && \
+#  case ${target} in
+#    stage*|grp*|livecd-stage2)
+#      echo "Cleaning out ${target} temp files"
+#      rel_type="$(grep rel_type ${3} | cut -d' ' -f2)"
+#      subarch="$(grep subarch ${3} | cut -d' ' -f2)"
+#      version="$(grep version ${3} | cut -d' ' -f2)"
+#      storedir="$(grep storedir /etc/catalyst/${1}-${2}.conf | cut -d\" -f2)"
+#      echo "Removing ${storedir}/tmp/${rel_type}/${target}-${subarch}-${version}"
+#      rm -rf ${storedir}/tmp/${rel_type}/${target}-${subarch}-${version}
+#      ;;
+#  esac
+fi
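
Together with run_catalyst above, a build host would invoke this wrapper as,
for example (the spec path here is hypothetical; the arch and target values
come from the usage text):

    run_catalyst amd64 default /release/specs/amd64/stage3.spec
    # which runs: /usr/bin/catalyst -c /etc/catalyst/amd64-default.conf \
    #   -f /release/specs/amd64/stage3.spec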
diff --git a/scripts/sudo_official b/scripts/sudo_official
new file mode 100755
index 0000000..80e7ca0
--- /dev/null
+++ b/scripts/sudo_official
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+tree="/release/trees/snapshot-tree"
+portdir="${tree}/${1/_beta2/}/portage"
+cache_args="--portdir=${portdir} --cachedir=/release/tmp/depcache"
+
+usage() {
+  echo "Usage: $(basename ${0}) <snapshot>"
+}
+
+if [ -z "${1}" ]
+then
+  usage
+else
+  cd ${tree}
+  echo "Clearing out old metadata cache"
+  rm -rf ${portdir}/metadata/cache
+  echo "Performing a svn up on ${tree}"
+  svn up || exit 1
+  mkdir -p ${portdir}/metadata/cache
+  echo "Recreating portage metadata cache"
+  cache-tools.py ${cache_args} --generate || exit 1
+  cache-tools.py ${cache_args} --transfer --reverse \
+    --cleanse-on-transfer-failure || exit 1
+  if [ ! -d ${portdir}/metadata/cache/sys-kernel ]
+  then
+    echo "Metadata update failed! Bailing out!"
+    exit 1
+  fi
+  catalyst -c /etc/catalyst/snapshot-official.conf -s ${1} \
+    -C portdir="${portdir}" || exit 1
+  for i in amd64 x86
+  do
+    for j in default dev hardened uclibc
+    do
+      cd /release/buildroot/${i}-${j}/snapshots
+      rm -f portage-official.tar.bz2 portage-${1}.tar.bz2*
+      ln -sf /release/snapshots/portage-${1}.tar.bz2 \
+        portage-official.tar.bz2
+      ln -sf /release/snapshots/portage-${1}.tar.bz2 \
+        portage-${1}.tar.bz2
+      ln -sf /release/snapshots/portage-${1}.tar.bz2.DIGESTS \
+        portage-${1}.tar.bz2.DIGESTS
+    done
+  done
+fi
diff --git a/scripts/sudo_snapshot b/scripts/sudo_snapshot
new file mode 100755
index 0000000..1ba6485
--- /dev/null
+++ b/scripts/sudo_snapshot
@@ -0,0 +1,20 @@
+#!/bin/bash
+usage() {
+  echo "Usage: $(basename ${0}) <snapshot>"
+}
+if [ -z "${1}" ]
+then
+  usage
+else
+  catalyst -c /etc/catalyst/snapshot.conf -s ${1}
+  for i in amd64 x86
+  do
+    for j in default dev hardened uclibc
+    do
+      cd /release/buildroot/${i}-${j}/snapshots
+      rm -f portage-${1}.tar.bz2
+      ln -sf /release/snapshots/portage-${1}.tar.bz2 \
+        portage-${1}.tar.bz2
+    done
+  done
+fi
diff --git a/scripts/update_auto_tree b/scripts/update_auto_tree
new file mode 100755
index 0000000..08909e7
--- /dev/null
+++ b/scripts/update_auto_tree
@@ -0,0 +1,2 @@
+#!/bin/bash
+PORTDIR="/release/trees/portage-auto/" FEATURES="$FEATURES -news" emerge --sync -q
diff --git a/scripts/update_official_tree b/scripts/update_official_tree
new file mode 100755
index 0000000..250e905
--- /dev/null
+++ b/scripts/update_official_tree
@@ -0,0 +1,2 @@
+#!/bin/bash
+PORTDIR="/release/trees/portage-official/" emerge --sync
diff --git a/scripts/update_snapshot_tree b/scripts/update_snapshot_tree
new file mode 100755
index 0000000..f64742d
--- /dev/null
+++ b/scripts/update_snapshot_tree
@@ -0,0 +1,2 @@
+#!/bin/bash
+PORTDIR="/release/trees/portage-snapshot/" emerge --sync