* [gentoo-commits] proj/catalyst:master commit in: targets/stage1/, /, catalyst/base/, catalyst/, doc/, catalyst/targets/, bin/
@ 2020-04-15 19:59 Matt Turner
From: Matt Turner @ 2020-04-15 19:59 UTC
To: gentoo-commits
commit: 32d11d799bbb899f3418297462089a22ba13cf53
Author: Matt Turner <mattst88 <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 15 19:55:00 2020 +0000
Commit: Matt Turner <mattst88 <AT> gentoo <DOT> org>
CommitDate: Wed Apr 15 19:57:37 2020 +0000
URL: https://gitweb.gentoo.org/proj/catalyst.git/commit/?id=32d11d79
catalyst: Run autopep8
The only change I reverted was the reformatting of the
HASH_DEFINITIONS table in catalyst/hash_utils.py.
Signed-off-by: Matt Turner <mattst88 <AT> gentoo.org>
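The commit does not record the exact autopep8 invocation. A run that
would produce this kind of result (in-place, 4-space indents, whole
tree) looks roughly like:

    autopep8 --in-place --recursive catalyst/ doc/ targets/ setup.py
    autopep8 --in-place bin/catalyst bin/pylint

(the extensionless scripts in bin/ have to be listed explicitly, since
recursive mode only picks up *.py files), followed by selectively
reverting the HASH_DEFINITIONS hunk, e.g. with
git checkout -p catalyst/hash_utils.py. The file list above is
inferred from the diffstat below.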
.pylintrc | 2 +-
bin/catalyst | 28 +-
bin/pylint | 47 +-
catalyst/__init__.py | 8 +-
catalyst/base/clearbase.py | 119 +-
catalyst/base/genbase.py | 88 +-
catalyst/base/resume.py | 221 ++-
catalyst/base/stagebase.py | 3338 ++++++++++++++++++------------------
catalyst/base/targetbase.py | 43 +-
catalyst/config.py | 226 +--
catalyst/defaults.py | 148 +-
catalyst/fileops.py | 197 +--
catalyst/hash_utils.py | 226 ++-
catalyst/lock.py | 30 +-
catalyst/log.py | 150 +-
catalyst/main.py | 767 +++++----
catalyst/support.py | 395 ++---
catalyst/targets/embedded.py | 78 +-
catalyst/targets/livecd_stage1.py | 88 +-
catalyst/targets/livecd_stage2.py | 204 +--
catalyst/targets/netboot.py | 297 ++--
catalyst/targets/snapshot.py | 184 +-
catalyst/targets/stage1.py | 225 +--
catalyst/targets/stage2.py | 74 +-
catalyst/targets/stage3.py | 32 +-
catalyst/targets/stage4.py | 66 +-
catalyst/version.py | 89 +-
doc/make_subarch_table_guidexml.py | 38 +-
doc/make_target_table.py | 38 +-
setup.py | 171 +-
targets/stage1/build.py | 21 +-
31 files changed, 3853 insertions(+), 3785 deletions(-)
diff --git a/.pylintrc b/.pylintrc
index 40851664..8caafef1 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -85,7 +85,7 @@ max-module-lines=1000
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
-indent-string='\t'
+indent-string=' '
[MISCELLANEOUS]
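This keeps pylint's formatting check in agreement with autopep8's
PEP 8 output: with indent-string still set to '\t', every reindented
line would be flagged as bad-indentation (W0311). A quick sanity
check on a hypothetical module after reformatting:

    autopep8 --in-place mymodule.py
    pylint --rcfile .pylintrc mymodule.py   # no W0311 warnings expected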
diff --git a/bin/catalyst b/bin/catalyst
index 97cbb0f9..e464d369 100755
--- a/bin/catalyst
+++ b/bin/catalyst
@@ -4,28 +4,28 @@ import sys
# This block ensures that ^C interrupts are handled quietly.
try:
- import signal
+ import signal
- def exithandler(_signum, _frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- print()
- sys.exit(1)
+ def exithandler(_signum, _frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ print()
+ sys.exit(1)
- signal.signal(signal.SIGINT, exithandler)
- signal.signal(signal.SIGTERM, exithandler)
- signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+ signal.signal(signal.SIGINT, exithandler)
+ signal.signal(signal.SIGTERM, exithandler)
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except KeyboardInterrupt:
- print()
- sys.exit(1)
+ print()
+ sys.exit(1)
from catalyst.main import main
try:
- main(sys.argv[1:])
+ main(sys.argv[1:])
except KeyboardInterrupt:
- print("Aborted.")
- sys.exit(130)
+ print("Aborted.")
+ sys.exit(130)
sys.exit(0)
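The exit codes above follow the usual shell conventions: 0 on success,
130 (128 + SIGINT) when the build is interrupted, and 1 for an
interrupt that lands before the handlers are installed. A hypothetical
driver script can key off that:

    import subprocess

    # Hypothetical wrapper; 'stage3.spec' is an illustrative spec file.
    ret = subprocess.run(['catalyst', '-f', 'stage3.spec']).returncode
    if ret == 130:
        print('catalyst build was interrupted')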
diff --git a/bin/pylint b/bin/pylint
index 98108f5f..183e24be 100755
--- a/bin/pylint
+++ b/bin/pylint
@@ -9,38 +9,39 @@ import sys
def find_all_modules(source_root):
- """Locate all python modules in the tree for scanning"""
- ret = []
+ """Locate all python modules in the tree for scanning"""
+ ret = []
- for root, _dirs, files in os.walk(source_root, topdown=False):
- # Add all of the .py modules in the tree.
- ret += [os.path.join(root, x) for x in files if x.endswith('.py')]
+ for root, _dirs, files in os.walk(source_root, topdown=False):
+ # Add all of the .py modules in the tree.
+ ret += [os.path.join(root, x) for x in files if x.endswith('.py')]
- # Add the main scripts that don't end in .py.
- ret += [os.path.join(source_root, 'bin', x) for x in ('catalyst', 'pylint')]
+ # Add the main scripts that don't end in .py.
+ ret += [os.path.join(source_root, 'bin', x)
+ for x in ('catalyst', 'pylint')]
- return ret
+ return ret
def main(argv):
- """The main entry point"""
- source_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+ """The main entry point"""
+ source_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
- if not argv:
- argv = find_all_modules(source_root)
+ if not argv:
+ argv = find_all_modules(source_root)
- pympath = source_root
- pythonpath = os.environ.get('PYTHONPATH')
- if pythonpath is None:
- pythonpath = pympath
- else:
- pythonpath = pympath + ':' + pythonpath
- os.environ['PYTHONPATH'] = pythonpath
+ pympath = source_root
+ pythonpath = os.environ.get('PYTHONPATH')
+ if pythonpath is None:
+ pythonpath = pympath
+ else:
+ pythonpath = pympath + ':' + pythonpath
+ os.environ['PYTHONPATH'] = pythonpath
- pylintrc = os.path.join(source_root, '.pylintrc')
- cmd = ['pylint', '--rcfile', pylintrc]
- os.execvp(cmd[0], cmd + argv)
+ pylintrc = os.path.join(source_root, '.pylintrc')
+ cmd = ['pylint', '--rcfile', pylintrc]
+ os.execvp(cmd[0], cmd + argv)
if __name__ == '__main__':
- main(sys.argv[1:])
+ main(sys.argv[1:])
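As the reformatted script shows, running it with no arguments lints
everything find_all_modules() discovers, while explicit paths narrow
the scan; the wrapper also prepends the source root to PYTHONPATH so
the in-tree catalyst package wins over any installed copy:

    ./bin/pylint                     # lint the whole tree
    ./bin/pylint catalyst/main.py    # lint a single module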
diff --git a/catalyst/__init__.py b/catalyst/__init__.py
index 7bc28970..143bdf81 100644
--- a/catalyst/__init__.py
+++ b/catalyst/__init__.py
@@ -3,8 +3,8 @@
__maintainer__ = 'Catalyst <catalyst@gentoo.org>'
try:
- from .verinfo import version as fullversion
- __version__ = fullversion.split('\n')[0].split()[1]
+ from .verinfo import version as fullversion
+ __version__ = fullversion.split('\n')[0].split()[1]
except ImportError:
- from .version import get_version, __version__
- fullversion = get_version(reset=True)
+ from .version import get_version, __version__
+ fullversion = get_version(reset=True)
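The fallback parse above implies that verinfo's version string begins
with a line of the form '<name> <version>', since __version__ is taken
as the second whitespace-separated token of the first line. A
hypothetical generated verinfo.py that satisfies that parse:

    # verinfo.py is generated at release time; contents here are illustrative.
    version = 'Catalyst 3.0.8\nFrom git commit 32d11d79...'

which yields __version__ == '3.0.8'.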
diff --git a/catalyst/base/clearbase.py b/catalyst/base/clearbase.py
index 8dfffb06..dcf6c523 100644
--- a/catalyst/base/clearbase.py
+++ b/catalyst/base/clearbase.py
@@ -4,66 +4,61 @@ from catalyst import log
from catalyst.support import countdown
from catalyst.fileops import clear_dir
-class ClearBase():
- """
- This class does all of clearing after task completion
- """
- def __init__(self, myspec):
- self.settings = myspec
- self.resume = None
-
-
- def clear_autoresume(self):
- """ Clean resume points since they are no longer needed """
- if "autoresume" in self.settings["options"]:
- log.notice('Removing AutoResume Points ...')
- self.resume.clear_all()
-
-
- def remove_autoresume(self):
- """ Rmove all resume points since they are no longer needed """
- if "autoresume" in self.settings["options"]:
- log.notice('Removing AutoResume ...')
- self.resume.clear_all(remove=True)
-
-
- def clear_chroot(self):
- self.chroot_lock.unlock()
- log.notice('Clearing the chroot path ...')
- clear_dir(self.settings["chroot_path"], mode=0o755)
-
-
- def remove_chroot(self):
- self.chroot_lock.unlock()
- log.notice('Removing the chroot path ...')
- clear_dir(self.settings["chroot_path"], mode=0o755, remove=True)
-
- def clear_packages(self, remove=False):
- if "pkgcache" in self.settings["options"]:
- log.notice('purging the pkgcache ...')
- clear_dir(self.settings["pkgcache_path"], remove=remove)
-
-
- def clear_kerncache(self, remove=False):
- if "kerncache" in self.settings["options"]:
- log.notice('purging the kerncache ...')
- clear_dir(self.settings["kerncache_path"], remove=remove)
-
-
- def purge(self, remove=False):
- countdown(10,"Purging Caches ...")
- if any(k in self.settings["options"] for k in ("purge",
- "purgeonly", "purgetmponly")):
- log.notice('purge(); clearing autoresume ...')
- self.clear_autoresume()
-
- log.notice('purge(); clearing chroot ...')
- self.clear_chroot()
-
- if "purgetmponly" not in self.settings["options"]:
- log.notice('purge(); clearing package cache ...')
- self.clear_packages(remove)
-
- log.notice('purge(); clearing kerncache ...')
- self.clear_kerncache(remove)
+class ClearBase():
+ """
+ This class does all of the clearing after task completion
+ """
+
+ def __init__(self, myspec):
+ self.settings = myspec
+ self.resume = None
+
+ def clear_autoresume(self):
+ """ Clean resume points since they are no longer needed """
+ if "autoresume" in self.settings["options"]:
+ log.notice('Removing AutoResume Points ...')
+ self.resume.clear_all()
+
+ def remove_autoresume(self):
+ """ Rmove all resume points since they are no longer needed """
+ if "autoresume" in self.settings["options"]:
+ log.notice('Removing AutoResume ...')
+ self.resume.clear_all(remove=True)
+
+ def clear_chroot(self):
+ self.chroot_lock.unlock()
+ log.notice('Clearing the chroot path ...')
+ clear_dir(self.settings["chroot_path"], mode=0o755)
+
+ def remove_chroot(self):
+ self.chroot_lock.unlock()
+ log.notice('Removing the chroot path ...')
+ clear_dir(self.settings["chroot_path"], mode=0o755, remove=True)
+
+ def clear_packages(self, remove=False):
+ if "pkgcache" in self.settings["options"]:
+ log.notice('purging the pkgcache ...')
+ clear_dir(self.settings["pkgcache_path"], remove=remove)
+
+ def clear_kerncache(self, remove=False):
+ if "kerncache" in self.settings["options"]:
+ log.notice('purging the kerncache ...')
+ clear_dir(self.settings["kerncache_path"], remove=remove)
+
+ def purge(self, remove=False):
+ countdown(10, "Purging Caches ...")
+ if any(k in self.settings["options"] for k in ("purge",
+ "purgeonly", "purgetmponly")):
+ log.notice('purge(); clearing autoresume ...')
+ self.clear_autoresume()
+
+ log.notice('purge(); clearing chroot ...')
+ self.clear_chroot()
+
+ if "purgetmponly" not in self.settings["options"]:
+ log.notice('purge(); clearing package cache ...')
+ self.clear_packages(remove)
+
+ log.notice('purge(); clearing kerncache ...')
+ self.clear_kerncache(remove)
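A minimal usage sketch, assuming a settings dict shaped like the one
StageBase builds (keys and paths here are illustrative; purge() and
the chroot methods additionally need chroot_lock and resume set up):

    from catalyst.base.clearbase import ClearBase

    settings = {
        'options': {'pkgcache'},
        'pkgcache_path': '/var/tmp/catalyst/packages/default/stage3-amd64',
        'kerncache_path': '/var/tmp/catalyst/kerncache/default/stage3-amd64',
    }
    cb = ClearBase(settings)
    cb.clear_packages()    # clears pkgcache_path ('pkgcache' is in options)
    cb.clear_kerncache()   # no-op here: 'kerncache' is not in options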
diff --git a/catalyst/base/genbase.py b/catalyst/base/genbase.py
index d6fea954..1164fd55 100644
--- a/catalyst/base/genbase.py
+++ b/catalyst/base/genbase.py
@@ -4,50 +4,50 @@ import os
class GenBase():
- """
- This class does generation of the contents and digests files.
- """
- def __init__(self,myspec):
- self.settings = myspec
+ """
+ This class generates the contents and digests files.
+ """
+ def __init__(self, myspec):
+ self.settings = myspec
- def gen_contents_file(self, path):
- contents = path + ".CONTENTS"
- if os.path.exists(contents):
- os.remove(contents)
- if "contents" in self.settings:
- contents_map = self.settings["contents_map"]
- if os.path.exists(path):
- with io.open(contents, "w", encoding='utf-8') as myf:
- keys={}
- for i in self.settings["contents"].split():
- keys[i]=1
- array = sorted(keys.keys())
- for j in array:
- contents = contents_map.contents(path, j,
- verbose=self.settings["VERBOSE"])
- if contents:
- myf.write(contents)
+ def gen_contents_file(self, path):
+ contents = path + ".CONTENTS"
+ if os.path.exists(contents):
+ os.remove(contents)
+ if "contents" in self.settings:
+ contents_map = self.settings["contents_map"]
+ if os.path.exists(path):
+ with io.open(contents, "w", encoding='utf-8') as myf:
+ keys = {}
+ for i in self.settings["contents"].split():
+ keys[i] = 1
+ array = sorted(keys.keys())
+ for j in array:
+ contents = contents_map.contents(path, j,
+ verbose=self.settings["VERBOSE"])
+ if contents:
+ myf.write(contents)
- def gen_digest_file(self, path):
- digests = path + ".DIGESTS"
- if os.path.exists(digests):
- os.remove(digests)
- if "digests" in self.settings:
- hash_map = self.settings["hash_map"]
- if os.path.exists(path):
- with io.open(digests, "w", encoding='utf-8') as myf:
- keys={}
- for i in self.settings["digests"].split():
- keys[i]=1
- array = sorted(keys.keys())
- for f in [path, path + '.CONTENTS']:
- if os.path.exists(f):
- if "all" in array:
- for k in list(hash_map.hash_map):
- digest = hash_map.generate_hash(f, hash_=k)
- myf.write(digest)
- else:
- for j in array:
- digest = hash_map.generate_hash(f, hash_=j)
- myf.write(digest)
+ def gen_digest_file(self, path):
+ digests = path + ".DIGESTS"
+ if os.path.exists(digests):
+ os.remove(digests)
+ if "digests" in self.settings:
+ hash_map = self.settings["hash_map"]
+ if os.path.exists(path):
+ with io.open(digests, "w", encoding='utf-8') as myf:
+ keys = {}
+ for i in self.settings["digests"].split():
+ keys[i] = 1
+ array = sorted(keys.keys())
+ for f in [path, path + '.CONTENTS']:
+ if os.path.exists(f):
+ if "all" in array:
+ for k in list(hash_map.hash_map):
+ digest = hash_map.generate_hash(f, hash_=k)
+ myf.write(digest)
+ else:
+ for j in array:
+ digest = hash_map.generate_hash(f, hash_=j)
+ myf.write(digest)
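The digest loop above delegates the actual hashing and formatting to
catalyst/hash_utils.py, which is not part of this hunk. A standalone
hashlib sketch of the same shape, with an assumed output format (the
real one comes from HASH_DEFINITIONS):

    import hashlib

    def generate_hash(path, hash_='sha512'):
        # Output format is assumed; catalyst's real formatting lives in
        # HASH_DEFINITIONS in catalyst/hash_utils.py.
        h = hashlib.new(hash_)
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                h.update(chunk)
        return '# %s HASH\n%s  %s\n' % (hash_.upper(), h.hexdigest(), path)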
diff --git a/catalyst/base/resume.py b/catalyst/base/resume.py
index 82f97cd3..977545e5 100644
--- a/catalyst/base/resume.py
+++ b/catalyst/base/resume.py
@@ -16,118 +16,109 @@ from catalyst.fileops import ensure_dirs, clear_dir
class AutoResume():
- '''Class for tracking and handling all aspects of
- the autoresume option and related files.
- '''
-
-
- def __init__(self, basedir, mode=0o755):
- self.basedir = basedir
- ensure_dirs(basedir, mode=mode, fatal=True)
- self._points = {}
- self._init_points_()
-
-
- def _init_points_(self):
- '''Internal function which reads the autoresume directory and
- for existing autoresume points and adds them to our _points variable
- '''
- existing = listdir_files(self.basedir, False)
- for point in existing:
- self._points[point] = pjoin(self.basedir, point)
-
-
- def enable(self, point, data=None):
- '''Sets the resume point 'ON'
-
- @param point: string. name of the resume point to enable
- @param data: string of information to store, or None
- @return boolean
- '''
- if point in self._points and not data:
- return True
- fname = pjoin(self.basedir, point)
- if data:
- with open(fname,"w") as myf:
- myf.write(data)
- else:
- try:
- fileutils.touch(fname)
- self._points[point] = fname
- except Exception as e:
- log.error('AutoResumeError: %s', e)
- return False
- return True
-
-
- def get(self, point, no_lf=True):
- '''Gets any data stored inside a resume point
-
- @param point: string. name of the resume point to enable
- @return data: string of information stored, or None
- '''
- if point in self._points:
- try:
- with open(self._points[point], 'r') as myf:
- data = myf.read()
- if data and no_lf:
- data = data.replace('\n', ' ')
- except OSError as e:
- log.error('AutoResumeError: %s', e)
- return None
- return data
- return None
-
-
- def disable(self, point):
- '''Sets the resume point 'OFF'
-
- @param point: string. name of the resume point to disable
- @return boolean
- '''
- if point not in self._points:
- return True
- try:
- os.unlink(self._points[point])
- self._points.pop(point)
- except Exception as e:
- log.error('AutoResumeError: %s', e)
- return False
- return True
-
-
- def is_enabled(self, point):
- '''Returns True if the resume point 'ON'
-
- @param point: string. name of the resume point enabled
- @return boolean
- '''
- return point in self._points
-
-
- def is_disabled(self, point):
- '''Returns True if the resume point 'OFF'
-
- @param point: string. name of the resume point not enabled
- @return boolean
- '''
- return point not in self._points
-
-
- @property
- def enabled(self):
- '''Returns a list of enabled points
- '''
- return list(self._points)
-
-
- def clear_all(self, remove=False):
- '''Clear all active resume points
-
- @remove: boolean, passed through to clear_dir()
- @return boolean
- '''
- if clear_dir(self.basedir, mode=0o755, remove=remove):
- self._points = {}
- return True
- return False
+ '''Class for tracking and handling all aspects of
+ the autoresume option and related files.
+ '''
+
+ def __init__(self, basedir, mode=0o755):
+ self.basedir = basedir
+ ensure_dirs(basedir, mode=mode, fatal=True)
+ self._points = {}
+ self._init_points_()
+
+ def _init_points_(self):
+ '''Internal function which reads the autoresume directory
+ for existing autoresume points and adds them to our _points variable
+ '''
+ existing = listdir_files(self.basedir, False)
+ for point in existing:
+ self._points[point] = pjoin(self.basedir, point)
+
+ def enable(self, point, data=None):
+ '''Sets the resume point 'ON'
+
+ @param point: string. name of the resume point to enable
+ @param data: string of information to store, or None
+ @return boolean
+ '''
+ if point in self._points and not data:
+ return True
+ fname = pjoin(self.basedir, point)
+ if data:
+ with open(fname, "w") as myf:
+ myf.write(data)
+ else:
+ try:
+ fileutils.touch(fname)
+ self._points[point] = fname
+ except Exception as e:
+ log.error('AutoResumeError: %s', e)
+ return False
+ return True
+
+ def get(self, point, no_lf=True):
+ '''Gets any data stored inside a resume point
+
+ @param point: string. name of the resume point to read
+ @return data: string of information stored, or None
+ '''
+ if point in self._points:
+ try:
+ with open(self._points[point], 'r') as myf:
+ data = myf.read()
+ if data and no_lf:
+ data = data.replace('\n', ' ')
+ except OSError as e:
+ log.error('AutoResumeError: %s', e)
+ return None
+ return data
+ return None
+
+ def disable(self, point):
+ '''Sets the resume point 'OFF'
+
+ @param point: string. name of the resume point to disable
+ @return boolean
+ '''
+ if point not in self._points:
+ return True
+ try:
+ os.unlink(self._points[point])
+ self._points.pop(point)
+ except Exception as e:
+ log.error('AutoResumeError: %s', e)
+ return False
+ return True
+
+ def is_enabled(self, point):
+ '''Returns True if the resume point is 'ON'
+
+ @param point: string. name of the resume point enabled
+ @return boolean
+ '''
+ return point in self._points
+
+ def is_disabled(self, point):
+ '''Returns True if the resume point is 'OFF'
+
+ @param point: string. name of the resume point not enabled
+ @return boolean
+ '''
+ return point not in self._points
+
+ @property
+ def enabled(self):
+ '''Returns a list of enabled points
+ '''
+ return list(self._points)
+
+ def clear_all(self, remove=False):
+ '''Clear all active resume points
+
+ @remove: boolean, passed through to clear_dir()
+ @return boolean
+ '''
+ if clear_dir(self.basedir, mode=0o755, remove=remove):
+ self._points = {}
+ return True
+ return False
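A short usage sketch of the reformatted class (the directory path is
illustrative):

    from catalyst.base.resume import AutoResume

    resume = AutoResume('/var/tmp/catalyst/tmp/default/.autoresume-stage3')
    resume.enable('chroot_setup')            # touch-file style point
    resume.is_enabled('chroot_setup')        # -> True
    resume.enable('unpack', data='<hash>')   # writes a payload to disk; note
                                             # it only enters _points on the
                                             # next run, via _init_points_()
    resume.disable('chroot_setup')
    resume.clear_all(remove=True)            # wipe the whole resume directory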
diff --git a/catalyst/base/stagebase.py b/catalyst/base/stagebase.py
index d993641f..d1e638be 100644
--- a/catalyst/base/stagebase.py
+++ b/catalyst/base/stagebase.py
@@ -13,9 +13,9 @@ from DeComp.compress import CompressMap
from catalyst import log
from catalyst.defaults import (SOURCE_MOUNT_DEFAULTS, TARGET_MOUNT_DEFAULTS,
- PORT_LOGDIR_CLEAN)
+ PORT_LOGDIR_CLEAN)
from catalyst.support import (CatalystError, file_locate, normpath,
- cmd, read_makeconf, ismount, file_check)
+ cmd, read_makeconf, ismount, file_check)
from catalyst.base.targetbase import TargetBase
from catalyst.base.clearbase import ClearBase
from catalyst.base.genbase import GenBase
@@ -25,1647 +25,1697 @@ from catalyst.base.resume import AutoResume
class StageBase(TargetBase, ClearBase, GenBase):
- """
- This class does all of the chroot setup, copying of files, etc. It is
- the driver class for pretty much everything that Catalyst does.
- """
- def __init__(self,myspec,addlargs):
- self.required_values |= frozenset([
- "profile",
- "rel_type",
- "snapshot",
- "source_subpath",
- "subarch",
- "target",
- "version_stamp",
- ])
- self.valid_values |= self.required_values | frozenset([
- "asflags",
- "catalyst_use",
- "cbuild",
- "cflags",
- "common_flags",
- "compression_mode",
- "cxxflags",
- "decompression_mode",
- "distcc_hosts",
- "fcflags",
- "fflags",
- "hostuse",
- "kerncache_path",
- "ldflags",
- "makeopts",
- "pkgcache_path",
- "portage_confdir",
- "portage_overlay",
- "portage_prefix",
- ])
-
- self.set_valid_build_kernel_vars(addlargs)
- TargetBase.__init__(self, myspec, addlargs)
- GenBase.__init__(self, myspec)
- ClearBase.__init__(self, myspec)
-
- self.makeconf = {}
-
- if "chost" in self.settings:
- host = self.settings["chost"].split("-")[0]
- else:
- host = self.settings["subarch"]
- self.settings["hostarch"] = host
-
- if "cbuild" in self.settings:
- build = self.settings["cbuild"].split("-")[0]
- else:
- build = platform.machine()
- self.settings["buildarch"] = build
-
- arch_dir = normpath(self.settings['sharedir'] + '/arch/')
-
- log.debug("Searching arch definitions...")
- for x in [x for x in os.listdir(arch_dir) if x.endswith('.toml')]:
- log.debug("\tTrying %s", x)
- name = x[:-len('.toml')]
-
- with open(arch_dir + x) as file:
- arch_config = toml.load(file)
-
- # Search for a subarchitecture in each arch in the arch_config
- for arch in [x for x in arch_config if x.startswith(name) and host in arch_config[x]]:
- self.settings.update(arch_config[arch][host])
- setarch = arch_config.get('setarch', {})
- break
- else:
- # Didn't find a matching subarchitecture, keep searching
- continue
-
- break
- else:
- raise CatalystError("Unknown host machine type " + host)
-
- if setarch.get('if_build', '') == platform.machine():
- chroot = f'setarch {setarch["arch"]} chroot'
- else:
- chroot = 'chroot'
- self.settings["CHROOT"] = chroot
-
- self.settings["crosscompile"] = \
- build != host and not chroot.startswith('setarch')
-
- log.notice('Using target: %s', self.settings['target'])
- # Print a nice informational message
- if self.settings["crosscompile"]:
- log.info('Cross-compiling on %s for different machine type %s',
- build, host)
- elif chroot.startswith('setarch'):
- log.info('Building on %s for alternate personality type %s',
- build, host)
- else:
- log.info('Building natively for %s', host)
-
- # This must be set first as other set_ options depend on this
- self.set_spec_prefix()
-
- # Initialize our (de)compressors
- self.decompressor = CompressMap(self.settings["decompress_definitions"],
- env = self.env,
- search_order = self.settings["decompressor_search_order"],
- comp_prog = self.settings["comp_prog"],
- decomp_opt = self.settings["decomp_opt"])
- self.accepted_extensions = self.decompressor.search_order_extensions(
- self.settings["decompressor_search_order"])
- log.notice("Source file specification matching setting is: %s",
- self.settings["source_matching"])
- log.notice("Accepted source file extensions search order: %s",
- self.accepted_extensions)
- # save resources, it is not always needed
- self.compressor = None
-
- # Define all of our core variables
- self.set_target_profile()
- self.set_target_subpath()
- self.set_source_subpath()
-
- # Set paths
- self.set_snapshot_path()
- self.set_root_path()
- self.set_source_path()
- self.set_snapcache_path()
- self.set_chroot_path()
- self.set_autoresume_path()
- self.set_dest_path()
- self.set_stage_path()
- self.set_target_path()
-
- self.set_controller_file()
- self.set_default_action_sequence()
- self.set_use()
- self.set_catalyst_use()
- self.set_cleanables()
- self.set_iso_volume_id()
- self.set_build_kernel_vars()
- self.set_fsscript()
- self.set_install_mask()
- self.set_rcadd()
- self.set_rcdel()
- self.set_cdtar()
- self.set_fstype()
- self.set_fsops()
- self.set_iso()
- self.set_packages()
- self.set_rm()
- self.set_linuxrc()
- self.set_busybox_config()
- self.set_overlay()
- self.set_portage_overlay()
- self.set_root_overlay()
-
- # This next line checks to make sure that the specified variables exist on disk.
- #pdb.set_trace()
- file_locate(self.settings, ["distdir"], expand = 0)
- # If we are using portage_confdir, check that as well.
- if "portage_confdir" in self.settings:
- file_locate(self.settings, ["portage_confdir"], expand = 0)
-
- # Setup our mount points.
- # initialize our target mounts.
- self.target_mounts = TARGET_MOUNT_DEFAULTS.copy()
-
- self.mounts = ["proc", "dev", "portdir", "distdir", "port_tmpdir"]
- # initialize our source mounts
- self.mountmap = SOURCE_MOUNT_DEFAULTS.copy()
- # update these from settings
- self.mountmap["portdir"] = self.settings["portdir"]
- self.mountmap["distdir"] = self.settings["distdir"]
- self.target_mounts["portdir"] = normpath(self.settings["repo_basedir"] +
- "/" + self.settings["repo_name"])
- self.target_mounts["distdir"] = self.settings["target_distdir"]
- self.target_mounts["packagedir"] = self.settings["target_pkgdir"]
- if "snapcache" not in self.settings["options"]:
- self.mounts.remove("portdir")
- self.mountmap["portdir"] = None
- else:
- self.mountmap["portdir"] = normpath("/".join([
- self.settings["snapshot_cache_path"],
- self.settings["repo_name"],
- ]))
- self.mounts.append("devpts")
- self.mounts.append("shm")
- self.mounts.append("run")
-
- self.set_mounts()
-
- # Configure any user specified options (either in catalyst.conf or on
- # the command line).
- if "pkgcache" in self.settings["options"]:
- self.set_pkgcache_path()
- log.info('Location of the package cache is %s', self.settings['pkgcache_path'])
- self.mounts.append("packagedir")
- self.mountmap["packagedir"] = self.settings["pkgcache_path"]
-
- if "kerncache" in self.settings["options"]:
- self.set_kerncache_path()
- log.info('Location of the kerncache is %s', self.settings['kerncache_path'])
- self.mounts.append("kerncache")
- self.mountmap["kerncache"] = self.settings["kerncache_path"]
-
- if "ccache" in self.settings["options"]:
- if "CCACHE_DIR" in os.environ:
- ccdir = os.environ["CCACHE_DIR"]
- del os.environ["CCACHE_DIR"]
- else:
- ccdir = "/var/tmp/ccache"
- if not os.path.isdir(ccdir):
- raise CatalystError(
- "Compiler cache support can't be enabled (can't find " + ccdir+")")
- self.mounts.append("ccache")
- self.mountmap["ccache"] = ccdir
- # for the chroot:
- self.env["CCACHE_DIR"] = self.target_mounts["ccache"]
-
- if "icecream" in self.settings["options"]:
- self.mounts.append("icecream")
- self.mountmap["icecream"] = self.settings["icecream"]
- self.env["PATH"] = self.target_mounts["icecream"] + ":" + self.env["PATH"]
-
- if "port_logdir" in self.settings:
- self.mounts.append("port_logdir")
- self.mountmap["port_logdir"] = self.settings["port_logdir"]
- self.env["PORT_LOGDIR"] = self.settings["port_logdir"]
- self.env["PORT_LOGDIR_CLEAN"] = PORT_LOGDIR_CLEAN
-
- def override_cbuild(self):
- if "CBUILD" in self.makeconf:
- self.settings["CBUILD"] = self.makeconf["CBUILD"]
-
- def override_chost(self):
- if "CHOST" in self.makeconf:
- self.settings["CHOST"] = self.makeconf["CHOST"]
-
- def override_cflags(self):
- if "CFLAGS" in self.makeconf:
- self.settings["CFLAGS"] = self.makeconf["CFLAGS"]
-
- def override_cxxflags(self):
- if "CXXFLAGS" in self.makeconf:
- self.settings["CXXFLAGS"] = self.makeconf["CXXFLAGS"]
-
- def override_fcflags(self):
- if "FCFLAGS" in self.makeconf:
- self.settings["FCFLAGS"] = self.makeconf["FCFLAGS"]
-
- def override_fflags(self):
- if "FFLAGS" in self.makeconf:
- self.settings["FFLAGS"] = self.makeconf["FFLAGS"]
-
- def override_ldflags(self):
- if "LDFLAGS" in self.makeconf:
- self.settings["LDFLAGS"] = self.makeconf["LDFLAGS"]
-
- def override_asflags(self):
- if "ASFLAGS" in self.makeconf:
- self.settings["ASFLAGS"] = self.makeconf["ASFLAGS"]
-
- def override_common_flags(self):
- if "COMMON_FLAGS" in self.makeconf:
- self.settings["COMMON_FLAGS"] = self.makeconf["COMMON_FLAGS"]
-
- def set_install_mask(self):
- if "install_mask" in self.settings:
- if not isinstance(self.settings['install_mask'], str):
- self.settings["install_mask"] = \
- ' '.join(self.settings["install_mask"])
-
- def set_spec_prefix(self):
- self.settings["spec_prefix"] = self.settings["target"]
-
- def set_target_profile(self):
- self.settings["target_profile"] = self.settings["profile"]
-
- def set_target_subpath(self):
- common = self.settings["rel_type"] + "/" + \
- self.settings["target"] + "-" + self.settings["subarch"]
- self.settings["target_subpath"] = \
- common + \
- "-" + self.settings["version_stamp"] + \
- "/"
- self.settings["target_subpath_unversioned"] = \
- common + \
- "/"
-
- def set_source_subpath(self):
- if not isinstance(self.settings['source_subpath'], str):
- raise CatalystError(
- "source_subpath should have been a string. Perhaps you have " + \
- "something wrong in your spec file?")
-
- def set_pkgcache_path(self):
- if "pkgcache_path" in self.settings:
- if not isinstance(self.settings['pkgcache_path'], str):
- self.settings["pkgcache_path"] = \
- normpath(self.settings["pkgcache_path"])
- elif "versioned_cache" in self.settings["options"]:
- self.settings["pkgcache_path"] = \
- normpath(self.settings["storedir"] + "/packages/" + \
- self.settings["target_subpath"] + "/")
- else:
- self.settings["pkgcache_path"] = \
- normpath(self.settings["storedir"] + "/packages/" + \
- self.settings["target_subpath_unversioned"] + "/")
-
- def set_kerncache_path(self):
- if "kerncache_path" in self.settings:
- if not isinstance(self.settings['kerncache_path'], str):
- self.settings["kerncache_path"] = \
- normpath(self.settings["kerncache_path"])
- elif "versioned_cache" in self.settings["options"]:
- self.settings["kerncache_path"] = normpath(self.settings["storedir"] + \
- "/kerncache/" + self.settings["target_subpath"])
- else:
- self.settings["kerncache_path"] = normpath(self.settings["storedir"] + \
- "/kerncache/" + self.settings["target_subpath_unversioned"])
-
- def set_target_path(self):
- self.settings["target_path"] = normpath(self.settings["storedir"] + \
- "/builds/" + self.settings["target_subpath"])
- if "autoresume" in self.settings["options"]\
- and self.resume.is_enabled("setup_target_path"):
- log.notice('Resume point detected, skipping target path setup operation...')
- else:
- self.resume.enable("setup_target_path")
- ensure_dirs(self.settings["storedir"] + "/builds")
-
- def set_fsscript(self):
- if self.settings["spec_prefix"] + "/fsscript" in self.settings:
- self.settings["fsscript"] = \
- self.settings[self.settings["spec_prefix"] + "/fsscript"]
- del self.settings[self.settings["spec_prefix"] + "/fsscript"]
-
- def set_rcadd(self):
- if self.settings["spec_prefix"] + "/rcadd" in self.settings:
- self.settings["rcadd"] = \
- self.settings[self.settings["spec_prefix"] + "/rcadd"]
- del self.settings[self.settings["spec_prefix"] + "/rcadd"]
-
- def set_rcdel(self):
- if self.settings["spec_prefix"] + "/rcdel" in self.settings:
- self.settings["rcdel"] = \
- self.settings[self.settings["spec_prefix"] + "/rcdel"]
- del self.settings[self.settings["spec_prefix"] + "/rcdel"]
-
- def set_cdtar(self):
- if self.settings["spec_prefix"] + "/cdtar" in self.settings:
- self.settings["cdtar"] = \
- normpath(self.settings[self.settings["spec_prefix"] + "/cdtar"])
- del self.settings[self.settings["spec_prefix"] + "/cdtar"]
-
- def set_iso(self):
- if self.settings["spec_prefix"] + "/iso" in self.settings:
- if self.settings[self.settings["spec_prefix"] + "/iso"].startswith('/'):
- self.settings["iso"] = \
- normpath(self.settings[self.settings["spec_prefix"] + "/iso"])
- else:
- # This automatically prepends the build dir to the ISO output path
- # if it doesn't start with a /
- self.settings["iso"] = normpath(self.settings["storedir"] + \
- "/builds/" + self.settings["rel_type"] + "/" + \
- self.settings[self.settings["spec_prefix"] + "/iso"])
- del self.settings[self.settings["spec_prefix"] + "/iso"]
-
- def set_fstype(self):
- if self.settings["spec_prefix"] + "/fstype" in self.settings:
- self.settings["fstype"] = \
- self.settings[self.settings["spec_prefix"] + "/fstype"]
- del self.settings[self.settings["spec_prefix"] + "/fstype"]
-
- if "fstype" not in self.settings:
- self.settings["fstype"] = "normal"
- for x in self.valid_values:
- if x == self.settings["spec_prefix"] + "/fstype":
- log.info('%s/fstype is being set to the default of "normal"',
- self.settings['spec_prefix'])
-
- def set_fsops(self):
- if "fstype" in self.settings:
- self.valid_values |= {"fsops"}
- if self.settings["spec_prefix"] + "/fsops" in self.settings:
- self.settings["fsops"] = \
- self.settings[self.settings["spec_prefix"] + "/fsops"]
- del self.settings[self.settings["spec_prefix"] + "/fsops"]
-
- def set_source_path(self):
- if "seedcache" in self.settings["options"]\
- and os.path.isdir(normpath(self.settings["storedir"] + "/tmp/" +
- self.settings["source_subpath"] + "/")):
- self.settings["source_path"] = normpath(self.settings["storedir"] +
- "/tmp/" + self.settings["source_subpath"] + "/")
- log.debug("source_subpath is: %s", self.settings["source_path"])
- else:
- log.debug('Checking source path existence and '
- 'get the final filepath. subpath: %s',
- self.settings["source_subpath"])
- self.settings["source_path"] = file_check(
- normpath(self.settings["storedir"] + "/builds/" +
- self.settings["source_subpath"]),
- self.accepted_extensions,
- self.settings["source_matching"] in ["strict"]
- )
- log.debug('Source path returned from file_check is: %s',
- self.settings["source_path"])
- if os.path.isfile(self.settings["source_path"]):
- # XXX: Is this even necessary if the previous check passes?
- if os.path.exists(self.settings["source_path"]):
- self.settings["source_path_hash"] = \
- self.settings["hash_map"].generate_hash(
- self.settings["source_path"],
- hash_ = self.settings["hash_function"])
- log.notice('Source path set to %s', self.settings['source_path'])
-
- def set_dest_path(self):
- if "root_path" in self.settings:
- self.settings["destpath"] = normpath(self.settings["chroot_path"] +
- self.settings["root_path"])
- else:
- self.settings["destpath"] = normpath(self.settings["chroot_path"])
-
- def set_cleanables(self):
- self.settings["cleanables"] = ["/etc/resolv.conf", "/var/tmp/*", "/tmp/*",
- self.settings["repo_basedir"] + "/" +
- self.settings["repo_name"]]
-
- def set_snapshot_path(self):
- self.settings["snapshot_path"] = file_check(
- normpath(self.settings["storedir"] +
- "/snapshots/" + self.settings["snapshot_name"] +
- self.settings["snapshot"]),
- self.accepted_extensions,
- self.settings["source_matching"] == "strict"
- )
- log.info('SNAPSHOT_PATH set to: %s', self.settings['snapshot_path'])
- self.settings["snapshot_path_hash"] = \
- self.settings["hash_map"].generate_hash(
- self.settings["snapshot_path"],
- hash_ = self.settings["hash_function"])
-
- def set_snapcache_path(self):
- self.settings["snapshot_cache_path"] = \
- normpath(pjoin(self.settings["snapshot_cache"],
- self.settings["snapshot"]))
- if "snapcache" in self.settings["options"]:
- self.settings["snapshot_cache_path"] = \
- normpath(pjoin(self.settings["snapshot_cache"],
- self.settings["snapshot"]))
- self.snapcache_lock = \
- LockDir(self.settings["snapshot_cache_path"])
- log.info('Setting snapshot cache to %s', self.settings['snapshot_cache_path'])
-
- def set_chroot_path(self):
- """
- NOTE: the trailing slash has been removed
- Things *could* break if you don't use a proper join()
- """
- self.settings["chroot_path"] = normpath(self.settings["storedir"] +
- "/tmp/" + self.settings["target_subpath"].rstrip('/'))
- self.chroot_lock = LockDir(self.settings["chroot_path"])
-
- def set_autoresume_path(self):
- self.settings["autoresume_path"] = normpath(pjoin(
- self.settings["storedir"], "tmp", self.settings["rel_type"],
- ".autoresume-%s-%s-%s"
- %(self.settings["target"], self.settings["subarch"],
- self.settings["version_stamp"])
- ))
- if "autoresume" in self.settings["options"]:
- log.info('The autoresume path is %s', self.settings['autoresume_path'])
- self.resume = AutoResume(self.settings["autoresume_path"], mode=0o755)
-
- def set_controller_file(self):
- self.settings["controller_file"] = normpath(self.settings["sharedir"] +
- "/targets/" + self.settings["target"] + "/" + "controller.sh")
-
- def set_iso_volume_id(self):
- if self.settings["spec_prefix"] + "/volid" in self.settings:
- self.settings["iso_volume_id"] = \
- self.settings[self.settings["spec_prefix"] + "/volid"]
- if len(self.settings["iso_volume_id"]) > 32:
- raise CatalystError(
- "ISO volume ID must not exceed 32 characters.")
- else:
- self.settings["iso_volume_id"] = "catalyst "+self.settings["snapshot"]
-
- def set_default_action_sequence(self):
- """ Default action sequence for run method.
-
- This method sets the optional purgeonly action sequence and returns.
- Or it calls the normal set_action_sequence() for the target stage.
- """
- if "purgeonly" in self.settings["options"]:
- self.settings["action_sequence"] = ["remove_chroot"]
- return
- self.set_action_sequence()
-
- def set_action_sequence(self):
- """Set basic stage1, 2, 3 action sequences"""
- self.settings["action_sequence"] = ["unpack", "unpack_snapshot",
- "setup_confdir", "portage_overlay",
- "bind", "chroot_setup", "setup_environment",
- "run_local", "preclean", "unbind", "clean"]
- self.set_completion_action_sequences()
-
- def set_completion_action_sequences(self):
- if "fetch" not in self.settings["options"]:
- self.settings["action_sequence"].append("capture")
- if "keepwork" in self.settings["options"]:
- self.settings["action_sequence"].append("clear_autoresume")
- elif "seedcache" in self.settings["options"]:
- self.settings["action_sequence"].append("remove_autoresume")
- else:
- self.settings["action_sequence"].append("remove_autoresume")
- self.settings["action_sequence"].append("remove_chroot")
-
- def set_use(self):
- use = self.settings["spec_prefix"] + "/use"
- if use in self.settings:
- if isinstance(self.settings[use], str):
- self.settings["use"] = self.settings[use].split()
- self.settings["use"] = self.settings[use]
- del self.settings[use]
- else:
- self.settings["use"] = []
-
- def set_catalyst_use(self):
- catalyst_use = self.settings["spec_prefix"] + "/catalyst_use"
- if catalyst_use in self.settings:
- if isinstance(self.settings[catalyst_use], str):
- self.settings["catalyst_use"] = self.settings[catalyst_use].split()
- else:
- self.settings["catalyst_use"] = self.settings[catalyst_use]
- del self.settings[catalyst_use]
- else:
- self.settings["catalyst_use"] = []
-
- # Force bindist when options ask for it
- if "bindist" in self.settings["options"]:
- log.debug("Enabling bindist USE flag")
- self.settings["catalyst_use"].append("bindist")
-
- def set_stage_path(self):
- self.settings["stage_path"] = normpath(self.settings["chroot_path"])
-
- def set_mounts(self):
- pass
-
- def set_packages(self):
- pass
-
- def set_rm(self):
- if self.settings["spec_prefix"] + "/rm" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix'] + '/rm'], str):
- self.settings[self.settings["spec_prefix"] + "/rm"] = \
- self.settings[self.settings["spec_prefix"] + "/rm"].split()
-
- def set_linuxrc(self):
- if self.settings["spec_prefix"] + "/linuxrc" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix'] + '/linuxrc'], str):
- self.settings["linuxrc"] = \
- self.settings[self.settings["spec_prefix"] + "/linuxrc"]
- del self.settings[self.settings["spec_prefix"] + "/linuxrc"]
-
- def set_busybox_config(self):
- if self.settings["spec_prefix"] + "/busybox_config" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix'] + '/busybox_config'], str):
- self.settings["busybox_config"] = \
- self.settings[self.settings["spec_prefix"] + "/busybox_config"]
- del self.settings[self.settings["spec_prefix"] + "/busybox_config"]
-
- def set_portage_overlay(self):
- if "portage_overlay" in self.settings:
- if isinstance(self.settings['portage_overlay'], str):
- self.settings["portage_overlay"] = \
- self.settings["portage_overlay"].split()
- log.info('portage_overlay directories are set to: %s',
- ' '.join(self.settings['portage_overlay']))
-
- def set_overlay(self):
- if self.settings["spec_prefix"] + "/overlay" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix'] + '/overlay'], str):
- self.settings[self.settings["spec_prefix"] + "/overlay"] = \
- self.settings[self.settings["spec_prefix"] + \
- "/overlay"].split()
-
- def set_root_overlay(self):
- if self.settings["spec_prefix"] + "/root_overlay" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix'] + '/root_overlay'], str):
- self.settings[self.settings["spec_prefix"] + "/root_overlay"] = \
- self.settings[self.settings["spec_prefix"] + \
- "/root_overlay"].split()
-
- def set_root_path(self):
- """ ROOT= variable for emerges """
- self.settings["root_path"] = "/"
-
- def set_valid_build_kernel_vars(self,addlargs):
- if "boot/kernel" in addlargs:
- if isinstance(addlargs['boot/kernel'], str):
- loopy = [addlargs["boot/kernel"]]
- else:
- loopy = addlargs["boot/kernel"]
-
- for x in loopy:
- self.valid_values |= frozenset([
- "boot/kernel/" + x + "/aliases",
- "boot/kernel/" + x + "/config",
- "boot/kernel/" + x + "/console",
- "boot/kernel/" + x + "/extraversion",
- "boot/kernel/" + x + "/gk_action",
- "boot/kernel/" + x + "/gk_kernargs",
- "boot/kernel/" + x + "/initramfs_overlay",
- "boot/kernel/" + x + "/kernelopts",
- "boot/kernel/" + x + "/packages",
- "boot/kernel/" + x + "/softlevel",
- "boot/kernel/" + x + "/sources",
- "boot/kernel/" + x + "/use",
- ])
- if "boot/kernel/" + x + "/packages" in addlargs:
- if isinstance(addlargs['boot/kernel/' + x + '/packages'], str):
- addlargs["boot/kernel/" + x + "/packages"] = \
- [addlargs["boot/kernel/" + x + "/packages"]]
-
- def set_build_kernel_vars(self):
- prefix = self.settings["spec_prefix"]
-
- gk_mainargs = prefix + "/gk_mainargs"
- if gk_mainargs in self.settings:
- self.settings["gk_mainargs"] = self.settings[gk_mainargs]
- del self.settings[gk_mainargs]
-
- # Ask genkernel to include b2sum if <target>/verify is set
- verify = prefix + "/verify"
- if verify in self.settings:
- assert self.settings[verify] == "blake2"
- self.settings.setdefault("gk_mainargs", []).append("--b2sum")
-
- def kill_chroot_pids(self):
- log.info('Checking for processes running in chroot and killing them.')
-
- # Force environment variables to be exported so script can see them
- self.setup_environment()
-
- killcmd = normpath(self.settings["sharedir"] +
- self.settings["shdir"] + "/support/kill-chroot-pids.sh")
- if os.path.exists(killcmd):
- cmd([killcmd], env = self.env)
-
- def mount_safety_check(self):
- """
- Check and verify that none of our paths in mypath are mounted. We don't
- want to clean up with things still mounted, and this allows us to check.
- Returns 1 on ok, 0 on "something is still mounted" case.
- """
-
- if not os.path.exists(self.settings["chroot_path"]):
- return
-
- log.debug('self.mounts = %s', self.mounts)
- for x in self.mounts:
- target = normpath(self.settings["chroot_path"] + self.target_mounts[x])
- log.debug('mount_safety_check() x = %s %s', x, target)
- if not os.path.exists(target):
- continue
-
- if ismount(target):
- # Something is still mounted
- try:
- log.warning('%s is still mounted; performing auto-bind-umount...', target)
- # Try to umount stuff ourselves
- self.unbind()
- if ismount(target):
- raise CatalystError("Auto-unbind failed for " + target)
- log.notice('Auto-unbind successful...')
- except CatalystError:
- raise CatalystError("Unable to auto-unbind " + target)
-
- def unpack(self):
-
- clst_unpack_hash = self.resume.get("unpack")
-
- # Set up all unpack info settings
- unpack_info = self.decompressor.create_infodict(
- source = self.settings["source_path"],
- destination = self.settings["chroot_path"],
- arch = self.settings["compressor_arch"],
- other_options = self.settings["compressor_options"],
- )
-
- display_msg = (
- 'Starting %(mode)s from %(source)s\nto '
- '%(destination)s (this may take some time) ..')
-
- error_msg = "'%(mode)s' extraction of %(source)s to %(destination)s failed."
-
- if "seedcache" in self.settings["options"]:
- if os.path.isdir(unpack_info["source"]):
- # SEEDCACHE Is a directory, use rsync
- unpack_info['mode'] = "rsync"
- else:
- # SEEDCACHE is not a directory, try untar'ing
- log.notice('Referenced SEEDCACHE does not appear to be a directory, trying to untar...')
- unpack_info['source'] = file_check(unpack_info['source'])
- else:
- # No SEEDCACHE, use tar
- unpack_info['source'] = file_check(unpack_info['source'])
- # end of unpack_info settings
-
- # set defaults,
- # only change them if the resume point is proven to be good
- _unpack = True
- invalid_chroot = True
- # Begin autoresume validation
- if "autoresume" in self.settings["options"]:
- # check chroot
- if os.path.isdir(self.settings["chroot_path"]):
- if self.resume.is_enabled("unpack"):
- # Autoresume is valid in the chroot
- _unpack = False
- invalid_chroot = False
- log.notice('Resume: "chroot" is valid...')
- else:
- # self.resume.is_disabled("unpack")
- # Autoresume is invalid in the chroot
- log.notice('Resume: "seed source" unpack resume point is disabled')
-
- # check seed source
- if os.path.isfile(self.settings["source_path"]) and not invalid_chroot:
- if self.settings["source_path_hash"].replace("\n", " ") == clst_unpack_hash:
- # Seed tarball has not changed, chroot is valid
- _unpack = False
- invalid_chroot = False
- log.notice('Resume: "seed source" hash matches chroot...')
- else:
- # self.settings["source_path_hash"] != clst_unpack_hash
- # Seed tarball has changed, so invalidate the chroot
- _unpack = True
- invalid_chroot = True
- log.notice('Resume: "seed source" has changed, hashes do not match, invalidating resume...')
- log.notice(' source_path......: %s', self.settings["source_path"])
- log.notice(' new source hash..: %s', self.settings["source_path_hash"].replace("\n", " "))
- log.notice(' recorded hash....: %s', clst_unpack_hash)
- unpack_info['source'] = file_check(unpack_info['source'])
-
- else:
- # No autoresume, check SEEDCACHE
- if "seedcache" in self.settings["options"]:
- # if the seedcache is a dir, rsync will clean up the chroot
- if os.path.isdir(self.settings["source_path"]):
- pass
- elif os.path.isdir(self.settings["source_path"]):
- # We should never reach this, so something is very wrong
- raise CatalystError(
- "source path is a dir but seedcache is not enabled: %s"
- % self.settings["source_path"])
-
- if _unpack:
- self.mount_safety_check()
-
- if invalid_chroot:
- if "autoresume" in self.settings["options"]:
- log.notice('Resume: Target chroot is invalid, cleaning up...')
-
- self.clear_autoresume()
- self.clear_chroot()
-
- ensure_dirs(self.settings["chroot_path"])
-
- ensure_dirs(self.settings["chroot_path"] + "/tmp", mode=1777)
-
- if "pkgcache" in self.settings["options"]:
- ensure_dirs(self.settings["pkgcache_path"], mode=0o755)
-
- if "kerncache" in self.settings["options"]:
- ensure_dirs(self.settings["kerncache_path"], mode=0o755)
-
- log.notice('%s', display_msg % unpack_info)
-
- # now run the decompressor
- if not self.decompressor.extract(unpack_info):
- log.error('%s', error_msg % unpack_info)
-
- if "source_path_hash" in self.settings:
- self.resume.enable("unpack",
- data = self.settings["source_path_hash"])
- else:
- self.resume.enable("unpack")
- else:
- log.notice('Resume: Valid resume point detected, skipping seed unpack operation...')
-
- def unpack_snapshot(self):
- unpack = True
- snapshot_hash = self.resume.get("unpack_repo")
-
- unpack_errmsg = "Error unpacking snapshot using mode %(mode)s"
-
- unpack_info = self.decompressor.create_infodict(
- source = self.settings["snapshot_path"],
- destination = self.settings["snapshot_cache_path"],
- arch = self.settings["compressor_arch"],
- other_options = self.settings["compressor_options"],
- )
-
- target_portdir = normpath(self.settings["chroot_path"] +
- self.settings["repo_basedir"] + "/" + self.settings["repo_name"])
- log.info('%s', self.settings['chroot_path'])
- log.info('unpack_snapshot(), target_portdir = %s', target_portdir)
- if "snapcache" in self.settings["options"]:
- snapshot_cache_hash_path = pjoin(
- self.settings['snapshot_cache_path'], 'catalyst-hash')
- snapshot_cache_hash = fileutils.readfile(snapshot_cache_hash_path, True)
- unpack_info['mode'] = self.decompressor.determine_mode(
- unpack_info['source'])
-
- cleanup_msg = "Cleaning up invalid snapshot cache at \n\t" + \
- self.settings["snapshot_cache_path"] + \
- " (this can take a long time)..."
-
- if self.settings["snapshot_path_hash"] == snapshot_cache_hash:
- log.info('Valid snapshot cache, skipping unpack of portage tree...')
- unpack = False
- else:
- cleanup_msg = \
- 'Cleaning up existing portage tree (this can take a long time)...'
- unpack_info['destination'] = normpath(
- self.settings["chroot_path"] + self.settings["repo_basedir"])
- unpack_info['mode'] = self.decompressor.determine_mode(
- unpack_info['source'])
-
- if "autoresume" in self.settings["options"] \
- and os.path.exists(target_portdir) \
- and self.resume.is_enabled("unpack_repo") \
- and self.settings["snapshot_path_hash"] == snapshot_hash:
- log.notice('Valid Resume point detected, skipping unpack of portage tree...')
- unpack = False
-
- if unpack:
- if "snapcache" in self.settings["options"]:
- self.snapcache_lock.write_lock()
- if os.path.exists(target_portdir):
- log.info('%s', cleanup_msg)
- clear_dir(target_portdir)
-
- log.notice('Unpacking portage tree (this can take a long time) ...')
- if not self.decompressor.extract(unpack_info):
- log.error('%s', unpack_errmsg % unpack_info)
-
- if "snapcache" in self.settings["options"]:
- with open(snapshot_cache_hash_path, 'w') as myf:
- myf.write(self.settings["snapshot_path_hash"])
- else:
- log.info('Setting snapshot autoresume point')
- self.resume.enable("unpack_repo",
- data = self.settings["snapshot_path_hash"])
-
- if "snapcache" in self.settings["options"]:
- self.snapcache_lock.unlock()
-
- def config_profile_link(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("config_profile_link"):
- log.notice('Resume point detected, skipping config_profile_link operation...')
- else:
- # TODO: zmedico and I discussed making this a directory and pushing
- # in a parent file, as well as other user-specified configuration.
- log.info('Configuring profile link...')
- clear_path(self.settings['chroot_path'] + \
- self.settings['port_conf'] + '/make.profile')
- ensure_dirs(self.settings['chroot_path'] + self.settings['port_conf'])
- cmd(['ln', '-sf',
- '../..' + self.settings['portdir'] + '/profiles/' + self.settings['target_profile'],
- self.settings['chroot_path'] + self.settings['port_conf'] + '/make.profile'],
- env=self.env)
- self.resume.enable("config_profile_link")
-
- def setup_confdir(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("setup_confdir"):
- log.notice('Resume point detected, skipping setup_confdir operation...')
- else:
- if "portage_confdir" in self.settings:
- log.info('Configuring %s...', self.settings['port_conf'])
- dest = normpath(self.settings['chroot_path'] + '/' + self.settings['port_conf'])
- ensure_dirs(dest)
- # The trailing slashes on both paths are important:
- # We want to make sure rsync copies the dirs into each
- # other and not as subdirs.
- cmd(['rsync', '-a', self.settings['portage_confdir'] + '/', dest + '/'],
- env=self.env)
- self.resume.enable("setup_confdir")
-
- def portage_overlay(self):
- """ We copy the contents of our overlays to /usr/local/portage """
- if "portage_overlay" in self.settings:
- for x in self.settings["portage_overlay"]:
- if os.path.exists(x):
- log.info('Copying overlay dir %s', x)
- ensure_dirs(self.settings['chroot_path'] + self.settings['local_overlay'])
- cmd("cp -a " + x + "/* " + self.settings["chroot_path"] +
- self.settings["local_overlay"],
- env=self.env)
-
- def root_overlay(self):
- """ Copy over the root_overlay """
- if self.settings["spec_prefix"] + "/root_overlay" in self.settings:
- for x in self.settings[self.settings["spec_prefix"] +
- "/root_overlay"]:
- if os.path.exists(x):
- log.info('Copying root_overlay: %s', x)
- cmd(['rsync', '-a', x + '/', self.settings['chroot_path']],
- env=self.env)
-
- def bind(self):
- for x in self.mounts:
- log.debug('bind(); x = %s', x)
- target = normpath(self.settings["chroot_path"] + self.target_mounts[x])
- ensure_dirs(target, mode=0o755)
-
- if not os.path.exists(self.mountmap[x]):
- if self.mountmap[x] not in ("maybe_tmpfs", "tmpfs", "shmfs"):
- ensure_dirs(self.mountmap[x], mode=0o755)
-
- src = self.mountmap[x]
- log.debug('bind(); src = %s', src)
- if "snapcache" in self.settings["options"] and x == "portdir":
- self.snapcache_lock.read_lock()
- _cmd = None
- if src == "maybe_tmpfs":
- if "var_tmpfs_portage" in self.settings:
- _cmd = ['mount', '-t', 'tmpfs',
- '-o', 'size=' + self.settings['var_tmpfs_portage'] + 'G',
- src, target]
- elif src == "tmpfs":
- _cmd = ['mount', '-t', 'tmpfs', src, target]
- else:
- if src == "shmfs":
- _cmd = ['mount', '-t', 'tmpfs', '-o', 'noexec,nosuid,nodev', 'shm', target]
- else:
- _cmd = ['mount', '--bind', src, target]
- if _cmd:
- log.debug('bind(); _cmd = %s', _cmd)
- cmd(_cmd, env=self.env, fail_func=self.unbind)
- log.debug('bind(); finished :D')
-
- def unbind(self):
- ouch = 0
- mypath = self.settings["chroot_path"]
- myrevmounts = self.mounts[:]
- myrevmounts.reverse()
- # Unmount in reverse order for nested bind-mounts
- for x in myrevmounts:
- target = normpath(mypath + self.target_mounts[x])
- if not os.path.exists(target):
- log.notice('%s does not exist. Skipping', target)
- continue
-
- if not ismount(target):
- log.notice('%s is not a mount point. Skipping', target)
- continue
-
- try:
- cmd(['umount', target], env=self.env)
- except CatalystError:
- log.warning('First attempt to unmount failed: %s', target)
- log.warning('Killing any pids still running in the chroot')
-
- self.kill_chroot_pids()
-
- try:
- cmd(['umount', target], env=self.env)
- except CatalystError:
- ouch = 1
- log.warning("Couldn't umount bind mount: %s", target)
-
- if "snapcache" in self.settings["options"] and x == "/var/db/repos/gentoo":
- try:
- # It's possible the snapshot lock object isn't created yet.
- # This is because mount safety check calls unbind before the
- # target is fully initialized
- self.snapcache_lock.unlock()
- except Exception:
- pass
- if ouch:
- # if any bind mounts really failed, then we need to raise
- # this to potentially prevent an upcoming bash stage cleanup script
- # from wiping our bind mounts.
- raise CatalystError(
- "Couldn't umount one or more bind-mounts; aborting for safety.")
-
- def chroot_setup(self):
- self.makeconf = read_makeconf(normpath(self.settings["chroot_path"] +
- self.settings["make_conf"]))
- self.override_cbuild()
- self.override_chost()
- self.override_cflags()
- self.override_cxxflags()
- self.override_fcflags()
- self.override_fflags()
- self.override_ldflags()
- self.override_asflags()
- self.override_common_flags()
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("chroot_setup"):
- log.notice('Resume point detected, skipping chroot_setup operation...')
- else:
- log.notice('Setting up chroot...')
-
- shutil.copy('/etc/resolv.conf', self.settings['chroot_path'] + '/etc/')
-
- # Copy over the envscript, if applicable
- if "envscript" in self.settings:
- if not os.path.exists(self.settings["envscript"]):
- raise CatalystError(
- "Can't find envscript " + self.settings["envscript"],
- print_traceback=True)
-
- log.warning(
- 'env variables in catalystrc may cause catastrophic failure.\n'
- 'If your build fails look here first as the possible problem.')
-
- shutil.copy(self.settings['envscript'],
- self.settings['chroot_path'] + '/tmp/envscript')
-
- # Copy over /etc/hosts from the host in case there are any
- # specialties in there
- hosts_file = self.settings['chroot_path'] + '/etc/hosts'
- if os.path.exists(hosts_file):
- os.rename(hosts_file, hosts_file + '.catalyst')
- shutil.copy('/etc/hosts', hosts_file)
- # write out the make.conf
- try:
- self.write_make_conf(setup=True)
- except OSError as e:
- raise CatalystError('Could not write %s: %s' % (
- normpath(self.settings["chroot_path"] +
- self.settings["make_conf"]), e))
- self.resume.enable("chroot_setup")
-
- def write_make_conf(self, setup=True):
- # Modify and write out make.conf (for the chroot)
- makepath = normpath(self.settings["chroot_path"] +
- self.settings["make_conf"])
- clear_path(makepath)
- with open(makepath, "w") as myf:
- log.notice("Writing the stage make.conf to: %s" % makepath)
- myf.write("# These settings were set by the catalyst build script "
- "that automatically\n# built this stage.\n")
- myf.write("# Please consult "
- "/usr/share/portage/config/make.conf.example "
- "for a more\n# detailed example.\n")
-
- for flags in ["COMMON_FLAGS", "CFLAGS", "CXXFLAGS", "FCFLAGS", "FFLAGS",
- "LDFLAGS", "ASFLAGS"]:
- if flags in ["LDFLAGS", "ASFLAGS"]:
- if not flags in self.settings:
- continue
- myf.write("# %s is unsupported. USE AT YOUR OWN RISK!\n"
- % flags)
- if flags not in self.settings or (flags != "COMMON_FLAGS" and
- self.settings[flags] == self.settings["COMMON_FLAGS"]):
- myf.write('%s="${COMMON_FLAGS}"\n' % flags)
- elif isinstance(self.settings[flags], list):
- myf.write('%s="%s"\n'
- % (flags, ' '.join(self.settings[flags])))
- else:
- myf.write('%s="%s"\n'
- % (flags, self.settings[flags]))
-
- if "CBUILD" in self.settings:
- myf.write("# This should not be changed unless you know exactly"
- " what you are doing. You\n# should probably be "
- "using a different stage, instead.\n")
- myf.write('CBUILD="' + self.settings["CBUILD"] + '"\n')
-
- if "CHOST" in self.settings:
- myf.write("# WARNING: Changing your CHOST is not something "
- "that should be done lightly.\n# Please consult "
- "https://wiki.gentoo.org/wiki/Changing_the_CHOST_variable "
- "before changing.\n")
- myf.write('CHOST="' + self.settings["CHOST"] + '"\n')
-
- # Figure out what our USE vars are for building
- myusevars = []
- if "bindist" in self.settings["options"]:
- myf.write("\n# NOTE: This stage was built with the bindist Use flag enabled\n")
- if setup or "sticky-config" in self.settings["options"]:
- myusevars.extend(self.settings["catalyst_use"])
- log.notice("STICKY-CONFIG is enabled")
- if "HOSTUSE" in self.settings:
- myusevars.extend(self.settings["HOSTUSE"])
-
- if "use" in self.settings:
- myusevars.extend(self.settings["use"])
-
- if myusevars:
- myf.write("# These are the USE and USE_EXPAND flags that were "
- "used for\n# building in addition to what is provided "
- "by the profile.\n")
- myusevars = sorted(set(myusevars))
- myf.write('USE="' + ' '.join(myusevars) + '"\n')
- if '-*' in myusevars:
- log.warning(
- 'The use of -* in %s/use will cause portage to ignore\n'
- 'package.use in the profile and portage_confdir.\n'
- "You've been warned!", self.settings['spec_prefix'])
-
- myuseexpandvars = {}
- if "HOSTUSEEXPAND" in self.settings:
- for hostuseexpand in self.settings["HOSTUSEEXPAND"]:
- myuseexpandvars.update(
- {hostuseexpand:self.settings["HOSTUSEEXPAND"][hostuseexpand]})
-
- if myuseexpandvars:
- for hostuseexpand in myuseexpandvars:
- myf.write(hostuseexpand + '="' +
- ' '.join(myuseexpandvars[hostuseexpand]) + '"\n')
- # write out a shippable version
- target_portdir = normpath(self.settings["repo_basedir"] + "/" +
- self.settings["repo_name"])
-
- myf.write('PORTDIR="%s"\n' % target_portdir)
- myf.write('DISTDIR="%s"\n' % self.settings['target_distdir'])
- myf.write('PKGDIR="%s"\n' % self.settings['target_pkgdir'])
- if setup:
- # Setup the portage overlay
- if "portage_overlay" in self.settings:
- myf.write('PORTDIR_OVERLAY="%s"\n' % self.settings["local_overlay"])
-
- # Set default locale for system responses. #478382
- myf.write(
- '\n'
- '# This sets the language of build output to English.\n'
- '# Please keep this setting intact when reporting bugs.\n'
- 'LC_MESSAGES=C\n')
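
The flags loop above encodes one fallback rule: a flag variable that is
missing, or identical to COMMON_FLAGS, is written as a reference to
${COMMON_FLAGS}, and LDFLAGS/ASFLAGS are only emitted when explicitly set.
A minimal standalone sketch of that rule (the settings dict is made up,
and the "unsupported" warning line is omitted):

    # Sketch of the COMMON_FLAGS fallback used by write_make_conf() above.
    settings = {
        "COMMON_FLAGS": "-O2 -pipe",
        "CFLAGS": "-O2 -pipe",                            # same as COMMON_FLAGS
        "CXXFLAGS": ["-O2", "-pipe", "-fno-exceptions"],  # lists are joined
    }

    for flags in ["COMMON_FLAGS", "CFLAGS", "CXXFLAGS", "FCFLAGS", "FFLAGS",
                  "LDFLAGS", "ASFLAGS"]:
        if flags in ["LDFLAGS", "ASFLAGS"] and flags not in settings:
            continue  # only written when the spec sets them
        if flags not in settings or (flags != "COMMON_FLAGS" and
                                     settings[flags] == settings["COMMON_FLAGS"]):
            print('%s="${COMMON_FLAGS}"' % flags)
        elif isinstance(settings[flags], list):
            print('%s="%s"' % (flags, ' '.join(settings[flags])))
        else:
            print('%s="%s"' % (flags, settings[flags]))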
-
-
- def fsscript(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("fsscript"):
- log.notice('Resume point detected, skipping fsscript operation...')
- else:
- if "fsscript" in self.settings:
- if os.path.exists(self.settings["controller_file"]):
- cmd([self.settings['controller_file'], 'fsscript'],
- env=self.env)
- self.resume.enable("fsscript")
-
- def rcupdate(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("rcupdate"):
- log.notice('Resume point detected, skipping rcupdate operation...')
- else:
- if os.path.exists(self.settings["controller_file"]):
- cmd([self.settings['controller_file'], 'rc-update'],
- env=self.env)
- self.resume.enable("rcupdate")
-
- def clean(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("clean"):
- log.notice('Resume point detected, skipping clean operation...')
- else:
- for x in self.settings["cleanables"]:
- log.notice('Cleaning chroot: %s', x)
- clear_path(normpath(self.settings["destpath"] + x))
-
- # Put /etc/hosts back into place
- hosts_file = self.settings['chroot_path'] + '/etc/hosts'
- if os.path.exists(hosts_file + '.catalyst'):
- os.rename(hosts_file + '.catalyst', hosts_file)
-
- # optionally clean up portage configs
- if ("portage_prefix" in self.settings and
- "sticky-config" not in self.settings["options"]):
- log.debug("clean(), portage_preix = %s, no sticky-config", self.settings["portage_prefix"])
- for _dir in "accept_keywords", "keywords", "mask", "unmask", "use":
- target = pjoin(self.settings["destpath"],
- "etc/portage/package.%s" % _dir,
- self.settings["portage_prefix"])
- log.notice("Clearing portage_prefix target: %s", target)
- clear_path(target)
-
- # Remove our overlay
- overlay = normpath(self.settings["chroot_path"] + self.settings["local_overlay"])
- if os.path.exists(overlay):
- clear_path(overlay)
-
- if "sticky-config" not in self.settings["options"]:
- # re-write the make.conf to be sure it is clean
- self.write_make_conf(setup=False)
-
- # Clean up old and obsoleted files in /etc
- if os.path.exists(self.settings["stage_path"]+"/etc"):
- cmd(['find', self.settings['stage_path'] + '/etc',
- '-maxdepth', '1', '-name', '*-', '-delete'],
- env=self.env)
-
- if os.path.exists(self.settings["controller_file"]):
- cmd([self.settings['controller_file'], 'clean'], env=self.env)
- self.resume.enable("clean")
-
- def empty(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("empty"):
- log.notice('Resume point detected, skipping empty operation...')
- else:
- if self.settings["spec_prefix"] + "/empty" in self.settings:
- if isinstance(
- self.settings[self.settings['spec_prefix'] + '/empty'],
- str):
- self.settings[self.settings["spec_prefix"] + "/empty"] = \
- self.settings[self.settings["spec_prefix"] + \
- "/empty"].split()
- for x in self.settings[self.settings["spec_prefix"] + "/empty"]:
- myemp = self.settings["destpath"] + x
- if not os.path.isdir(myemp) or os.path.islink(myemp):
- log.warning('not a directory or does not exist, '
- 'skipping "empty" operation: %s', x)
- continue
- log.info('Emptying directory %s', x)
- clear_dir(myemp)
- self.resume.enable("empty")
-
- def remove(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("remove"):
- log.notice('Resume point detected, skipping remove operation...')
- else:
- if self.settings["spec_prefix"] + "/rm" in self.settings:
- for x in self.settings[self.settings["spec_prefix"] + "/rm"]:
- # We're going to shell out for all these cleaning
- # operations, so we get easy glob handling.
- log.notice('livecd: removing %s', x)
- clear_path(self.settings["chroot_path"] + x)
- try:
- if os.path.exists(self.settings["controller_file"]):
- cmd([self.settings['controller_file'], 'clean'],
- env=self.env)
- self.resume.enable("remove")
- except:
- self.unbind()
- raise
-
- def preclean(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("preclean"):
- log.notice('Resume point detected, skipping preclean operation...')
- else:
- try:
- if os.path.exists(self.settings["controller_file"]):
- cmd([self.settings['controller_file'], 'preclean'],
- env=self.env)
- self.resume.enable("preclean")
-
- except:
- self.unbind()
- raise CatalystError("Build failed, could not execute preclean")
-
- def capture(self):
- # initialize it here so it doesn't use
- # resources if it is not needed
- if not self.compressor:
- self.compressor = CompressMap(self.settings["compress_definitions"],
- env=self.env, default_mode=self.settings['compression_mode'],
- comp_prog=self.settings['comp_prog'])
-
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("capture"):
- log.notice('Resume point detected, skipping capture operation...')
- else:
- log.notice('Capture target in a tarball')
- # Remove filename from path
- mypath = os.path.dirname(self.settings["target_path"].rstrip('/'))
-
- # Now make sure path exists
- ensure_dirs(mypath)
-
- pack_info = self.compressor.create_infodict(
- source=".",
- basedir=self.settings["stage_path"],
- filename=self.settings["target_path"].rstrip('/'),
- mode=self.settings["compression_mode"],
- auto_extension=True,
- arch=self.settings["compressor_arch"],
- other_options=self.settings["compressor_options"],
- )
- target_filename = ".".join([self.settings["target_path"].rstrip('/'),
- self.compressor.extension(pack_info['mode'])])
-
- log.notice('Creating stage tarball... mode: %s',
- self.settings['compression_mode'])
-
- if self.compressor.compress(pack_info):
- self.gen_contents_file(target_filename)
- self.gen_digest_file(target_filename)
- self.resume.enable("capture")
- else:
- log.warning("Couldn't create stage tarball: %s", target_filename)
-
- def run_local(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("run_local"):
- log.notice('Resume point detected, skipping run_local operation...')
- else:
- try:
- if os.path.exists(self.settings["controller_file"]):
- log.info('run_local() starting controller script...')
- cmd([self.settings['controller_file'], 'run'],
- env=self.env)
- self.resume.enable("run_local")
- else:
- log.info('run_local() no controller_file found... %s',
- self.settings['controller_file'])
-
- except CatalystError:
- self.unbind()
- raise CatalystError("Stage build aborting due to error.",
- print_traceback=False)
-
- def setup_environment(self):
- """
- Modify the current environment. This is an ugly hack that should be
- fixed. We need it because we rely on os.system()-style calls, which
- cannot be given their own environ.
- """
- log.debug('setup_environment(); settings = %r', self.settings)
- for x in list(self.settings):
- log.debug('setup_environment(); processing: %s', x)
- if x == "options":
- for opt in self.settings[x]:
- self.env['clst_' + opt.upper()] = "true"
- continue
- # Sanitize var names by doing "s|/-.|_|g"
- varname = "clst_" + x.replace("/", "_")
- varname = varname.replace("-", "_")
- varname = varname.replace(".", "_")
- if isinstance(self.settings[x], str):
- # Prefix to prevent namespace clashes
- if "path" in x:
- self.env[varname] = self.settings[x].rstrip("/")
- else:
- self.env[varname] = self.settings[x]
- elif isinstance(self.settings[x], list):
- self.env[varname] = ' '.join(self.settings[x])
- elif isinstance(self.settings[x], bool):
- if self.settings[x]:
- self.env[varname] = "true"
- else:
- self.env[varname] = "false"
- # This handles a dictionary of objects just one level deep and no deeper!
- # It's currently used only for USE_EXPAND flags, which are dictionaries of
- # lists in arch/amd64.py and friends. If we wanted self.settings[var]
- # of any depth, we should make this function recursive.
- elif isinstance(self.settings[x], dict):
- if x in ["compress_definitions",
- "decompress_definitions"]:
- continue
- self.env[varname] = ' '.join(self.settings[x].keys())
- for y in self.settings[x].keys():
- varname2 = "clst_" + y.replace("/", "_")
- varname2 = varname2.replace("-", "_")
- varname2 = varname2.replace(".", "_")
- if isinstance(self.settings[x][y], str):
- self.env[varname2] = self.settings[x][y]
- elif isinstance(self.settings[x][y], list):
- self.env[varname2] = ' '.join(self.settings[x][y])
- elif isinstance(self.settings[x][y], bool):
- if self.settings[x][y]:
- self.env[varname] = "true"
- else:
- self.env[varname] = "false"
-
- if "makeopts" in self.settings:
- if isinstance(self.settings["makeopts"], str):
- self.env["MAKEOPTS"] = self.settings["makeopts"]
- else:
- # ensure makeopts is a string
- self.env["MAKEOPTS"] = ' '.join(self.settings["makeopts"])
-
- log.debug('setup_environment(); env = %r', self.env)
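
The replace() chain above is the entire sanitizer. A tiny standalone
sketch of the mapping (the helper name clst_varname is ours, not
catalyst's):

    def clst_varname(key):
        # "s|/-.|_|g": every '/', '-' and '.' in a settings key becomes '_'
        name = "clst_" + key.replace("/", "_")
        return name.replace("-", "_").replace(".", "_")

    assert clst_varname("livecd/fstype") == "clst_livecd_fstype"
    assert clst_varname("boot/kernel/gentoo/use") == "clst_boot_kernel_gentoo_use"
    assert clst_varname("rel_type") == "clst_rel_type"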
-
- def run(self):
- self.chroot_lock.write_lock()
-
- # Kill any pids in the chroot
- self.kill_chroot_pids()
-
- # Check for mounts right away and abort if we cannot unmount them
- self.mount_safety_check()
-
- if "clear-autoresume" in self.settings["options"]:
- self.clear_autoresume()
-
- if "purgetmponly" in self.settings["options"]:
- self.purge()
- return True
-
- if "purgeonly" in self.settings["options"]:
- log.info('StageBase: run() purgeonly')
- self.purge()
-
- if "purge" in self.settings["options"]:
- log.info('StageBase: run() purge')
- self.purge()
-
- failure = False
- for x in self.settings["action_sequence"]:
- log.notice('--- Running action sequence: %s', x)
- sys.stdout.flush()
- try:
- getattr(self, x)()
- except LockInUse:
- log.error('Unable to acquire the lock...')
- failure = True
- break
- except Exception:
- log.error('Exception running action sequence %s', x, exc_info=True)
- failure = True
- break
-
- if failure:
- log.notice('Cleaning up... Running unbind()')
- self.unbind()
- return False
- return True
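
run() drives the whole build through name-based dispatch: every entry in
action_sequence is a method name resolved with getattr(). A toy sketch of
the same pattern, with dummy stage methods:

    class DemoStage:
        def __init__(self):
            self.action_sequence = ["unpack", "bind", "clean"]

        def unpack(self): print("unpack")
        def bind(self): print("bind")
        def clean(self): print("clean")

        def run(self):
            for name in self.action_sequence:
                # a misspelled sequence entry surfaces as AttributeError here
                getattr(self, name)()

    DemoStage().run()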
-
-
- def unmerge(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("unmerge"):
- log.notice('Resume point detected, skipping unmerge operation...')
- else:
- if self.settings["spec_prefix"] + "/unmerge" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix'] + '/unmerge'], str):
- self.settings[self.settings["spec_prefix"] + "/unmerge"] = \
- [self.settings[self.settings["spec_prefix"] + "/unmerge"]]
-
- # Before cleaning, unmerge stuff
- try:
- cmd([self.settings['controller_file'], 'unmerge'] +
- self.settings[self.settings['spec_prefix'] + '/unmerge'],
- env=self.env)
- log.info('unmerge shell script')
- except CatalystError:
- self.unbind()
- raise
- self.resume.enable("unmerge")
-
- def target_setup(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("target_setup"):
- log.notice('Resume point detected, skipping target_setup operation...')
- else:
- log.notice('Setting up filesystems per filesystem type')
- cmd([self.settings['controller_file'], 'target_image_setup',
- self.settings['target_path']], env=self.env)
- self.resume.enable("target_setup")
-
- def setup_overlay(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("setup_overlay"):
- log.notice('Resume point detected, skipping setup_overlay operation...')
- else:
- if self.settings["spec_prefix"] + "/overlay" in self.settings:
- for x in self.settings[self.settings["spec_prefix"] + "/overlay"]:
- if os.path.exists(x):
- cmd(['rsync', '-a', x + '/', self.settings['target_path']],
- env=self.env)
- self.resume.enable("setup_overlay")
-
- def create_iso(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("create_iso"):
- log.notice('Resume point detected, skipping create_iso operation...')
- else:
- # Create the ISO
- if "iso" in self.settings:
- cmd([self.settings['controller_file'], 'iso', self.settings['iso']],
- env=self.env)
- self.gen_contents_file(self.settings["iso"])
- self.gen_digest_file(self.settings["iso"])
- self.resume.enable("create_iso")
- else:
- log.warning('livecd/iso was not defined. '
- 'An ISO Image will not be created.')
-
- def build_packages(self):
- build_packages_resume = pjoin(self.settings["autoresume_path"],
- "build_packages")
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("build_packages"):
- log.notice('Resume point detected, skipping build_packages operation...')
- else:
- if self.settings["spec_prefix"] + "/packages" in self.settings:
- target_pkgs = self.settings["spec_prefix"] + '/packages'
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("build_packages"):
- log.notice('Resume point detected, skipping build_packages '
- 'operation...')
- else:
- command = [self.settings['controller_file'], 'build_packages']
- if isinstance(self.settings[target_pkgs], str):
- command.append(self.settings[target_pkgs])
- else:
- command.extend(self.settings[target_pkgs])
- try:
- cmd(command, env=self.env)
- fileutils.touch(build_packages_resume)
- self.resume.enable("build_packages")
- except CatalystError:
- self.unbind()
- raise CatalystError(
- self.settings["spec_prefix"] +
- "build aborting due to error.")
-
- def build_kernel(self):
- '''Build all configured kernels'''
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("build_kernel"):
- log.notice('Resume point detected, skipping build_kernel operation...')
- else:
- if "boot/kernel" in self.settings:
- try:
- mynames = self.settings["boot/kernel"]
- if isinstance(mynames, str):
- mynames = [mynames]
- # Execute the script that sets up the kernel build environment
- cmd([self.settings['controller_file'], 'pre-kmerge'],
- env=self.env)
- for kname in mynames:
- self._build_kernel(kname = kname)
- self.resume.enable("build_kernel")
- except CatalystError:
- self.unbind()
- raise CatalystError(
- "build aborting due to kernel build error.",
- print_traceback=True)
-
- def _build_kernel(self, kname):
- "Build a single configured kernel by name"
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("build_kernel_" + kname):
- log.notice('Resume point detected, skipping build_kernel '
- 'for %s operation...', kname)
- return
- self._copy_kernel_config(kname=kname)
-
- # If we need to pass special options to the bootloader
- # for this kernel put them into the environment
- key = 'boot/kernel/' + kname + '/kernelopts'
- if key in self.settings:
- myopts = self.settings[key]
-
- if not isinstance(myopts, str):
- myopts = ' '.join(myopts)
- self.env[kname + "_kernelopts"] = myopts
- else:
- self.env[kname + "_kernelopts"] = ""
-
- key = 'boot/kernel/' + kname + '/extraversion'
- self.settings.setdefault(key, '')
- self.env["clst_kextraversion"] = self.settings[key]
-
- self._copy_initramfs_overlay(kname=kname)
-
- # Execute the script that builds the kernel
- cmd([self.settings['controller_file'], 'kernel', kname],
- env=self.env)
-
- if "boot/kernel/" + kname + "/initramfs_overlay" in self.settings:
- log.notice('Cleaning up temporary overlay dir')
- clear_dir(self.settings['chroot_path'] + '/tmp/initramfs_overlay/')
-
- self.resume.is_enabled("build_kernel_" + kname)
-
- # Execute the script that cleans up the kernel build environment
- cmd([self.settings['controller_file'], 'post-kmerge'],
- env=self.env)
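
For reference, a condensed sketch of the kernelopts handoff above; the
kernel name and option values are invented:

    settings = {"boot/kernel/gentoo/kernelopts": ["quiet", "splash"]}
    env = {}

    kname = "gentoo"
    key = "boot/kernel/" + kname + "/kernelopts"
    if key in settings:
        myopts = settings[key]
        if not isinstance(myopts, str):
            myopts = ' '.join(myopts)  # spec lists become one space-joined string
        env[kname + "_kernelopts"] = myopts
    else:
        env[kname + "_kernelopts"] = ""

    assert env == {"gentoo_kernelopts": "quiet splash"}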
-
- def _copy_kernel_config(self, kname):
- key = 'boot/kernel/' + kname + '/config'
- if key in self.settings:
- if not os.path.exists(self.settings[key]):
- self.unbind()
- raise CatalystError("Can't find kernel config: %s" %
- self.settings[key])
-
- try:
- shutil.copy(self.settings[key],
- self.settings['chroot_path'] + '/var/tmp/' + kname + '.config')
-
- except IOError:
- self.unbind()
-
- def _copy_initramfs_overlay(self, kname):
- key = 'boot/kernel/' + kname + '/initramfs_overlay'
- if key in self.settings:
- if os.path.exists(self.settings[key]):
- log.notice('Copying initramfs_overlay dir %s', self.settings[key])
-
- ensure_dirs(
- self.settings['chroot_path'] +
- '/tmp/initramfs_overlay/' + self.settings[key])
-
- cmd('cp -R ' + self.settings[key] + '/* ' +
- self.settings['chroot_path'] +
- '/tmp/initramfs_overlay/' + self.settings[key], env=self.env)
-
- def bootloader(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("bootloader"):
- log.notice('Resume point detected, skipping bootloader operation...')
- else:
- try:
- cmd([self.settings['controller_file'], 'bootloader',
- self.settings['target_path'].rstrip('/')],
- env=self.env)
- self.resume.enable("bootloader")
- except CatalystError:
- self.unbind()
- raise CatalystError("Script aborting due to error.")
-
- def livecd_update(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("livecd_update"):
- log.notice('Resume point detected, skipping livecd_update operation...')
- else:
- try:
- cmd([self.settings['controller_file'], 'livecd-update'],
- env=self.env)
- self.resume.enable("livecd_update")
-
- except CatalystError:
- self.unbind()
- raise CatalystError("build aborting due to livecd_update error.")
-
- @staticmethod
- def _debug_pause_():
- input("press any key to continue: ")
+ """
+ This class does all of the chroot setup, copying of files, etc. It is
+ the driver class for pretty much everything that Catalyst does.
+ """
+
+ def __init__(self, myspec, addlargs):
+ self.required_values |= frozenset([
+ "profile",
+ "rel_type",
+ "snapshot",
+ "source_subpath",
+ "subarch",
+ "target",
+ "version_stamp",
+ ])
+ self.valid_values |= self.required_values | frozenset([
+ "asflags",
+ "catalyst_use",
+ "cbuild",
+ "cflags",
+ "common_flags",
+ "compression_mode",
+ "cxxflags",
+ "decompression_mode",
+ "distcc_hosts",
+ "fcflags",
+ "fflags",
+ "hostuse",
+ "kerncache_path",
+ "ldflags",
+ "makeopts",
+ "pkgcache_path",
+ "portage_confdir",
+ "portage_overlay",
+ "portage_prefix",
+ ])
+
+ self.set_valid_build_kernel_vars(addlargs)
+ TargetBase.__init__(self, myspec, addlargs)
+ GenBase.__init__(self, myspec)
+ ClearBase.__init__(self, myspec)
+
+ self.makeconf = {}
+
+ if "chost" in self.settings:
+ host = self.settings["chost"].split("-")[0]
+ else:
+ host = self.settings["subarch"]
+ self.settings["hostarch"] = host
+
+ if "cbuild" in self.settings:
+ build = self.settings["cbuild"].split("-")[0]
+ else:
+ build = platform.machine()
+ self.settings["buildarch"] = build
+
+ arch_dir = normpath(self.settings['sharedir'] + '/arch/')
+
+ log.debug("Searching arch definitions...")
+ for x in [x for x in os.listdir(arch_dir) if x.endswith('.toml')]:
+ log.debug("\tTrying %s", x)
+ name = x[:-len('.toml')]
+
+ with open(arch_dir + x) as file:
+ arch_config = toml.load(file)
+
+ # Search for a subarchitecture in each arch in the arch_config
+ for arch in [x for x in arch_config if x.startswith(name) and host in arch_config[x]]:
+ self.settings.update(arch_config[arch][host])
+ setarch = arch_config.get('setarch', {})
+ break
+ else:
+ # Didn't find a matching subarchitecture, keep searching
+ continue
+
+ break
+ else:
+ raise CatalystError("Unknown host machine type " + host)
+
+ if setarch.get('if_build', '') == platform.machine():
+ chroot = f'setarch {setarch["arch"]} chroot'
+ else:
+ chroot = 'chroot'
+ self.settings["CHROOT"] = chroot
+
+ self.settings["crosscompile"] = \
+ build != host and not chroot.startswith('setarch')
+
+ log.notice('Using target: %s', self.settings['target'])
+ # Print a nice informational message
+ if self.settings["crosscompile"]:
+ log.info('Cross-compiling on %s for different machine type %s',
+ build, host)
+ elif chroot.startswith('setarch'):
+ log.info('Building on %s for alternate personality type %s',
+ build, host)
+ else:
+ log.info('Building natively for %s', host)
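
A condensed sketch of the build/host/setarch decision above; the CHOST
values and setarch mapping are examples, and platform.machine() is
replaced by an explicit argument so the sketch is reproducible:

    def classify(chost, machine, setarch=None):
        host = chost.split("-")[0]
        build = machine  # no CBUILD given, so fall back to platform.machine()
        setarch = setarch or {}
        if setarch.get("if_build") == machine:
            chroot = "setarch %s chroot" % setarch["arch"]
        else:
            chroot = "chroot"
        crosscompile = build != host and not chroot.startswith("setarch")
        return build, host, chroot, crosscompile

    # x86 on an amd64 host via the arch .toml's setarch entry: a personality
    # switch, not a cross-compile
    print(classify("i686-pc-linux-gnu", "x86_64",
                   {"if_build": "x86_64", "arch": "linux32"}))
    # arm64 on an amd64 host with no applicable setarch: a true cross-compile
    print(classify("aarch64-unknown-linux-gnu", "x86_64"))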
+
+ # This must be set first as other set_ options depend on this
+ self.set_spec_prefix()
+
+ # Initialize our (de)compressors
+ self.decompressor = CompressMap(self.settings["decompress_definitions"],
+ env=self.env,
+ search_order=self.settings["decompressor_search_order"],
+ comp_prog=self.settings["comp_prog"],
+ decomp_opt=self.settings["decomp_opt"])
+ self.accepted_extensions = self.decompressor.search_order_extensions(
+ self.settings["decompressor_search_order"])
+ log.notice("Source file specification matching setting is: %s",
+ self.settings["source_matching"])
+ log.notice("Accepted source file extensions search order: %s",
+ self.accepted_extensions)
+ # save resources, it is not always needed
+ self.compressor = None
+
+ # Define all of our core variables
+ self.set_target_profile()
+ self.set_target_subpath()
+ self.set_source_subpath()
+
+ # Set paths
+ self.set_snapshot_path()
+ self.set_root_path()
+ self.set_source_path()
+ self.set_snapcache_path()
+ self.set_chroot_path()
+ self.set_autoresume_path()
+ self.set_dest_path()
+ self.set_stage_path()
+ self.set_target_path()
+
+ self.set_controller_file()
+ self.set_default_action_sequence()
+ self.set_use()
+ self.set_catalyst_use()
+ self.set_cleanables()
+ self.set_iso_volume_id()
+ self.set_build_kernel_vars()
+ self.set_fsscript()
+ self.set_install_mask()
+ self.set_rcadd()
+ self.set_rcdel()
+ self.set_cdtar()
+ self.set_fstype()
+ self.set_fsops()
+ self.set_iso()
+ self.set_packages()
+ self.set_rm()
+ self.set_linuxrc()
+ self.set_busybox_config()
+ self.set_overlay()
+ self.set_portage_overlay()
+ self.set_root_overlay()
+
+ # This next line checks to make sure that the specified variables exist on disk.
+ # pdb.set_trace()
+ file_locate(self.settings, ["distdir"], expand=0)
+ # If we are using portage_confdir, check that as well.
+ if "portage_confdir" in self.settings:
+ file_locate(self.settings, ["portage_confdir"], expand=0)
+
+ # Setup our mount points.
+ # initialize our target mounts.
+ self.target_mounts = TARGET_MOUNT_DEFAULTS.copy()
+
+ self.mounts = ["proc", "dev", "portdir", "distdir", "port_tmpdir"]
+ # initialize our source mounts
+ self.mountmap = SOURCE_MOUNT_DEFAULTS.copy()
+ # update these from settings
+ self.mountmap["portdir"] = self.settings["portdir"]
+ self.mountmap["distdir"] = self.settings["distdir"]
+ self.target_mounts["portdir"] = normpath(self.settings["repo_basedir"] +
+ "/" + self.settings["repo_name"])
+ self.target_mounts["distdir"] = self.settings["target_distdir"]
+ self.target_mounts["packagedir"] = self.settings["target_pkgdir"]
+ if "snapcache" not in self.settings["options"]:
+ self.mounts.remove("portdir")
+ self.mountmap["portdir"] = None
+ else:
+ self.mountmap["portdir"] = normpath("/".join([
+ self.settings["snapshot_cache_path"],
+ self.settings["repo_name"],
+ ]))
+ self.mounts.append("devpts")
+ self.mounts.append("shm")
+ self.mounts.append("run")
+
+ self.set_mounts()
+
+ # Configure any user specified options (either in catalyst.conf or on
+ # the command line).
+ if "pkgcache" in self.settings["options"]:
+ self.set_pkgcache_path()
+ log.info('Location of the package cache is %s',
+ self.settings['pkgcache_path'])
+ self.mounts.append("packagedir")
+ self.mountmap["packagedir"] = self.settings["pkgcache_path"]
+
+ if "kerncache" in self.settings["options"]:
+ self.set_kerncache_path()
+ log.info('Location of the kerncache is %s',
+ self.settings['kerncache_path'])
+ self.mounts.append("kerncache")
+ self.mountmap["kerncache"] = self.settings["kerncache_path"]
+
+ if "ccache" in self.settings["options"]:
+ if "CCACHE_DIR" in os.environ:
+ ccdir = os.environ["CCACHE_DIR"]
+ del os.environ["CCACHE_DIR"]
+ else:
+ ccdir = "/var/tmp/ccache"
+ if not os.path.isdir(ccdir):
+ raise CatalystError(
+ "Compiler cache support can't be enabled (can't find " + ccdir+")")
+ self.mounts.append("ccache")
+ self.mountmap["ccache"] = ccdir
+ # for the chroot:
+ self.env["CCACHE_DIR"] = self.target_mounts["ccache"]
+
+ if "icecream" in self.settings["options"]:
+ self.mounts.append("icecream")
+ self.mountmap["icecream"] = self.settings["icecream"]
+ self.env["PATH"] = self.target_mounts["icecream"] + \
+ ":" + self.env["PATH"]
+
+ if "port_logdir" in self.settings:
+ self.mounts.append("port_logdir")
+ self.mountmap["port_logdir"] = self.settings["port_logdir"]
+ self.env["PORT_LOGDIR"] = self.settings["port_logdir"]
+ self.env["PORT_LOGDIR_CLEAN"] = PORT_LOGDIR_CLEAN
+
+ def override_cbuild(self):
+ if "CBUILD" in self.makeconf:
+ self.settings["CBUILD"] = self.makeconf["CBUILD"]
+
+ def override_chost(self):
+ if "CHOST" in self.makeconf:
+ self.settings["CHOST"] = self.makeconf["CHOST"]
+
+ def override_cflags(self):
+ if "CFLAGS" in self.makeconf:
+ self.settings["CFLAGS"] = self.makeconf["CFLAGS"]
+
+ def override_cxxflags(self):
+ if "CXXFLAGS" in self.makeconf:
+ self.settings["CXXFLAGS"] = self.makeconf["CXXFLAGS"]
+
+ def override_fcflags(self):
+ if "FCFLAGS" in self.makeconf:
+ self.settings["FCFLAGS"] = self.makeconf["FCFLAGS"]
+
+ def override_fflags(self):
+ if "FFLAGS" in self.makeconf:
+ self.settings["FFLAGS"] = self.makeconf["FFLAGS"]
+
+ def override_ldflags(self):
+ if "LDFLAGS" in self.makeconf:
+ self.settings["LDFLAGS"] = self.makeconf["LDFLAGS"]
+
+ def override_asflags(self):
+ if "ASFLAGS" in self.makeconf:
+ self.settings["ASFLAGS"] = self.makeconf["ASFLAGS"]
+
+ def override_common_flags(self):
+ if "COMMON_FLAGS" in self.makeconf:
+ self.settings["COMMON_FLAGS"] = self.makeconf["COMMON_FLAGS"]
+
+ def set_install_mask(self):
+ if "install_mask" in self.settings:
+ if not isinstance(self.settings['install_mask'], str):
+ self.settings["install_mask"] = \
+ ' '.join(self.settings["install_mask"])
+
+ def set_spec_prefix(self):
+ self.settings["spec_prefix"] = self.settings["target"]
+
+ def set_target_profile(self):
+ self.settings["target_profile"] = self.settings["profile"]
+
+ def set_target_subpath(self):
+ common = self.settings["rel_type"] + "/" + \
+ self.settings["target"] + "-" + self.settings["subarch"]
+ self.settings["target_subpath"] = \
+ common + \
+ "-" + self.settings["version_stamp"] + \
+ "/"
+ self.settings["target_subpath_unversioned"] = \
+ common + \
+ "/"
+
+ def set_source_subpath(self):
+ if not isinstance(self.settings['source_subpath'], str):
+ raise CatalystError(
+ "source_subpath should have been a string. Perhaps you have " +
+ "something wrong in your spec file?")
+
+ def set_pkgcache_path(self):
+ if "pkgcache_path" in self.settings:
+ if not isinstance(self.settings['pkgcache_path'], str):
+ self.settings["pkgcache_path"] = \
+ normpath(self.settings["pkgcache_path"])
+ elif "versioned_cache" in self.settings["options"]:
+ self.settings["pkgcache_path"] = \
+ normpath(self.settings["storedir"] + "/packages/" +
+ self.settings["target_subpath"] + "/")
+ else:
+ self.settings["pkgcache_path"] = \
+ normpath(self.settings["storedir"] + "/packages/" +
+ self.settings["target_subpath_unversioned"] + "/")
+
+ def set_kerncache_path(self):
+ if "kerncache_path" in self.settings:
+ if not isinstance(self.settings['kerncache_path'], str):
+ self.settings["kerncache_path"] = \
+ normpath(self.settings["kerncache_path"])
+ elif "versioned_cache" in self.settings["options"]:
+ self.settings["kerncache_path"] = normpath(self.settings["storedir"] +
+ "/kerncache/" + self.settings["target_subpath"])
+ else:
+ self.settings["kerncache_path"] = normpath(self.settings["storedir"] +
+ "/kerncache/" + self.settings["target_subpath_unversioned"])
+
+ def set_target_path(self):
+ self.settings["target_path"] = normpath(self.settings["storedir"] +
+ "/builds/" + self.settings["target_subpath"])
+ if "autoresume" in self.settings["options"]\
+ and self.resume.is_enabled("setup_target_path"):
+ log.notice(
+ 'Resume point detected, skipping target path setup operation...')
+ else:
+ self.resume.enable("setup_target_path")
+ ensure_dirs(self.settings["storedir"] + "/builds")
+
+ def set_fsscript(self):
+ if self.settings["spec_prefix"] + "/fsscript" in self.settings:
+ self.settings["fsscript"] = \
+ self.settings[self.settings["spec_prefix"] + "/fsscript"]
+ del self.settings[self.settings["spec_prefix"] + "/fsscript"]
+
+ def set_rcadd(self):
+ if self.settings["spec_prefix"] + "/rcadd" in self.settings:
+ self.settings["rcadd"] = \
+ self.settings[self.settings["spec_prefix"] + "/rcadd"]
+ del self.settings[self.settings["spec_prefix"] + "/rcadd"]
+
+ def set_rcdel(self):
+ if self.settings["spec_prefix"] + "/rcdel" in self.settings:
+ self.settings["rcdel"] = \
+ self.settings[self.settings["spec_prefix"] + "/rcdel"]
+ del self.settings[self.settings["spec_prefix"] + "/rcdel"]
+
+ def set_cdtar(self):
+ if self.settings["spec_prefix"] + "/cdtar" in self.settings:
+ self.settings["cdtar"] = \
+ normpath(
+ self.settings[self.settings["spec_prefix"] + "/cdtar"])
+ del self.settings[self.settings["spec_prefix"] + "/cdtar"]
+
+ def set_iso(self):
+ if self.settings["spec_prefix"] + "/iso" in self.settings:
+ if self.settings[self.settings["spec_prefix"] + "/iso"].startswith('/'):
+ self.settings["iso"] = \
+ normpath(
+ self.settings[self.settings["spec_prefix"] + "/iso"])
+ else:
+ # This automatically prepends the build dir to the ISO output path
+ # if it doesn't start with a /
+ self.settings["iso"] = normpath(self.settings["storedir"] +
+ "/builds/" + self.settings["rel_type"] + "/" +
+ self.settings[self.settings["spec_prefix"] + "/iso"])
+ del self.settings[self.settings["spec_prefix"] + "/iso"]
+
+ def set_fstype(self):
+ if self.settings["spec_prefix"] + "/fstype" in self.settings:
+ self.settings["fstype"] = \
+ self.settings[self.settings["spec_prefix"] + "/fstype"]
+ del self.settings[self.settings["spec_prefix"] + "/fstype"]
+
+ if "fstype" not in self.settings:
+ self.settings["fstype"] = "normal"
+ for x in self.valid_values:
+ if x == self.settings["spec_prefix"] + "/fstype":
+ log.info('%s/fstype is being set to the default of "normal"',
+ self.settings['spec_prefix'])
+
+ def set_fsops(self):
+ if "fstype" in self.settings:
+ self.valid_values |= {"fsops"}
+ if self.settings["spec_prefix"] + "/fsops" in self.settings:
+ self.settings["fsops"] = \
+ self.settings[self.settings["spec_prefix"] + "/fsops"]
+ del self.settings[self.settings["spec_prefix"] + "/fsops"]
+
+ def set_source_path(self):
+ if "seedcache" in self.settings["options"]\
+ and os.path.isdir(normpath(self.settings["storedir"] + "/tmp/" +
+ self.settings["source_subpath"] + "/")):
+ self.settings["source_path"] = normpath(self.settings["storedir"] +
+ "/tmp/" + self.settings["source_subpath"] + "/")
+ log.debug("source_subpath is: %s", self.settings["source_path"])
+ else:
+ log.debug('Checking source path existence and '
+ 'get the final filepath. subpath: %s',
+ self.settings["source_subpath"])
+ self.settings["source_path"] = file_check(
+ normpath(self.settings["storedir"] + "/builds/" +
+ self.settings["source_subpath"]),
+ self.accepted_extensions,
+ self.settings["source_matching"] in ["strict"]
+ )
+ log.debug('Source path returned from file_check is: %s',
+ self.settings["source_path"])
+ if os.path.isfile(self.settings["source_path"]):
+ # XXX: Is this even necessary if the previous check passes?
+ if os.path.exists(self.settings["source_path"]):
+ self.settings["source_path_hash"] = \
+ self.settings["hash_map"].generate_hash(
+ self.settings["source_path"],
+ hash_=self.settings["hash_function"])
+ log.notice('Source path set to %s', self.settings['source_path'])
+
+ def set_dest_path(self):
+ if "root_path" in self.settings:
+ self.settings["destpath"] = normpath(self.settings["chroot_path"] +
+ self.settings["root_path"])
+ else:
+ self.settings["destpath"] = normpath(self.settings["chroot_path"])
+
+ def set_cleanables(self):
+ self.settings["cleanables"] = ["/etc/resolv.conf", "/var/tmp/*", "/tmp/*",
+ self.settings["repo_basedir"] + "/" +
+ self.settings["repo_name"]]
+
+ def set_snapshot_path(self):
+ self.settings["snapshot_path"] = file_check(
+ normpath(self.settings["storedir"] +
+ "/snapshots/" + self.settings["snapshot_name"] +
+ self.settings["snapshot"]),
+ self.accepted_extensions,
+ self.settings["source_matching"] == "strict"
+ )
+ log.info('SNAPSHOT_PATH set to: %s', self.settings['snapshot_path'])
+ self.settings["snapshot_path_hash"] = \
+ self.settings["hash_map"].generate_hash(
+ self.settings["snapshot_path"],
+ hash_=self.settings["hash_function"])
+
+ def set_snapcache_path(self):
+ self.settings["snapshot_cache_path"] = \
+ normpath(pjoin(self.settings["snapshot_cache"],
+ self.settings["snapshot"]))
+ if "snapcache" in self.settings["options"]:
+ self.settings["snapshot_cache_path"] = \
+ normpath(pjoin(self.settings["snapshot_cache"],
+ self.settings["snapshot"]))
+ self.snapcache_lock = \
+ LockDir(self.settings["snapshot_cache_path"])
+ log.info('Setting snapshot cache to %s',
+ self.settings['snapshot_cache_path'])
+
+ def set_chroot_path(self):
+ """
+ NOTE: the trailing slash has been removed.
+ Things *could* break if you don't use a proper join().
+ """
+ self.settings["chroot_path"] = normpath(self.settings["storedir"] +
+ "/tmp/" + self.settings["target_subpath"].rstrip('/'))
+ self.chroot_lock = LockDir(self.settings["chroot_path"])
+
+ def set_autoresume_path(self):
+ self.settings["autoresume_path"] = normpath(pjoin(
+ self.settings["storedir"], "tmp", self.settings["rel_type"],
+ ".autoresume-%s-%s-%s"
+ % (self.settings["target"], self.settings["subarch"],
+ self.settings["version_stamp"])
+ ))
+ if "autoresume" in self.settings["options"]:
+ log.info('The autoresume path is %s',
+ self.settings['autoresume_path'])
+ self.resume = AutoResume(self.settings["autoresume_path"], mode=0o755)
+
+ def set_controller_file(self):
+ self.settings["controller_file"] = normpath(self.settings["sharedir"] +
+ "/targets/" + self.settings["target"] + "/" + "controller.sh")
+
+ def set_iso_volume_id(self):
+ if self.settings["spec_prefix"] + "/volid" in self.settings:
+ self.settings["iso_volume_id"] = \
+ self.settings[self.settings["spec_prefix"] + "/volid"]
+ if len(self.settings["iso_volume_id"]) > 32:
+ raise CatalystError(
+ "ISO volume ID must not exceed 32 characters.")
+ else:
+ self.settings["iso_volume_id"] = "catalyst " + \
+ self.settings["snapshot"]
+
+ def set_default_action_sequence(self):
+ """ Default action sequence for run method.
+
+ Either sets the short purgeonly action sequence and returns, or
+ calls the normal set_action_sequence() for the target stage.
+ """
+ if "purgeonly" in self.settings["options"]:
+ self.settings["action_sequence"] = ["remove_chroot"]
+ return
+ self.set_action_sequence()
+
+ def set_action_sequence(self):
+ """Set basic stage1, 2, 3 action sequences"""
+ self.settings["action_sequence"] = ["unpack", "unpack_snapshot",
+ "setup_confdir", "portage_overlay",
+ "bind", "chroot_setup", "setup_environment",
+ "run_local", "preclean", "unbind", "clean"]
+ self.set_completion_action_sequences()
+
+ def set_completion_action_sequences(self):
+ if "fetch" not in self.settings["options"]:
+ self.settings["action_sequence"].append("capture")
+ if "keepwork" in self.settings["options"]:
+ self.settings["action_sequence"].append("clear_autoresume")
+ elif "seedcache" in self.settings["options"]:
+ self.settings["action_sequence"].append("remove_autoresume")
+ else:
+ self.settings["action_sequence"].append("remove_autoresume")
+ self.settings["action_sequence"].append("remove_chroot")
+
+ def set_use(self):
+ use = self.settings["spec_prefix"] + "/use"
+ if use in self.settings:
+ if isinstance(self.settings[use], str):
+ self.settings["use"] = self.settings[use].split()
+ else:
+ self.settings["use"] = self.settings[use]
+ del self.settings[use]
+ else:
+ self.settings["use"] = []
+
+ def set_catalyst_use(self):
+ catalyst_use = self.settings["spec_prefix"] + "/catalyst_use"
+ if catalyst_use in self.settings:
+ if isinstance(self.settings[catalyst_use], str):
+ self.settings["catalyst_use"] = self.settings[catalyst_use].split()
+ else:
+ self.settings["catalyst_use"] = self.settings[catalyst_use]
+ del self.settings[catalyst_use]
+ else:
+ self.settings["catalyst_use"] = []
+
+ # Force bindist when options ask for it
+ if "bindist" in self.settings["options"]:
+ log.debug("Enabling bindist USE flag")
+ self.settings["catalyst_use"].append("bindist")
+
+ def set_stage_path(self):
+ self.settings["stage_path"] = normpath(self.settings["chroot_path"])
+
+ def set_mounts(self):
+ pass
+
+ def set_packages(self):
+ pass
+
+ def set_rm(self):
+ if self.settings["spec_prefix"] + "/rm" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix'] + '/rm'], str):
+ self.settings[self.settings["spec_prefix"] + "/rm"] = \
+ self.settings[self.settings["spec_prefix"] + "/rm"].split()
+
+ def set_linuxrc(self):
+ if self.settings["spec_prefix"] + "/linuxrc" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix'] + '/linuxrc'], str):
+ self.settings["linuxrc"] = \
+ self.settings[self.settings["spec_prefix"] + "/linuxrc"]
+ del self.settings[self.settings["spec_prefix"] + "/linuxrc"]
+
+ def set_busybox_config(self):
+ if self.settings["spec_prefix"] + "/busybox_config" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix'] + '/busybox_config'], str):
+ self.settings["busybox_config"] = \
+ self.settings[self.settings["spec_prefix"] +
+ "/busybox_config"]
+ del self.settings[self.settings["spec_prefix"] +
+ "/busybox_config"]
+
+ def set_portage_overlay(self):
+ if "portage_overlay" in self.settings:
+ if isinstance(self.settings['portage_overlay'], str):
+ self.settings["portage_overlay"] = \
+ self.settings["portage_overlay"].split()
+ log.info('portage_overlay directories are set to: %s',
+ ' '.join(self.settings['portage_overlay']))
+
+ def set_overlay(self):
+ if self.settings["spec_prefix"] + "/overlay" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix'] + '/overlay'], str):
+ self.settings[self.settings["spec_prefix"] + "/overlay"] = \
+ self.settings[self.settings["spec_prefix"] +
+ "/overlay"].split()
+
+ def set_root_overlay(self):
+ if self.settings["spec_prefix"] + "/root_overlay" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix'] + '/root_overlay'], str):
+ self.settings[self.settings["spec_prefix"] + "/root_overlay"] = \
+ self.settings[self.settings["spec_prefix"] +
+ "/root_overlay"].split()
+
+ def set_root_path(self):
+ """ ROOT= variable for emerges """
+ self.settings["root_path"] = "/"
+
+ def set_valid_build_kernel_vars(self, addlargs):
+ if "boot/kernel" in addlargs:
+ if isinstance(addlargs['boot/kernel'], str):
+ loopy = [addlargs["boot/kernel"]]
+ else:
+ loopy = addlargs["boot/kernel"]
+
+ for x in loopy:
+ self.valid_values |= frozenset([
+ "boot/kernel/" + x + "/aliases",
+ "boot/kernel/" + x + "/config",
+ "boot/kernel/" + x + "/console",
+ "boot/kernel/" + x + "/extraversion",
+ "boot/kernel/" + x + "/gk_action",
+ "boot/kernel/" + x + "/gk_kernargs",
+ "boot/kernel/" + x + "/initramfs_overlay",
+ "boot/kernel/" + x + "/kernelopts",
+ "boot/kernel/" + x + "/packages",
+ "boot/kernel/" + x + "/softlevel",
+ "boot/kernel/" + x + "/sources",
+ "boot/kernel/" + x + "/use",
+ ])
+ if "boot/kernel/" + x + "/packages" in addlargs:
+ if isinstance(addlargs['boot/kernel/' + x + '/packages'], str):
+ addlargs["boot/kernel/" + x + "/packages"] = \
+ [addlargs["boot/kernel/" + x + "/packages"]]
+
+ def set_build_kernel_vars(self):
+ prefix = self.settings["spec_prefix"]
+
+ gk_mainargs = prefix + "/gk_mainargs"
+ if gk_mainargs in self.settings:
+ self.settings["gk_mainargs"] = self.settings[gk_mainargs]
+ del self.settings[gk_mainargs]
+
+ # Ask genkernel to include b2sum if <target>/verify is set
+ verify = prefix + "/verify"
+ if verify in self.settings:
+ assert self.settings[verify] == "blake2"
+ self.settings.setdefault("gk_mainargs", []).append("--b2sum")
+
+ def kill_chroot_pids(self):
+ log.info('Checking for processes running in chroot and killing them.')
+
+ # Force environment variables to be exported so script can see them
+ self.setup_environment()
+
+ killcmd = normpath(self.settings["sharedir"] +
+ self.settings["shdir"] + "/support/kill-chroot-pids.sh")
+ if os.path.exists(killcmd):
+ cmd([killcmd], env=self.env)
+
+ def mount_safety_check(self):
+ """
+ Check and verify that none of the chroot's mount points are still
+ mounted. We don't want to clean up with things still mounted; if
+ anything is, try to auto-unbind it and raise CatalystError on failure.
+ """
+
+ if not os.path.exists(self.settings["chroot_path"]):
+ return
+
+ log.debug('self.mounts = %s', self.mounts)
+ for x in self.mounts:
+ target = normpath(
+ self.settings["chroot_path"] + self.target_mounts[x])
+ log.debug('mount_safety_check() x = %s %s', x, target)
+ if not os.path.exists(target):
+ continue
+
+ if ismount(target):
+ # Something is still mounted
+ try:
+ log.warning(
+ '%s is still mounted; performing auto-bind-umount...', target)
+ # Try to umount stuff ourselves
+ self.unbind()
+ if ismount(target):
+ raise CatalystError("Auto-unbind failed for " + target)
+ log.notice('Auto-unbind successful...')
+ except CatalystError:
+ raise CatalystError("Unable to auto-unbind " + target)
+
+ def unpack(self):
+
+ clst_unpack_hash = self.resume.get("unpack")
+
+ # Set up all unpack info settings
+ unpack_info = self.decompressor.create_infodict(
+ source=self.settings["source_path"],
+ destination=self.settings["chroot_path"],
+ arch=self.settings["compressor_arch"],
+ other_options=self.settings["compressor_options"],
+ )
+
+ display_msg = (
+ 'Starting %(mode)s from %(source)s\nto '
+ '%(destination)s (this may take some time) ..')
+
+ error_msg = "'%(mode)s' extraction of %(source)s to %(destination)s failed."
+
+ if "seedcache" in self.settings["options"]:
+ if os.path.isdir(unpack_info["source"]):
+ # SEEDCACHE is a directory, use rsync
+ unpack_info['mode'] = "rsync"
+ else:
+ # SEEDCACHE is not a directory, try untarring
+ log.notice(
+ 'Referenced SEEDCACHE does not appear to be a directory, trying to untar...')
+ unpack_info['source'] = file_check(unpack_info['source'])
+ else:
+ # No SEEDCACHE, use tar
+ unpack_info['source'] = file_check(unpack_info['source'])
+ # end of unpack_info settings
+
+ # set defaults,
+ # only change them if the resume point is proven to be good
+ _unpack = True
+ invalid_chroot = True
+ # Begin autoresume validation
+ if "autoresume" in self.settings["options"]:
+ # check chroot
+ if os.path.isdir(self.settings["chroot_path"]):
+ if self.resume.is_enabled("unpack"):
+ # Autoresume is valid in the chroot
+ _unpack = False
+ invalid_chroot = False
+ log.notice('Resume: "chroot" is valid...')
+ else:
+ # self.resume.is_disabled("unpack")
+ # Autoresume is invalid in the chroot
+ log.notice(
+ 'Resume: "seed source" unpack resume point is disabled')
+
+ # check seed source
+ if os.path.isfile(self.settings["source_path"]) and not invalid_chroot:
+ if self.settings["source_path_hash"].replace("\n", " ") == clst_unpack_hash:
+ # Seed tarball has not changed, chroot is valid
+ _unpack = False
+ invalid_chroot = False
+ log.notice('Resume: "seed source" hash matches chroot...')
+ else:
+ # self.settings["source_path_hash"] != clst_unpack_hash
+ # Seed tarball has changed, so invalidate the chroot
+ _unpack = True
+ invalid_chroot = True
+ log.notice(
+ 'Resume: "seed source" has changed, hashes do not match, invalidating resume...')
+ log.notice(' source_path......: %s',
+ self.settings["source_path"])
+ log.notice(' new source hash..: %s',
+ self.settings["source_path_hash"].replace("\n", " "))
+ log.notice(' recorded hash....: %s',
+ clst_unpack_hash)
+ unpack_info['source'] = file_check(unpack_info['source'])
+
+ else:
+ # No autoresume, check SEEDCACHE
+ if "seedcache" in self.settings["options"]:
+ # if the seedcache is a dir, rsync will clean up the chroot
+ if os.path.isdir(self.settings["source_path"]):
+ pass
+ elif os.path.isdir(self.settings["source_path"]):
+ # We should never reach this, so something is very wrong
+ raise CatalystError(
+ "source path is a dir but seedcache is not enabled: %s"
+ % self.settings["source_path"])
+
+ if _unpack:
+ self.mount_safety_check()
+
+ if invalid_chroot:
+ if "autoresume" in self.settings["options"]:
+ log.notice(
+ 'Resume: Target chroot is invalid, cleaning up...')
+
+ self.clear_autoresume()
+ self.clear_chroot()
+
+ ensure_dirs(self.settings["chroot_path"])
+
+ ensure_dirs(self.settings["chroot_path"] + "/tmp", mode=1777)
+
+ if "pkgcache" in self.settings["options"]:
+ ensure_dirs(self.settings["pkgcache_path"], mode=0o755)
+
+ if "kerncache" in self.settings["options"]:
+ ensure_dirs(self.settings["kerncache_path"], mode=0o755)
+
+ log.notice('%s', display_msg % unpack_info)
+
+ # now run the decompressor
+ if not self.decompressor.extract(unpack_info):
+ log.error('%s', error_msg % unpack_info)
+
+ if "source_path_hash" in self.settings:
+ self.resume.enable("unpack",
+ data=self.settings["source_path_hash"])
+ else:
+ self.resume.enable("unpack")
+ else:
+ log.notice(
+ 'Resume: Valid resume point detected, skipping seed unpack operation...')
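
The autoresume validation above reduces to a small decision keyed on
whether the chroot exists, whether the unpack resume point is set, and
whether the recorded seed hash still matches. A condensed sketch with the
real filesystem and hash checks replaced by plain arguments:

    def should_unpack(chroot_exists, resume_point_set, source_hash,
                      recorded_hash):
        # Returns (unpack_again, chroot_invalid), mirroring the logic above.
        unpack = invalid = True
        if chroot_exists and resume_point_set:
            unpack = invalid = False  # resume point trusted so far
        if not invalid and source_hash != recorded_hash:
            unpack = invalid = True   # seed changed under us: start over
        return unpack, invalid

    assert should_unpack(True, True, "abc", "abc") == (False, False)
    assert should_unpack(True, True, "abc", "xyz") == (True, True)
    assert should_unpack(False, False, "abc", "abc") == (True, True)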
+
+ def unpack_snapshot(self):
+ unpack = True
+ snapshot_hash = self.resume.get("unpack_repo")
+
+ unpack_errmsg = "Error unpacking snapshot using mode %(mode)s"
+
+ unpack_info = self.decompressor.create_infodict(
+ source=self.settings["snapshot_path"],
+ destination=self.settings["snapshot_cache_path"],
+ arch=self.settings["compressor_arch"],
+ other_options=self.settings["compressor_options"],
+ )
+
+ target_portdir = normpath(self.settings["chroot_path"] +
+ self.settings["repo_basedir"] + "/" + self.settings["repo_name"])
+ log.info('%s', self.settings['chroot_path'])
+ log.info('unpack_snapshot(), target_portdir = %s', target_portdir)
+ if "snapcache" in self.settings["options"]:
+ snapshot_cache_hash_path = pjoin(
+ self.settings['snapshot_cache_path'], 'catalyst-hash')
+ snapshot_cache_hash = fileutils.readfile(
+ snapshot_cache_hash_path, True)
+ unpack_info['mode'] = self.decompressor.determine_mode(
+ unpack_info['source'])
+
+ cleanup_msg = "Cleaning up invalid snapshot cache at \n\t" + \
+ self.settings["snapshot_cache_path"] + \
+ " (this can take a long time)..."
+
+ if self.settings["snapshot_path_hash"] == snapshot_cache_hash:
+ log.info(
+ 'Valid snapshot cache, skipping unpack of portage tree...')
+ unpack = False
+ else:
+ cleanup_msg = \
+ 'Cleaning up existing portage tree (this can take a long time)...'
+ unpack_info['destination'] = normpath(
+ self.settings["chroot_path"] + self.settings["repo_basedir"])
+ unpack_info['mode'] = self.decompressor.determine_mode(
+ unpack_info['source'])
+
+ if "autoresume" in self.settings["options"] \
+ and os.path.exists(target_portdir) \
+ and self.resume.is_enabled("unpack_repo") \
+ and self.settings["snapshot_path_hash"] == snapshot_hash:
+ log.notice(
+ 'Valid Resume point detected, skipping unpack of portage tree...')
+ unpack = False
+
+ if unpack:
+ if "snapcache" in self.settings["options"]:
+ self.snapcache_lock.write_lock()
+ if os.path.exists(target_portdir):
+ log.info('%s', cleanup_msg)
+ clear_dir(target_portdir)
+
+ log.notice('Unpacking portage tree (this can take a long time) ...')
+ if not self.decompressor.extract(unpack_info):
+ log.error('%s', unpack_errmsg % unpack_info)
+
+ if "snapcache" in self.settings["options"]:
+ with open(snapshot_cache_hash_path, 'w') as myf:
+ myf.write(self.settings["snapshot_path_hash"])
+ else:
+ log.info('Setting snapshot autoresume point')
+ self.resume.enable("unpack_repo",
+ data=self.settings["snapshot_path_hash"])
+
+ if "snapcache" in self.settings["options"]:
+ self.snapcache_lock.unlock()
+
+ def config_profile_link(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("config_profile_link"):
+ log.notice(
+ 'Resume point detected, skipping config_profile_link operation...')
+ else:
+ # TODO: zmedico and I discussed making this a directory and pushing
+ # in a parent file, as well as other user-specified configuration.
+ log.info('Configuring profile link...')
+ clear_path(self.settings['chroot_path'] +
+ self.settings['port_conf'] + '/make.profile')
+ ensure_dirs(self.settings['chroot_path'] +
+ self.settings['port_conf'])
+ cmd(['ln', '-sf',
+ '../..' + self.settings['portdir'] +
+ '/profiles/' + self.settings['target_profile'],
+ self.settings['chroot_path'] + self.settings['port_conf'] + '/make.profile'],
+ env=self.env)
+ self.resume.enable("config_profile_link")
+
+ def setup_confdir(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("setup_confdir"):
+ log.notice(
+ 'Resume point detected, skipping setup_confdir operation...')
+ else:
+ if "portage_confdir" in self.settings:
+ log.info('Configuring %s...', self.settings['port_conf'])
+ dest = normpath(
+ self.settings['chroot_path'] + '/' + self.settings['port_conf'])
+ ensure_dirs(dest)
+ # The trailing slashes on both paths are important:
+ # We want to make sure rsync copies the dirs into each
+ # other and not as subdirs.
+ cmd(['rsync', '-a', self.settings['portage_confdir'] + '/', dest + '/'],
+ env=self.env)
+ self.resume.enable("setup_confdir")
+
+ def portage_overlay(self):
+ """ We copy the contents of our overlays to /usr/local/portage """
+ if "portage_overlay" in self.settings:
+ for x in self.settings["portage_overlay"]:
+ if os.path.exists(x):
+ log.info('Copying overlay dir %s', x)
+ ensure_dirs(
+ self.settings['chroot_path'] + self.settings['local_overlay'])
+ cmd("cp -a " + x + "/* " + self.settings["chroot_path"] +
+ self.settings["local_overlay"],
+ env=self.env)
+
+ def root_overlay(self):
+ """ Copy over the root_overlay """
+ if self.settings["spec_prefix"] + "/root_overlay" in self.settings:
+ for x in self.settings[self.settings["spec_prefix"] +
+ "/root_overlay"]:
+ if os.path.exists(x):
+ log.info('Copying root_overlay: %s', x)
+ cmd(['rsync', '-a', x + '/', self.settings['chroot_path']],
+ env=self.env)
+
+ def bind(self):
+ for x in self.mounts:
+ log.debug('bind(); x = %s', x)
+ target = normpath(
+ self.settings["chroot_path"] + self.target_mounts[x])
+ ensure_dirs(target, mode=0o755)
+
+ if not os.path.exists(self.mountmap[x]):
+ if self.mountmap[x] not in ("maybe_tmpfs", "tmpfs", "shmfs"):
+ ensure_dirs(self.mountmap[x], mode=0o755)
+
+ src = self.mountmap[x]
+ log.debug('bind(); src = %s', src)
+ if "snapcache" in self.settings["options"] and x == "portdir":
+ self.snapcache_lock.read_lock()
+ _cmd = None
+ if src == "maybe_tmpfs":
+ if "var_tmpfs_portage" in self.settings:
+ _cmd = ['mount', '-t', 'tmpfs',
+ '-o', 'size=' +
+ self.settings['var_tmpfs_portage'] + 'G',
+ src, target]
+ elif src == "tmpfs":
+ _cmd = ['mount', '-t', 'tmpfs', src, target]
+ else:
+ if src == "shmfs":
+ _cmd = ['mount', '-t', 'tmpfs', '-o',
+ 'noexec,nosuid,nodev', 'shm', target]
+ else:
+ _cmd = ['mount', '--bind', src, target]
+ if _cmd:
+ log.debug('bind(); _cmd = %s', _cmd)
+ cmd(_cmd, env=self.env, fail_func=self.unbind)
+ log.debug('bind(); finished :D')
+
+ def unbind(self):
+ ouch = 0
+ mypath = self.settings["chroot_path"]
+ myrevmounts = self.mounts[:]
+ myrevmounts.reverse()
+ # Unmount in reverse order for nested bind-mounts
+ for x in myrevmounts:
+ target = normpath(mypath + self.target_mounts[x])
+ if not os.path.exists(target):
+ log.notice('%s does not exist. Skipping', target)
+ continue
+
+ if not ismount(target):
+ log.notice('%s is not a mount point. Skipping', target)
+ continue
+
+ try:
+ cmd(['umount', target], env=self.env)
+ except CatalystError:
+ log.warning('First attempt to unmount failed: %s', target)
+ log.warning('Killing any pids still running in the chroot')
+
+ self.kill_chroot_pids()
+
+ try:
+ cmd(['umount', target], env=self.env)
+ except CatalystError:
+ ouch = 1
+ log.warning("Couldn't umount bind mount: %s", target)
+
+ if "snapcache" in self.settings["options"] and x == "/var/db/repos/gentoo":
+ try:
+ # It's possible the snapshot lock object isn't created yet.
+ # This is because mount safety check calls unbind before the
+ # target is fully initialized
+ self.snapcache_lock.unlock()
+ except Exception:
+ pass
+ if ouch:
+ # if any bind mounts really failed, then we need to raise
+ # this to potentially prevent an upcoming bash stage cleanup script
+ # from wiping our bind mounts.
+ raise CatalystError(
+ "Couldn't umount one or more bind-mounts; aborting for safety.")
+
+ def chroot_setup(self):
+ self.makeconf = read_makeconf(normpath(self.settings["chroot_path"] +
+ self.settings["make_conf"]))
+ self.override_cbuild()
+ self.override_chost()
+ self.override_cflags()
+ self.override_cxxflags()
+ self.override_fcflags()
+ self.override_fflags()
+ self.override_ldflags()
+ self.override_asflags()
+ self.override_common_flags()
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("chroot_setup"):
+ log.notice(
+ 'Resume point detected, skipping chroot_setup operation...')
+ else:
+ log.notice('Setting up chroot...')
+
+ shutil.copy('/etc/resolv.conf',
+ self.settings['chroot_path'] + '/etc/')
+
+ # Copy over the envscript, if applicable
+ if "envscript" in self.settings:
+ if not os.path.exists(self.settings["envscript"]):
+ raise CatalystError(
+ "Can't find envscript " + self.settings["envscript"],
+ print_traceback=True)
+
+ log.warning(
+ 'env variables in catalystrc may cause catastrophic failure.\n'
+ 'If your build fails look here first as the possible problem.')
+
+ shutil.copy(self.settings['envscript'],
+ self.settings['chroot_path'] + '/tmp/envscript')
+
+ # Copy over /etc/hosts from the host in case there are any
+ # specialties in there
+ hosts_file = self.settings['chroot_path'] + '/etc/hosts'
+ if os.path.exists(hosts_file):
+ os.rename(hosts_file, hosts_file + '.catalyst')
+ shutil.copy('/etc/hosts', hosts_file)
+ # write out the make.conf
+ try:
+ self.write_make_conf(setup=True)
+ except OSError as e:
+ raise CatalystError('Could not write %s: %s' % (
+ normpath(self.settings["chroot_path"] +
+ self.settings["make_conf"]), e))
+ self.resume.enable("chroot_setup")
+
+ def write_make_conf(self, setup=True):
+ # Modify and write out make.conf (for the chroot)
+ makepath = normpath(self.settings["chroot_path"] +
+ self.settings["make_conf"])
+ clear_path(makepath)
+ with open(makepath, "w") as myf:
+ log.notice("Writing the stage make.conf to: %s" % makepath)
+ myf.write("# These settings were set by the catalyst build script "
+ "that automatically\n# built this stage.\n")
+ myf.write("# Please consult "
+ "/usr/share/portage/config/make.conf.example "
+ "for a more\n# detailed example.\n")
+
+ for flags in ["COMMON_FLAGS", "CFLAGS", "CXXFLAGS", "FCFLAGS", "FFLAGS",
+ "LDFLAGS", "ASFLAGS"]:
+ if flags in ["LDFLAGS", "ASFLAGS"]:
+ if flags not in self.settings:
+ continue
+ myf.write("# %s is unsupported. USE AT YOUR OWN RISK!\n"
+ % flags)
+ if flags not in self.settings or (flags != "COMMON_FLAGS" and
+ self.settings[flags] == self.settings["COMMON_FLAGS"]):
+ myf.write('%s="${COMMON_FLAGS}"\n' % flags)
+ elif isinstance(self.settings[flags], list):
+ myf.write('%s="%s"\n'
+ % (flags, ' '.join(self.settings[flags])))
+ else:
+ myf.write('%s="%s"\n'
+ % (flags, self.settings[flags]))
+
+ if "CBUILD" in self.settings:
+ myf.write("# This should not be changed unless you know exactly"
+ " what you are doing. You\n# should probably be "
+ "using a different stage, instead.\n")
+ myf.write('CBUILD="' + self.settings["CBUILD"] + '"\n')
+
+ if "CHOST" in self.settings:
+ myf.write("# WARNING: Changing your CHOST is not something "
+ "that should be done lightly.\n# Please consult "
+ "https://wiki.gentoo.org/wiki/Changing_the_CHOST_variable "
+ "before changing.\n")
+ myf.write('CHOST="' + self.settings["CHOST"] + '"\n')
+
+ # Figure out what our USE vars are for building
+ myusevars = []
+ if "bindist" in self.settings["options"]:
+ myf.write(
+ "\n# NOTE: This stage was built with the bindist Use flag enabled\n")
+ if setup or "sticky-config" in self.settings["options"]:
+ myusevars.extend(self.settings["catalyst_use"])
+ log.notice("STICKY-CONFIG is enabled")
+ if "HOSTUSE" in self.settings:
+ myusevars.extend(self.settings["HOSTUSE"])
+
+ if "use" in self.settings:
+ myusevars.extend(self.settings["use"])
+
+ if myusevars:
+ myf.write("# These are the USE and USE_EXPAND flags that were "
+ "used for\n# building in addition to what is provided "
+ "by the profile.\n")
+ myusevars = sorted(set(myusevars))
+ myf.write('USE="' + ' '.join(myusevars) + '"\n')
+ if '-*' in myusevars:
+ log.warning(
+ 'The use of -* in %s/use will cause portage to ignore\n'
+ 'package.use in the profile and portage_confdir.\n'
+ "You've been warned!", self.settings['spec_prefix'])
+
+ myuseexpandvars = {}
+ if "HOSTUSEEXPAND" in self.settings:
+ for hostuseexpand in self.settings["HOSTUSEEXPAND"]:
+ myuseexpandvars.update(
+ {hostuseexpand: self.settings["HOSTUSEEXPAND"][hostuseexpand]})
+
+ if myuseexpandvars:
+ for hostuseexpand in myuseexpandvars:
+ myf.write(hostuseexpand + '="' +
+ ' '.join(myuseexpandvars[hostuseexpand]) + '"\n')
+ # write out a shippable version
+ target_portdir = normpath(self.settings["repo_basedir"] + "/" +
+ self.settings["repo_name"])
+
+ myf.write('PORTDIR="%s"\n' % target_portdir)
+ myf.write('DISTDIR="%s"\n' % self.settings['target_distdir'])
+ myf.write('PKGDIR="%s"\n' % self.settings['target_pkgdir'])
+ if setup:
+ # Setup the portage overlay
+ if "portage_overlay" in self.settings:
+ myf.write('PORTDIR_OVERLAY="%s"\n' %
+ self.settings["local_overlay"])
+
+ # Set default locale for system responses. #478382
+ myf.write(
+ '\n'
+ '# This sets the language of build output to English.\n'
+ '# Please keep this setting intact when reporting bugs.\n'
+ 'LC_MESSAGES=C\n')
+
+ def fsscript(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("fsscript"):
+ log.notice('Resume point detected, skipping fsscript operation...')
+ else:
+ if "fsscript" in self.settings:
+ if os.path.exists(self.settings["controller_file"]):
+ cmd([self.settings['controller_file'], 'fsscript'],
+ env=self.env)
+ self.resume.enable("fsscript")
+
+ def rcupdate(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("rcupdate"):
+ log.notice('Resume point detected, skipping rcupdate operation...')
+ else:
+ if os.path.exists(self.settings["controller_file"]):
+ cmd([self.settings['controller_file'], 'rc-update'],
+ env=self.env)
+ self.resume.enable("rcupdate")
+
+ def clean(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("clean"):
+ log.notice('Resume point detected, skipping clean operation...')
+ else:
+ for x in self.settings["cleanables"]:
+ log.notice('Cleaning chroot: %s', x)
+ clear_path(normpath(self.settings["destpath"] + x))
+
+ # Put /etc/hosts back into place
+ hosts_file = self.settings['chroot_path'] + '/etc/hosts'
+ if os.path.exists(hosts_file + '.catalyst'):
+ os.rename(hosts_file + '.catalyst', hosts_file)
+
+ # optionally clean up portage configs
+ if ("portage_prefix" in self.settings and
+ "sticky-config" not in self.settings["options"]):
+ log.debug("clean(), portage_preix = %s, no sticky-config",
+ self.settings["portage_prefix"])
+ for _dir in "accept_keywords", "keywords", "mask", "unmask", "use":
+ target = pjoin(self.settings["destpath"],
+ "etc/portage/package.%s" % _dir,
+ self.settings["portage_prefix"])
+ log.notice("Clearing portage_prefix target: %s", target)
+ clear_path(target)
+
+ # Remove our overlay
+ overlay = normpath(
+ self.settings["chroot_path"] + self.settings["local_overlay"])
+ if os.path.exists(overlay):
+ clear_path(overlay)
+
+ if "sticky-config" not in self.settings["options"]:
+ # re-write the make.conf to be sure it is clean
+ self.write_make_conf(setup=False)
+
+ # Clean up old and obsoleted files in /etc
+ if os.path.exists(self.settings["stage_path"]+"/etc"):
+ cmd(['find', self.settings['stage_path'] + '/etc',
+ '-maxdepth', '1', '-name', '*-', '-delete'],
+ env=self.env)
+
+ if os.path.exists(self.settings["controller_file"]):
+ cmd([self.settings['controller_file'], 'clean'], env=self.env)
+ self.resume.enable("clean")
+
+ def empty(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("empty"):
+ log.notice('Resume point detected, skipping empty operation...')
+ else:
+ if self.settings["spec_prefix"] + "/empty" in self.settings:
+ if isinstance(
+ self.settings[self.settings['spec_prefix'] + '/empty'],
+ str):
+ self.settings[self.settings["spec_prefix"] + "/empty"] = \
+ self.settings[self.settings["spec_prefix"] +
+ "/empty"].split()
+ for x in self.settings[self.settings["spec_prefix"] + "/empty"]:
+ myemp = self.settings["destpath"] + x
+ if not os.path.isdir(myemp) or os.path.islink(myemp):
+ log.warning('not a directory or does not exist, '
+ 'skipping "empty" operation: %s', x)
+ continue
+ log.info('Emptying directory %s', x)
+ clear_dir(myemp)
+ self.resume.enable("empty")
+
+ def remove(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("remove"):
+ log.notice('Resume point detected, skipping remove operation...')
+ else:
+ if self.settings["spec_prefix"] + "/rm" in self.settings:
+ for x in self.settings[self.settings["spec_prefix"] + "/rm"]:
+ # We're going to shell out for all these cleaning
+ # operations, so we get easy glob handling.
+ log.notice('livecd: removing %s', x)
+ clear_path(self.settings["chroot_path"] + x)
+ try:
+ if os.path.exists(self.settings["controller_file"]):
+ cmd([self.settings['controller_file'], 'clean'],
+ env=self.env)
+ self.resume.enable("remove")
+ except:
+ self.unbind()
+ raise
+
+ def preclean(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("preclean"):
+ log.notice('Resume point detected, skipping preclean operation...')
+ else:
+ try:
+ if os.path.exists(self.settings["controller_file"]):
+ cmd([self.settings['controller_file'], 'preclean'],
+ env=self.env)
+ self.resume.enable("preclean")
+
+ except:
+ self.unbind()
+ raise CatalystError("Build failed, could not execute preclean")
+
+ def capture(self):
+ # initialize it here so it doesn't use
+ # resources if it is not needed
+ if not self.compressor:
+ self.compressor = CompressMap(self.settings["compress_definitions"],
+ env=self.env, default_mode=self.settings['compression_mode'],
+ comp_prog=self.settings['comp_prog'])
+
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("capture"):
+ log.notice('Resume point detected, skipping capture operation...')
+ else:
+ log.notice('Capture target in a tarball')
+ # Remove filename from path
+ mypath = os.path.dirname(self.settings["target_path"].rstrip('/'))
+
+ # Now make sure path exists
+ ensure_dirs(mypath)
+
+ pack_info = self.compressor.create_infodict(
+ source=".",
+ basedir=self.settings["stage_path"],
+ filename=self.settings["target_path"].rstrip('/'),
+ mode=self.settings["compression_mode"],
+ auto_extension=True,
+ arch=self.settings["compressor_arch"],
+ other_options=self.settings["compressor_options"],
+ )
+ target_filename = ".".join([self.settings["target_path"].rstrip('/'),
+ self.compressor.extension(pack_info['mode'])])
+
+ log.notice('Creating stage tarball... mode: %s',
+ self.settings['compression_mode'])
+
+ if self.compressor.compress(pack_info):
+ self.gen_contents_file(target_filename)
+ self.gen_digest_file(target_filename)
+ self.resume.enable("capture")
+ else:
+ log.warning("Couldn't create stage tarball: %s",
+ target_filename)
+
+ def run_local(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("run_local"):
+ log.notice('Resume point detected, skipping run_local operation...')
+ else:
+ try:
+ if os.path.exists(self.settings["controller_file"]):
+ log.info('run_local() starting controller script...')
+ cmd([self.settings['controller_file'], 'run'],
+ env=self.env)
+ self.resume.enable("run_local")
+ else:
+ log.info('run_local() no controller_file found... %s',
+ self.settings['controller_file'])
+
+ except CatalystError:
+ self.unbind()
+ raise CatalystError("Stage build aborting due to error.",
+ print_traceback=False)
+
+ def setup_environment(self):
+ """
+ Modify the current environment. This is an ugly hack that should be
+ fixed. We need this to use the os.system() call since we can't
+ specify our own environ
+ """
+ log.debug('setup_environment(); settings = %r', self.settings)
+ for x in list(self.settings):
+ log.debug('setup_environment(); processing: %s', x)
+ if x == "options":
+ for opt in self.settings[x]:
+ self.env['clst_' + opt.upper()] = "true"
+ continue
+ # Sanitize var names by doing "s|/-.|_|g"
+ varname = "clst_" + x.replace("/", "_")
+ varname = varname.replace("-", "_")
+ varname = varname.replace(".", "_")
+ if isinstance(self.settings[x], str):
+ # Prefix to prevent namespace clashes
+ if "path" in x:
+ self.env[varname] = self.settings[x].rstrip("/")
+ else:
+ self.env[varname] = self.settings[x]
+ elif isinstance(self.settings[x], list):
+ self.env[varname] = ' '.join(self.settings[x])
+ elif isinstance(self.settings[x], bool):
+ if self.settings[x]:
+ self.env[varname] = "true"
+ else:
+ self.env[varname] = "false"
+ # This handles a dictionary of objects just one level deep and no deeper!
+ # It's currently used only for USE_EXPAND flags, which are dictionaries of
+ # lists in arch/amd64.py and friends. If we wanted self.settings[var]
+ # of any depth, we should make this function recursive.
+ elif isinstance(self.settings[x], dict):
+ if x in ["compress_definitions",
+ "decompress_definitions"]:
+ continue
+ self.env[varname] = ' '.join(self.settings[x].keys())
+ for y in self.settings[x].keys():
+ varname2 = "clst_" + y.replace("/", "_")
+ varname2 = varname2.replace("-", "_")
+ varname2 = varname2.replace(".", "_")
+ if isinstance(self.settings[x][y], str):
+ self.env[varname2] = self.settings[x][y]
+ elif isinstance(self.settings[x][y], list):
+ self.env[varname2] = ' '.join(self.settings[x][y])
+ elif isinstance(self.settings[x][y], bool):
+ if self.settings[x][y]:
+ self.env[varname] = "true"
+ else:
+ self.env[varname] = "false"
+
+ if "makeopts" in self.settings:
+ if isinstance(self.settings["makeopts"], str):
+ self.env["MAKEOPTS"] = self.settings["makeopts"]
+ else:
+ # ensure makeopts is a string
+ self.env["MAKEOPTS"] = ' '.join(self.settings["makeopts"])
+
+ log.debug('setup_environment(); env = %r', self.env)
+
+ def run(self):
+ self.chroot_lock.write_lock()
+
+ # Kill any pids in the chroot
+ self.kill_chroot_pids()
+
+ # Check for mounts right away and abort if we cannot unmount them
+ self.mount_safety_check()
+
+ if "clear-autoresume" in self.settings["options"]:
+ self.clear_autoresume()
+
+ if "purgetmponly" in self.settings["options"]:
+ self.purge()
+ return True
+
+ if "purgeonly" in self.settings["options"]:
+ log.info('StageBase: run() purgeonly')
+ self.purge()
+
+ if "purge" in self.settings["options"]:
+ log.info('StageBase: run() purge')
+ self.purge()
+
+ failure = False
+ for x in self.settings["action_sequence"]:
+ log.notice('--- Running action sequence: %s', x)
+ sys.stdout.flush()
+ try:
+ getattr(self, x)()
+ except LockInUse:
+ log.error('Unable to acquire the lock...')
+ failure = True
+ break
+ except Exception:
+ log.error('Exception running action sequence %s',
+ x, exc_info=True)
+ failure = True
+ break
+
+ if failure:
+ log.notice('Cleaning up... Running unbind()')
+ self.unbind()
+ return False
+ return True
+
+ def unmerge(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("unmerge"):
+ log.notice('Resume point detected, skipping unmerge operation...')
+ else:
+ if self.settings["spec_prefix"] + "/unmerge" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix'] + '/unmerge'], str):
+ self.settings[self.settings["spec_prefix"] + "/unmerge"] = \
+ [self.settings[self.settings["spec_prefix"] + "/unmerge"]]
+
+ # Before cleaning, unmerge stuff
+ try:
+ cmd([self.settings['controller_file'], 'unmerge'] +
+ self.settings[self.settings['spec_prefix'] + '/unmerge'],
+ env=self.env)
+ log.info('unmerge shell script')
+ except CatalystError:
+ self.unbind()
+ raise
+ self.resume.enable("unmerge")
+
+ def target_setup(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("target_setup"):
+ log.notice(
+ 'Resume point detected, skipping target_setup operation...')
+ else:
+ log.notice('Setting up filesystems per filesystem type')
+ cmd([self.settings['controller_file'], 'target_image_setup',
+ self.settings['target_path']], env=self.env)
+ self.resume.enable("target_setup")
+
+ def setup_overlay(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("setup_overlay"):
+ log.notice(
+ 'Resume point detected, skipping setup_overlay operation...')
+ else:
+ if self.settings["spec_prefix"] + "/overlay" in self.settings:
+ for x in self.settings[self.settings["spec_prefix"] + "/overlay"]:
+ if os.path.exists(x):
+ cmd(['rsync', '-a', x + '/', self.settings['target_path']],
+ env=self.env)
+ self.resume.enable("setup_overlay")
+
+ def create_iso(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("create_iso"):
+ log.notice(
+ 'Resume point detected, skipping create_iso operation...')
+ else:
+ # Create the ISO
+ if "iso" in self.settings:
+ cmd([self.settings['controller_file'], 'iso', self.settings['iso']],
+ env=self.env)
+ self.gen_contents_file(self.settings["iso"])
+ self.gen_digest_file(self.settings["iso"])
+ self.resume.enable("create_iso")
+ else:
+ log.warning('livecd/iso was not defined. '
+ 'An ISO Image will not be created.')
+
+ def build_packages(self):
+ build_packages_resume = pjoin(self.settings["autoresume_path"],
+ "build_packages")
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("build_packages"):
+ log.notice(
+ 'Resume point detected, skipping build_packages operation...')
+ else:
+ if self.settings["spec_prefix"] + "/packages" in self.settings:
+ target_pkgs = self.settings["spec_prefix"] + '/packages'
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("build_packages"):
+ log.notice('Resume point detected, skipping build_packages '
+ 'operation...')
+ else:
+ command = [self.settings['controller_file'],
+ 'build_packages']
+ if isinstance(self.settings[target_pkgs], str):
+ command.append(self.settings[target_pkgs])
+ else:
+ command.extend(self.settings[target_pkgs])
+ try:
+ cmd(command, env=self.env)
+ fileutils.touch(build_packages_resume)
+ self.resume.enable("build_packages")
+ except CatalystError:
+ self.unbind()
+ raise CatalystError(
+ self.settings["spec_prefix"] +
+ "build aborting due to error.")
+
+ def build_kernel(self):
+ '''Build all configured kernels'''
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("build_kernel"):
+ log.notice(
+ 'Resume point detected, skipping build_kernel operation...')
+ else:
+ if "boot/kernel" in self.settings:
+ try:
+ mynames = self.settings["boot/kernel"]
+ if isinstance(mynames, str):
+ mynames = [mynames]
+ # Execute the script that sets up the kernel build environment
+ cmd([self.settings['controller_file'], 'pre-kmerge'],
+ env=self.env)
+ for kname in mynames:
+ self._build_kernel(kname=kname)
+ self.resume.enable("build_kernel")
+ except CatalystError:
+ self.unbind()
+ raise CatalystError(
+ "build aborting due to kernel build error.",
+ print_traceback=True)
+
+ def _build_kernel(self, kname):
+ "Build a single configured kernel by name"
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("build_kernel_" + kname):
+ log.notice('Resume point detected, skipping build_kernel '
+ 'for %s operation...', kname)
+ return
+ self._copy_kernel_config(kname=kname)
+
+ # If we need to pass special options to the bootloader
+ # for this kernel put them into the environment
+ key = 'boot/kernel/' + kname + '/kernelopts'
+ if key in self.settings:
+ myopts = self.settings[key]
+
+ if not isinstance(myopts, str):
+ myopts = ' '.join(myopts)
+ self.env[kname + "_kernelopts"] = myopts
+ else:
+ self.env[kname + "_kernelopts"] = ""
+
+ key = 'boot/kernel/' + kname + '/extraversion'
+ self.settings.setdefault(key, '')
+ self.env["clst_kextraversion"] = self.settings[key]
+
+ self._copy_initramfs_overlay(kname=kname)
+
+ # Execute the script that builds the kernel
+ cmd([self.settings['controller_file'], 'kernel', kname],
+ env=self.env)
+
+ if "boot/kernel/" + kname + "/initramfs_overlay" in self.settings:
+ log.notice('Cleaning up temporary overlay dir')
+ clear_dir(self.settings['chroot_path'] + '/tmp/initramfs_overlay/')
+
+ self.resume.is_enabled("build_kernel_" + kname)
+
+ # Execute the script that cleans up the kernel build environment
+ cmd([self.settings['controller_file'], 'post-kmerge'],
+ env=self.env)
+
+ def _copy_kernel_config(self, kname):
+ key = 'boot/kernel/' + kname + '/config'
+ if key in self.settings:
+ if not os.path.exists(self.settings[key]):
+ self.unbind()
+ raise CatalystError("Can't find kernel config: %s" %
+ self.settings[key])
+
+ try:
+ shutil.copy(self.settings[key],
+ self.settings['chroot_path'] + '/var/tmp/' + kname + '.config')
+
+ except IOError:
+ self.unbind()
+
+ def _copy_initramfs_overlay(self, kname):
+ key = 'boot/kernel/' + kname + '/initramfs_overlay'
+ if key in self.settings:
+ if os.path.exists(self.settings[key]):
+ log.notice('Copying initramfs_overlay dir %s',
+ self.settings[key])
+
+ ensure_dirs(
+ self.settings['chroot_path'] +
+ '/tmp/initramfs_overlay/' + self.settings[key])
+
+ cmd('cp -R ' + self.settings[key] + '/* ' +
+ self.settings['chroot_path'] +
+ '/tmp/initramfs_overlay/' + self.settings[key], env=self.env)
+
+ def bootloader(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("bootloader"):
+ log.notice(
+ 'Resume point detected, skipping bootloader operation...')
+ else:
+ try:
+ cmd([self.settings['controller_file'], 'bootloader',
+ self.settings['target_path'].rstrip('/')],
+ env=self.env)
+ self.resume.enable("bootloader")
+ except CatalystError:
+ self.unbind()
+ raise CatalystError("Script aborting due to error.")
+
+ def livecd_update(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("livecd_update"):
+ log.notice(
+ 'Resume point detected, skipping livecd_update operation...')
+ else:
+ try:
+ cmd([self.settings['controller_file'], 'livecd-update'],
+ env=self.env)
+ self.resume.enable("livecd_update")
+
+ except CatalystError:
+ self.unbind()
+ raise CatalystError(
+ "build aborting due to livecd_update error.")
+
+ @staticmethod
+ def _debug_pause_():
+ input("press any key to continue: ")
# vim: ts=4 sw=4 sta et sts=4 ai
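
Before the next file, a note for anyone tracing the reformatted
stagebase.py: the heart of the class is run(), which resolves each name
in action_sequence to a method with getattr and aborts on the first
failure. A minimal, self-contained sketch of that dispatch pattern (the
class and step names below are simplified stand-ins, not the real
StageBase):

    class MiniStage:
        """Toy stand-in for StageBase; only the dispatch pattern is real."""

        def chroot_setup(self):
            print("setting up chroot")

        def capture(self):
            print("capturing tarball")

        def run(self, action_sequence):
            for name in action_sequence:
                try:
                    getattr(self, name)()  # resolve the step name to a method
                except Exception:
                    return False           # any failure aborts the sequence
            return True

    assert MiniStage().run(["chroot_setup", "capture"])
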
diff --git a/catalyst/base/targetbase.py b/catalyst/base/targetbase.py
index a09abc73..fa15ec11 100644
--- a/catalyst/base/targetbase.py
+++ b/catalyst/base/targetbase.py
@@ -4,26 +4,29 @@ from abc import ABC, abstractmethod
from catalyst.support import addl_arg_parse
+
class TargetBase(ABC):
- """
- The toplevel class for all targets. This is about as generic as we get.
- """
- def __init__(self, myspec, addlargs):
- addl_arg_parse(myspec,addlargs,self.required_values,self.valid_values)
- self.settings=myspec
- self.env = {
- 'PATH': '/bin:/sbin:/usr/bin:/usr/sbin',
- 'TERM': os.getenv('TERM', 'dumb'),
- }
+ """
+ The toplevel class for all targets. This is about as generic as we get.
+ """
+
+ def __init__(self, myspec, addlargs):
+ addl_arg_parse(myspec, addlargs, self.required_values,
+ self.valid_values)
+ self.settings = myspec
+ self.env = {
+ 'PATH': '/bin:/sbin:/usr/bin:/usr/sbin',
+ 'TERM': os.getenv('TERM', 'dumb'),
+ }
- @property
- @classmethod
- @abstractmethod
- def required_values(cls):
- return NotImplementedError
+ @property
+ @classmethod
+ @abstractmethod
+ def required_values(cls):
+ return NotImplementedError
- @property
- @classmethod
- @abstractmethod
- def valid_values(cls):
- return NotImplementedError
+ @property
+ @classmethod
+ @abstractmethod
+ def valid_values(cls):
+ return NotImplementedError
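
The required_values/valid_values pair above is the usual
abstract-attribute idiom: a concrete target must define both before it
can be instantiated. A reduced sketch of the contract (it omits the
@classmethod stacking from the real code, and the Stage subclass here is
hypothetical):

    from abc import ABC, abstractmethod

    class Base(ABC):
        @property
        @abstractmethod
        def required_values(self):
            raise NotImplementedError

    class Stage(Base):
        # Overriding the abstract name with a plain class attribute
        # satisfies the contract and makes the class instantiable.
        required_values = frozenset(["version_stamp", "target"])

    print(sorted(Stage().required_values))
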
diff --git a/catalyst/config.py b/catalyst/config.py
index 1571db9d..b527ada0 100644
--- a/catalyst/config.py
+++ b/catalyst/config.py
@@ -4,124 +4,128 @@ import re
from catalyst import log
from catalyst.support import CatalystError
+
class ParserBase():
- filename = ""
- lines = None
- values = None
- key_value_separator = "="
- multiple_values = False
- empty_values = True
- eval_none = False
-
- def __getitem__(self, key):
- return self.values[key]
-
- def get_values(self):
- return self.values
-
- def dump(self):
- dump = ""
- for x in self.values:
- dump += x + " = " + repr(self.values[x]) + "\n"
- return dump
-
- def parse_file(self, filename):
- try:
- with open(filename, "r") as myf:
- self.lines = myf.readlines()
- except:
- raise CatalystError("Could not open file " + filename,
- print_traceback=True)
- self.filename = filename
- self.parse()
-
- def parse_lines(self, lines):
- self.lines = lines
- self.parse()
-
- def parse(self):
- values = {}
- cur_array = []
-
- trailing_comment=re.compile(r'\s*#.*$')
-
- for x, myline in enumerate(self.lines):
- myline = myline.strip()
-
- # Force the line to be clean
- # Remove Comments ( anything following # )
- myline = trailing_comment.sub("", myline)
-
- # Skip any blank lines
- if not myline:
- continue
-
- if self.key_value_separator in myline:
- # Split on the first occurence of the separator creating two strings in the array mobjs
- mobjs = myline.split(self.key_value_separator, 1)
- mobjs[1] = mobjs[1].strip().strip('"')
-
- # Start a new array using the first element of mobjs
- cur_array = [mobjs[0]]
- if mobjs[1]:
- # do any variable substitiution embeded in it with
- # the values already obtained
- mobjs[1] = mobjs[1] % values
- if self.multiple_values:
- # split on white space creating additional array elements
- subarray = mobjs[1].split()
- cur_array += subarray
- else:
- cur_array += [mobjs[1]]
-
- # Else add on to the last key we were working on
- else:
- if self.multiple_values:
- cur_array += myline.split()
- else:
- raise CatalystError("Syntax error: %s" % x, print_traceback=True)
-
- # XXX: Do we really still need this "single value is a string" behavior?
- if len(cur_array) == 2:
- values[cur_array[0]] = cur_array[1]
- else:
- values[cur_array[0]] = cur_array[1:]
-
- if not self.empty_values:
- # Make sure the list of keys is static since we modify inside the loop.
- for x in list(values.keys()):
- # Delete empty key pairs
- if not values[x]:
- log.warning('No value set for key "%s"; deleting', x)
- del values[x]
-
- if self.eval_none:
- # Make sure the list of keys is static since we modify inside the loop.
- for x in list(values.keys()):
- # reset None values
- if isinstance(values[x], str) and values[x].lower() in ['none']:
- log.info('None value found for key "%s"; reseting', x)
- values[x] = None
- self.values = values
+ filename = ""
+ lines = None
+ values = None
+ key_value_separator = "="
+ multiple_values = False
+ empty_values = True
+ eval_none = False
+
+ def __getitem__(self, key):
+ return self.values[key]
+
+ def get_values(self):
+ return self.values
+
+ def dump(self):
+ dump = ""
+ for x in self.values:
+ dump += x + " = " + repr(self.values[x]) + "\n"
+ return dump
+
+ def parse_file(self, filename):
+ try:
+ with open(filename, "r") as myf:
+ self.lines = myf.readlines()
+ except:
+ raise CatalystError("Could not open file " + filename,
+ print_traceback=True)
+ self.filename = filename
+ self.parse()
+
+ def parse_lines(self, lines):
+ self.lines = lines
+ self.parse()
+
+ def parse(self):
+ values = {}
+ cur_array = []
+
+ trailing_comment = re.compile(r'\s*#.*$')
+
+ for x, myline in enumerate(self.lines):
+ myline = myline.strip()
+
+ # Force the line to be clean
+ # Remove Comments ( anything following # )
+ myline = trailing_comment.sub("", myline)
+
+ # Skip any blank lines
+ if not myline:
+ continue
+
+ if self.key_value_separator in myline:
+ # Split on the first occurrence of the separator, creating two strings in the array mobjs
+ mobjs = myline.split(self.key_value_separator, 1)
+ mobjs[1] = mobjs[1].strip().strip('"')
+
+ # Start a new array using the first element of mobjs
+ cur_array = [mobjs[0]]
+ if mobjs[1]:
+ # do any variable substitution embedded in it with
+ # the values already obtained
+ mobjs[1] = mobjs[1] % values
+ if self.multiple_values:
+ # split on white space creating additional array elements
+ subarray = mobjs[1].split()
+ cur_array += subarray
+ else:
+ cur_array += [mobjs[1]]
+
+ # Else add on to the last key we were working on
+ else:
+ if self.multiple_values:
+ cur_array += myline.split()
+ else:
+ raise CatalystError("Syntax error: %s" %
+ x, print_traceback=True)
+
+ # XXX: Do we really still need this "single value is a string" behavior?
+ if len(cur_array) == 2:
+ values[cur_array[0]] = cur_array[1]
+ else:
+ values[cur_array[0]] = cur_array[1:]
+
+ if not self.empty_values:
+ # Make sure the list of keys is static since we modify inside the loop.
+ for x in list(values.keys()):
+ # Delete empty key pairs
+ if not values[x]:
+ log.warning('No value set for key "%s"; deleting', x)
+ del values[x]
+
+ if self.eval_none:
+ # Make sure the list of keys is static since we modify inside the loop.
+ for x in list(values.keys()):
+ # reset None values
+ if isinstance(values[x], str) and values[x].lower() in ['none']:
+ log.info('None value found for key "%s"; reseting', x)
+ values[x] = None
+ self.values = values
+
class SpecParser(ParserBase):
- key_value_separator = ':'
- multiple_values = True
- empty_values = False
- eval_none = True
+ key_value_separator = ':'
+ multiple_values = True
+ empty_values = False
+ eval_none = True
+
+ def __init__(self, filename=""):
+ if filename:
+ self.parse_file(filename)
- def __init__(self, filename=""):
- if filename:
- self.parse_file(filename)
class ConfigParser(ParserBase):
- key_value_separator = '='
- multiple_values = False
- empty_values = True
+ key_value_separator = '='
+ multiple_values = False
+ empty_values = True
- def __init__(self, filename=""):
- if filename:
- self.parse_file(filename)
+ def __init__(self, filename=""):
+ if filename:
+ self.parse_file(filename)
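
To orient readers in the parser semantics preserved above, a
hypothetical SpecParser session: with multiple_values enabled, values
split on whitespace, a single value stays a plain string, and eval_none
turns the literal 'none' into None:

    from catalyst.config import SpecParser

    spec = SpecParser()
    spec.parse_lines([
        "subarch: amd64",        # single value -> plain string
        "cflags: -O2 -pipe",     # several values -> list
        "kerncache_path: none",  # eval_none -> None
    ])
    print(spec.get_values())
    # {'subarch': 'amd64', 'cflags': ['-O2', '-pipe'], 'kerncache_path': None}
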
diff --git a/catalyst/defaults.py b/catalyst/defaults.py
index 60a2ac9c..4f67878c 100644
--- a/catalyst/defaults.py
+++ b/catalyst/defaults.py
@@ -9,18 +9,18 @@ required_build_targets = ["targetbase", "generic_stage_target"]
# new build types should be added here
valid_build_targets = ["stage1_target", "stage2_target", "stage3_target",
- "stage4_target", "livecd_stage1_target", "livecd_stage2_target",
- "embedded_target", "snapshot_target", "netboot_target"
- ]
+ "stage4_target", "livecd_stage1_target", "livecd_stage2_target",
+ "embedded_target", "snapshot_target", "netboot_target"
+ ]
required_config_file_values = ["storedir", "sharedir", "distdir", "portdir"]
valid_config_file_values = required_config_file_values[:]
-valid_config_file_values.extend([ "distcc", "envscript",
- "options", "DEBUG", "VERBOSE",
- "snapshot_cache", "hash_function", "digests", "contents", "compressor_arch",
- "compression_mode", "compressor_options", "decompressor_search_order",
- ])
+valid_config_file_values.extend(["distcc", "envscript",
+ "options", "DEBUG", "VERBOSE",
+ "snapshot_cache", "hash_function", "digests", "contents", "compressor_arch",
+ "compression_mode", "compressor_options", "decompressor_search_order",
+ ])
# set our base defaults here to keep
# them in one location.
@@ -31,79 +31,79 @@ PKGDIR = BASE_GENTOO_DIR + "/packages"
MAINREPO = "gentoo"
PORTDIR = REPODIR + "/" + MAINREPO
-confdefaults={
- "comp_prog": COMPRESSOR_PROGRAM_OPTIONS['linux'],
- "compression_mode": 'lbzip2',
- "compressor_arch": None,
- "compressor_options": XATTRS_OPTIONS['linux'],
- "decomp_opt": DECOMPRESSOR_PROGRAM_OPTIONS['linux'],
- "decompressor_search_order": DECOMPRESSOR_SEARCH_ORDER,
- "distdir": DISTDIR[:],
- "hash_function": "crc32",
- "icecream": "/var/cache/icecream",
- 'list_xattrs_opt': LIST_XATTRS_OPTIONS['linux'],
- "local_overlay": REPODIR[:] + "/local",
- "port_conf": "/etc/portage",
- "make_conf": "%(port_conf)s/make.conf",
- "options": set(),
- "packagedir": PKGDIR[:],
- "portdir": PORTDIR[:],
- "port_tmpdir": "/var/tmp/portage",
- "PythonDir": "./catalyst",
- "repo_basedir": REPODIR[:],
- "repo_name": MAINREPO[:],
- "sed": "sed",
- "sharedir": "/usr/share/catalyst",
- "shdir": "/usr/share/catalyst/targets/",
- "snapshot_cache": "/var/tmp/catalyst/snapshot_cache",
- "snapshot_name": "%(repo_name)s-",
- "source_matching": "strict",
- "storedir": "/var/tmp/catalyst",
- "target_distdir": DISTDIR[:],
- "target_pkgdir": PKGDIR[:],
- }
+confdefaults = {
+ "comp_prog": COMPRESSOR_PROGRAM_OPTIONS['linux'],
+ "compression_mode": 'lbzip2',
+ "compressor_arch": None,
+ "compressor_options": XATTRS_OPTIONS['linux'],
+ "decomp_opt": DECOMPRESSOR_PROGRAM_OPTIONS['linux'],
+ "decompressor_search_order": DECOMPRESSOR_SEARCH_ORDER,
+ "distdir": DISTDIR[:],
+ "hash_function": "crc32",
+ "icecream": "/var/cache/icecream",
+ 'list_xattrs_opt': LIST_XATTRS_OPTIONS['linux'],
+ "local_overlay": REPODIR[:] + "/local",
+ "port_conf": "/etc/portage",
+ "make_conf": "%(port_conf)s/make.conf",
+ "options": set(),
+ "packagedir": PKGDIR[:],
+ "portdir": PORTDIR[:],
+ "port_tmpdir": "/var/tmp/portage",
+ "PythonDir": "./catalyst",
+ "repo_basedir": REPODIR[:],
+ "repo_name": MAINREPO[:],
+ "sed": "sed",
+ "sharedir": "/usr/share/catalyst",
+ "shdir": "/usr/share/catalyst/targets/",
+ "snapshot_cache": "/var/tmp/catalyst/snapshot_cache",
+ "snapshot_name": "%(repo_name)s-",
+ "source_matching": "strict",
+ "storedir": "/var/tmp/catalyst",
+ "target_distdir": DISTDIR[:],
+ "target_pkgdir": PKGDIR[:],
+}
DEFAULT_CONFIG_FILE = '/etc/catalyst/catalyst.conf'
PORT_LOGDIR_CLEAN = \
- 'find "${PORT_LOGDIR}" -type f ! -name "summary.log*" -mtime +30 -delete'
+ 'find "${PORT_LOGDIR}" -type f ! -name "summary.log*" -mtime +30 -delete'
TARGET_MOUNT_DEFAULTS = {
- "ccache": "/var/tmp/ccache",
- "dev": "/dev",
- "devpts": "/dev/pts",
- "distdir": DISTDIR[:],
- "icecream": "/usr/lib/icecc/bin",
- "kerncache": "/tmp/kerncache",
- "packagedir": PKGDIR[:],
- "portdir": PORTDIR[:],
- "port_tmpdir": "/var/tmp/portage",
- "port_logdir": "/var/log/portage",
- "proc": "/proc",
- "shm": "/dev/shm",
- "run": "/run",
- }
+ "ccache": "/var/tmp/ccache",
+ "dev": "/dev",
+ "devpts": "/dev/pts",
+ "distdir": DISTDIR[:],
+ "icecream": "/usr/lib/icecc/bin",
+ "kerncache": "/tmp/kerncache",
+ "packagedir": PKGDIR[:],
+ "portdir": PORTDIR[:],
+ "port_tmpdir": "/var/tmp/portage",
+ "port_logdir": "/var/log/portage",
+ "proc": "/proc",
+ "shm": "/dev/shm",
+ "run": "/run",
+}
SOURCE_MOUNT_DEFAULTS = {
- "dev": "/dev",
- "devpts": "/dev/pts",
- "distdir": DISTDIR[:],
- "portdir": PORTDIR[:],
- "port_tmpdir": "maybe_tmpfs",
- "proc": "/proc",
- "shm": "shmfs",
- "run": "tmpfs",
- }
+ "dev": "/dev",
+ "devpts": "/dev/pts",
+ "distdir": DISTDIR[:],
+ "portdir": PORTDIR[:],
+ "port_tmpdir": "maybe_tmpfs",
+ "proc": "/proc",
+ "shm": "shmfs",
+ "run": "tmpfs",
+}
option_messages = {
- "autoresume": "Autoresuming support enabled.",
- "ccache": "Compiler cache support enabled.",
- "clear-autoresume": "Cleaning autoresume flags support enabled.",
- "distcc": "Distcc support enabled.",
- "icecream": "Icecream compiler cluster support enabled.",
- "kerncache": "Kernel cache support enabled.",
- "pkgcache": "Package cache support enabled.",
- "purge": "Purge support enabled.",
- "seedcache": "Seed cache support enabled.",
- "snapcache": "Snapshot cache support enabled.",
- }
+ "autoresume": "Autoresuming support enabled.",
+ "ccache": "Compiler cache support enabled.",
+ "clear-autoresume": "Cleaning autoresume flags support enabled.",
+ "distcc": "Distcc support enabled.",
+ "icecream": "Icecream compiler cluster support enabled.",
+ "kerncache": "Kernel cache support enabled.",
+ "pkgcache": "Package cache support enabled.",
+ "purge": "Purge support enabled.",
+ "seedcache": "Seed cache support enabled.",
+ "snapcache": "Snapshot cache support enabled.",
+}
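
One detail worth noting in confdefaults: values such as
"%(port_conf)s/make.conf" are deliberately stored unexpanded and are
resolved later by the %-interpolation pass in parse_config() (visible
further down in the catalyst/main.py hunk). A stand-alone illustration
of that pass:

    conf = {
        "port_conf": "/etc/portage",
        "make_conf": "%(port_conf)s/make.conf",
    }
    # Same idea as the final loop in parse_config(): expand every string
    # value against the whole mapping.
    for key, value in conf.items():
        if isinstance(value, str):
            conf[key] = value % conf

    print(conf["make_conf"])  # -> /etc/portage/make.conf
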
diff --git a/catalyst/fileops.py b/catalyst/fileops.py
index 671d8d9f..5c6f5cd8 100644
--- a/catalyst/fileops.py
+++ b/catalyst/fileops.py
@@ -17,110 +17,111 @@ from catalyst.support import CatalystError
def ensure_dirs(path, gid=-1, uid=-1, mode=0o755, minimal=True,
- failback=None, fatal=False):
- '''Wrapper to snakeoil.osutil's ensure_dirs()
- This additionally allows for failures to run
- cleanup or other code and/or raise fatal errors.
-
- :param path: directory to ensure exists on disk
- :param gid: a valid GID to set any created directories to
- :param uid: a valid UID to set any created directories to
- :param mode: permissions to set any created directories to
- :param minimal: boolean controlling whether or not the specified mode
- must be enforced, or is the minimal permissions necessary. For example,
- if mode=0o755, minimal=True, and a directory exists with mode 0707,
- this will restore the missing group perms resulting in 757.
- :param failback: function to run in the event of a failed attemp
- to create the directory.
- :return: True if the directory could be created/ensured to have those
- permissions, False if not.
- '''
- succeeded = snakeoil_ensure_dirs(path, gid=gid, uid=uid, mode=mode, minimal=minimal)
- if not succeeded:
- if failback:
- failback()
- if fatal:
- raise CatalystError(
- "Failed to create directory: %s" % path, print_traceback=True)
- return succeeded
+ failback=None, fatal=False):
+ '''Wrapper to snakeoil.osutil's ensure_dirs()
+ This additionally allows for failures to run
+ cleanup or other code and/or raise fatal errors.
+
+ :param path: directory to ensure exists on disk
+ :param gid: a valid GID to set any created directories to
+ :param uid: a valid UID to set any created directories to
+ :param mode: permissions to set any created directories to
+ :param minimal: boolean controlling whether or not the specified mode
+ must be enforced, or is the minimal permissions necessary. For example,
+ if mode=0o755, minimal=True, and a directory exists with mode 0707,
+ this will restore the missing group perms resulting in 757.
+ :param failback: function to run in the event of a failed attempt
+ to create the directory.
+ :return: True if the directory could be created/ensured to have those
+ permissions, False if not.
+ '''
+ succeeded = snakeoil_ensure_dirs(
+ path, gid=gid, uid=uid, mode=mode, minimal=minimal)
+ if not succeeded:
+ if failback:
+ failback()
+ if fatal:
+ raise CatalystError(
+ "Failed to create directory: %s" % path, print_traceback=True)
+ return succeeded
def clear_dir(target, mode=0o755, remove=False,
- clear_nondir=True):
- '''Universal directory clearing function
-
- @target: string, path to be cleared or removed
- @mode: integer, desired mode to set the directory to
- @remove: boolean, passed through to clear_dir()
- @return boolean
- '''
- log.debug('start: %s', target)
- if not target:
- log.debug('no target... returning')
- return False
-
- mystat = None
- if os.path.isdir(target) and not os.path.islink(target):
- log.notice('Emptying directory: %s', target)
- # stat the dir, delete the dir, recreate the dir and set
- # the proper perms and ownership
- try:
- log.debug('os.stat()')
- mystat = os.stat(target)
- log.debug('shutil.rmtree()')
- shutil.rmtree(target)
- except Exception:
- log.error('clear_dir failed', exc_info=True)
- return False
- elif os.path.exists(target):
- if clear_nondir:
- log.debug("Clearing (unlinking) non-directory: %s", target)
- os.unlink(target)
- else:
- log.info('clear_dir failed: %s: is not a directory', target)
- return False
- else:
- log.debug("Conditions not met to clear: %s", target)
- log.debug(" isdir: %s", os.path.isdir(target))
- log.debug(" islink: %s", os.path.islink(target))
- log.debug(" exists: %s", os.path.exists(target))
-
- if not remove:
- log.debug('ensure_dirs()')
- ensure_dirs(target, mode=mode)
- if mystat:
- os.chown(target, mystat[ST_UID], mystat[ST_GID])
- os.chmod(target, mystat[ST_MODE])
-
- log.debug('DONE, returning True')
- return True
+ clear_nondir=True):
+ '''Universal directory clearing function
+
+ @target: string, path to be cleared or removed
+ @mode: integer, desired mode to set the directory to
+ @remove: boolean, if True the directory is removed instead of recreated
+ @return boolean
+ '''
+ log.debug('start: %s', target)
+ if not target:
+ log.debug('no target... returning')
+ return False
+
+ mystat = None
+ if os.path.isdir(target) and not os.path.islink(target):
+ log.notice('Emptying directory: %s', target)
+ # stat the dir, delete the dir, recreate the dir and set
+ # the proper perms and ownership
+ try:
+ log.debug('os.stat()')
+ mystat = os.stat(target)
+ log.debug('shutil.rmtree()')
+ shutil.rmtree(target)
+ except Exception:
+ log.error('clear_dir failed', exc_info=True)
+ return False
+ elif os.path.exists(target):
+ if clear_nondir:
+ log.debug("Clearing (unlinking) non-directory: %s", target)
+ os.unlink(target)
+ else:
+ log.info('clear_dir failed: %s: is not a directory', target)
+ return False
+ else:
+ log.debug("Conditions not met to clear: %s", target)
+ log.debug(" isdir: %s", os.path.isdir(target))
+ log.debug(" islink: %s", os.path.islink(target))
+ log.debug(" exists: %s", os.path.exists(target))
+
+ if not remove:
+ log.debug('ensure_dirs()')
+ ensure_dirs(target, mode=mode)
+ if mystat:
+ os.chown(target, mystat[ST_UID], mystat[ST_GID])
+ os.chmod(target, mystat[ST_MODE])
+
+ log.debug('DONE, returning True')
+ return True
def clear_path(target_path):
- """Nuke |target_path| regardless of it being a dir, file or glob."""
- targets = glob.glob(target_path)
- for path in targets:
- clear_dir(path, remove=True)
+ """Nuke |target_path| regardless of it being a dir, file or glob."""
+ targets = glob.glob(target_path)
+ for path in targets:
+ clear_dir(path, remove=True)
def move_path(src, dest):
- '''Move a source target to a new destination
-
- :param src: source path to move
- :param dest: destination path to move it to
- :returns: boolean
- '''
- log.debug('Start move_path(%s, %s)', src, dest)
- if os.path.isdir(src) and not os.path.islink(src):
- if os.path.exists(dest):
- log.warning('Removing existing target destination: %s', dest)
- if not clear_dir(dest, remove=True):
- return False
- log.debug('Moving source...')
- try:
- shutil.move(src, dest)
- except Exception:
- log.error('move_path failed', exc_info=True)
- return False
- return True
- return False
+ '''Move a source target to a new destination
+
+ :param src: source path to move
+ :param dest: destination path to move it to
+ :returns: boolean
+ '''
+ log.debug('Start move_path(%s, %s)', src, dest)
+ if os.path.isdir(src) and not os.path.islink(src):
+ if os.path.exists(dest):
+ log.warning('Removing existing target destination: %s', dest)
+ if not clear_dir(dest, remove=True):
+ return False
+ log.debug('Moving source...')
+ try:
+ shutil.move(src, dest)
+ except Exception:
+ log.error('move_path failed', exc_info=True)
+ return False
+ return True
+ return False
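
For reference, the intended life cycle of the helpers above, shown
against a hypothetical throwaway location (the paths are illustrative
only):

    from catalyst.fileops import ensure_dirs, clear_dir, clear_path

    ensure_dirs("/tmp/catalyst-demo/work", mode=0o755)  # create the tree
    clear_dir("/tmp/catalyst-demo/work")                # empty it, then recreate it
    clear_path("/tmp/catalyst-demo")                    # remove it entirely (glob-aware)
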
diff --git a/catalyst/hash_utils.py b/catalyst/hash_utils.py
index 8b0ff06c..3aae890e 100644
--- a/catalyst/hash_utils.py
+++ b/catalyst/hash_utils.py
@@ -10,121 +10,117 @@ from catalyst.support import CatalystError
# Use HashMap.fields for the value legend
# fields = ["func", "cmd", "args", "id"]
HASH_DEFINITIONS = {
- "adler32" :["calc_hash2", "shash", ["-a", "ADLER32"], "ADLER32"],
- "blake2" :["calc_hash", "b2sum", [ ], "BLAKE2"],
- "crc32" :["calc_hash2", "shash", ["-a", "CRC32"], "CRC32"],
- "crc32b" :["calc_hash2", "shash", ["-a", "CRC32B"], "CRC32B"],
- "gost" :["calc_hash2", "shash", ["-a", "GOST"], "GOST"],
- "haval128" :["calc_hash2", "shash", ["-a", "HAVAL128"], "HAVAL128"],
- "haval160" :["calc_hash2", "shash", ["-a", "HAVAL160"], "HAVAL160"],
- "haval192" :["calc_hash2", "shash", ["-a", "HAVAL192"], "HAVAL192"],
- "haval224" :["calc_hash2", "shash", ["-a", "HAVAL224"], "HAVAL224"],
- "haval256" :["calc_hash2", "shash", ["-a", "HAVAL256"], "HAVAL256"],
- "md2" :["calc_hash2", "shash", ["-a", "MD2"], "MD2"],
- "md4" :["calc_hash2", "shash", ["-a", "MD4"], "MD4"],
- "md5" :["calc_hash2", "shash", ["-a", "MD5"], "MD5"],
- "ripemd128":["calc_hash2", "shash", ["-a", "RIPEMD128"], "RIPEMD128"],
- "ripemd160":["calc_hash2", "shash", ["-a", "RIPEMD160"], "RIPEMD160"],
- "ripemd256":["calc_hash2", "shash", ["-a", "RIPEMD256"], "RIPEMD256"],
- "ripemd320":["calc_hash2", "shash", ["-a", "RIPEMD320"], "RIPEMD320"],
- "sha1" :["calc_hash2", "shash", ["-a", "SHA1"], "SHA1"],
- "sha224" :["calc_hash2", "shash", ["-a", "SHA224"], "SHA224"],
- "sha256" :["calc_hash2", "shash", ["-a", "SHA256"], "SHA256"],
- "sha384" :["calc_hash2", "shash", ["-a", "SHA384"], "SHA384"],
- "sha512" :["calc_hash2", "shash", ["-a", "SHA512"], "SHA512"],
- "snefru128":["calc_hash2", "shash", ["-a", "SNEFRU128"], "SNEFRU128"],
- "snefru256":["calc_hash2", "shash", ["-a", "SNEFRU256"], "SNEFRU256"],
- "tiger" :["calc_hash2", "shash", ["-a", "TIGER"], "TIGER"],
- "tiger128" :["calc_hash2", "shash", ["-a", "TIGER128"], "TIGER128"],
- "tiger160" :["calc_hash2", "shash", ["-a", "TIGER160"], "TIGER160"],
- "whirlpool":["calc_hash2", "shash", ["-a", "WHIRLPOOL"], "WHIRLPOOL"],
- }
+ "adler32" :["calc_hash2", "shash", ["-a", "ADLER32"], "ADLER32"],
+ "blake2" :["calc_hash", "b2sum", [ ], "BLAKE2"],
+ "crc32" :["calc_hash2", "shash", ["-a", "CRC32"], "CRC32"],
+ "crc32b" :["calc_hash2", "shash", ["-a", "CRC32B"], "CRC32B"],
+ "gost" :["calc_hash2", "shash", ["-a", "GOST"], "GOST"],
+ "haval128" :["calc_hash2", "shash", ["-a", "HAVAL128"], "HAVAL128"],
+ "haval160" :["calc_hash2", "shash", ["-a", "HAVAL160"], "HAVAL160"],
+ "haval192" :["calc_hash2", "shash", ["-a", "HAVAL192"], "HAVAL192"],
+ "haval224" :["calc_hash2", "shash", ["-a", "HAVAL224"], "HAVAL224"],
+ "haval256" :["calc_hash2", "shash", ["-a", "HAVAL256"], "HAVAL256"],
+ "md2" :["calc_hash2", "shash", ["-a", "MD2"], "MD2"],
+ "md4" :["calc_hash2", "shash", ["-a", "MD4"], "MD4"],
+ "md5" :["calc_hash2", "shash", ["-a", "MD5"], "MD5"],
+ "ripemd128":["calc_hash2", "shash", ["-a", "RIPEMD128"], "RIPEMD128"],
+ "ripemd160":["calc_hash2", "shash", ["-a", "RIPEMD160"], "RIPEMD160"],
+ "ripemd256":["calc_hash2", "shash", ["-a", "RIPEMD256"], "RIPEMD256"],
+ "ripemd320":["calc_hash2", "shash", ["-a", "RIPEMD320"], "RIPEMD320"],
+ "sha1" :["calc_hash2", "shash", ["-a", "SHA1"], "SHA1"],
+ "sha224" :["calc_hash2", "shash", ["-a", "SHA224"], "SHA224"],
+ "sha256" :["calc_hash2", "shash", ["-a", "SHA256"], "SHA256"],
+ "sha384" :["calc_hash2", "shash", ["-a", "SHA384"], "SHA384"],
+ "sha512" :["calc_hash2", "shash", ["-a", "SHA512"], "SHA512"],
+ "snefru128":["calc_hash2", "shash", ["-a", "SNEFRU128"], "SNEFRU128"],
+ "snefru256":["calc_hash2", "shash", ["-a", "SNEFRU256"], "SNEFRU256"],
+ "tiger" :["calc_hash2", "shash", ["-a", "TIGER"], "TIGER"],
+ "tiger128" :["calc_hash2", "shash", ["-a", "TIGER128"], "TIGER128"],
+ "tiger160" :["calc_hash2", "shash", ["-a", "TIGER160"], "TIGER160"],
+ "whirlpool":["calc_hash2", "shash", ["-a", "WHIRLPOOL"], "WHIRLPOOL"],
+}
class HashMap():
- '''Class for handling
- Catalyst's hash generation'''
-
- fields = ["func", "cmd", "args", "id"]
-
-
- def __init__(self, hashes=None):
- '''Class init
-
- @param hashes: dictionary of Key:[function, cmd, cmd_args, Print string]
- @param fields: list of ordered field names for the hashes
- eg: ["func", "cmd", "args", "id"]
- '''
- if hashes is None:
- hashes = {}
- self.hash_map = {}
-
- # create the hash definition namedtuple classes
- for name in list(hashes):
- obj = namedtuple(name, self.fields)
- obj.__slots__ = ()
- self.hash_map[name] = obj._make(hashes[name])
- del obj
-
-
- def generate_hash(self, file_, hash_="crc32"):
- '''Prefered method of generating a hash for the passed in file_
-
- @param file_: the file to generate the hash for
- @param hash_: the hash algorythm to use
- @returns the hash result
- '''
- try:
- return getattr(self, self.hash_map[hash_].func)(
- file_,
- hash_)
- except:
- raise CatalystError("Error generating hash, is appropriate " + \
- "utility installed on your system?", print_traceback=True)
-
-
- def calc_hash(self, file_, hash_):
- '''
- Calculate the hash for "file_"
-
- @param file_: the file to generate the hash for
- @param hash_: the hash algorythm to use
- @returns the hash result
- '''
- _hash = self.hash_map[hash_]
- args = [_hash.cmd]
- args.extend(_hash.args)
- args.append(file_)
- log.debug('args = %r', args)
- source = Popen(args, stdout=PIPE)
- output = source.communicate()
- mylines = output[0].decode('ascii')
- log.info('%s (%s) = %s', _hash.id, file_, mylines)
- result = "# " + _hash.id + " (b2sum) HASH\n" + mylines
- return result
-
-
- def calc_hash2(self, file_, hash_type):
- '''
- Calculate the hash for "file_"
-
- @param file_: the file to generate the hash for
- @param hash_: the hash algorythm to use
- @returns the hash result
- '''
- _hash = self.hash_map[hash_type]
- args = [_hash.cmd]
- args.extend(_hash.args)
- args.append(file_)
- log.debug('args = %r', args)
- source = Popen(args, stdout=PIPE)
- output = source.communicate()
- lines = output[0].decode('ascii').split('\n')
- log.debug('output = %s', output)
- header = lines[0]
- h_f = lines[1].split()
- hash_result = h_f[0]
- short_file = os.path.split(h_f[1])[1]
- result = header + "\n" + hash_result + " " + short_file + "\n"
- log.info('%s (%s) = %s', header, short_file, result)
- return result
+ '''Class for handling
+ Catalyst's hash generation'''
+
+ fields = ["func", "cmd", "args", "id"]
+
+ def __init__(self, hashes=None):
+ '''Class init
+
+ @param hashes: dictionary of Key:[function, cmd, cmd_args, Print string]
+ @param fields: list of ordered field names for the hashes
+ eg: ["func", "cmd", "args", "id"]
+ '''
+ if hashes is None:
+ hashes = {}
+ self.hash_map = {}
+
+ # create the hash definition namedtuple classes
+ for name in list(hashes):
+ obj = namedtuple(name, self.fields)
+ obj.__slots__ = ()
+ self.hash_map[name] = obj._make(hashes[name])
+ del obj
+
+ def generate_hash(self, file_, hash_="crc32"):
+ '''Preferred method of generating a hash for the passed in file_
+
+ @param file_: the file to generate the hash for
+ @param hash_: the hash algorithm to use
+ @returns the hash result
+ '''
+ try:
+ return getattr(self, self.hash_map[hash_].func)(
+ file_,
+ hash_)
+ except:
+ raise CatalystError("Error generating hash, is appropriate " +
+ "utility installed on your system?", print_traceback=True)
+
+ def calc_hash(self, file_, hash_):
+ '''
+ Calculate the hash for "file_"
+
+ @param file_: the file to generate the hash for
+ @param hash_: the hash algorithm to use
+ @returns the hash result
+ '''
+ _hash = self.hash_map[hash_]
+ args = [_hash.cmd]
+ args.extend(_hash.args)
+ args.append(file_)
+ log.debug('args = %r', args)
+ source = Popen(args, stdout=PIPE)
+ output = source.communicate()
+ mylines = output[0].decode('ascii')
+ log.info('%s (%s) = %s', _hash.id, file_, mylines)
+ result = "# " + _hash.id + " (b2sum) HASH\n" + mylines
+ return result
+
+ def calc_hash2(self, file_, hash_type):
+ '''
+ Calculate the hash for "file_"
+
+ @param file_: the file to generate the hash for
+ @param hash_type: the hash algorithm to use
+ @returns the hash result
+ '''
+ _hash = self.hash_map[hash_type]
+ args = [_hash.cmd]
+ args.extend(_hash.args)
+ args.append(file_)
+ log.debug('args = %r', args)
+ source = Popen(args, stdout=PIPE)
+ output = source.communicate()
+ lines = output[0].decode('ascii').split('\n')
+ log.debug('output = %s', output)
+ header = lines[0]
+ h_f = lines[1].split()
+ hash_result = h_f[0]
+ short_file = os.path.split(h_f[1])[1]
+ result = header + "\n" + hash_result + " " + short_file + "\n"
+ log.info('%s (%s) = %s', header, short_file, result)
+ return result
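
A hypothetical end-to-end use of the class above. Note that most
HASH_DEFINITIONS entries shell out to the external shash utility
(blake2 uses b2sum), so the matching tool must be installed for
generate_hash() to succeed:

    from catalyst.hash_utils import HashMap, HASH_DEFINITIONS

    hashes = HashMap(HASH_DEFINITIONS)
    # Prints a header line such as "# SHA512 HASH" followed by
    # "<digest>  <filename>".
    print(hashes.generate_hash("/etc/hostname", hash_="sha512"))
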
diff --git a/catalyst/lock.py b/catalyst/lock.py
index 4c99e085..5e039fae 100644
--- a/catalyst/lock.py
+++ b/catalyst/lock.py
@@ -10,22 +10,22 @@ LockInUse = osutils.LockException
class LockDir():
- """An object that creates locks inside dirs"""
+ """An object that creates locks inside dirs"""
- def __init__(self, lockdir):
- self.gid = 250
- self.lockfile = os.path.join(lockdir, '.catalyst_lock')
- ensure_dirs(lockdir)
- fileutils.touch(self.lockfile, mode=0o664)
- os.chown(self.lockfile, -1, self.gid)
- self.lock = osutils.FsLock(self.lockfile)
+ def __init__(self, lockdir):
+ self.gid = 250
+ self.lockfile = os.path.join(lockdir, '.catalyst_lock')
+ ensure_dirs(lockdir)
+ fileutils.touch(self.lockfile, mode=0o664)
+ os.chown(self.lockfile, -1, self.gid)
+ self.lock = osutils.FsLock(self.lockfile)
- def read_lock(self):
- self.lock.acquire_read_lock()
+ def read_lock(self):
+ self.lock.acquire_read_lock()
- def write_lock(self):
- self.lock.acquire_write_lock()
+ def write_lock(self):
+ self.lock.acquire_write_lock()
- def unlock(self):
- # Releasing a write lock is the same as a read lock.
- self.lock.release_write_lock()
+ def unlock(self):
+ # Releasing a write lock is the same as a read lock.
+ self.lock.release_write_lock()
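
The locking protocol above in miniature; this assumes the process may
chown the lock file to gid 250 (the portage group on Gentoo), so it is
typically run as root, and the directory here is illustrative:

    from catalyst.lock import LockDir

    lock = LockDir("/var/tmp/catalyst-demo")
    lock.write_lock()   # exclusive; blocks other catalyst instances
    try:
        pass            # ... work on the shared directory ...
    finally:
        lock.unlock()   # releasing a write lock also covers read locks
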
diff --git a/catalyst/log.py b/catalyst/log.py
index 8a23660a..9d534355 100644
--- a/catalyst/log.py
+++ b/catalyst/log.py
@@ -16,15 +16,15 @@ import time
class CatalystLogger(logging.Logger):
- """Override the _log member to autosplit on new lines"""
+ """Override the _log member to autosplit on new lines"""
- def _log(self, level, msg, args, **kwargs):
- """If given a multiline message, split it"""
- # We have to interpolate it first in case they spread things out
- # over multiple lines like: Bad Thing:\n%s\nGoodbye!
- msg %= args
- for line in msg.splitlines():
- super(CatalystLogger, self)._log(level, line, (), **kwargs)
+ def _log(self, level, msg, args, **kwargs):
+ """If given a multiline message, split it"""
+ # We have to interpolate it first in case they spread things out
+ # over multiple lines like: Bad Thing:\n%s\nGoodbye!
+ msg %= args
+ for line in msg.splitlines():
+ super(CatalystLogger, self)._log(level, line, (), **kwargs)
# The logger that all output should go through.
@@ -43,14 +43,16 @@ logging.addLevelName(NOTICE, 'NOTICE')
# The API we expose to consumers.
def notice(msg, *args, **kwargs):
- """Log a notice message"""
- logger.log(NOTICE, msg, *args, **kwargs)
+ """Log a notice message"""
+ logger.log(NOTICE, msg, *args, **kwargs)
+
def critical(msg, *args, **kwargs):
- """Log a critical message and then exit"""
- status = kwargs.pop('status', 1)
- logger.critical(msg, *args, **kwargs)
- sys.exit(status)
+ """Log a critical message and then exit"""
+ status = kwargs.pop('status', 1)
+ logger.critical(msg, *args, **kwargs)
+ sys.exit(status)
+
error = logger.error
warning = logger.warning
@@ -59,40 +61,40 @@ debug = logger.debug
class CatalystFormatter(logging.Formatter):
- """Mark bad messages with colors automatically"""
-
- _COLORS = {
- 'CRITICAL': '\033[1;35m',
- 'ERROR': '\033[1;31m',
- 'WARNING': '\033[1;33m',
- 'DEBUG': '\033[1;34m',
- }
- _NORMAL = '\033[0m'
-
- @staticmethod
- def detect_color():
- """Figure out whether the runtime env wants color"""
- if 'NOCOLOR' is os.environ:
- return False
- return os.isatty(sys.stdout.fileno())
-
- def __init__(self, *args, **kwargs):
- """Initialize"""
- color = kwargs.pop('color', None)
- if color is None:
- color = self.detect_color()
- if not color:
- self._COLORS = {}
-
- super(CatalystFormatter, self).__init__(*args, **kwargs)
-
- def format(self, record, **kwargs):
- """Format the |record| with our color settings"""
- msg = super(CatalystFormatter, self).format(record, **kwargs)
- color = self._COLORS.get(record.levelname)
- if color:
- return color + msg + self._NORMAL
- return msg
+ """Mark bad messages with colors automatically"""
+
+ _COLORS = {
+ 'CRITICAL': '\033[1;35m',
+ 'ERROR': '\033[1;31m',
+ 'WARNING': '\033[1;33m',
+ 'DEBUG': '\033[1;34m',
+ }
+ _NORMAL = '\033[0m'
+
+ @staticmethod
+ def detect_color():
+ """Figure out whether the runtime env wants color"""
+ if 'NOCOLOR' in os.environ:
+ return False
+ return os.isatty(sys.stdout.fileno())
+
+ def __init__(self, *args, **kwargs):
+ """Initialize"""
+ color = kwargs.pop('color', None)
+ if color is None:
+ color = self.detect_color()
+ if not color:
+ self._COLORS = {}
+
+ super(CatalystFormatter, self).__init__(*args, **kwargs)
+
+ def format(self, record, **kwargs):
+ """Format the |record| with our color settings"""
+ msg = super(CatalystFormatter, self).format(record, **kwargs)
+ color = self._COLORS.get(record.levelname)
+ if color:
+ return color + msg + self._NORMAL
+ return msg
# We define |debug| in global scope so people can call log.debug(), but it
@@ -100,29 +102,29 @@ class CatalystFormatter(logging.Formatter):
# use that func in here, it's not a problem, so silence the warning.
# pylint: disable=redefined-outer-name
def setup_logging(level, output=None, debug=False, color=None):
- """Initialize the logging module using the |level| level"""
- # The incoming level will be things like "info", but setLevel wants
- # the numeric constant. Convert it here.
- level = logging.getLevelName(level.upper())
-
- # The good stuff.
- fmt = '%(asctime)s: %(levelname)-8s: '
- if debug:
- fmt += '%(filename)s:%(funcName)s: '
- fmt += '%(message)s'
-
- # Figure out where to send the log output.
- if output is None:
- handler = logging.StreamHandler(stream=sys.stdout)
- else:
- handler = logging.FileHandler(output)
-
- # Use a date format that is readable by humans & machines.
- # Think e-mail/RFC 2822: 05 Oct 2013 18:58:50 EST
- tzname = time.strftime('%Z', time.localtime())
- datefmt = '%d %b %Y %H:%M:%S ' + tzname
- formatter = CatalystFormatter(fmt, datefmt, color=color)
- handler.setFormatter(formatter)
-
- logger.addHandler(handler)
- logger.setLevel(level)
+ """Initialize the logging module using the |level| level"""
+ # The incoming level will be things like "info", but setLevel wants
+ # the numeric constant. Convert it here.
+ level = logging.getLevelName(level.upper())
+
+ # The good stuff.
+ fmt = '%(asctime)s: %(levelname)-8s: '
+ if debug:
+ fmt += '%(filename)s:%(funcName)s: '
+ fmt += '%(message)s'
+
+ # Figure out where to send the log output.
+ if output is None:
+ handler = logging.StreamHandler(stream=sys.stdout)
+ else:
+ handler = logging.FileHandler(output)
+
+ # Use a date format that is readable by humans & machines.
+ # Think e-mail/RFC 2822: 05 Oct 2013 18:58:50 EST
+ tzname = time.strftime('%Z', time.localtime())
+ datefmt = '%d %b %Y %H:%M:%S ' + tzname
+ formatter = CatalystFormatter(fmt, datefmt, color=color)
+ handler.setFormatter(formatter)
+
+ logger.addHandler(handler)
+ logger.setLevel(level)
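
A short sketch of how consumers drive the module above: NOTICE sits
between INFO and WARNING, and CatalystLogger splits multi-line messages
into one record per line:

    from catalyst import log

    log.setup_logging("info", color=False)
    log.notice("starting build\nthis second line becomes its own record")
    log.warning("%s support enabled", "ccache")
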
diff --git a/catalyst/main.py b/catalyst/main.py
index 24e9a759..8603012d 100644
--- a/catalyst/main.py
+++ b/catalyst/main.py
@@ -7,7 +7,7 @@ from snakeoil import process
from snakeoil.process import namespaces
from DeComp.definitions import (COMPRESS_DEFINITIONS, DECOMPRESS_DEFINITIONS,
- CONTENTS_DEFINITIONS)
+ CONTENTS_DEFINITIONS)
from DeComp.contents import ContentsMap
from catalyst import log
@@ -18,112 +18,115 @@ from catalyst.support import CatalystError
from catalyst.version import get_version
-conf_values={}
+conf_values = {}
def version():
- log.info(get_version())
- log.info('Copyright 2003-%s Gentoo Foundation', datetime.datetime.now().year)
- log.info('Copyright 2008-2012 various authors')
- log.info('Distributed under the GNU General Public License version 2.1')
+ log.info(get_version())
+ log.info('Copyright 2003-%s Gentoo Foundation',
+ datetime.datetime.now().year)
+ log.info('Copyright 2008-2012 various authors')
+ log.info('Distributed under the GNU General Public License version 2.1')
+
def parse_config(config_files):
- # search a couple of different areas for the main config file
- myconf={}
-
- # try and parse the config file "config_file"
- for config_file in config_files:
- log.notice('Loading configuration file: %s', config_file)
- try:
- config = catalyst.config.ConfigParser(config_file)
- myconf.update(config.get_values())
- except Exception as e:
- log.critical('Could not find parse configuration file: %s: %s',
- config_file, e)
-
- # now, load up the values into conf_values so that we can use them
- for x in list(confdefaults):
- if x in myconf:
- if x == 'options':
- conf_values[x] = set(myconf[x].split())
- elif x in ["decompressor_search_order"]:
- conf_values[x] = myconf[x].split()
- else:
- conf_values[x]=myconf[x]
- else:
- conf_values[x]=confdefaults[x]
-
- # add our python base directory to use for loading target arch's
- conf_values["PythonDir"] = os.path.dirname(os.path.realpath(__file__))
-
- # print out any options messages
- for opt in conf_values['options']:
- if opt in option_messages:
- log.info(option_messages[opt])
-
- for key in ["digests", "envscript", "var_tmpfs_portage", "port_logdir",
- "local_overlay"]:
- if key in myconf:
- conf_values[key] = myconf[key]
-
- if "contents" in myconf:
- # replace '-' with '_' (for compatibility with existing configs)
- conf_values["contents"] = myconf["contents"].replace("-", '_')
-
- if "envscript" in myconf:
- log.info('Envscript support enabled.')
-
- # take care of any variable substitutions that may be left
- for x in list(conf_values):
- if isinstance(conf_values[x], str):
- conf_values[x] = conf_values[x] % conf_values
+ # search a couple of different areas for the main config file
+ myconf = {}
+
+ # try to parse the config file "config_file"
+ for config_file in config_files:
+ log.notice('Loading configuration file: %s', config_file)
+ try:
+ config = catalyst.config.ConfigParser(config_file)
+ myconf.update(config.get_values())
+ except Exception as e:
+ log.critical('Could not find or parse configuration file: %s: %s',
+ config_file, e)
+
+ # now, load up the values into conf_values so that we can use them
+ for x in list(confdefaults):
+ if x in myconf:
+ if x == 'options':
+ conf_values[x] = set(myconf[x].split())
+ elif x in ["decompressor_search_order"]:
+ conf_values[x] = myconf[x].split()
+ else:
+ conf_values[x] = myconf[x]
+ else:
+ conf_values[x] = confdefaults[x]
+
+ # add our python base directory to use for loading target arches
+ conf_values["PythonDir"] = os.path.dirname(os.path.realpath(__file__))
+
+ # print out any options messages
+ for opt in conf_values['options']:
+ if opt in option_messages:
+ log.info(option_messages[opt])
+
+ for key in ["digests", "envscript", "var_tmpfs_portage", "port_logdir",
+ "local_overlay"]:
+ if key in myconf:
+ conf_values[key] = myconf[key]
+
+ if "contents" in myconf:
+ # replace '-' with '_' (for compatibility with existing configs)
+ conf_values["contents"] = myconf["contents"].replace("-", '_')
+
+ if "envscript" in myconf:
+ log.info('Envscript support enabled.')
+
+ # take care of any variable substitutions that may be left
+ for x in list(conf_values):
+ if isinstance(conf_values[x], str):
+ conf_values[x] = conf_values[x] % conf_values
def import_module(target):
- """
- import catalyst's own modules
- (i.e. targets and the arch modules)
- """
- try:
- mod_name = "catalyst.targets." + target
- module = __import__(mod_name, [],[], ["not empty"])
- except ImportError:
- log.critical('Python module import error: %s', target, exc_info=True)
- return module
+ """
+ import catalyst's own modules
+ (i.e. targets and the arch modules)
+ """
+ try:
+ mod_name = "catalyst.targets." + target
+ module = __import__(mod_name, [], [], ["not empty"])
+ except ImportError:
+ log.critical('Python module import error: %s', target, exc_info=True)
+ return module
def build_target(addlargs):
- try:
- target = addlargs["target"].replace('-', '_')
- module = import_module(target)
- target = getattr(module, target)(conf_values, addlargs)
- except AttributeError:
- raise CatalystError(
- "Target \"%s\" not available." % target,
- print_traceback=True)
- except CatalystError:
- return False
- return target.run()
+ try:
+ target = addlargs["target"].replace('-', '_')
+ module = import_module(target)
+ target = getattr(module, target)(conf_values, addlargs)
+ except AttributeError:
+ raise CatalystError(
+ "Target \"%s\" not available." % target,
+ print_traceback=True)
+ except CatalystError:
+ return False
+ return target.run()
class FilePath():
- """Argparse type for getting a path to a file."""
+ """Argparse type for getting a path to a file."""
- def __init__(self, exists=True):
- self.exists = exists
+ def __init__(self, exists=True):
+ self.exists = exists
- def __call__(self, string):
- if not os.path.exists(string):
- raise argparse.ArgumentTypeError('file does not exist: %s' % string)
- return string
+ def __call__(self, string):
+ if not os.path.exists(string):
+ raise argparse.ArgumentTypeError(
+ 'file does not exist: %s' % string)
+ return string
- def __repr__(self):
- return '%s(exists=%s)' % (type(self).__name__, self.exists)
+ def __repr__(self):
+ return '%s(exists=%s)' % (type(self).__name__, self.exists)
def get_parser():
- """Return an argument parser"""
- epilog = """Usage examples:
+ """Return an argument parser"""
+ epilog = """Usage examples:
Using the commandline option (-C, --cli) to build a Portage snapshot:
$ catalyst -C target=snapshot version_stamp=my_date
@@ -134,312 +137,314 @@ $ catalyst -s 20071121
Using the specfile option (-f, --file) to build a stage target:
$ catalyst -f stage1-specfile.spec"""
- parser = argparse.ArgumentParser(epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)
-
- parser.add_argument('-V', '--version',
- action='version', version=get_version(),
- help='display version information')
-
- group = parser.add_argument_group('Program output options')
- group.add_argument('-d', '--debug',
- default=False, action='store_true',
- help='enable debugging (and default --log-level debug)')
- group.add_argument('-v', '--verbose',
- default=False, action='store_true',
- help='verbose output (and default --log-level info)')
- group.add_argument('--log-level',
- default=None,
- choices=('critical', 'error', 'warning', 'notice', 'info', 'debug'),
- help='set verbosity of output (default: notice)')
- group.add_argument('--log-file',
- type=FilePath(exists=False),
- help='write all output to this file (instead of stdout)')
- group.add_argument('--color',
- default=None, action='store_true',
- help='colorize output all the time (default: detect)')
- group.add_argument('--nocolor',
- dest='color', action='store_false',
- help='never colorize output all the time (default: detect)')
-
- group = parser.add_argument_group('Developer options')
- group.add_argument('--trace',
- default=False, action='store_true',
- help='trace program output (akin to `sh -x`)')
- group.add_argument('--profile',
- default=False, action='store_true',
- help='profile program execution')
-
- group = parser.add_argument_group('Temporary file management')
- group.add_argument('-a', '--clear-autoresume',
- default=False, action='store_true',
- help='clear autoresume flags')
- group.add_argument('-p', '--purge',
- default=False, action='store_true',
- help='clear tmp dirs, package cache, autoresume flags')
- group.add_argument('-P', '--purgeonly',
- default=False, action='store_true',
- help='clear tmp dirs, package cache, autoresume flags and exit')
- group.add_argument('-T', '--purgetmponly',
- default=False, action='store_true',
- help='clear tmp dirs and autoresume flags and exit')
- group.add_argument('--versioned-cachedir',
- dest='versioned_cachedir', action='store_true',
- help='use stage version on cache directory name')
- group.add_argument('--unversioned-cachedir',
- dest='versioned_cachedir', action='store_false',
- help='do not use stage version on cache directory name')
- group.set_defaults(versioned_cachedir=False)
-
-
- group = parser.add_argument_group('Target/config file management')
- group.add_argument('-F', '--fetchonly',
- default=False, action='store_true',
- help='fetch files only')
- group.add_argument('-c', '--configs',
- type=FilePath(), action='append',
- help='use specified configuration files')
- group.add_argument('-f', '--file',
- type=FilePath(),
- help='read specfile')
- group.add_argument('-s', '--snapshot',
- help='generate a release snapshot')
- group.add_argument('-C', '--cli',
- default=[], nargs=argparse.REMAINDER,
- help='catalyst commandline (MUST BE LAST OPTION)')
-
- return parser
+ parser = argparse.ArgumentParser(
+ epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)
+
+ parser.add_argument('-V', '--version',
+ action='version', version=get_version(),
+ help='display version information')
+
+ group = parser.add_argument_group('Program output options')
+ group.add_argument('-d', '--debug',
+ default=False, action='store_true',
+ help='enable debugging (and default --log-level debug)')
+ group.add_argument('-v', '--verbose',
+ default=False, action='store_true',
+ help='verbose output (and default --log-level info)')
+ group.add_argument('--log-level',
+ default=None,
+ choices=('critical', 'error', 'warning',
+ 'notice', 'info', 'debug'),
+ help='set verbosity of output (default: notice)')
+ group.add_argument('--log-file',
+ type=FilePath(exists=False),
+ help='write all output to this file (instead of stdout)')
+ group.add_argument('--color',
+ default=None, action='store_true',
+ help='colorize output all the time (default: detect)')
+ group.add_argument('--nocolor',
+ dest='color', action='store_false',
+ help='never colorize output (default: detect)')
+
+ group = parser.add_argument_group('Developer options')
+ group.add_argument('--trace',
+ default=False, action='store_true',
+ help='trace program output (akin to `sh -x`)')
+ group.add_argument('--profile',
+ default=False, action='store_true',
+ help='profile program execution')
+
+ group = parser.add_argument_group('Temporary file management')
+ group.add_argument('-a', '--clear-autoresume',
+ default=False, action='store_true',
+ help='clear autoresume flags')
+ group.add_argument('-p', '--purge',
+ default=False, action='store_true',
+ help='clear tmp dirs, package cache, autoresume flags')
+ group.add_argument('-P', '--purgeonly',
+ default=False, action='store_true',
+ help='clear tmp dirs, package cache, autoresume flags and exit')
+ group.add_argument('-T', '--purgetmponly',
+ default=False, action='store_true',
+ help='clear tmp dirs and autoresume flags and exit')
+ group.add_argument('--versioned-cachedir',
+ dest='versioned_cachedir', action='store_true',
+ help='use stage version on cache directory name')
+ group.add_argument('--unversioned-cachedir',
+ dest='versioned_cachedir', action='store_false',
+ help='do not use stage version on cache directory name')
+ group.set_defaults(versioned_cachedir=False)
+
+ group = parser.add_argument_group('Target/config file management')
+ group.add_argument('-F', '--fetchonly',
+ default=False, action='store_true',
+ help='fetch files only')
+ group.add_argument('-c', '--configs',
+ type=FilePath(), action='append',
+ help='use specified configuration files')
+ group.add_argument('-f', '--file',
+ type=FilePath(),
+ help='read specfile')
+ group.add_argument('-s', '--snapshot',
+ help='generate a release snapshot')
+ group.add_argument('-C', '--cli',
+ default=[], nargs=argparse.REMAINDER,
+ help='catalyst commandline (MUST BE LAST OPTION)')
+
+ return parser
def trace(func, *args, **kwargs):
- """Run |func| through the trace module (akin to `sh -x`)"""
- import trace
-
- # Ignore common system modules we use.
- ignoremods = set((
- 'argparse',
- 'genericpath', 'gettext',
- 'locale',
- 'os',
- 'posixpath',
- 're',
- 'sre_compile', 'sre_parse', 'sys',
- 'tempfile', 'threading',
- 'UserDict',
- ))
-
- # Ignore all the system modules.
- ignoredirs = set(sys.path)
- # But try to strip out the catalyst paths.
- try:
- ignoredirs.remove(os.path.dirname(os.path.dirname(
- os.path.realpath(__file__))))
- except KeyError:
- pass
-
- tracer = trace.Trace(
- count=False,
- trace=True,
- timing=True,
- ignoremods=ignoremods,
- ignoredirs=ignoredirs)
- return tracer.runfunc(func, *args, **kwargs)
+ """Run |func| through the trace module (akin to `sh -x`)"""
+ import trace
+
+ # Ignore common system modules we use.
+ ignoremods = set((
+ 'argparse',
+ 'genericpath', 'gettext',
+ 'locale',
+ 'os',
+ 'posixpath',
+ 're',
+ 'sre_compile', 'sre_parse', 'sys',
+ 'tempfile', 'threading',
+ 'UserDict',
+ ))
+
+ # Ignore all the system modules.
+ ignoredirs = set(sys.path)
+ # But try to strip out the catalyst paths.
+ try:
+ ignoredirs.remove(os.path.dirname(os.path.dirname(
+ os.path.realpath(__file__))))
+ except KeyError:
+ pass
+
+ tracer = trace.Trace(
+ count=False,
+ trace=True,
+ timing=True,
+ ignoremods=ignoremods,
+ ignoredirs=ignoredirs)
+ return tracer.runfunc(func, *args, **kwargs)
def profile(func, *args, **kwargs):
- """Run |func| through the profile module"""
- # Should make this an option.
- sort_keys = ('time',)
+ """Run |func| through the profile module"""
+ # Should make this an option.
+ sort_keys = ('time',)
- # Collect the profile.
- import cProfile
- profiler = cProfile.Profile(subcalls=True, builtins=True)
- try:
- ret = profiler.runcall(func, *args, **kwargs)
- finally:
- # Then process the results.
- import pstats
- stats = pstats.Stats(profiler, stream=sys.stderr)
- stats.strip_dirs().sort_stats(*sort_keys).print_stats()
+ # Collect the profile.
+ import cProfile
+ profiler = cProfile.Profile(subcalls=True, builtins=True)
+ try:
+ ret = profiler.runcall(func, *args, **kwargs)
+ finally:
+ # Then process the results.
+ import pstats
+ stats = pstats.Stats(profiler, stream=sys.stderr)
+ stats.strip_dirs().sort_stats(*sort_keys).print_stats()
- return ret
+ return ret
def main(argv):
- """The main entry point for frontends to use"""
- parser = get_parser()
- opts = parser.parse_args(argv)
+ """The main entry point for frontends to use"""
+ parser = get_parser()
+ opts = parser.parse_args(argv)
- if opts.trace:
- return trace(_main, parser, opts)
- if opts.profile:
- return profile(_main, parser, opts)
- return _main(parser, opts)
+ if opts.trace:
+ return trace(_main, parser, opts)
+ if opts.profile:
+ return profile(_main, parser, opts)
+ return _main(parser, opts)
def _main(parser, opts):
- """The "main" main function so we can trace/profile."""
- # Initialize the logger before anything else.
- log_level = opts.log_level
- if log_level is None:
- if opts.debug:
- log_level = 'debug'
- elif opts.verbose:
- log_level = 'info'
- else:
- log_level = 'notice'
- log.setup_logging(log_level, output=opts.log_file, debug=opts.debug,
- color=opts.color)
-
- # Parse the command line options.
- myconfigs = opts.configs
- if not myconfigs:
- myconfigs = [DEFAULT_CONFIG_FILE]
- myspecfile = opts.file
- mycmdline = opts.cli[:]
-
- if opts.snapshot:
- mycmdline.append('target=snapshot')
- mycmdline.append('version_stamp=' + opts.snapshot)
-
- conf_values['DEBUG'] = opts.debug
- conf_values['VERBOSE'] = opts.debug or opts.verbose
-
- options = set()
- if opts.fetchonly:
- options.add('fetch')
- if opts.purge:
- options.add('purge')
- if opts.purgeonly:
- options.add('purgeonly')
- if opts.purgetmponly:
- options.add('purgetmponly')
- if opts.clear_autoresume:
- options.add('clear-autoresume')
-
- # Make sure we have some work before moving further.
- if not myspecfile and not mycmdline:
- parser.error('please specify one of either -f or -C or -s')
-
- # made it this far so start by outputting our version info
- version()
- # import configuration file and import our main module using those settings
- parse_config(myconfigs)
-
- conf_values["options"].update(options)
- log.notice('conf_values[options] = %s', conf_values['options'])
-
- # initialize our contents generator
- contents_map = ContentsMap(CONTENTS_DEFINITIONS,
- comp_prog=conf_values['comp_prog'],
- decomp_opt=conf_values['decomp_opt'],
- list_xattrs_opt=conf_values['list_xattrs_opt'])
- conf_values["contents_map"] = contents_map
-
- # initialze our hash and contents generators
- hash_map = HashMap(HASH_DEFINITIONS)
- conf_values["hash_map"] = hash_map
-
- # initialize our (de)compression definitions
- conf_values['decompress_definitions'] = DECOMPRESS_DEFINITIONS
- conf_values['compress_definitions'] = COMPRESS_DEFINITIONS
- # TODO add capability to config/spec new definitions
-
- # Start checking that digests are valid now that hash_map is initialized
- if "digests" in conf_values:
- digests = set(conf_values['digests'].split())
- valid_digests = set(HASH_DEFINITIONS.keys())
-
- # Use the magic keyword "auto" to use all algos that are available.
- skip_missing = False
- if 'auto' in digests:
- skip_missing = True
- digests.remove('auto')
- if not digests:
- digests = set(valid_digests)
-
- # First validate all the requested digests are valid keys.
- if digests - valid_digests:
- log.critical(
- 'These are not valid digest entries:\n'
- '%s\n'
- 'Valid digest entries:\n'
- '%s',
- ', '.join(digests - valid_digests),
- ', '.join(sorted(valid_digests)))
-
- # Then check for any programs that the hash func requires.
- for digest in digests:
- try:
- process.find_binary(hash_map.hash_map[digest].cmd)
- except process.CommandNotFound:
- # In auto mode, just ignore missing support.
- if skip_missing:
- digests.remove(digest)
- continue
- log.critical(
- 'The "%s" binary needed by digest "%s" was not found. '
- 'It needs to be in your system path.',
- hash_map.hash_map[digest].cmd, digest)
-
- # Now reload the config with our updated value.
- conf_values['digests'] = ' '.join(digests)
-
- if "hash_function" in conf_values:
- if conf_values["hash_function"] not in HASH_DEFINITIONS:
- log.critical(
- '%s is not a valid hash_function entry\n'
- 'Valid hash_function entries:\n'
- '%s', conf_values["hash_function"], HASH_DEFINITIONS.keys())
- try:
- process.find_binary(hash_map.hash_map[conf_values["hash_function"]].cmd)
- except process.CommandNotFound:
- log.critical(
- 'The "%s" binary needed by hash_function "%s" was not found. '
- 'It needs to be in your system path.',
- hash_map.hash_map[conf_values['hash_function']].cmd,
- conf_values['hash_function'])
-
- # detect GNU sed
- for sed in ('/usr/bin/gsed', '/bin/sed', '/usr/bin/sed'):
- if os.path.exists(sed):
- conf_values["sed"] = sed
- break
-
- addlargs={}
-
- if myspecfile:
- log.notice("Processing spec file: %s", myspecfile)
- spec = catalyst.config.SpecParser(myspecfile)
- addlargs.update(spec.get_values())
-
- if mycmdline:
- try:
- cmdline = catalyst.config.ConfigParser()
- cmdline.parse_lines(mycmdline)
- addlargs.update(cmdline.get_values())
- except CatalystError:
- log.critical('Could not parse commandline')
-
- if "target" not in addlargs:
- raise CatalystError("Required value \"target\" not specified.")
-
- if os.getuid() != 0:
- # catalyst cannot be run as a normal user due to chroots, mounts, etc
- log.critical('This script requires root privileges to operate')
-
- # Start off by creating unique namespaces to run in. Would be nice to
- # use pid & user namespaces, but snakeoil's namespace module has signal
- # transfer issues (CTRL+C doesn't propagate), and user namespaces need
- # more work due to Gentoo build process (uses sudo/root/portage).
- namespaces.simple_unshare(
- mount=True, uts=True, ipc=True, pid=False, net=False, user=False,
- hostname='catalyst')
-
- # everything is setup, so the build is a go
- try:
- success = build_target(addlargs)
- except KeyboardInterrupt:
- log.critical('Catalyst build aborted due to user interrupt (Ctrl-C)')
- if not success:
- sys.exit(2)
- sys.exit(0)
+ """The "main" main function so we can trace/profile."""
+ # Initialize the logger before anything else.
+ log_level = opts.log_level
+ if log_level is None:
+ if opts.debug:
+ log_level = 'debug'
+ elif opts.verbose:
+ log_level = 'info'
+ else:
+ log_level = 'notice'
+ log.setup_logging(log_level, output=opts.log_file, debug=opts.debug,
+ color=opts.color)
+
+ # Parse the command line options.
+ myconfigs = opts.configs
+ if not myconfigs:
+ myconfigs = [DEFAULT_CONFIG_FILE]
+ myspecfile = opts.file
+ mycmdline = opts.cli[:]
+
+ if opts.snapshot:
+ mycmdline.append('target=snapshot')
+ mycmdline.append('version_stamp=' + opts.snapshot)
+
+ conf_values['DEBUG'] = opts.debug
+ conf_values['VERBOSE'] = opts.debug or opts.verbose
+
+ options = set()
+ if opts.fetchonly:
+ options.add('fetch')
+ if opts.purge:
+ options.add('purge')
+ if opts.purgeonly:
+ options.add('purgeonly')
+ if opts.purgetmponly:
+ options.add('purgetmponly')
+ if opts.clear_autoresume:
+ options.add('clear-autoresume')
+
+ # Make sure we have some work before moving further.
+ if not myspecfile and not mycmdline:
+ parser.error('please specify one of -f, -C, or -s')
+
+ # made it this far so start by outputting our version info
+ version()
+ # import configuration file and import our main module using those settings
+ parse_config(myconfigs)
+
+ conf_values["options"].update(options)
+ log.notice('conf_values[options] = %s', conf_values['options'])
+
+ # initialize our contents generator
+ contents_map = ContentsMap(CONTENTS_DEFINITIONS,
+ comp_prog=conf_values['comp_prog'],
+ decomp_opt=conf_values['decomp_opt'],
+ list_xattrs_opt=conf_values['list_xattrs_opt'])
+ conf_values["contents_map"] = contents_map
+
+ # initialize our hash and contents generators
+ hash_map = HashMap(HASH_DEFINITIONS)
+ conf_values["hash_map"] = hash_map
+
+ # initialize our (de)compression definitions
+ conf_values['decompress_definitions'] = DECOMPRESS_DEFINITIONS
+ conf_values['compress_definitions'] = COMPRESS_DEFINITIONS
+ # TODO add capability to config/spec new definitions
+
+ # Start checking that digests are valid now that hash_map is initialized
+ if "digests" in conf_values:
+ digests = set(conf_values['digests'].split())
+ valid_digests = set(HASH_DEFINITIONS.keys())
+
+ # Use the magic keyword "auto" to use all algos that are available.
+ skip_missing = False
+ if 'auto' in digests:
+ skip_missing = True
+ digests.remove('auto')
+ if not digests:
+ digests = set(valid_digests)
+
+ # First validate all the requested digests are valid keys.
+ if digests - valid_digests:
+ log.critical(
+ 'These are not valid digest entries:\n'
+ '%s\n'
+ 'Valid digest entries:\n'
+ '%s',
+ ', '.join(digests - valid_digests),
+ ', '.join(sorted(valid_digests)))
+
+ # Then check for any programs that the hash func requires.
+ for digest in digests:
+ try:
+ process.find_binary(hash_map.hash_map[digest].cmd)
+ except process.CommandNotFound:
+ # In auto mode, just ignore missing support.
+ if skip_missing:
+ digests.remove(digest)
+ continue
+ log.critical(
+ 'The "%s" binary needed by digest "%s" was not found. '
+ 'It needs to be in your system path.',
+ hash_map.hash_map[digest].cmd, digest)
+
+ # Now reload the config with our updated value.
+ conf_values['digests'] = ' '.join(digests)
+
+ if "hash_function" in conf_values:
+ if conf_values["hash_function"] not in HASH_DEFINITIONS:
+ log.critical(
+ '%s is not a valid hash_function entry\n'
+ 'Valid hash_function entries:\n'
+ '%s', conf_values["hash_function"], HASH_DEFINITIONS.keys())
+ try:
+ process.find_binary(
+ hash_map.hash_map[conf_values["hash_function"]].cmd)
+ except process.CommandNotFound:
+ log.critical(
+ 'The "%s" binary needed by hash_function "%s" was not found. '
+ 'It needs to be in your system path.',
+ hash_map.hash_map[conf_values['hash_function']].cmd,
+ conf_values['hash_function'])
+
+ # detect GNU sed
+ for sed in ('/usr/bin/gsed', '/bin/sed', '/usr/bin/sed'):
+ if os.path.exists(sed):
+ conf_values["sed"] = sed
+ break
+
+ addlargs = {}
+
+ if myspecfile:
+ log.notice("Processing spec file: %s", myspecfile)
+ spec = catalyst.config.SpecParser(myspecfile)
+ addlargs.update(spec.get_values())
+
+ if mycmdline:
+ try:
+ cmdline = catalyst.config.ConfigParser()
+ cmdline.parse_lines(mycmdline)
+ addlargs.update(cmdline.get_values())
+ except CatalystError:
+ log.critical('Could not parse commandline')
+
+ if "target" not in addlargs:
+ raise CatalystError("Required value \"target\" not specified.")
+
+ if os.getuid() != 0:
+ # catalyst cannot be run as a normal user due to chroots, mounts, etc
+ log.critical('This script requires root privileges to operate')
+
+ # Start off by creating unique namespaces to run in. Would be nice to
+ # use pid & user namespaces, but snakeoil's namespace module has signal
+ # transfer issues (CTRL+C doesn't propagate), and user namespaces need
+ # more work due to Gentoo build process (uses sudo/root/portage).
+ namespaces.simple_unshare(
+ mount=True, uts=True, ipc=True, pid=False, net=False, user=False,
+ hostname='catalyst')
+
+ # everything is set up, so the build is a go
+ try:
+ success = build_target(addlargs)
+ except KeyboardInterrupt:
+ log.critical('Catalyst build aborted due to user interrupt (Ctrl-C)')
+ if not success:
+ sys.exit(2)
+ sys.exit(0)
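
One behavior worth flagging in the digest-validation loop above:
digests.remove(digest) runs inside "for digest in digests", and in
Python 3 removing from a set while iterating it raises RuntimeError.
A standalone sketch of the safe pattern (digest names invented; this
is not part of the commit):

    # Iterate over a snapshot so removals cannot invalidate the iterator.
    digests = {'md5', 'sha512'}   # hypothetical requested digests
    unsupported = {'md5'}         # pretend this binary was not found
    for digest in list(digests):
        if digest in unsupported:
            digests.remove(digest)
    assert digests == {'sha512'}
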
diff --git a/catalyst/support.py b/catalyst/support.py
index c7a8fc73..654b23aa 100644
--- a/catalyst/support.py
+++ b/catalyst/support.py
@@ -9,99 +9,100 @@ from subprocess import Popen
from catalyst import log
from catalyst.defaults import valid_config_file_values
-BASH_BINARY = "/bin/bash"
+BASH_BINARY = "/bin/bash"
class CatalystError(Exception):
- def __init__(self, message, print_traceback=False):
- if message:
- log.error('CatalystError: %s', message, exc_info=print_traceback)
+ def __init__(self, message, print_traceback=False):
+ if message:
+ log.error('CatalystError: %s', message, exc_info=print_traceback)
def cmd(mycmd, env=None, debug=False, fail_func=None):
- """Run the external |mycmd|.
-
- If |mycmd| is a string, then it's assumed to be a bash snippet and will
- be run through bash. Otherwise, it's a standalone command line and will
- be run directly.
- """
- log.debug('cmd: %r', mycmd)
- sys.stdout.flush()
-
- if env is None:
- env = {}
- if 'BASH_ENV' not in env:
- env = env.copy()
- env['BASH_ENV'] = '/etc/spork/is/not/valid/profile.env'
-
- args = []
- if isinstance(mycmd, str):
- args.append(BASH_BINARY)
- if debug:
- args.append('-x')
- args.extend(['-c', mycmd])
- else:
- args.extend(mycmd)
-
- log.debug('args: %r', args)
- proc = Popen(args, env=env)
- ret = proc.wait()
- if ret:
- if fail_func:
- log.error('cmd(%r) exited %s; running fail_func().', args, ret)
- fail_func()
- raise CatalystError('cmd(%r) exited %s' % (args, ret),
- print_traceback=False)
+ """Run the external |mycmd|.
+
+ If |mycmd| is a string, then it's assumed to be a bash snippet and will
+ be run through bash. Otherwise, it's a standalone command line and will
+ be run directly.
+ """
+ log.debug('cmd: %r', mycmd)
+ sys.stdout.flush()
+
+ if env is None:
+ env = {}
+ if 'BASH_ENV' not in env:
+ env = env.copy()
+ env['BASH_ENV'] = '/etc/spork/is/not/valid/profile.env'
+
+ args = []
+ if isinstance(mycmd, str):
+ args.append(BASH_BINARY)
+ if debug:
+ args.append('-x')
+ args.extend(['-c', mycmd])
+ else:
+ args.extend(mycmd)
+
+ log.debug('args: %r', args)
+ proc = Popen(args, env=env)
+ ret = proc.wait()
+ if ret:
+ if fail_func:
+ log.error('cmd(%r) exited %s; running fail_func().', args, ret)
+ fail_func()
+ raise CatalystError('cmd(%r) exited %s' % (args, ret),
+ print_traceback=False)
def file_check(filepath, extensions=None, strict=True):
- '''Check for the files existence and that only one exists
- if others are found with various extensions
- '''
- if os.path.isfile(filepath):
- return filepath
- # it didn't exist
- # so check if there are files of that name with an extension
- files = glob.glob("%s.*" % filepath)
- # remove any false positive files
- files = [x for x in files if not x.endswith(".CONTENTS") and not x.endswith(".DIGESTS")]
- if len(files) == 1:
- return files[0]
- if len(files) > 1 and strict:
- msg = "Ambiguos Filename: %s\nPlease specify the correct extension as well" % filepath
- raise CatalystError(msg, print_traceback=False)
- target_file = None
- for ext in extensions:
- target = filepath + "." + ext
- if target in files:
- target_file = target
- break
- if target_file:
- return target_file
- raise CatalystError("File Not Found: %s" % filepath)
-
-
-def file_locate(settings,filelist,expand=1):
- #if expand=1, non-absolute paths will be accepted and
- # expanded to os.getcwd()+"/"+localpath if file exists
- for myfile in filelist:
- if myfile not in settings:
- #filenames such as cdtar are optional, so we don't assume the variable is defined.
- pass
- else:
- if not settings[myfile]:
- raise CatalystError("File variable \"" + myfile +
- "\" has a length of zero (not specified.)", print_traceback=True)
- if settings[myfile][0]=="/":
- if not os.path.exists(settings[myfile]):
- raise CatalystError("Cannot locate specified " + myfile +
- ": " + settings[myfile], print_traceback=False)
- elif expand and os.path.exists(os.getcwd()+"/"+settings[myfile]):
- settings[myfile]=os.getcwd()+"/"+settings[myfile]
- else:
- raise CatalystError("Cannot locate specified " + myfile +
- ": "+settings[myfile]+" (2nd try)" +
-"""
+ '''Check for the file's existence and that only one exists
+ if others are found with various extensions
+ '''
+ if os.path.isfile(filepath):
+ return filepath
+ # it didn't exist
+ # so check if there are files of that name with an extension
+ files = glob.glob("%s.*" % filepath)
+ # remove any false positive files
+ files = [x for x in files if not x.endswith(
+ ".CONTENTS") and not x.endswith(".DIGESTS")]
+ if len(files) == 1:
+ return files[0]
+ if len(files) > 1 and strict:
+ msg = "Ambiguos Filename: %s\nPlease specify the correct extension as well" % filepath
+ raise CatalystError(msg, print_traceback=False)
+ target_file = None
+ for ext in extensions:
+ target = filepath + "." + ext
+ if target in files:
+ target_file = target
+ break
+ if target_file:
+ return target_file
+ raise CatalystError("File Not Found: %s" % filepath)
+
+
+def file_locate(settings, filelist, expand=1):
+ # if expand=1, non-absolute paths will be accepted and
+ # expanded to os.getcwd()+"/"+localpath if file exists
+ for myfile in filelist:
+ if myfile not in settings:
+ # filenames such as cdtar are optional, so we don't assume the variable is defined.
+ pass
+ else:
+ if not settings[myfile]:
+ raise CatalystError("File variable \"" + myfile +
+ "\" has a length of zero (not specified.)", print_traceback=True)
+ if settings[myfile][0] == "/":
+ if not os.path.exists(settings[myfile]):
+ raise CatalystError("Cannot locate specified " + myfile +
+ ": " + settings[myfile], print_traceback=False)
+ elif expand and os.path.exists(os.getcwd()+"/"+settings[myfile]):
+ settings[myfile] = os.getcwd()+"/"+settings[myfile]
+ else:
+ raise CatalystError("Cannot locate specified " + myfile +
+ ": "+settings[myfile]+" (2nd try)" +
+ """
Spec file format:
The spec file format is a very simple and easy-to-use format for storing data. Here's an example
@@ -121,132 +122,132 @@ that the order of multiple-value items is preserved, but the order that the item
defined are not preserved. In other words, "foo", "bar", "oni" ordering is preserved but "item1"
"item2" "item3" ordering is not, as the item strings are stored in a dictionary (hash).
""",
- print_traceback=True)
+ print_traceback=True)
def parse_makeconf(mylines):
- mymakeconf={}
- pos=0
- pat=re.compile("([0-9a-zA-Z_]*)=(.*)")
- while pos<len(mylines):
- if len(mylines[pos])<=1:
- #skip blanks
- pos += 1
- continue
- if mylines[pos][0] in ["#"," ","\t"]:
- #skip indented lines, comments
- pos += 1
- continue
- else:
- myline=mylines[pos]
- mobj=pat.match(myline)
- pos += 1
- if mobj.group(2):
- clean_string = re.sub(r"\"",r"",mobj.group(2))
- mymakeconf[mobj.group(1)]=clean_string
- return mymakeconf
+ mymakeconf = {}
+ pos = 0
+ pat = re.compile("([0-9a-zA-Z_]*)=(.*)")
+ while pos < len(mylines):
+ if len(mylines[pos]) <= 1:
+ # skip blanks
+ pos += 1
+ continue
+ if mylines[pos][0] in ["#", " ", "\t"]:
+ # skip indented lines, comments
+ pos += 1
+ continue
+ else:
+ myline = mylines[pos]
+ mobj = pat.match(myline)
+ pos += 1
+ if mobj.group(2):
+ clean_string = re.sub(r"\"", r"", mobj.group(2))
+ mymakeconf[mobj.group(1)] = clean_string
+ return mymakeconf
def read_makeconf(mymakeconffile):
- if os.path.exists(mymakeconffile):
- try:
- try:
- import snakeoil.bash #import snakeoil.fileutils
- return snakeoil.bash.read_bash_dict(mymakeconffile, sourcing_command="source")
- except ImportError:
- try:
- import portage.util
- return portage.util.getconfig(mymakeconffile, tolerant=1, allow_sourcing=True)
- except Exception:
- try:
- import portage_util
- return portage_util.getconfig(mymakeconffile, tolerant=1, allow_sourcing=True)
- except ImportError:
- with open(mymakeconffile, "r") as myf:
- mylines=myf.readlines()
- return parse_makeconf(mylines)
- except Exception:
- raise CatalystError("Could not parse make.conf file " +
- mymakeconffile, print_traceback=True)
- else:
- makeconf={}
- return makeconf
-
-
-def pathcompare(path1,path2):
- # Change double slashes to slash
- path1 = re.sub(r"//",r"/",path1)
- path2 = re.sub(r"//",r"/",path2)
- # Removing ending slash
- path1 = re.sub("/$","",path1)
- path2 = re.sub("/$","",path2)
-
- if path1 == path2:
- return 1
- return 0
+ if os.path.exists(mymakeconffile):
+ try:
+ try:
+ import snakeoil.bash # import snakeoil.fileutils
+ return snakeoil.bash.read_bash_dict(mymakeconffile, sourcing_command="source")
+ except ImportError:
+ try:
+ import portage.util
+ return portage.util.getconfig(mymakeconffile, tolerant=1, allow_sourcing=True)
+ except Exception:
+ try:
+ import portage_util
+ return portage_util.getconfig(mymakeconffile, tolerant=1, allow_sourcing=True)
+ except ImportError:
+ with open(mymakeconffile, "r") as myf:
+ mylines = myf.readlines()
+ return parse_makeconf(mylines)
+ except Exception:
+ raise CatalystError("Could not parse make.conf file " +
+ mymakeconffile, print_traceback=True)
+ else:
+ makeconf = {}
+ return makeconf
+
+
+def pathcompare(path1, path2):
+ # Change double slashes to slash
+ path1 = re.sub(r"//", r"/", path1)
+ path2 = re.sub(r"//", r"/", path2)
+ # Removing ending slash
+ path1 = re.sub("/$", "", path1)
+ path2 = re.sub("/$", "", path2)
+
+ if path1 == path2:
+ return 1
+ return 0
def ismount(path):
- """Like os.path.ismount, but also support bind mounts"""
- if os.path.ismount(path):
- return 1
- a=os.popen("mount")
- mylines=a.readlines()
- a.close()
- for line in mylines:
- mysplit=line.split()
- if pathcompare(path,mysplit[2]):
- return 1
- return 0
-
-
-def addl_arg_parse(myspec,addlargs,requiredspec,validspec):
- "helper function to help targets parse additional arguments"
- messages = []
- for x in addlargs.keys():
- if x not in validspec and x not in valid_config_file_values and x not in requiredspec:
- messages.append("Argument \""+x+"\" not recognized.")
- else:
- myspec[x]=addlargs[x]
-
- for x in requiredspec:
- if x not in myspec:
- messages.append("Required argument \""+x+"\" not specified.")
-
- if messages:
- raise CatalystError('\n\tAlso: '.join(messages))
+ """Like os.path.ismount, but also support bind mounts"""
+ if os.path.ismount(path):
+ return 1
+ a = os.popen("mount")
+ mylines = a.readlines()
+ a.close()
+ for line in mylines:
+ mysplit = line.split()
+ if pathcompare(path, mysplit[2]):
+ return 1
+ return 0
+
+
+def addl_arg_parse(myspec, addlargs, requiredspec, validspec):
+ "helper function to help targets parse additional arguments"
+ messages = []
+ for x in addlargs.keys():
+ if x not in validspec and x not in valid_config_file_values and x not in requiredspec:
+ messages.append("Argument \""+x+"\" not recognized.")
+ else:
+ myspec[x] = addlargs[x]
+
+ for x in requiredspec:
+ if x not in myspec:
+ messages.append("Required argument \""+x+"\" not specified.")
+
+ if messages:
+ raise CatalystError('\n\tAlso: '.join(messages))
def countdown(secs=5, doing="Starting"):
- # Don't sleep if this is non-interactive
- if not os.isatty(sys.stdin.fileno()) or secs == 0:
- return
+ # Don't sleep if this is non-interactive
+ if not os.isatty(sys.stdin.fileno()) or secs == 0:
+ return
- sys.stdout.write(
- ('>>> Waiting %s seconds before starting...\n'
- '>>> (Control-C to abort)...\n'
- '%s in: ') % (secs, doing))
- for sec in reversed(range(1, secs + 1)):
- sys.stdout.write(str(sec) + " ")
- sys.stdout.flush()
- time.sleep(1)
- sys.stdout.write('\n')
+ sys.stdout.write(
+ ('>>> Waiting %s seconds before starting...\n'
+ '>>> (Control-C to abort)...\n'
+ '%s in: ') % (secs, doing))
+ for sec in reversed(range(1, secs + 1)):
+ sys.stdout.write(str(sec) + " ")
+ sys.stdout.flush()
+ time.sleep(1)
+ sys.stdout.write('\n')
def normpath(mypath):
- """Clean up a little more than os.path.normpath
-
- Namely:
- - Make sure leading // is turned into /.
- - Leave trailing slash intact.
- """
- TrailingSlash=False
- if mypath[-1] == "/":
- TrailingSlash=True
- newpath = os.path.normpath(mypath)
- if len(newpath) > 1:
- if newpath[:2] == "//":
- newpath = newpath[1:]
- if TrailingSlash:
- newpath=newpath+'/'
- return newpath
+ """Clean up a little more than os.path.normpath
+
+ Namely:
+ - Make sure leading // is turned into /.
+ - Leave trailing slash intact.
+ """
+ TrailingSlash = False
+ if mypath[-1] == "/":
+ TrailingSlash = True
+ newpath = os.path.normpath(mypath)
+ if len(newpath) > 1:
+ if newpath[:2] == "//":
+ newpath = newpath[1:]
+ if TrailingSlash:
+ newpath = newpath+'/'
+ return newpath
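
A short usage sketch of cmd() as defined above (the echo invocations
are illustrative): a string argument is wrapped in /bin/bash -c, a
list is executed directly, and any nonzero exit raises CatalystError
after fail_func (if given) has run.

    from catalyst.support import cmd

    cmd('echo "run through bash"')    # str: becomes ['/bin/bash', '-c', ...]
    cmd(['echo', 'run', 'directly'])  # list: passed to Popen verbatim
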
diff --git a/catalyst/targets/embedded.py b/catalyst/targets/embedded.py
index 97604503..e288228e 100644
--- a/catalyst/targets/embedded.py
+++ b/catalyst/targets/embedded.py
@@ -14,42 +14,44 @@ from catalyst import log
from catalyst.support import normpath
from catalyst.base.stagebase import StageBase
+
class embedded(StageBase):
- """
- Builder class for embedded target
- """
- required_values = frozenset()
- valid_values = required_values | frozenset([
- "boot/kernel",
- "embedded/empty",
- "embedded/fs-finish",
- "embedded/fs-ops",
- "embedded/fs-prepare",
- "embedded/fs-type",
- "embedded/linuxrc",
- "embedded/mergeroot",
- "embedded/packages",
- "embedded/rm",
- "embedded/runscript",
- "embedded/unmerge",
- "embedded/use",
- ])
-
- def __init__(self,spec,addlargs):
- StageBase.__init__(self,spec,addlargs)
-
- def set_action_sequence(self):
- self.settings["action_sequence"]=["dir_setup","unpack","unpack_snapshot",\
- "config_profile_link","setup_confdir",\
- "portage_overlay","bind","chroot_setup",\
- "setup_environment","build_kernel","build_packages",\
- "bootloader","root_overlay","fsscript","unmerge",\
- "unbind","remove","empty","clean","capture","clear_autoresume"]
-
- def set_stage_path(self):
- self.settings["stage_path"]=normpath(self.settings["chroot_path"]+"/tmp/mergeroot")
- log.info('embedded stage path is %s', self.settings['stage_path'])
-
- def set_root_path(self):
- self.settings["root_path"]=normpath("/tmp/mergeroot")
- log.info('embedded root path is %s', self.settings['root_path'])
+ """
+ Builder class for embedded target
+ """
+ required_values = frozenset()
+ valid_values = required_values | frozenset([
+ "boot/kernel",
+ "embedded/empty",
+ "embedded/fs-finish",
+ "embedded/fs-ops",
+ "embedded/fs-prepare",
+ "embedded/fs-type",
+ "embedded/linuxrc",
+ "embedded/mergeroot",
+ "embedded/packages",
+ "embedded/rm",
+ "embedded/runscript",
+ "embedded/unmerge",
+ "embedded/use",
+ ])
+
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
+
+ def set_action_sequence(self):
+ self.settings["action_sequence"] = ["dir_setup", "unpack", "unpack_snapshot",
+ "config_profile_link", "setup_confdir",
+ "portage_overlay", "bind", "chroot_setup",
+ "setup_environment", "build_kernel", "build_packages",
+ "bootloader", "root_overlay", "fsscript", "unmerge",
+ "unbind", "remove", "empty", "clean", "capture", "clear_autoresume"]
+
+ def set_stage_path(self):
+ self.settings["stage_path"] = normpath(
+ self.settings["chroot_path"]+"/tmp/mergeroot")
+ log.info('embedded stage path is %s', self.settings['stage_path'])
+
+ def set_root_path(self):
+ self.settings["root_path"] = normpath("/tmp/mergeroot")
+ log.info('embedded root path is %s', self.settings['root_path'])
diff --git a/catalyst/targets/livecd_stage1.py b/catalyst/targets/livecd_stage1.py
index fece8f79..f4f6b512 100644
--- a/catalyst/targets/livecd_stage1.py
+++ b/catalyst/targets/livecd_stage1.py
@@ -9,46 +9,48 @@ from catalyst.base.stagebase import StageBase
class livecd_stage1(StageBase):
- """
- Builder class for LiveCD stage1.
- """
- required_values = frozenset([
- "livecd/packages",
- ])
- valid_values = required_values | frozenset([
- "livecd/use",
- ])
-
- def __init__(self,spec,addlargs):
- StageBase.__init__(self,spec,addlargs)
-
- def set_action_sequence(self):
- self.settings["action_sequence"]=["unpack","unpack_snapshot",\
- "config_profile_link","setup_confdir","portage_overlay",\
- "bind","chroot_setup","setup_environment","build_packages",\
- "unbind", "clean"]
- self.set_completion_action_sequences()
-
- def set_spec_prefix(self):
- self.settings["spec_prefix"]="livecd"
-
- def set_catalyst_use(self):
- StageBase.set_catalyst_use(self)
- if "catalyst_use" in self.settings:
- self.settings["catalyst_use"].append("livecd")
- else:
- self.settings["catalyst_use"] = ["livecd"]
-
- def set_packages(self):
- StageBase.set_packages(self)
- if self.settings["spec_prefix"]+"/packages" in self.settings:
- if isinstance(self.settings[self.settings['spec_prefix']+'/packages'], str):
- self.settings[self.settings["spec_prefix"]+"/packages"] = \
- self.settings[self.settings["spec_prefix"]+"/packages"].split()
-
- def set_pkgcache_path(self):
- if "pkgcache_path" in self.settings:
- if not isinstance(self.settings['pkgcache_path'], str):
- self.settings["pkgcache_path"] = normpath(' '.join(self.settings["pkgcache_path"]))
- else:
- StageBase.set_pkgcache_path(self)
+ """
+ Builder class for LiveCD stage1.
+ """
+ required_values = frozenset([
+ "livecd/packages",
+ ])
+ valid_values = required_values | frozenset([
+ "livecd/use",
+ ])
+
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
+
+ def set_action_sequence(self):
+ self.settings["action_sequence"] = ["unpack", "unpack_snapshot",
+ "config_profile_link", "setup_confdir", "portage_overlay",
+ "bind", "chroot_setup", "setup_environment", "build_packages",
+ "unbind", "clean"]
+ self.set_completion_action_sequences()
+
+ def set_spec_prefix(self):
+ self.settings["spec_prefix"] = "livecd"
+
+ def set_catalyst_use(self):
+ StageBase.set_catalyst_use(self)
+ if "catalyst_use" in self.settings:
+ self.settings["catalyst_use"].append("livecd")
+ else:
+ self.settings["catalyst_use"] = ["livecd"]
+
+ def set_packages(self):
+ StageBase.set_packages(self)
+ if self.settings["spec_prefix"]+"/packages" in self.settings:
+ if isinstance(self.settings[self.settings['spec_prefix']+'/packages'], str):
+ self.settings[self.settings["spec_prefix"]+"/packages"] = \
+ self.settings[self.settings["spec_prefix"] +
+ "/packages"].split()
+
+ def set_pkgcache_path(self):
+ if "pkgcache_path" in self.settings:
+ if not isinstance(self.settings['pkgcache_path'], str):
+ self.settings["pkgcache_path"] = normpath(
+ ' '.join(self.settings["pkgcache_path"]))
+ else:
+ StageBase.set_pkgcache_path(self)
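
The isinstance(..., str) check in set_packages() above is a
normalization pattern that recurs across targets (see netboot and
livecd_stage2 below): spec values arrive as whitespace-separated
strings and are split into lists on first use. A standalone sketch
with invented values:

    settings = {'livecd/packages': 'app-editors/nano sys-apps/less'}
    key = 'livecd/packages'
    if isinstance(settings[key], str):
        settings[key] = settings[key].split()
    assert settings[key] == ['app-editors/nano', 'sys-apps/less']
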
diff --git a/catalyst/targets/livecd_stage2.py b/catalyst/targets/livecd_stage2.py
index c917f9fd..d41321e6 100644
--- a/catalyst/targets/livecd_stage2.py
+++ b/catalyst/targets/livecd_stage2.py
@@ -9,112 +9,112 @@ from catalyst.base.stagebase import StageBase
class livecd_stage2(StageBase):
- """
- Builder class for a LiveCD stage2 build.
- """
- required_values = frozenset([
- "boot/kernel",
- ])
- valid_values = required_values | frozenset([
- "livecd/bootargs",
- "livecd/cdtar",
- "livecd/depclean",
- "livecd/empty",
- "livecd/fsops",
- "livecd/fsscript",
- "livecd/fstype",
- "livecd/gk_mainargs",
- "livecd/iso",
- "livecd/linuxrc",
- "livecd/modblacklist",
- "livecd/motd",
- "livecd/overlay",
- "livecd/rcadd",
- "livecd/rcdel",
- "livecd/readme",
- "livecd/rm",
- "livecd/root_overlay",
- "livecd/splash_theme",
- "livecd/type",
- "livecd/unmerge",
- "livecd/users",
- "livecd/verify",
- "livecd/volid",
- "livecd/xdm",
- "livecd/xinitrc",
- "livecd/xsession",
- "portage_overlay",
- ])
+ """
+ Builder class for a LiveCD stage2 build.
+ """
+ required_values = frozenset([
+ "boot/kernel",
+ ])
+ valid_values = required_values | frozenset([
+ "livecd/bootargs",
+ "livecd/cdtar",
+ "livecd/depclean",
+ "livecd/empty",
+ "livecd/fsops",
+ "livecd/fsscript",
+ "livecd/fstype",
+ "livecd/gk_mainargs",
+ "livecd/iso",
+ "livecd/linuxrc",
+ "livecd/modblacklist",
+ "livecd/motd",
+ "livecd/overlay",
+ "livecd/rcadd",
+ "livecd/rcdel",
+ "livecd/readme",
+ "livecd/rm",
+ "livecd/root_overlay",
+ "livecd/splash_theme",
+ "livecd/type",
+ "livecd/unmerge",
+ "livecd/users",
+ "livecd/verify",
+ "livecd/volid",
+ "livecd/xdm",
+ "livecd/xinitrc",
+ "livecd/xsession",
+ "portage_overlay",
+ ])
- def __init__(self,spec,addlargs):
- StageBase.__init__(self,spec,addlargs)
- if "livecd/type" not in self.settings:
- self.settings["livecd/type"] = "generic-livecd"
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
+ if "livecd/type" not in self.settings:
+ self.settings["livecd/type"] = "generic-livecd"
- file_locate(self.settings, ["cdtar","controller_file"])
+ file_locate(self.settings, ["cdtar", "controller_file"])
- def set_spec_prefix(self):
- self.settings["spec_prefix"]="livecd"
+ def set_spec_prefix(self):
+ self.settings["spec_prefix"] = "livecd"
- def set_target_path(self):
- '''Set the target path for the finished stage.
+ def set_target_path(self):
+ '''Set the target path for the finished stage.
- This method runs the StageBase.set_target_path mehtod,
- and additionally creates a staging directory for assembling
- the final components needed to produce the iso image.
- '''
- super(livecd_stage2, self).set_target_path()
- clear_dir(self.settings['target_path'])
+ This method runs the StageBase.set_target_path method,
+ and additionally creates a staging directory for assembling
+ the final components needed to produce the iso image.
+ '''
+ super(livecd_stage2, self).set_target_path()
+ clear_dir(self.settings['target_path'])
- def run_local(self):
- # what modules do we want to blacklist?
- if "livecd/modblacklist" in self.settings:
- path = normpath(self.settings["chroot_path"] +
- "/etc/modprobe.d/blacklist.conf")
- try:
- with open(path, "a") as myf:
- myf.write("\n#Added by Catalyst:")
- # workaround until config.py is using configparser
- if isinstance(self.settings["livecd/modblacklist"], str):
- self.settings["livecd/modblacklist"] = self.settings[
- "livecd/modblacklist"].split()
- for x in self.settings["livecd/modblacklist"]:
- myf.write("\nblacklist "+x)
- except:
- self.unbind()
- raise CatalystError("Couldn't open " +
- self.settings["chroot_path"] +
- "/etc/modprobe.d/blacklist.conf.",
- print_traceback=True)
+ def run_local(self):
+ # what modules do we want to blacklist?
+ if "livecd/modblacklist" in self.settings:
+ path = normpath(self.settings["chroot_path"] +
+ "/etc/modprobe.d/blacklist.conf")
+ try:
+ with open(path, "a") as myf:
+ myf.write("\n#Added by Catalyst:")
+ # workaround until config.py is using configparser
+ if isinstance(self.settings["livecd/modblacklist"], str):
+ self.settings["livecd/modblacklist"] = self.settings[
+ "livecd/modblacklist"].split()
+ for x in self.settings["livecd/modblacklist"]:
+ myf.write("\nblacklist "+x)
+ except:
+ self.unbind()
+ raise CatalystError("Couldn't open " +
+ self.settings["chroot_path"] +
+ "/etc/modprobe.d/blacklist.conf.",
+ print_traceback=True)
- def set_action_sequence(self):
- self.settings["action_sequence"]=[
- "unpack",
- "unpack_snapshot",
- "config_profile_link",
- "setup_confdir",
- "portage_overlay",
- "bind",
- "chroot_setup",
- "setup_environment",
- "run_local",
- "build_kernel"
- ]
- if "fetch" not in self.settings["options"]:
- self.settings["action_sequence"] += [
- "bootloader",
- "preclean",
- "livecd_update",
- "root_overlay",
- "fsscript",
- "rcupdate",
- "unmerge",
- "unbind",
- "remove",
- "empty",
- "clean",
- "target_setup",
- "setup_overlay",
- "create_iso"
- ]
- self.settings["action_sequence"].append("clear_autoresume")
+ def set_action_sequence(self):
+ self.settings["action_sequence"] = [
+ "unpack",
+ "unpack_snapshot",
+ "config_profile_link",
+ "setup_confdir",
+ "portage_overlay",
+ "bind",
+ "chroot_setup",
+ "setup_environment",
+ "run_local",
+ "build_kernel"
+ ]
+ if "fetch" not in self.settings["options"]:
+ self.settings["action_sequence"] += [
+ "bootloader",
+ "preclean",
+ "livecd_update",
+ "root_overlay",
+ "fsscript",
+ "rcupdate",
+ "unmerge",
+ "unbind",
+ "remove",
+ "empty",
+ "clean",
+ "target_setup",
+ "setup_overlay",
+ "create_iso"
+ ]
+ self.settings["action_sequence"].append("clear_autoresume")
diff --git a/catalyst/targets/netboot.py b/catalyst/targets/netboot.py
index 95fe3368..12dcf989 100644
--- a/catalyst/targets/netboot.py
+++ b/catalyst/targets/netboot.py
@@ -13,148 +13,155 @@ from catalyst.base.stagebase import StageBase
class netboot(StageBase):
- """
- Builder class for a netboot build, version 2
- """
- required_values = frozenset([
- "boot/kernel",
- ])
- valid_values = required_values | frozenset([
- "netboot/busybox_config",
- "netboot/extra_files",
- "netboot/linuxrc",
- "netboot/overlay",
- "netboot/packages",
- "netboot/root_overlay",
- "netboot/use",
- ])
-
- def __init__(self,spec,addlargs):
- try:
- if "netboot/packages" in addlargs:
- if isinstance(addlargs['netboot/packages'], str):
- loopy=[addlargs["netboot/packages"]]
- else:
- loopy=addlargs["netboot/packages"]
-
- for x in loopy:
- self.valid_values |= {"netboot/packages/"+x+"/files"}
- except:
- raise CatalystError("configuration error in netboot/packages.")
-
- StageBase.__init__(self,spec,addlargs)
- self.settings["merge_path"]=normpath("/tmp/image/")
-
- def set_target_path(self):
- self.settings["target_path"]=normpath(self.settings["storedir"]+"/builds/"+\
- self.settings["target_subpath"])
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("setup_target_path"):
- log.notice('Resume point detected, skipping target path setup operation...')
- else:
- # first clean up any existing target stuff
- clear_path(self.settings['target_path'])
- self.resume.enable("setup_target_path")
- ensure_dirs(self.settings["storedir"]+"/builds/")
-
- def copy_files_to_image(self):
- # copies specific files from the buildroot to merge_path
- myfiles=[]
-
- # check for autoresume point
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("copy_files_to_image"):
- log.notice('Resume point detected, skipping target path setup operation...')
- else:
- if "netboot/packages" in self.settings:
- if isinstance(self.settings['netboot/packages'], str):
- loopy=[self.settings["netboot/packages"]]
- else:
- loopy=self.settings["netboot/packages"]
-
- for x in loopy:
- if "netboot/packages/"+x+"/files" in self.settings:
- if isinstance(self.settings['netboot/packages/'+x+'/files'], list):
- myfiles.extend(self.settings["netboot/packages/"+x+"/files"])
- else:
- myfiles.append(self.settings["netboot/packages/"+x+"/files"])
-
- if "netboot/extra_files" in self.settings:
- if isinstance(self.settings['netboot/extra_files'], list):
- myfiles.extend(self.settings["netboot/extra_files"])
- else:
- myfiles.append(self.settings["netboot/extra_files"])
-
- try:
- cmd([self.settings['controller_file'], 'image'] +
- myfiles, env=self.env)
- except CatalystError:
- self.unbind()
- raise CatalystError("Failed to copy files to image!",
- print_traceback=True)
-
- self.resume.enable("copy_files_to_image")
-
- def setup_overlay(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("setup_overlay"):
- log.notice('Resume point detected, skipping setup_overlay operation...')
- else:
- if "netboot/overlay" in self.settings:
- for x in self.settings["netboot/overlay"]:
- if os.path.exists(x):
- cmd(['rsync', '-a', x + '/',
- self.settings['chroot_path'] + self.settings['merge_path']],
- env=self.env)
- self.resume.enable("setup_overlay")
-
- def move_kernels(self):
- # we're done, move the kernels to builds/*
- # no auto resume here as we always want the
- # freshest images moved
- try:
- cmd([self.settings['controller_file'], 'final'], env=self.env)
- log.notice('Netboot Build Finished!')
- except CatalystError:
- self.unbind()
- raise CatalystError("Failed to move kernel images!",
- print_traceback=True)
-
- def remove(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("remove"):
- log.notice('Resume point detected, skipping remove operation...')
- else:
- if self.settings["spec_prefix"]+"/rm" in self.settings:
- for x in self.settings[self.settings["spec_prefix"]+"/rm"]:
- # we're going to shell out for all these cleaning operations,
- # so we get easy glob handling
- log.notice('netboot: removing %s', x)
- clear_path(self.settings['chroot_path'] +
- self.settings['merge_path'] + x)
-
- def empty(self):
- if "autoresume" in self.settings["options"] \
- and self.resume.is_enabled("empty"):
- log.notice('Resume point detected, skipping empty operation...')
- else:
- if "netboot/empty" in self.settings:
- if isinstance(self.settings['netboot/empty'], str):
- self.settings["netboot/empty"]=self.settings["netboot/empty"].split()
- for x in self.settings["netboot/empty"]:
- myemp=self.settings["chroot_path"] + self.settings["merge_path"] + x
- if not os.path.isdir(myemp):
- log.warning('not a directory or does not exist, skipping "empty" operation: %s', x)
- continue
- log.info('Emptying directory %s', x)
- # stat the dir, delete the dir, recreate the dir and set
- # the proper perms and ownership
- clear_dir(myemp)
- self.resume.enable("empty")
-
- def set_action_sequence(self):
- self.settings["action_sequence"]=["unpack","unpack_snapshot","config_profile_link",
- "setup_confdir","portage_overlay","bind","chroot_setup",\
- "setup_environment","build_packages","root_overlay",\
- "copy_files_to_image","setup_overlay","build_kernel","move_kernels",\
- "remove","empty","unbind","clean","clear_autoresume"]
+ """
+ Builder class for a netboot build, version 2
+ """
+ required_values = frozenset([
+ "boot/kernel",
+ ])
+ valid_values = required_values | frozenset([
+ "netboot/busybox_config",
+ "netboot/extra_files",
+ "netboot/linuxrc",
+ "netboot/overlay",
+ "netboot/packages",
+ "netboot/root_overlay",
+ "netboot/use",
+ ])
+
+ def __init__(self, spec, addlargs):
+ try:
+ if "netboot/packages" in addlargs:
+ if isinstance(addlargs['netboot/packages'], str):
+ loopy = [addlargs["netboot/packages"]]
+ else:
+ loopy = addlargs["netboot/packages"]
+
+ for x in loopy:
+ self.valid_values |= {"netboot/packages/"+x+"/files"}
+ except:
+ raise CatalystError("configuration error in netboot/packages.")
+
+ StageBase.__init__(self, spec, addlargs)
+ self.settings["merge_path"] = normpath("/tmp/image/")
+
+ def set_target_path(self):
+ self.settings["target_path"] = normpath(self.settings["storedir"]+"/builds/" +
+ self.settings["target_subpath"])
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("setup_target_path"):
+ log.notice(
+ 'Resume point detected, skipping target path setup operation...')
+ else:
+ # first clean up any existing target stuff
+ clear_path(self.settings['target_path'])
+ self.resume.enable("setup_target_path")
+ ensure_dirs(self.settings["storedir"]+"/builds/")
+
+ def copy_files_to_image(self):
+ # copies specific files from the buildroot to merge_path
+ myfiles = []
+
+ # check for autoresume point
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("copy_files_to_image"):
+ log.notice(
+ 'Resume point detected, skipping copy_files_to_image operation...')
+ else:
+ if "netboot/packages" in self.settings:
+ if isinstance(self.settings['netboot/packages'], str):
+ loopy = [self.settings["netboot/packages"]]
+ else:
+ loopy = self.settings["netboot/packages"]
+
+ for x in loopy:
+ if "netboot/packages/"+x+"/files" in self.settings:
+ if isinstance(self.settings['netboot/packages/'+x+'/files'], list):
+ myfiles.extend(
+ self.settings["netboot/packages/"+x+"/files"])
+ else:
+ myfiles.append(
+ self.settings["netboot/packages/"+x+"/files"])
+
+ if "netboot/extra_files" in self.settings:
+ if isinstance(self.settings['netboot/extra_files'], list):
+ myfiles.extend(self.settings["netboot/extra_files"])
+ else:
+ myfiles.append(self.settings["netboot/extra_files"])
+
+ try:
+ cmd([self.settings['controller_file'], 'image'] +
+ myfiles, env=self.env)
+ except CatalystError:
+ self.unbind()
+ raise CatalystError("Failed to copy files to image!",
+ print_traceback=True)
+
+ self.resume.enable("copy_files_to_image")
+
+ def setup_overlay(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("setup_overlay"):
+ log.notice(
+ 'Resume point detected, skipping setup_overlay operation...')
+ else:
+ if "netboot/overlay" in self.settings:
+ for x in self.settings["netboot/overlay"]:
+ if os.path.exists(x):
+ cmd(['rsync', '-a', x + '/',
+ self.settings['chroot_path'] + self.settings['merge_path']],
+ env=self.env)
+ self.resume.enable("setup_overlay")
+
+ def move_kernels(self):
+ # we're done, move the kernels to builds/*
+ # no auto resume here as we always want the
+ # freshest images moved
+ try:
+ cmd([self.settings['controller_file'], 'final'], env=self.env)
+ log.notice('Netboot Build Finished!')
+ except CatalystError:
+ self.unbind()
+ raise CatalystError("Failed to move kernel images!",
+ print_traceback=True)
+
+ def remove(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("remove"):
+ log.notice('Resume point detected, skipping remove operation...')
+ else:
+ if self.settings["spec_prefix"]+"/rm" in self.settings:
+ for x in self.settings[self.settings["spec_prefix"]+"/rm"]:
+ # we're going to shell out for all these cleaning operations,
+ # so we get easy glob handling
+ log.notice('netboot: removing %s', x)
+ clear_path(self.settings['chroot_path'] +
+ self.settings['merge_path'] + x)
+
+ def empty(self):
+ if "autoresume" in self.settings["options"] \
+ and self.resume.is_enabled("empty"):
+ log.notice('Resume point detected, skipping empty operation...')
+ else:
+ if "netboot/empty" in self.settings:
+ if isinstance(self.settings['netboot/empty'], str):
+ self.settings["netboot/empty"] = self.settings["netboot/empty"].split()
+ for x in self.settings["netboot/empty"]:
+ myemp = self.settings["chroot_path"] + \
+ self.settings["merge_path"] + x
+ if not os.path.isdir(myemp):
+ log.warning(
+ 'not a directory or does not exist, skipping "empty" operation: %s', x)
+ continue
+ log.info('Emptying directory %s', x)
+ # stat the dir, delete the dir, recreate the dir and set
+ # the proper perms and ownership
+ clear_dir(myemp)
+ self.resume.enable("empty")
+
+ def set_action_sequence(self):
+ self.settings["action_sequence"] = ["unpack", "unpack_snapshot", "config_profile_link",
+ "setup_confdir", "portage_overlay", "bind", "chroot_setup",
+ "setup_environment", "build_packages", "root_overlay",
+ "copy_files_to_image", "setup_overlay", "build_kernel", "move_kernels",
+ "remove", "empty", "unbind", "clean", "clear_autoresume"]
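
Every method in this netboot class gates itself on the same autoresume idiom: check a named resume point, skip the work if it is already recorded, otherwise do the work and record the point. A minimal standalone sketch of that idiom (the ResumePoints class and its marker-file layout below are invented for illustration; catalyst's real resume support lives in catalyst/base/resume.py):

    import os

    class ResumePoints:
        """Toy resume store: one empty marker file per completed step."""
        def __init__(self, basedir):
            self.basedir = basedir
            os.makedirs(basedir, exist_ok=True)

        def is_enabled(self, point):
            return os.path.exists(os.path.join(self.basedir, point))

        def enable(self, point):
            open(os.path.join(self.basedir, point), 'w').close()

    def run_step(resume, point, work):
        # Same shape as the guards above: skip if recorded, else do and record.
        if resume.is_enabled(point):
            print('Resume point detected, skipping %s operation...' % point)
        else:
            work()
            resume.enable(point)
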
diff --git a/catalyst/targets/snapshot.py b/catalyst/targets/snapshot.py
index d876acbe..16563f14 100644
--- a/catalyst/targets/snapshot.py
+++ b/catalyst/targets/snapshot.py
@@ -12,94 +12,96 @@ from catalyst.fileops import (clear_dir, ensure_dirs)
class snapshot(TargetBase, GenBase):
- """
- Builder class for snapshots.
- """
- required_values = frozenset([
- "target",
- "version_stamp",
- ])
- valid_values = required_values | frozenset([
- "compression_mode",
- ])
-
- def __init__(self,myspec,addlargs):
- TargetBase.__init__(self, myspec, addlargs)
- GenBase.__init__(self,myspec)
-
- self.settings["target_subpath"]="repos"
- st=self.settings["storedir"]
- self.settings["snapshot_path"] = normpath(st + "/snapshots/"
- + self.settings["snapshot_name"]
- + self.settings["version_stamp"])
- self.settings["tmp_path"]=normpath(st+"/tmp/"+self.settings["target_subpath"])
-
- def setup(self):
- x=normpath(self.settings["storedir"]+"/snapshots")
- ensure_dirs(x)
-
- def run(self):
- if "purgeonly" in self.settings["options"]:
- self.purge()
- return True
-
- if "purge" in self.settings["options"]:
- self.purge()
-
- success = True
- self.setup()
- log.notice('Creating %s tree snapshot %s from %s ...',
- self.settings["repo_name"], self.settings['version_stamp'],
- self.settings['portdir'])
-
- mytmp=self.settings["tmp_path"]
- ensure_dirs(mytmp)
-
- cmd(['rsync', '-a', '--no-o', '--no-g', '--delete',
- '--exclude=/packages/',
- '--exclude=/distfiles/',
- '--exclude=/local/',
- '--exclude=CVS/',
- '--exclude=.svn',
- '--exclude=.git/',
- '--filter=H_**/files/digest-*',
- self.settings['portdir'] + '/',
- mytmp + '/' + self.settings['repo_name'] + '/'],
- env=self.env)
-
- log.notice('Compressing %s snapshot tarball ...', self.settings["repo_name"])
- compressor = CompressMap(self.settings["compress_definitions"],
- env=self.env, default_mode=self.settings['compression_mode'],
- comp_prog=self.settings["comp_prog"])
- infodict = compressor.create_infodict(
- source=self.settings["repo_name"],
- destination=self.settings["snapshot_path"],
- basedir=mytmp,
- filename=self.settings["snapshot_path"],
- mode=self.settings["compression_mode"],
- auto_extension=True
- )
- if not compressor.compress(infodict):
- success = False
- log.error('Snapshot compression failure')
- else:
- filename = '.'.join([self.settings["snapshot_path"],
- compressor.extension(self.settings["compression_mode"])])
- log.notice('Snapshot successfully written to %s', filename)
- self.gen_contents_file(filename)
- self.gen_digest_file(filename)
- if "keepwork" not in self.settings["options"]:
- self.cleanup()
- if success:
- log.info('snapshot: complete!')
- return success
-
- def kill_chroot_pids(self):
- pass
-
- def cleanup(self):
- log.info('Cleaning up ...')
- self.purge()
-
- def purge(self):
- clear_dir(self.settings['tmp_path'])
+ """
+ Builder class for snapshots.
+ """
+ required_values = frozenset([
+ "target",
+ "version_stamp",
+ ])
+ valid_values = required_values | frozenset([
+ "compression_mode",
+ ])
+
+ def __init__(self, myspec, addlargs):
+ TargetBase.__init__(self, myspec, addlargs)
+ GenBase.__init__(self, myspec)
+
+ self.settings["target_subpath"] = "repos"
+ st = self.settings["storedir"]
+ self.settings["snapshot_path"] = normpath(st + "/snapshots/"
+ + self.settings["snapshot_name"]
+ + self.settings["version_stamp"])
+ self.settings["tmp_path"] = normpath(
+ st+"/tmp/"+self.settings["target_subpath"])
+
+ def setup(self):
+ x = normpath(self.settings["storedir"]+"/snapshots")
+ ensure_dirs(x)
+
+ def run(self):
+ if "purgeonly" in self.settings["options"]:
+ self.purge()
+ return True
+
+ if "purge" in self.settings["options"]:
+ self.purge()
+
+ success = True
+ self.setup()
+ log.notice('Creating %s tree snapshot %s from %s ...',
+ self.settings["repo_name"], self.settings['version_stamp'],
+ self.settings['portdir'])
+
+ mytmp = self.settings["tmp_path"]
+ ensure_dirs(mytmp)
+
+ cmd(['rsync', '-a', '--no-o', '--no-g', '--delete',
+ '--exclude=/packages/',
+ '--exclude=/distfiles/',
+ '--exclude=/local/',
+ '--exclude=CVS/',
+ '--exclude=.svn',
+ '--exclude=.git/',
+ '--filter=H_**/files/digest-*',
+ self.settings['portdir'] + '/',
+ mytmp + '/' + self.settings['repo_name'] + '/'],
+ env=self.env)
+
+ log.notice('Compressing %s snapshot tarball ...',
+ self.settings["repo_name"])
+ compressor = CompressMap(self.settings["compress_definitions"],
+ env=self.env, default_mode=self.settings['compression_mode'],
+ comp_prog=self.settings["comp_prog"])
+ infodict = compressor.create_infodict(
+ source=self.settings["repo_name"],
+ destination=self.settings["snapshot_path"],
+ basedir=mytmp,
+ filename=self.settings["snapshot_path"],
+ mode=self.settings["compression_mode"],
+ auto_extension=True
+ )
+ if not compressor.compress(infodict):
+ success = False
+ log.error('Snapshot compression failure')
+ else:
+ filename = '.'.join([self.settings["snapshot_path"],
+ compressor.extension(self.settings["compression_mode"])])
+ log.notice('Snapshot successfully written to %s', filename)
+ self.gen_contents_file(filename)
+ self.gen_digest_file(filename)
+ if "keepwork" not in self.settings["options"]:
+ self.cleanup()
+ if success:
+ log.info('snapshot: complete!')
+ return success
+
+ def kill_chroot_pids(self):
+ pass
+
+ def cleanup(self):
+ log.info('Cleaning up ...')
+ self.purge()
+
+ def purge(self):
+ clear_dir(self.settings['tmp_path'])
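
run() above is a two-phase pipeline: rsync the repo into tmp_path with generated and VCS paths excluded, then hand the staged tree to the compressor. The staging phase, reduced to a standalone call (the paths passed at the bottom are examples, not catalyst defaults):

    import subprocess

    def stage_repo(portdir, workdir, repo_name):
        # Mirror the rsync invocation from snapshot.run(), minus the spec plumbing.
        subprocess.run(
            ['rsync', '-a', '--no-o', '--no-g', '--delete',
             '--exclude=/packages/', '--exclude=/distfiles/',
             '--exclude=/local/', '--exclude=CVS/',
             '--exclude=.svn', '--exclude=.git/',
             '--filter=H_**/files/digest-*',
             portdir + '/', workdir + '/' + repo_name + '/'],
            check=True)

    stage_repo('/var/db/repos/gentoo', '/var/tmp/catalyst/tmp/repos', 'gentoo')
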
diff --git a/catalyst/targets/stage1.py b/catalyst/targets/stage1.py
index a1a29cff..daa0b854 100644
--- a/catalyst/targets/stage1.py
+++ b/catalyst/targets/stage1.py
@@ -10,114 +10,117 @@ from catalyst.base.stagebase import StageBase
class stage1(StageBase):
- """
- Builder class for a stage1 installation tarball build.
- """
- required_values = frozenset()
- valid_values = required_values | frozenset([
- "chost",
- "update_seed",
- "update_seed_command",
- ])
-
- def __init__(self,spec,addlargs):
- StageBase.__init__(self,spec,addlargs)
-
- def set_stage_path(self):
- self.settings["stage_path"]=normpath(self.settings["chroot_path"]+self.settings["root_path"])
- log.notice('stage1 stage path is %s', self.settings['stage_path'])
-
- def set_root_path(self):
- # sets the root path, relative to 'chroot_path', of the stage1 root
- self.settings["root_path"]=normpath("/tmp/stage1root")
- log.info('stage1 root path is %s', self.settings['root_path'])
-
- def set_cleanables(self):
- StageBase.set_cleanables(self)
- self.settings["cleanables"].extend([\
- "/usr/share/zoneinfo", self.settings["port_conf"] + "/package*"])
-
- # XXX: How do these override_foo() functions differ from the ones in StageBase and why aren't they in stage3_target?
- # XXY: It appears the difference is that these functions are actually doing something and the ones in stagebase don't :-(
-	# XXZ: I have a weird suspicion that it's the difference in capitalization
-
- def override_chost(self):
- if "chost" in self.settings:
- self.settings["CHOST"] = self.settings["chost"]
-
- def override_common_flags(self):
- if "common_flags" in self.settings:
- self.settings["COMMON_FLAGS"] = self.settings["common_flags"]
-
- def override_cflags(self):
- if "cflags" in self.settings:
- self.settings["CFLAGS"] = self.settings["cflags"]
-
- def override_cxxflags(self):
- if "cxxflags" in self.settings:
- self.settings["CXXFLAGS"] = self.settings["cxxflags"]
-
- def override_fcflags(self):
- if "fcflags" in self.settings:
- self.settings["FCFLAGS"] = self.settings["fcflags"]
-
- def override_fflags(self):
- if "fflags" in self.settings:
- self.settings["FFLAGS"] = self.settings["fflags"]
-
- def override_ldflags(self):
- if "ldflags" in self.settings:
- self.settings["LDFLAGS"] = self.settings["ldflags"]
-
- def set_portage_overlay(self):
- StageBase.set_portage_overlay(self)
- if "portage_overlay" in self.settings:
- log.warning(
- 'Using an overlay for earlier stages could cause build issues.\n'
- "If you break it, you buy it. Don't complain to us about it.\n"
- "Don't say we did not warn you.")
-
- def set_mounts(self):
- # stage_path/proc probably doesn't exist yet, so create it
- ensure_dirs(self.settings["stage_path"]+"/proc")
-
- # alter the mount mappings to bind mount proc onto it
- self.mounts.append("stage1root/proc")
- self.target_mounts["stage1root/proc"] = "/tmp/stage1root/proc"
- self.mountmap["stage1root/proc"] = "/proc"
-
- def set_completion_action_sequences(self):
- '''Override function for stage1
-
- Its purpose is to move the new stage1root out of the seed stage
- and rename it to the stage1 chroot_path after cleaning the seed stage
- chroot for re-use in stage2 without the need to unpack it.
- '''
- if "fetch" not in self.settings["options"]:
- self.settings["action_sequence"].append("capture")
- if "keepwork" in self.settings["options"]:
- self.settings["action_sequence"].append("clear_autoresume")
- elif "seedcache" in self.settings["options"]:
- self.settings["action_sequence"].append("remove_autoresume")
- self.settings["action_sequence"].append("clean_stage1")
- else:
- self.settings["action_sequence"].append("remove_autoresume")
- self.settings["action_sequence"].append("remove_chroot")
-
-
- def clean_stage1(self):
- '''seedcache is enabled, so salvage the /tmp/stage1root,
- remove the seed chroot'''
- log.notice('Salvaging the stage1root from the chroot path ...')
- # move the self.settings["stage_path"] outside of the self.settings["chroot_path"]
- tmp_path = normpath(self.settings["storedir"] + "/tmp/" + "stage1root")
- if move_path(self.settings["stage_path"], tmp_path):
- self.remove_chroot()
- # move it to self.settings["chroot_path"]
- if not move_path(tmp_path, self.settings["chroot_path"]):
- log.error('clean_stage1 failed, see previous log messages for details')
- return False
- log.notice('Successfully moved and cleaned the stage1root for the seedcache')
- return True
-		log.error('clean_stage1 failed to move the stage1root to a temporary location')
- return False
+ """
+ Builder class for a stage1 installation tarball build.
+ """
+ required_values = frozenset()
+ valid_values = required_values | frozenset([
+ "chost",
+ "update_seed",
+ "update_seed_command",
+ ])
+
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
+
+ def set_stage_path(self):
+ self.settings["stage_path"] = normpath(
+ self.settings["chroot_path"]+self.settings["root_path"])
+ log.notice('stage1 stage path is %s', self.settings['stage_path'])
+
+ def set_root_path(self):
+ # sets the root path, relative to 'chroot_path', of the stage1 root
+ self.settings["root_path"] = normpath("/tmp/stage1root")
+ log.info('stage1 root path is %s', self.settings['root_path'])
+
+ def set_cleanables(self):
+ StageBase.set_cleanables(self)
+ self.settings["cleanables"].extend([
+ "/usr/share/zoneinfo", self.settings["port_conf"] + "/package*"])
+
+ # XXX: How do these override_foo() functions differ from the ones in StageBase and why aren't they in stage3_target?
+ # XXY: It appears the difference is that these functions are actually doing something and the ones in stagebase don't :-(
+    # XXZ: I have a weird suspicion that it's the difference in capitalization
+
+ def override_chost(self):
+ if "chost" in self.settings:
+ self.settings["CHOST"] = self.settings["chost"]
+
+ def override_common_flags(self):
+ if "common_flags" in self.settings:
+ self.settings["COMMON_FLAGS"] = self.settings["common_flags"]
+
+ def override_cflags(self):
+ if "cflags" in self.settings:
+ self.settings["CFLAGS"] = self.settings["cflags"]
+
+ def override_cxxflags(self):
+ if "cxxflags" in self.settings:
+ self.settings["CXXFLAGS"] = self.settings["cxxflags"]
+
+ def override_fcflags(self):
+ if "fcflags" in self.settings:
+ self.settings["FCFLAGS"] = self.settings["fcflags"]
+
+ def override_fflags(self):
+ if "fflags" in self.settings:
+ self.settings["FFLAGS"] = self.settings["fflags"]
+
+ def override_ldflags(self):
+ if "ldflags" in self.settings:
+ self.settings["LDFLAGS"] = self.settings["ldflags"]
+
+ def set_portage_overlay(self):
+ StageBase.set_portage_overlay(self)
+ if "portage_overlay" in self.settings:
+ log.warning(
+ 'Using an overlay for earlier stages could cause build issues.\n'
+ "If you break it, you buy it. Don't complain to us about it.\n"
+ "Don't say we did not warn you.")
+
+ def set_mounts(self):
+ # stage_path/proc probably doesn't exist yet, so create it
+ ensure_dirs(self.settings["stage_path"]+"/proc")
+
+ # alter the mount mappings to bind mount proc onto it
+ self.mounts.append("stage1root/proc")
+ self.target_mounts["stage1root/proc"] = "/tmp/stage1root/proc"
+ self.mountmap["stage1root/proc"] = "/proc"
+
+ def set_completion_action_sequences(self):
+ '''Override function for stage1
+
+ Its purpose is to move the new stage1root out of the seed stage
+ and rename it to the stage1 chroot_path after cleaning the seed stage
+ chroot for re-use in stage2 without the need to unpack it.
+ '''
+ if "fetch" not in self.settings["options"]:
+ self.settings["action_sequence"].append("capture")
+ if "keepwork" in self.settings["options"]:
+ self.settings["action_sequence"].append("clear_autoresume")
+ elif "seedcache" in self.settings["options"]:
+ self.settings["action_sequence"].append("remove_autoresume")
+ self.settings["action_sequence"].append("clean_stage1")
+ else:
+ self.settings["action_sequence"].append("remove_autoresume")
+ self.settings["action_sequence"].append("remove_chroot")
+
+ def clean_stage1(self):
+ '''seedcache is enabled, so salvage the /tmp/stage1root,
+ remove the seed chroot'''
+ log.notice('Salvaging the stage1root from the chroot path ...')
+ # move the self.settings["stage_path"] outside of the self.settings["chroot_path"]
+ tmp_path = normpath(self.settings["storedir"] + "/tmp/" + "stage1root")
+ if move_path(self.settings["stage_path"], tmp_path):
+ self.remove_chroot()
+ # move it to self.settings["chroot_path"]
+ if not move_path(tmp_path, self.settings["chroot_path"]):
+ log.error(
+ 'clean_stage1 failed, see previous log messages for details')
+ return False
+ log.notice(
+ 'Successfully moved and cleaned the stage1root for the seedcache')
+ return True
+ log.error(
+            'clean_stage1 failed to move the stage1root to a temporary location')
+ return False
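
clean_stage1() is a three-step shuffle: move stage_path out to a scratch location, remove the spent seed chroot, then move the scratch location into place as chroot_path. Approximately, with shutil standing in for catalyst's move_path/remove_chroot helpers (error handling and logging omitted):

    import os
    import shutil

    def salvage_stage1root(stage_path, chroot_path, storedir):
        tmp_path = os.path.join(storedir, 'tmp', 'stage1root')
        shutil.move(stage_path, tmp_path)   # pull stage1root out of the seed
        shutil.rmtree(chroot_path)          # drop the spent seed chroot
        shutil.move(tmp_path, chroot_path)  # stage1root becomes the new chroot
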
diff --git a/catalyst/targets/stage2.py b/catalyst/targets/stage2.py
index e975b14c..0b7244e3 100644
--- a/catalyst/targets/stage2.py
+++ b/catalyst/targets/stage2.py
@@ -9,40 +9,40 @@ from catalyst.base.stagebase import StageBase
class stage2(StageBase):
- """
- Builder class for a stage2 installation tarball build.
- """
- required_values = frozenset()
- valid_values = required_values | frozenset([
- "chost",
- ])
-
- def __init__(self,spec,addlargs):
- StageBase.__init__(self,spec,addlargs)
-
- # XXX: How do these override_foo() functions differ from the ones in
- # StageBase and why aren't they in stage3_target?
-
- def override_chost(self):
- if "chost" in self.settings:
- self.settings["CHOST"] = self.settings["chost"]
-
- def override_cflags(self):
- if "cflags" in self.settings:
- self.settings["CFLAGS"] = self.settings["cflags"]
-
- def override_cxxflags(self):
- if "cxxflags" in self.settings:
- self.settings["CXXFLAGS"] = self.settings["cxxflags"]
-
- def override_ldflags(self):
- if "ldflags" in self.settings:
- self.settings["LDFLAGS"] = self.settings["ldflags"]
-
- def set_portage_overlay(self):
- StageBase.set_portage_overlay(self)
- if "portage_overlay" in self.settings:
- log.warning(
- 'Using an overlay for earlier stages could cause build issues.\n'
- "If you break it, you buy it. Don't complain to us about it.\n"
- "Don't say we did not warn you.")
+ """
+ Builder class for a stage2 installation tarball build.
+ """
+ required_values = frozenset()
+ valid_values = required_values | frozenset([
+ "chost",
+ ])
+
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
+
+ # XXX: How do these override_foo() functions differ from the ones in
+ # StageBase and why aren't they in stage3_target?
+
+ def override_chost(self):
+ if "chost" in self.settings:
+ self.settings["CHOST"] = self.settings["chost"]
+
+ def override_cflags(self):
+ if "cflags" in self.settings:
+ self.settings["CFLAGS"] = self.settings["cflags"]
+
+ def override_cxxflags(self):
+ if "cxxflags" in self.settings:
+ self.settings["CXXFLAGS"] = self.settings["cxxflags"]
+
+ def override_ldflags(self):
+ if "ldflags" in self.settings:
+ self.settings["LDFLAGS"] = self.settings["ldflags"]
+
+ def set_portage_overlay(self):
+ StageBase.set_portage_overlay(self)
+ if "portage_overlay" in self.settings:
+ log.warning(
+ 'Using an overlay for earlier stages could cause build issues.\n'
+ "If you break it, you buy it. Don't complain to us about it.\n"
+ "Don't say we did not warn you.")
diff --git a/catalyst/targets/stage3.py b/catalyst/targets/stage3.py
index c8532aba..a8192a01 100644
--- a/catalyst/targets/stage3.py
+++ b/catalyst/targets/stage3.py
@@ -8,22 +8,22 @@ from catalyst.base.stagebase import StageBase
class stage3(StageBase):
- """
- Builder class for a stage3 installation tarball build.
- """
- required_values = frozenset()
- valid_values = frozenset()
+ """
+ Builder class for a stage3 installation tarball build.
+ """
+ required_values = frozenset()
+ valid_values = frozenset()
- def __init__(self,spec,addlargs):
- StageBase.__init__(self,spec,addlargs)
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
- def set_portage_overlay(self):
- StageBase.set_portage_overlay(self)
- if "portage_overlay" in self.settings:
- log.warning(
- 'Using an overlay for earlier stages could cause build issues.\n'
- "If you break it, you buy it. Don't complain to us about it.\n"
- "Don't say we did not warn you.")
+ def set_portage_overlay(self):
+ StageBase.set_portage_overlay(self)
+ if "portage_overlay" in self.settings:
+ log.warning(
+ 'Using an overlay for earlier stages could cause build issues.\n'
+ "If you break it, you buy it. Don't complain to us about it.\n"
+ "Don't say we did not warn you.")
- def set_cleanables(self):
- StageBase.set_cleanables(self)
+ def set_cleanables(self):
+ StageBase.set_cleanables(self)
diff --git a/catalyst/targets/stage4.py b/catalyst/targets/stage4.py
index 1e16ff8c..99b67934 100644
--- a/catalyst/targets/stage4.py
+++ b/catalyst/targets/stage4.py
@@ -7,39 +7,39 @@ from catalyst.base.stagebase import StageBase
class stage4(StageBase):
- """
- Builder class for stage4.
- """
- required_values = frozenset([
- "stage4/packages",
- ])
- valid_values = required_values | frozenset([
- "boot/kernel",
- "portage_overlay",
- "splash_theme",
- "stage4/empty",
- "stage4/fsscript",
- "stage4/gk_mainargs",
- "stage4/linuxrc",
- "stage4/rcadd",
- "stage4/rcdel",
- "stage4/rm",
- "stage4/root_overlay",
- "stage4/unmerge",
- "stage4/use",
- ])
+ """
+ Builder class for stage4.
+ """
+ required_values = frozenset([
+ "stage4/packages",
+ ])
+ valid_values = required_values | frozenset([
+ "boot/kernel",
+ "portage_overlay",
+ "splash_theme",
+ "stage4/empty",
+ "stage4/fsscript",
+ "stage4/gk_mainargs",
+ "stage4/linuxrc",
+ "stage4/rcadd",
+ "stage4/rcdel",
+ "stage4/rm",
+ "stage4/root_overlay",
+ "stage4/unmerge",
+ "stage4/use",
+ ])
- def __init__(self,spec,addlargs):
- StageBase.__init__(self,spec,addlargs)
+ def __init__(self, spec, addlargs):
+ StageBase.__init__(self, spec, addlargs)
- def set_cleanables(self):
- self.settings["cleanables"]=["/var/tmp/*","/tmp/*"]
+ def set_cleanables(self):
+ self.settings["cleanables"] = ["/var/tmp/*", "/tmp/*"]
- def set_action_sequence(self):
- self.settings["action_sequence"] = ["unpack", "unpack_snapshot",
- "config_profile_link", "setup_confdir", "portage_overlay",
- "bind", "chroot_setup", "setup_environment", "build_packages",
- "build_kernel", "bootloader", "root_overlay", "fsscript",
- "preclean", "rcupdate", "unmerge", "unbind", "remove", "empty",
- "clean"]
- self.set_completion_action_sequences()
+ def set_action_sequence(self):
+ self.settings["action_sequence"] = ["unpack", "unpack_snapshot",
+ "config_profile_link", "setup_confdir", "portage_overlay",
+ "bind", "chroot_setup", "setup_environment", "build_packages",
+ "build_kernel", "bootloader", "root_overlay", "fsscript",
+ "preclean", "rcupdate", "unmerge", "unbind", "remove", "empty",
+ "clean"]
+ self.set_completion_action_sequences()
diff --git a/catalyst/version.py b/catalyst/version.py
index 25c121f7..dbada51a 100644
--- a/catalyst/version.py
+++ b/catalyst/version.py
@@ -5,62 +5,61 @@ import os
from snakeoil.version import get_git_version as get_ver
-__version__= "3.0.7"
+__version__ = "3.0.7"
_ver = None
def get_git_version(version=__version__):
- """Return: a string describing our version."""
- # pylint: disable=global-statement
- global _ver
- cwd = os.path.dirname(os.path.abspath(__file__))
- version_info = get_ver(cwd)
+ """Return: a string describing our version."""
+ # pylint: disable=global-statement
+ global _ver
+ cwd = os.path.dirname(os.path.abspath(__file__))
+ version_info = get_ver(cwd)
- if not version_info:
- s = "extended version info unavailable"
- elif version_info['tag'] == __version__:
- s = 'released %s' % (version_info['date'],)
- else:
- s = ('vcs version %s, date %s' %
- (version_info['rev'], version_info['date']))
+ if not version_info:
+ s = "extended version info unavailable"
+ elif version_info['tag'] == __version__:
+ s = 'released %s' % (version_info['date'],)
+ else:
+ s = ('vcs version %s, date %s' %
+ (version_info['rev'], version_info['date']))
- _ver = 'Catalyst %s\n%s' % (version, s)
+ _ver = 'Catalyst %s\n%s' % (version, s)
- return _ver
+ return _ver
def get_version(reset=False):
- '''Returns a saved release version string or the
- generated git release version.
- '''
- # pylint: disable=global-statement
- global __version__, _ver
- if _ver and not reset:
- return _ver
- try: # getting the fixed version
- from .verinfo import version
- _ver = version
- __version__ = version.split('\n')[0].split()[1]
- except ImportError: # get the live version
- version = get_git_version()
- return version
-
+ '''Returns a saved release version string or the
+ generated git release version.
+ '''
+ # pylint: disable=global-statement
+ global __version__, _ver
+ if _ver and not reset:
+ return _ver
+ try: # getting the fixed version
+ from .verinfo import version
+ _ver = version
+ __version__ = version.split('\n')[0].split()[1]
+ except ImportError: # get the live version
+ version = get_git_version()
+ return version
def set_release_version(version, root=None):
- '''Saves the release version along with the
- git log release information
+ '''Saves the release version along with the
+ git log release information
- @param version: string
- @param root: string, optional alternate root path to save to
- '''
- #global __version__
- filename = "verinfo.py"
- if not root:
- path = os.path.join(os.path.dirname(__file__), filename)
- else:
- path = os.path.join(root, filename)
- #__version__ = version
- ver = get_git_version(version)
- with open(path, 'w') as f:
- f.write("version = {0!r}".format(ver))
+ @param version: string
+ @param root: string, optional alternate root path to save to
+ '''
+ #global __version__
+ filename = "verinfo.py"
+ if not root:
+ path = os.path.join(os.path.dirname(__file__), filename)
+ else:
+ path = os.path.join(root, filename)
+ #__version__ = version
+ ver = get_git_version(version)
+ with open(path, 'w') as f:
+ f.write("version = {0!r}".format(ver))
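
get_version() prefers a release file (catalyst/verinfo.py, written by set_release_version) and only falls back to the live git description. A hypothetical release flow, assuming a writable checkout; in a live checkout verinfo.py normally does not exist, so the ImportError branch runs instead:

    from catalyst.version import get_version, set_release_version

    set_release_version('3.0.7')    # writes catalyst/verinfo.py
    print(get_version(reset=True))  # e.g. 'Catalyst 3.0.7\nreleased ...'
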
diff --git a/doc/make_subarch_table_guidexml.py b/doc/make_subarch_table_guidexml.py
index 7ee95bde..67ed3ccc 100755
--- a/doc/make_subarch_table_guidexml.py
+++ b/doc/make_subarch_table_guidexml.py
@@ -9,45 +9,45 @@ import toml
def write_guidexml(arch_to_subarch):
- with open('doc/subarches.generated.xml', 'w') as f:
- f.write(textwrap.dedent("""\
+ with open('doc/subarches.generated.xml', 'w') as f:
+ f.write(textwrap.dedent("""\
<table>
<tr>
<th>Architecture</th>
<th>Sub-architectures</th>
</tr>
"""))
- for arch, subarches in sorted(arch_to_subarch.items()):
- f.write(textwrap.dedent("""\
+ for arch, subarches in sorted(arch_to_subarch.items()):
+ f.write(textwrap.dedent("""\
<tr>
<ti><c>%s</c></ti>
<ti><c>%s</c></ti>
</tr>
""") % (arch, '\n'.join(textwrap.wrap(' '.join(sorted(subarches)), 60))))
- f.write("</table>\n")
+ f.write("</table>\n")
def write_asciidoc(arch_to_subarch):
- with open('doc/subarches.generated.txt', 'w') as f:
- for arch, subarches in sorted(arch_to_subarch.items()):
- f.write('*%s*::\n' % arch)
- f.write(' %s\n' % ', '.join(sorted(subarches)))
- f.write('\n')
+ with open('doc/subarches.generated.txt', 'w') as f:
+ for arch, subarches in sorted(arch_to_subarch.items()):
+ f.write('*%s*::\n' % arch)
+ f.write(' %s\n' % ', '.join(sorted(subarches)))
+ f.write('\n')
def main(_argv):
- arch_to_subarch = {}
- p = pathlib.Path('arch')
+ arch_to_subarch = {}
+ p = pathlib.Path('arch')
- for file in p.glob('*.toml'):
- data = toml.load(file)
+ for file in p.glob('*.toml'):
+ data = toml.load(file)
- for arch in [x for x in data if x != 'setarch']:
- arch_to_subarch.update({arch: list(data[arch].keys())})
+ for arch in [x for x in data if x != 'setarch']:
+ arch_to_subarch.update({arch: list(data[arch].keys())})
- write_guidexml(arch_to_subarch)
- write_asciidoc(arch_to_subarch)
+ write_guidexml(arch_to_subarch)
+ write_asciidoc(arch_to_subarch)
if __name__ == '__main__':
- main(sys.argv[1:])
+ main(sys.argv[1:])
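
main() treats every top-level table in an arch/*.toml file, apart from setarch, as an architecture whose keys name its subarches. A toy reduction of that extraction with inline TOML (the arch and subarch names are invented):

    import toml

    data = toml.loads("""
    [setarch]

    [amd64.amd64]
    [amd64.znver2]
    """)
    arch_to_subarch = {arch: list(data[arch].keys())
                       for arch in data if arch != 'setarch'}
    print(arch_to_subarch)  # {'amd64': ['amd64', 'znver2']}
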
diff --git a/doc/make_target_table.py b/doc/make_target_table.py
index d1f97c9b..5d291e33 100755
--- a/doc/make_target_table.py
+++ b/doc/make_target_table.py
@@ -15,31 +15,31 @@ import sys
def main(_argv):
- source_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+ source_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
- # Force consistent sorting order.
- locale.setlocale(locale.LC_COLLATE, 'C')
+ # Force consistent sorting order.
+ locale.setlocale(locale.LC_COLLATE, 'C')
- targets = list()
- for filename in glob.glob(os.path.join(source_root, 'catalyst/targets/*.py')):
- if '__init__' in filename:
- continue
+ targets = list()
+ for filename in glob.glob(os.path.join(source_root, 'catalyst/targets/*.py')):
+ if '__init__' in filename:
+ continue
- name = os.path.basename(filename)[0:-3]
- target_name = name.replace('_', '-')
- module_name = 'catalyst.targets.' + name
+ name = os.path.basename(filename)[0:-3]
+ target_name = name.replace('_', '-')
+ module_name = 'catalyst.targets.' + name
- __import__(module_name)
- module = sys.modules[module_name]
+ __import__(module_name)
+ module = sys.modules[module_name]
- targets.append((target_name, module))
+ targets.append((target_name, module))
- for target_name, module in sorted(targets, key=lambda x: x[0]):
- print('`%s`;;' % target_name)
- # Replace blank lines with `+` (asciidoc list item continuation)
- print(module.__doc__.strip().replace('\n\n', '\n+\n'))
- print('')
+ for target_name, module in sorted(targets, key=lambda x: x[0]):
+ print('`%s`;;' % target_name)
+ # Replace blank lines with `+` (asciidoc list item continuation)
+ print(module.__doc__.strip().replace('\n\n', '\n+\n'))
+ print('')
if __name__ == '__main__':
- main(sys.argv[1:])
+ main(sys.argv[1:])
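
Each target's table entry is just its module docstring, with paragraph breaks rewritten into asciidoc list-item continuations. In isolation (the docstring text here is invented):

    doc = "Builder class for snapshots.\n\nUsed by the snapshot target."
    print(doc.strip().replace('\n\n', '\n+\n'))
    # Builder class for snapshots.
    # +
    # Used by the snapshot target.
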
diff --git a/setup.py b/setup.py
index 32741a94..fef09622 100755
--- a/setup.py
+++ b/setup.py
@@ -17,102 +17,103 @@ _maintainer_name, _maintainer_email = _parseaddr(__maintainer__)
def _posix_path(path):
- """Convert a native path to a POSIX path
+ """Convert a native path to a POSIX path
- Distutils wants all paths to be written in the Unix convention
- (i.e. slash-separated) [1], so that's what we'll do here.
+ Distutils wants all paths to be written in the Unix convention
+ (i.e. slash-separated) [1], so that's what we'll do here.
- [1]: https://docs.python.org/2/distutils/setupscript.html
- """
- if _os.path.sep != '/':
- return path.replace(_os.path.sep, '/')
- return path
+ [1]: https://docs.python.org/2/distutils/setupscript.html
+ """
+ if _os.path.sep != '/':
+ return path.replace(_os.path.sep, '/')
+ return path
def _files(prefix, root):
- """Iterate through all the file paths under `root`
-
- Yielding `(target_dir, (file_source_paths, ...))` tuples.
- """
- for dirpath, _dirnames, filenames in _os.walk(root):
- reldir = _os.path.relpath(dirpath, root)
- install_directory = _posix_path(
- _os.path.join(prefix, reldir))
- file_source_paths = [
- _posix_path(_os.path.join(dirpath, filename))
- for filename in filenames]
- yield (install_directory, file_source_paths)
-
-
-_data_files = [('/etc/catalyst', ['etc/catalyst.conf','etc/catalystrc']),
- ('/usr/share/man/man1', ['files/catalyst.1']),
- ('/usr/share/man/man5', ['files/catalyst-config.5', 'files/catalyst-spec.5'])
- ]
+ """Iterate through all the file paths under `root`
+
+ Yielding `(target_dir, (file_source_paths, ...))` tuples.
+ """
+ for dirpath, _dirnames, filenames in _os.walk(root):
+ reldir = _os.path.relpath(dirpath, root)
+ install_directory = _posix_path(
+ _os.path.join(prefix, reldir))
+ file_source_paths = [
+ _posix_path(_os.path.join(dirpath, filename))
+ for filename in filenames]
+ yield (install_directory, file_source_paths)
+
+
+_data_files = [('/etc/catalyst', ['etc/catalyst.conf', 'etc/catalystrc']),
+ ('/usr/share/man/man1', ['files/catalyst.1']),
+ ('/usr/share/man/man5',
+ ['files/catalyst-config.5', 'files/catalyst-spec.5'])
+ ]
_data_files.extend(_files('share/catalyst/arch', 'arch'))
_data_files.extend(_files('share/catalyst/livecd', 'livecd'))
_data_files.extend(_files('share/catalyst/targets', 'targets'))
class set_version(_Command):
- '''Saves the specified release version information
- '''
- description = "hardcode script's version using VERSION from environment"
- user_options = [] # [(long_name, short_name, desc),]
-
- def initialize_options (self):
- pass
-
- def finalize_options (self):
- pass
-
- def run(self):
- # pylint: disable=global-statement
- global __version__
- try:
- version = _os.environ['VERSION']
- except KeyError:
- print("Try setting 'VERSION=x.y.z' on the command line... Aborting")
- return
- _set_release_version(version)
- __version__ = _get_version()
- print("Version set to:\n", __version__)
+ '''Saves the specified release version information
+ '''
+ description = "hardcode script's version using VERSION from environment"
+ user_options = [] # [(long_name, short_name, desc),]
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ # pylint: disable=global-statement
+ global __version__
+ try:
+ version = _os.environ['VERSION']
+ except KeyError:
+ print("Try setting 'VERSION=x.y.z' on the command line... Aborting")
+ return
+ _set_release_version(version)
+ __version__ = _get_version()
+ print("Version set to:\n", __version__)
_setup(
- name=_package_name,
- version=__version__,
- maintainer=_maintainer_name,
- maintainer_email=_maintainer_email,
- url='https://wiki.gentoo.org/wiki/Catalyst',
- download_url='http://distfiles.gentoo.org/distfiles/{0}-{1}.tar.bz2'.format(
- _package_name, __version__),
- license='GNU General Public License (GPL)',
- platforms=['all'],
- description=__doc__,
- long_description=_codecs.open(
- _os.path.join(_this_dir, 'README'), 'r', 'utf-8').read(),
- classifiers=[
- 'Development Status :: 5 - Production/Stable',
- 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
- 'Intended Audience :: System Administrators',
- 'Operating System :: POSIX',
- 'Topic :: System :: Archiving :: Packaging',
- 'Topic :: System :: Installation/Setup',
- 'Topic :: System :: Software Distribution',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: 3.8',
- ],
- scripts=['bin/{0}'.format(_package_name)],
- packages=[
- _package_name,
- '{0}.base'.format(_package_name),
- '{0}.targets'.format(_package_name),
- ],
- data_files=_data_files,
- provides=[_package_name],
- cmdclass={
- 'set_version': set_version
- },
- )
+ name=_package_name,
+ version=__version__,
+ maintainer=_maintainer_name,
+ maintainer_email=_maintainer_email,
+ url='https://wiki.gentoo.org/wiki/Catalyst',
+ download_url='http://distfiles.gentoo.org/distfiles/{0}-{1}.tar.bz2'.format(
+ _package_name, __version__),
+ license='GNU General Public License (GPL)',
+ platforms=['all'],
+ description=__doc__,
+ long_description=_codecs.open(
+ _os.path.join(_this_dir, 'README'), 'r', 'utf-8').read(),
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
+ 'Intended Audience :: System Administrators',
+ 'Operating System :: POSIX',
+ 'Topic :: System :: Archiving :: Packaging',
+ 'Topic :: System :: Installation/Setup',
+ 'Topic :: System :: Software Distribution',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
+ ],
+ scripts=['bin/{0}'.format(_package_name)],
+ packages=[
+ _package_name,
+ '{0}.base'.format(_package_name),
+ '{0}.targets'.format(_package_name),
+ ],
+ data_files=_data_files,
+ provides=[_package_name],
+ cmdclass={
+ 'set_version': set_version
+ },
+)
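
_files() yields one (install_dir, [source_paths]) pair per directory under root, which is exactly the shape distutils expects in data_files. Its core walk, restated without the Windows path fixup (the paths in the comment are hypothetical):

    import os

    def files(prefix, root):
        # e.g. files('share/catalyst/arch', 'arch') might yield
        # ('share/catalyst/arch/x86', ['arch/x86/x86.toml'])
        for dirpath, _dirnames, filenames in os.walk(root):
            reldir = os.path.relpath(dirpath, root)
            yield (os.path.join(prefix, reldir),
                   [os.path.join(dirpath, f) for f in filenames])
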
diff --git a/targets/stage1/build.py b/targets/stage1/build.py
index b01eb9cd..fec1162c 100755
--- a/targets/stage1/build.py
+++ b/targets/stage1/build.py
@@ -10,8 +10,11 @@ from portage.util import grabfile_package, stack_lists
# wrap it here to take care of the different
# ways portage handles stacked profiles
# last case is for portage-2.1_pre*
+
+
def scan_profile(path):
- return stack_lists([grabfile_package(os.path.join(x, path)) for x in portage.settings.profiles], incremental=1)
+ return stack_lists([grabfile_package(os.path.join(x, path)) for x in portage.settings.profiles], incremental=1)
+
# loaded the stacked packages / packages.build files
pkgs = scan_profile("packages")
@@ -24,13 +27,13 @@ buildpkgs = scan_profile("packages.build")
# system profile (it may have <,>,=,etc... operators
# and version numbers)
for pkg in pkgs:
- try:
- bidx = buildpkgs.index(dep_getkey(pkg))
- buildpkgs[bidx] = pkg
- if buildpkgs[bidx][0:1] == "*":
- buildpkgs[bidx] = buildpkgs[bidx][1:]
- except Exception:
- pass
+ try:
+ bidx = buildpkgs.index(dep_getkey(pkg))
+ buildpkgs[bidx] = pkg
+ if buildpkgs[bidx][0:1] == "*":
+ buildpkgs[bidx] = buildpkgs[bidx][1:]
+ except Exception:
+ pass
for b in buildpkgs:
- sys.stdout.write(b + " ")
+ sys.stdout.write(b + " ")
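
The loop above promotes bare atoms in packages.build to whatever versioned/operator form the stacked packages files carry, then strips a leading '*' (the system-set marker). A worked example with invented lists, assuming portage is installed:

    from portage.dep import dep_getkey

    pkgs = ['>=sys-apps/baselayout-2.7', 'sys-devel/gcc']
    buildpkgs = ['sys-apps/baselayout', 'app-arch/tar']

    for pkg in pkgs:
        try:
            bidx = buildpkgs.index(dep_getkey(pkg))
            buildpkgs[bidx] = pkg          # bare atom -> versioned form
            if buildpkgs[bidx][0:1] == '*':
                buildpkgs[bidx] = buildpkgs[bidx][1:]
        except Exception:
            pass                           # pkg not listed in packages.build

    print(buildpkgs)  # ['>=sys-apps/baselayout-2.7', 'app-arch/tar']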