* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2011-04-24 22:13 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2011-04-24 22:13 UTC (permalink / raw
To: gentoo-commits
commit: 37d14f0a9ca1a7e0cccb485620322edd0e5e9967
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sun Apr 24 22:12:29 2011 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sun Apr 24 22:12:29 2011 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=37d14f0a
fix the delete old packages part
---
gobs/bin/gobs_updatedb | 4 +-
gobs/pym/package.py | 34 +-
gobs/pym/pgsqlbackend.py | 760 ++++++++++++++++++++++++----------------------
3 files changed, 411 insertions(+), 387 deletions(-)
diff --git a/gobs/bin/gobs_updatedb b/gobs/bin/gobs_updatedb
index a6afa17..582efa6 100755
--- a/gobs/bin/gobs_updatedb
+++ b/gobs/bin/gobs_updatedb
@@ -67,12 +67,12 @@ def update_cpv_db(mysettings, database, gobs_settings_dict):
# Check if we don't have the cp in the package table
package_id = database.have_package_db(categories, package)
if package_id is None:
- # Add new package with ebuilds
+ # Add new package with ebuilds
package_id = init_package.add_new_package_db(categories, package)
package_id_list_tree.append(package_id)
# Ceck if we have the cp in the package table
elif package_id is not None:
- # Update the packages with ebuilds
+ # Update the packages with ebuilds
init_package.update_package_db(categories, package, package_id)
package_id_list_tree.append(package_id)
# Update the metadata for categories
diff --git a/gobs/pym/package.py b/gobs/pym/package.py
index 1180bd2..ed8d70b 100644
--- a/gobs/pym/package.py
+++ b/gobs/pym/package.py
@@ -80,7 +80,7 @@ class gobs_package(object):
ebuild_version_text = self.init_text.get_ebuild_text(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")
init_repoman = gobs_repoman(self.mysettings, self.myportdb, self.database)
repoman_error = init_repoman.check_repoman(categories, package, ebuild_version_tree, config_id)
- ebuild_version_metadata_tree = get_ebuild_metadata(ebuild_line)
+ ebuild_version_metadata_tree = self.get_ebuild_metadata(ebuild_line)
# if there some error to get the metadata we add rubish to the
# ebuild_version_metadata_tree and set ebuild_version_checksum_tree to 0
# so it can be updated next time we update the db
@@ -105,14 +105,14 @@ class gobs_package(object):
for k, v in packageDict.iteritems():
attDict = {}
metadata_restrictions = []
- for i in v['ebuild_version_metadata_tree'][4].split():
- metadata_restrictions.append(i)
+ for i in v['ebuild_version_metadata_tree'][4].split():
+ metadata_restrictions.append(i)
metadata_keyword = []
- for i in v['ebuild_version_metadata_tree'][8].split():
- metadata_keyword.append(i)
+ for i in v['ebuild_version_metadata_tree'][8].split():
+ metadata_keyword.append(i)
metadata_iuse = []
- for i in v['ebuild_version_metadata_tree'][10].split():
- metadata_iuse.append(i)
+ for i in v['ebuild_version_metadata_tree'][10].split():
+ metadata_iuse.append(i)
attDict['restrictions'] = metadata_restrictions
attDict['keyword'] = metadata_keyword
attDict['iuse'] = metadata_iuse
@@ -174,7 +174,7 @@ class gobs_package(object):
pkgdir = self.mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR + cp
categories_dir = self.mysettings['PORTDIR'] + "/" + categories + "/"
# Get the ebuild list for cp
- ebuild_list_tree = self.portdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
+ ebuild_list_tree = self.myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
config_cpv_listDict = self.config_match_ebuild(categories, package)
config_id = self.database.get_default_config()[0]
packageDict ={}
@@ -186,13 +186,9 @@ class gobs_package(object):
ebuild_id_list = return_id[0]
package_id_list = return_id[1]
package_id = package_id_list[0]
- # Add the manifest file to db
- manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
- get_manifest_text = self.init_text.get_file_text(pkgdir + "/Manifest")
- database.add_new_manifest_sql(package_id, get_manifest_text, manifest_checksum_tree)
# Add metadataDict to db
metadataDict = self.get_metadataDict(packageDict, ebuild_id_list)
- database.add_new_metadata(metadataDict)
+ self.database.add_new_metadata(metadataDict)
# Add any qa and repoman erro for the ebuild to buildlog
qa_error = []
init_manifest = gobs_manifest(self.mysettings)
@@ -206,7 +202,11 @@ class gobs_package(object):
# Add some checksum on some files
package_metadataDict = self.get_package_metadataDict(pkgdir, package)
self.database.add_new_package_metadata(package_id, package_metadataDict)
- return package_id
+ # Add the manifest file to db
+ manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
+ get_manifest_text = self.init_text.get_file_text(pkgdir + "/Manifest")
+ self.database.add_new_manifest_sql(package_id, get_manifest_text, manifest_checksum_tree)
+ return package_id
def update_package_db(self, categories, package, package_id):
# Update the categories and package with new info
@@ -214,7 +214,7 @@ class gobs_package(object):
# Get the checksum from the file in portage tree
manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
# Get the checksum from the db in package table
- manifest_checksum_db = self.database.get_manifest_db(package_id)[0]
+ manifest_checksum_db = self.database.get_manifest_db(package_id)
# if we have the same checksum return else update the package
ebuild_list_tree = self.myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
if manifest_checksum_tree != manifest_checksum_db:
@@ -264,5 +264,5 @@ class gobs_package(object):
# Add the ebuild to the buildqueru table if needed
self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
# Mark or remove any old ebuilds
- init_old_cpv = gobs_old_cpv(self.database, self.myportdb)
- init_old_cpv.mark_old_ebuild_db(categories, package, package_id, sorted(ebuild_list_tree))
+ init_old_cpv = gobs_old_cpv(self.database, self.myportdb, self.mysettings)
+ init_old_cpv.mark_old_ebuild_db(categories, package, package_id)
diff --git a/gobs/pym/pgsqlbackend.py b/gobs/pym/pgsqlbackend.py
index 27fdf57..d89feec 100644
--- a/gobs/pym/pgsqlbackend.py
+++ b/gobs/pym/pgsqlbackend.py
@@ -1,377 +1,401 @@
class SQLPackageDatabase(object):
- """we have to store our stuff somewhere
-
- subclass and redefine init to provide
- at least self.cursor"""
-
- # These are set by subclasses
- db = None
- cursor = None
- syntax_placeholder = None
- syntax_autoincrement = None
-
- sql = {}
-
- def get_default_config(self):
- cursor = self.conn.cursor()
- sqlQ = 'SELECT id FROM configs WHERE default_config = True'
- cursor.execute(sqlQ)
- return cursor.fetchone()
+ """we have to store our stuff somewhere
+ subclass and redefine init to provide
+ at least self.cursor"""
+
+ # These are set by subclasses
+ db = None
+ cursor = None
+ syntax_placeholder = None
+ syntax_autoincrement = None
+ sql = {}
+
+ def get_default_config(self):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT id FROM configs WHERE default_config = True'
+ cursor.execute(sqlQ)
+ return cursor.fetchone()
- def get_config_list(self):
- cursor = self.conn.cursor()
- sqlQ = 'SELECT id FROM configs WHERE default_config = False AND active = True'
- cursor.execute(sqlQ)
- return cursor.fetchall()
-
- def get_config_list_all(self):
- cursor = self.conn.cursor()
- sqlQ = 'SELECT id FROM configs'
- cursor.execute(sqlQ)
- return cursor.fetchall()
-
- def update__make_conf(self, configsDict):
- cursor = self.conn.cursor()
- sqlQ = 'UPDATE configs SET make_conf_checksum = %s, make_conf_text = %s, active = %s, config_error = %s WHERE id = %s'
- for k, v in configsDict.iteritems():
- params = [v['make_conf_checksum_tree'], v['make_conf_text'], v['active'], v['config_error'], k]
- cursor.execute(sqlQ, params)
- self.conn.commit()
-
- def have_package_db(self, categories, package):
- cursor = self.conn.cursor()
- sqlQ ='SELECT package_id FROM packages WHERE category = %s AND package_name = %s'
- params = categories, package
- cursor.execute(sqlQ, params)
- return cursor.fetchone()
-
- def get_categories_db(self):
- cursor = self.conn.cursor()
- sqlQ =' SELECT category FROM categories'
- cursor.execute(sqlQ)
- return cursor.fetchall()
-
- def get_categories_checksum_db(self, categories):
- cursor = self.conn.cursor()
- sqlQ =' SELECT metadata_xml_checksum FROM categories_meta WHERE category = %s'
- cursor.execute(sqlQ, (categories,))
- return cursor.fetchone()
-
- def add_new_categories_meta_sql(self, categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree):
- cursor = self.conn.cursor()
- sqlQ = 'INSERT INTO categories_meta (category, metadata_xml_checksum, metadata_xml_text) VALUES ( %s, %s, %s )'
- params = categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree
- cursor.execute(sqlQ, params)
- self.conn.commit()
-
- def update_categories_meta_sql(self, categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree):
- cursor = self.conn.cursor()
- sqlQ ='UPDATE categories_meta SET metadata_xml_checksum = %s, metadata_xml_text = %s WHERE category = %s'
- params = (categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree, categories)
- cursor.execute(sqlQ, params)
- self.conn.commit()
-
- def add_new_manifest_sql(self, package_id, get_manifest_text, manifest_checksum_tree):
- cursor = self.conn.cursor()
- sqlQ = 'INSERT INTO manifest (package_id, manifest, checksum) VALUES ( %s, %s, %s )'
- params = package_id, get_manifest_text, manifest_checksum_tree
- cursor.execute(sqlQ, params)
- self.conn.commit()
-
- def add_new_package_metadata(self, package_id, package_metadataDict):
- cursor = self.conn.cursor()
- sqlQ = 'SELECT changelog_checksum FROM packages_meta WHERE package_id = %s'
- cursor.execute(sqlQ, (package_id,))
- if cursor.fetchone() is None:
- sqlQ = 'INSERT INTO packages_meta (package_id, changelog_text, changelog_checksum, metadata_text, metadata_checksum) VALUES ( %s, %s, %s, %s, %s )'
- for k, v in package_metadataDict.iteritems():
- params = package_id, v['changelog_text'], v['changelog_checksum'], v[' metadata_xml_text'], v['metadata_xml_checksum']
- cursor.execute(sqlQ, params)
- self.conn.commit()
-
- def update_new_package_metadata(self, package_id, package_metadataDict):
- cursor = self.conn.cursor()
- sqlQ = 'SELECT changelog_checksum, metadata_checksum FROM packages_meta WHERE package_id = %s'
- cursor.execute(sqlQ, package_id)
- entries = cursor.fetchone()
- changelog_checksum_db = entries[0]
- metadata_checksum_db = entries[1]
- for k, v in package_metadataDict.iteritems():
- if changelog_checksum_db != v['changelog_checksum']:
- sqlQ = 'UPDATE packages_meta SET changelog_text = %s, changelog_checksum = %s WHERE package_id = %s'
- params = v['changelog_text'], v['changelog_checksum'], package_id
- cursor.execute(sqlQ, params)
- if metadata_checksum_db != v['metadata_xml_checksum']:
- sqlQ = 'UPDATE packages_meta SET metadata_text = %s, metadata_checksum = %s WHERE package_id = %s'
- params = v[' metadata_xml_text'], v['metadata_xml_checksum'], package_id
- cursor.execute(sqlQ, params)
- self.conn.commit()
-
- def get_manifest_db(self, package_id):
- cursor = self.conn.cursor()
- sqlQ = 'SELECT checksum FROM manifest WHERE package_id = %s'
- cursor.execute(sqlQ, (package_id,))
- return cursor.fetchone()
-
- def update_manifest_sql(self, package_id, get_manifest_text, manifest_checksum_tree):
- cursor = self.conn.cursor()
- sqlQ = 'UPDATE manifest SET checksum = %s, manifest = %s WHERE package_id = %s'
- params = (manifest_checksum_tree, get_manifest_text, package_id)
- cursor.execute(sqlQ, params)
- self.conn.commit()
-
- def add_new_metadata(self, metadataDict):
- for k, v in metadataDict.iteritems():
- cursor = self.conn.cursor()
- sqlQ = 'SELECT updaterestrictions( %s, %s )'
- params = k, v['restrictions']
- cursor.execute(sqlQ, params)
- sqlQ = 'SELECT updatekeywords( %s, %s )'
- params = k, v['keyword']
- cursor.execute(sqlQ, params)
- sqlQ = 'SELECT updateiuse( %s, %s )'
- params = k, v['iuse']
- cursor.execute(sqlQ, params)
- self.conn.commit()
-
- def add_new_package_sql(self, packageDict):
- #lets have a new cursor for each metod as per best practice
- cursor = self.conn.cursor()
- sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
- ebuild_id_list = []
- package_id_list = []
- for k, v in packageDict.iteritems():
- params = [v['categories'], v['package'], v['ebuild_version_tree'], v['ebuild_version_revision'], v['ebuild_version_checksum_tree'],
- v['ebuild_version_text'], v['ebuild_version_metadata_tree'][0], v['ebuild_version_metadata_tree'][1],
- v['ebuild_version_metadata_tree'][12], v['ebuild_version_metadata_tree'][2], v['ebuild_version_metadata_tree'][3],
- v['ebuild_version_metadata_tree'][5],v['ebuild_version_metadata_tree'][6], v['ebuild_version_metadata_tree'][7],
- v['ebuild_version_metadata_tree'][9], v['ebuild_version_metadata_tree'][11],
- v['ebuild_version_metadata_tree'][13],v['ebuild_version_metadata_tree'][14], v['ebuild_version_metadata_tree'][15],
- v['ebuild_version_metadata_tree'][16]]
- cursor.execute(sqlQ, params)
- mid = cursor.fetchone()
- mid=mid[0]
- ebuild_id_list.append(mid[1])
- package_id_list.append(mid[0])
- self.conn.commit()
- # add_new_metadata(metadataDict)
- return ebuild_id_list, package_id_list
-
- def add_new_ebuild_sql(packageDict, new_ebuild_list):
- #lets have a new cursor for each metod as per best practice
- cursor = self.conn.cursor()
- sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
- ebuild_id_list = []
- package_id_list = []
- for k, v in packageDict.iteritems():
- for x in new_ebuild_list:
- if x == v['ebuild_version_tree']:
- params = [v['categories'], v['package'], v['ebuild_version_tree'], v['ebuild_version_revision'], v['ebuild_version_checksum_tree'],
- v['ebuild_version_text'], v['ebuild_version_metadata_tree'][0], v['ebuild_version_metadata_tree'][1],
- v['ebuild_version_metadata_tree'][12], v['ebuild_version_metadata_tree'][2], v['ebuild_version_metadata_tree'][3],
- v['ebuild_version_metadata_tree'][5],v['ebuild_version_metadata_tree'][6], v['ebuild_version_metadata_tree'][7],
- v['ebuild_version_metadata_tree'][9], v['ebuild_version_metadata_tree'][11],
- v['ebuild_version_metadata_tree'][13],v['ebuild_version_metadata_tree'][14], v['ebuild_version_metadata_tree'][15],
- v['ebuild_version_metadata_tree'][16]]
- cursor.execute(sqlQ, params)
- mid = cursor.fetchone()
- mid=mid[0]
- ebuild_id_list.append(mid[1])
- package_id_list.append(mid[0])
- self.conn.commit()
- # add_new_metadata(metadataDict)
- return ebuild_id_list, package_id_list
-
- def update_active_ebuild(self, package_id, ebuild_version_tree):
- cursor = self.conn.cursor()
- sqlQ ="UPDATE ebuilds SET active = 'False', timestamp = now() WHERE package_id = %s AND ebuild_version = %s AND active = 'True'"
- cursor.execute(sqlQ, (package_id, ebuild_version_tree))
- self.conn.commit()
-
- def get_cpv_from_ebuild_id(self, ebuild_id):
- cursor = self.conn.cursor()
- sqlQ = 'SELECT package_id FROM ebuild WHERE id = %s'
- self.cursor.execute(sql, ebuild_id)
- entries = self.cursor.fetchone()
- return entries
-
- def get_cp_from_package_id(self, package_id):
- cursor = self.conn.cursor()
- sqlQ = "SELECT ARRAY_TO_STRING(ARRAY[category, package_name] , '/') AS cp FROM packages WHERE package_id = %s"
- cursor.execute(sqlQ, (package_id,))
- return cursor.fetchone()
+ def get_config_list(self):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT id FROM configs WHERE default_config = False AND active = True'
+ cursor.execute(sqlQ)
+ return cursor.fetchall()
+
+ def get_config_list_all(self):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT id FROM configs'
+ cursor.execute(sqlQ)
+ return cursor.fetchall()
+
+ def update__make_conf(self, configsDict):
+ cursor = self.conn.cursor()
+ sqlQ = 'UPDATE configs SET make_conf_checksum = %s, make_conf_text = %s, active = %s, config_error = %s WHERE id = %s'
+ for k, v in configsDict.iteritems():
+ params = [v['make_conf_checksum_tree'], v['make_conf_text'], v['active'], v['config_error'], k]
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def have_package_db(self, categories, package):
+ cursor = self.conn.cursor()
+ sqlQ ='SELECT package_id FROM packages WHERE category = %s AND package_name = %s'
+ params = categories, package
+ cursor.execute(sqlQ, params)
+ return cursor.fetchone()
+
+ def get_categories_db(self):
+ cursor = self.conn.cursor()
+ sqlQ =' SELECT category FROM categories'
+ cursor.execute(sqlQ)
+ return cursor.fetchall()
+
+ def get_categories_checksum_db(self, categories):
+ cursor = self.conn.cursor()
+ sqlQ =' SELECT metadata_xml_checksum FROM categories_meta WHERE category = %s'
+ cursor.execute(sqlQ, (categories,))
+ return cursor.fetchone()
+
+ def add_new_categories_meta_sql(self, categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree):
+ cursor = self.conn.cursor()
+ sqlQ = 'INSERT INTO categories_meta (category, metadata_xml_checksum, metadata_xml_text) VALUES ( %s, %s, %s )'
+ params = categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def update_categories_meta_sql(self, categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree):
+ cursor = self.conn.cursor()
+ sqlQ ='UPDATE categories_meta SET metadata_xml_checksum = %s, metadata_xml_text = %s WHERE category = %s'
+ params = (categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree, categories)
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def add_new_manifest_sql(self, package_id, get_manifest_text, manifest_checksum_tree):
+ cursor = self.conn.cursor()
+ sqlQ = 'INSERT INTO manifest (package_id, manifest, checksum) VALUES ( %s, %s, %s )'
+ params = package_id, get_manifest_text, manifest_checksum_tree
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def add_new_package_metadata(self, package_id, package_metadataDict):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT changelog_checksum FROM packages_meta WHERE package_id = %s'
+ cursor.execute(sqlQ, (package_id,))
+ if cursor.fetchone() is None:
+ sqlQ = 'INSERT INTO packages_meta (package_id, changelog_text, changelog_checksum, metadata_text, metadata_checksum) VALUES ( %s, %s, %s, %s, %s )'
+ for k, v in package_metadataDict.iteritems():
+ params = package_id, v['changelog_text'], v['changelog_checksum'], v[' metadata_xml_text'], v['metadata_xml_checksum']
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def update_new_package_metadata(self, package_id, package_metadataDict):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT changelog_checksum, metadata_checksum FROM packages_meta WHERE package_id = %s'
+ cursor.execute(sqlQ, package_id)
+ entries = cursor.fetchone()
+ if entries is None:
+ changelog_checksum_db = None
+ metadata_checksum_db = None
+ else:
+ changelog_checksum_db = entries[0]
+ metadata_checksum_db = entries[1]
+ for k, v in package_metadataDict.iteritems():
+ if changelog_checksum_db != v['changelog_checksum']:
+ sqlQ = 'UPDATE packages_meta SET changelog_text = %s, changelog_checksum = %s WHERE package_id = %s'
+ params = v['changelog_text'], v['changelog_checksum'], package_id
+ cursor.execute(sqlQ, params)
+ if metadata_checksum_db != v['metadata_xml_checksum']:
+ sqlQ = 'UPDATE packages_meta SET metadata_text = %s, metadata_checksum = %s WHERE package_id = %s'
+ params = v[' metadata_xml_text'], v['metadata_xml_checksum'], package_id
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def get_manifest_db(self, package_id):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT checksum FROM manifest WHERE package_id = %s'
+ cursor.execute(sqlQ, (package_id,))
+ manifest_checksum = cursor.fetchone()
+ if manifest_checksum is None:
+ return None
+ return manifest_checksum[0]
+
+ def update_manifest_sql(self, package_id, get_manifest_text, manifest_checksum_tree):
+ cursor = self.conn.cursor()
+ sqlQ = 'UPDATE manifest SET checksum = %s, manifest = %s WHERE package_id = %s'
+ params = (manifest_checksum_tree, get_manifest_text, package_id)
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def add_new_metadata(self, metadataDict):
+ for k, v in metadataDict.iteritems():
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT updaterestrictions( %s, %s )'
+ params = k, v['restrictions']
+ cursor.execute(sqlQ, params)
+ sqlQ = 'SELECT updatekeywords( %s, %s )'
+ params = k, v['keyword']
+ cursor.execute(sqlQ, params)
+ sqlQ = 'SELECT updateiuse( %s, %s )'
+ params = k, v['iuse']
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def add_new_package_sql(self, packageDict):
+ #lets have a new cursor for each metod as per best practice
+ cursor = self.conn.cursor()
+ sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
+ ebuild_id_list = []
+ package_id_list = []
+ for k, v in packageDict.iteritems():
+ params = [v['categories'], v['package'], v['ebuild_version_tree'], v['ebuild_version_revision'], v['ebuild_version_checksum_tree'],
+ v['ebuild_version_text'], v['ebuild_version_metadata_tree'][0], v['ebuild_version_metadata_tree'][1],
+ v['ebuild_version_metadata_tree'][12], v['ebuild_version_metadata_tree'][2], v['ebuild_version_metadata_tree'][3],
+ v['ebuild_version_metadata_tree'][5],v['ebuild_version_metadata_tree'][6], v['ebuild_version_metadata_tree'][7],
+ v['ebuild_version_metadata_tree'][9], v['ebuild_version_metadata_tree'][11],
+ v['ebuild_version_metadata_tree'][13],v['ebuild_version_metadata_tree'][14], v['ebuild_version_metadata_tree'][15],
+ v['ebuild_version_metadata_tree'][16]]
+ cursor.execute(sqlQ, params)
+ mid = cursor.fetchone()
+ mid=mid[0]
+ ebuild_id_list.append(mid[1])
+ package_id_list.append(mid[0])
+ self.conn.commit()
+ # add_new_metadata(metadataDict)
+ return ebuild_id_list, package_id_list
+
+ def add_new_ebuild_sql(packageDict, new_ebuild_list):
+ #lets have a new cursor for each metod as per best practice
+ cursor = self.conn.cursor()
+ sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
+ ebuild_id_list = []
+ package_id_list = []
+ for k, v in packageDict.iteritems():
+ for x in new_ebuild_list:
+ if x == v['ebuild_version_tree']:
+ params = [v['categories'], v['package'], v['ebuild_version_tree'], v['ebuild_version_revision'], v['ebuild_version_checksum_tree'],
+ v['ebuild_version_text'], v['ebuild_version_metadata_tree'][0], v['ebuild_version_metadata_tree'][1],
+ v['ebuild_version_metadata_tree'][12], v['ebuild_version_metadata_tree'][2], v['ebuild_version_metadata_tree'][3],
+ v['ebuild_version_metadata_tree'][5],v['ebuild_version_metadata_tree'][6], v['ebuild_version_metadata_tree'][7],
+ v['ebuild_version_metadata_tree'][9], v['ebuild_version_metadata_tree'][11],
+ v['ebuild_version_metadata_tree'][13],v['ebuild_version_metadata_tree'][14], v['ebuild_version_metadata_tree'][15],
+ v['ebuild_version_metadata_tree'][16]]
+ cursor.execute(sqlQ, params)
+ mid = cursor.fetchone()
+ mid=mid[0]
+ ebuild_id_list.append(mid[1])
+ package_id_list.append(mid[0])
+ self.conn.commit()
+ # add_new_metadata(metadataDict)
+ return ebuild_id_list, package_id_list
+
+ def update_active_ebuild(self, package_id, ebuild_version_tree):
+ cursor = self.conn.cursor()
+ sqlQ ="UPDATE ebuilds SET active = 'False', timestamp = now() WHERE package_id = %s AND ebuild_version = %s AND active = 'True'"
+ cursor.execute(sqlQ, (package_id, ebuild_version_tree))
+ self.conn.commit()
+
+ def get_cpv_from_ebuild_id(self, ebuild_id):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT package_id FROM ebuild WHERE id = %s'
+ self.cursor.execute(sql, ebuild_id)
+ entries = self.cursor.fetchone()
+ return entries
+
+ def get_cp_from_package_id(self, package_id):
+ cursor = self.conn.cursor()
+ sqlQ = "SELECT ARRAY_TO_STRING(ARRAY[category, package_name] , '/') AS cp FROM packages WHERE package_id = %s"
+ cursor.execute(sqlQ, (package_id,))
+ return cursor.fetchone()
- def add_new_package_buildqueue(self, ebuild_id, config_id, iuse_flags_list, use_enable, message):
- cursor = self.conn.cursor()
- sqlQ="SELECT insert_buildqueue( %s, %s, %s, %s, %s )"
- if not iuse_flags_list:
- iuse_flags_list=None
- use_enable=None
- params = ebuild_id, unicode(config_id), iuse_flags_list, use_enable, message
- cursor.execute(sqlQ, params)
- self.conn.commit()
-
- def get_ebuild_checksum(self, package_id, ebuild_version_tree):
- cursor = self.conn.cursor()
- sqlQ = 'SELECT ebuild_checksum FROM ebuilds WHERE package_id = %s AND ebuild_version = %s AND active = TRUE'
- cursor.execute(sqlQ, (package_id, ebuild_version_tree))
- entries = cursor.fetchone()
- if entries is None:
- return None
- return entries[0]
-
- def add_old_package(self, old_package_list):
- cursor = self.conn.cursor()
- mark_old_list = []
- sqlQ = "UPDATE ebuilds SET active = 'FALSE', timestamp = NOW() WHERE package_id = %s AND active = 'TRUE' RETURNING package_id"
- for old_package in old_package_list:
- cursor.execute(sqlQ, (old_package[0],))
- entries = cursor.fetchone()
- if entries is not None:
- mark_old_list.append(entries[0])
- self.conn.commit()
- return mark_old_list
+ def add_new_package_buildqueue(self, ebuild_id, config_id, iuse_flags_list, use_enable, message):
+ cursor = self.conn.cursor()
+ sqlQ="SELECT insert_buildqueue( %s, %s, %s, %s, %s )"
+ if not iuse_flags_list:
+ iuse_flags_list=None
+ use_enable=None
+ params = ebuild_id, unicode(config_id), iuse_flags_list, use_enable, message
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def get_ebuild_checksum(self, package_id, ebuild_version_tree):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT ebuild_checksum FROM ebuilds WHERE package_id = %s AND ebuild_version = %s AND active = TRUE'
+ cursor.execute(sqlQ, (package_id, ebuild_version_tree))
+ entries = cursor.fetchone()
+ if entries is None:
+ return None
+ return entries[0]
+
+ def add_old_package(self, old_package_list):
+ cursor = self.conn.cursor()
+ mark_old_list = []
+ sqlQ = "UPDATE ebuilds SET active = 'FALSE', timestamp = NOW() WHERE package_id = %s AND active = 'TRUE' RETURNING package_id"
+ for old_package in old_package_list:
+ cursor.execute(sqlQ, (old_package[0],))
+ entries = cursor.fetchone()
+ if entries is not None:
+ mark_old_list.append(entries[0])
+ self.conn.commit()
+ return mark_old_list
- def get_old_categories(self, categories_line):
- cursor = self.conn.cursor()
- sqlQ = "SELECT package_name FROM packages WHERE category = %s"
- cursor.execute(sqlQ (categories_line))
- return cursor.fetchone()
-
- def del_old_categories(self, real_old_categoriess):
- cursor = self.conn.cursor()
- sqlQ = 'DELETE FROM categories categories_meta WHERE category = %s'
- cursor.execute(sqlQ (real_old_categories))
- self.conn.commit()
-
- def add_old_ebuild(self, package_id, old_ebuild_list):
- cursor = self.conn.cursor()
- sqlQ1 = "UPDATE ebuilds SET active = 'FALSE' WHERE package_id = %s AND ebuild_version = %s"
- sqlQ2 = "SELECT id FROM ebuilds WHERE package_id = %s AND ebuild_version = %s AND active = 'TRUE'"
- sqlQ3 = "SELECT queue_id FROM buildqueue WHERE ebuild_id = %s"
- sqlQ4 = 'DELETE FROM ebuildqueuedwithuses buildqueue WHERE queue_id = %s'
- for old_ebuild in old_ebuild_list:
- cursor.execute(sqlQ2, (package_id, old_ebuild[0]))
- ebuild_id_list = cursor.fetchall()
- if ebuild_id_list is not None:
- for ebuild_id in ebuild_id_list:
- cursor.execute(sqlQ3, (ebuild_id))
- queue_id_list = cursor.fetchall()
- if queue_id_list is not None:
- for queue_id in queue_id_list:
- cursor.execute(sqlQ4, (queue_id))
- cursor.execute(sqlQ1, (package_id, old_ebuild[0]))
- self.conn.commit()
-
- def cp_all_old_db(self, old_package_id_list):
- cursor = self.conn.cursor()
- old_package_list = []
- for old_package in old_package_id_list:
- sqlQ = "SELECT package_id FROM ebuilds WHERE package_id = %s AND active = 'FALSE' AND date_part('days', NOW() - timestamp) < 60"
- cursor.execute(sqlQ, old_package)
- entries = cursor.fetchone()
- if entries is None:
- old_package_list.append(old_package)
- return old_package_list
- def cp_all_db(self):
- cursor = self.conn.cursor()
- sqlQ = "SELECT package_id FROM packages"
- cursor.execute(sqlQ)
- return cursor.fetchall()
-
- def del_old_ebuild(self, ebuild_old_list_db):
- cursor = self.conn.cursor()
- sqlQ2 = 'SELECT build_id FROM buildlog WHERE ebuild_id = %s'
- sqlQ3 = 'DELETE FROM qa_problems repoman_problems WHERE build_id = %s'
- sqlQ4 = 'DELETE FROM ebuildhaveskeywords ebuildhavesiuses WHERE ebuild_id = %s'
- sqlQ5 = 'DELETE FROM ebuildbuildwithuses WHERE build_id = %s'
- sqlQ6 = 'DELETE FROM ebuildhavesrestrictions buildlog WHERE ebuild_id = %s'
- for del_ebuild_old in ebuild_old_list_db:
- cursor.execute(sqlQ2, (del_ebuild_old[0],))
- build_id_list = cursor.fetchall()
- for build_id in build_id_list:
- cursor.execute(sqlQ3, (build_id,))
- cursor.execute(sqlQ5, (build_id,))
- cursor.execute(sqlQ4, (del_ebuild_old[0],))
- cursor.execute(sqlQ6, (del_ebuild_old[0],))
- self.conn.commit()
-
- def del_old_package(self, package_id_list):
- cursor = self.conn.cursor()
- sqlQ1 = 'SELECT id FROM ebuilds WHERE package_id = %s'
- sqlQ2 = 'SELECT build_id FROM buildlog WHERE ebuild_id = %s'
- sqlQ3 = 'DELETE FROM qa_problems, repoman_problems, ebuildbuildwithuses WHERE build_id = %s'
- sqlQ4 = 'DELETE FROM ebuildhaveskeywords, ebuildhavesiuses, ebuildhavesrestrictions, buildlog WHERE ebuild_id = %s'
- sqlQ5 = 'DELETE FROM ebuilds, manifest, package_meta, packages WHERE package_id = %s'
- for package_id in package_id_list:
- cursor.execute(sqlQ1, package_id)
- ebuild_id_list = cursor.fetchall()
- for ebuild_id in ebuild_id_list:
- cursor.execute(sqlQ2, ebuild_id)
- build_id_list = cursor.fetchall()
- for build_id in build_id_list:
- cursor.execute(sqlQ3, build_id)
- cursor.execute(sqlQ4, ebuild_id)
- cursor.execute(sqlQ5, package_id)
- self.conn.commit()
-
- def cp_list_db(self, package_id):
- cursor = self.conn.cursor()
- sqlQ = "SELECT ebuild_version FROM ebuilds WHERE active = 'TRUE' AND package_id = %s"
- cursor.execute(sqlQ, (package_id))
- return cursor.fetchall()
-
- def cp_list_old_db(self, package_id):
- cursor = self.conn.cursor()
- sqlQ ="SELECT id, ebuild_version FROM ebuilds WHERE active = 'FALSE' AND package_id = %s AND date_part('days', NOW() - timestamp) > 60"
- cursor.execute(sqlQ, package_id)
- return cursor.fetchall()
-
- def add_qa_repoman(self, ebuild_id_list, qa_error, packageDict, config_id):
- ebuild_i = 0
- cursor = self.conn.cursor()
- for k, v in packageDict.iteritems():
- ebuild_id = ebuild_id_list[ebuild_i]
- sqlQ = 'INSERT INTO buildlog (ebuild_id, config, error_summary, timestamp ) VALUES ( %s, %s, %s, now() ) RETURNING build_id'
- if v['ebuild_error'] != [] or qa_error != []:
- if v['ebuild_error'] != [] or qa_error == []:
- summary = "Repoman"
- elif v['ebuild_error'] == [] or qa_error != []:
- summary = "QA"
- else:
- summary = "QA:Repoman"
- params = (ebuild_id, config_id, summary)
- cursor.execute(sqlQ, params)
- build_id = cursor.fetchone()
- if v['ebuild_error'] != []:
- sqlQ = 'INSERT INTO repoman_problems (problem, build_id ) VALUES ( %s, %s )'
- for x in v['ebuild_error']:
- params = (x, build_id)
- cursor.execute(sqlQ, params)
- if qa_error != []:
- sqlQ = 'INSERT INTO qa_problems (problem, build_id ) VALUES ( %s, %s )'
- for x in qa_error:
- params = (x, build_id)
- cursor.execute(sqlQ, params)
- ebuild_i = ebuild_i +1
- self.conn.commit()
-
- def get_arch_db(self):
- cursor = self.conn.cursor()
- sqlQ = 'SELECT keyword FROM keywords WHERE keyword = %s'
- cursor.execute(sqlQ, ('ppc',))
- return cursor.fetchone()
+ def get_old_categories(self, categories_line):
+ cursor = self.conn.cursor()
+ sqlQ = "SELECT package_name FROM packages WHERE category = %s"
+ cursor.execute(sqlQ (categories_line))
+ return cursor.fetchone()
+
+ def del_old_categories(self, real_old_categoriess):
+ cursor = self.conn.cursor()
+ sqlQ1 = 'DELETE FROM categories_meta WHERE category = %s'
+ sqlQ2 = 'DELETE FROM categories categories_meta WHERE category = %s'
+ cursor.execute(sqlQ1 (real_old_categories))
+ cursor.execute(sqlQ2 (real_old_categories))
+ self.conn.commit()
+
+ def add_old_ebuild(self, package_id, old_ebuild_list):
+ cursor = self.conn.cursor()
+ sqlQ1 = "UPDATE ebuilds SET active = 'FALSE' WHERE package_id = %s AND ebuild_version = %s"
+ sqlQ2 = "SELECT id FROM ebuilds WHERE package_id = %s AND ebuild_version = %s AND active = 'TRUE'"
+ sqlQ3 = "SELECT queue_id FROM buildqueue WHERE ebuild_id = %s"
+ sqlQ4 = 'DELETE FROM ebuildqueuedwithuses WHERE queue_id = %s'
+ sqlQ5 = 'DELETE FROM buildqueue WHERE queue_id = %s'
+ for old_ebuild in old_ebuild_list:
+ cursor.execute(sqlQ2, (package_id, old_ebuild[0]))
+ ebuild_id_list = cursor.fetchall()
+ if ebuild_id_list is not None:
+ for ebuild_id in ebuild_id_list:
+ cursor.execute(sqlQ3, (ebuild_id))
+ queue_id_list = cursor.fetchall()
+ if queue_id_list is not None:
+ for queue_id in queue_id_list:
+ cursor.execute(sqlQ4, (queue_id))
+ cursor.execute(sqlQ5, (queue_id))
+ cursor.execute(sqlQ1, (package_id, old_ebuild[0]))
+ self.conn.commit()
+
+ def cp_all_old_db(self, old_package_id_list):
+ cursor = self.conn.cursor()
+ old_package_list = []
+ for old_package in old_package_id_list:
+ sqlQ = "SELECT package_id FROM ebuilds WHERE package_id = %s AND active = 'FALSE' AND date_part('days', NOW() - timestamp) < 60"
+ cursor.execute(sqlQ, old_package)
+ entries = cursor.fetchone()
+ if entries is None:
+ old_package_list.append(old_package)
+ return old_package_list
+
+ def cp_all_db(self):
+ cursor = self.conn.cursor()
+ sqlQ = "SELECT package_id FROM packages"
+ cursor.execute(sqlQ)
+ return cursor.fetchall()
+
+ def del_old_ebuild(self, ebuild_old_list_db):
+ cursor = self.conn.cursor()
+ sqlQ1 = 'SELECT build_id FROM buildlog WHERE ebuild_id = %s'
+ sqlQ2 = 'DELETE FROM qa_problems WHERE build_id = %s'
+ sqlQ3 = 'DELETE FROM repoman_problems WHERE build_id = %s'
+ sqlQ4 = 'DELETE FROM ebuildbuildwithuses WHERE build_id = %s'
+ sqlQ5 = 'DELETE FROM ebuildhaveskeywords WHERE ebuild_id = %s'
+ sqlQ6 = 'DELETE FROM ebuildhavesiuses WHERE ebuild_id = %s'
+ sqlQ7 = 'DELETE FROM ebuildhavesrestrictions WHERE ebuild_id = %s'
+ sqlQ8 = 'DELETE FROM buildlog WHERE ebuild_id = %s'
+ sqlQ9 = 'SELECT queue_id FROM buildqueue WHERE ebuild_id = %s'
+ sqlQ10 = 'DELETE FROM ebuildqueuedwithuses WHERE queue_id = %s'
+ sqlQ11 = 'DELETE FROM buildqueue WHERE ebuild_id = %s'
+ sqlQ12 = 'DELETE FROM ebuilds WHERE id = %s'
+ for ebuild_id in ebuild_old_list_db:
+ cursor.execute(sqlQ1, (ebuild_id[0],))
+ build_id_list = cursor.fetchall()
+ for build_id in build_id_list:
+ cursor.execute(sqlQ2, (build_id,))
+ cursor.execute(sqlQ3, (build_id,))
+ cursor.execute(sqlQ4, (build_id,))
+ cursor.execute(sqlQ9, (ebuild_id[0],))
+ queue_id_list = cursor.fetchall()
+ for queue_id in queue_id_list:
+ cursor.execute(sqlQ10, (queue_id,))
+ cursor.execute(sqlQ5, (ebuild_id[0],))
+ cursor.execute(sqlQ6, (ebuild_id[0],))
+ cursor.execute(sqlQ7, (ebuild_id[0],))
+ cursor.execute(sqlQ8, (ebuild_id[0],))
+ cursor.execute(sqlQ11, (ebuild_id[0],))
+ cursor.execute(sqlQ12, (ebuild_id[0],))
+ self.conn.commit()
+
+ def del_old_package(self, package_id_list):
+ cursor = self.conn.cursor()
+ sqlQ1 = 'SELECT id FROM ebuilds WHERE package_id = %s'
+ sqlQ2 = 'DELETE FROM ebuilds WHERE package_id = %s'
+ sqlQ3 = 'DELETE FROM manifest WHERE package_id = %s'
+ sqlQ4 = 'DELETE FROM packages_meta WHERE package_id = %s'
+ sqlQ5 = 'DELETE FROM packages WHERE package_id = %s'
+ for package_id in package_id_list:
+ cursor.execute(sqlQ1, package_id)
+ ebuild_id_list = cursor.fetchall()
+ self.del_old_ebuild(ebuild_id_list)
+ cursor.execute(sqlQ2, (package_id,))
+ cursor.execute(sqlQ3, (package_id,))
+ cursor.execute(sqlQ4, (package_id,))
+ cursor.execute(sqlQ5, (package_id,))
+ self.conn.commit()
+
+ def cp_list_db(self, package_id):
+ cursor = self.conn.cursor()
+ sqlQ = "SELECT ebuild_version FROM ebuilds WHERE active = 'TRUE' AND package_id = %s"
+ cursor.execute(sqlQ, (package_id))
+ return cursor.fetchall()
+
+ def cp_list_old_db(self, package_id):
+ cursor = self.conn.cursor()
+ sqlQ ="SELECT id, ebuild_version FROM ebuilds WHERE active = 'FALSE' AND package_id = %s AND date_part('days', NOW() - timestamp) > 60"
+ cursor.execute(sqlQ, package_id)
+ return cursor.fetchall()
+
+ def add_qa_repoman(self, ebuild_id_list, qa_error, packageDict, config_id):
+ ebuild_i = 0
+ cursor = self.conn.cursor()
+ for k, v in packageDict.iteritems():
+ ebuild_id = ebuild_id_list[ebuild_i]
+ sqlQ = "INSERT INTO buildlog (ebuild_id, config, error_summary, timestamp, hash ) VALUES ( %s, %s, %s, now(), '0' ) RETURNING build_id"
+ if v['ebuild_error'] != [] or qa_error != []:
+ if v['ebuild_error'] != [] or qa_error == []:
+ summary = "Repoman"
+ elif v['ebuild_error'] == [] or qa_error != []:
+ summary = "QA"
+ else:
+ summary = "QA:Repoman"
+ params = (ebuild_id, config_id, summary)
+ cursor.execute(sqlQ, params)
+ build_id = cursor.fetchone()
+ if v['ebuild_error'] != []:
+ sqlQ = 'INSERT INTO repoman_problems (problem, build_id ) VALUES ( %s, %s )'
+ for x in v['ebuild_error']:
+ params = (x, build_id)
+ cursor.execute(sqlQ, params)
+ if qa_error != []:
+ sqlQ = 'INSERT INTO qa_problems (problem, build_id ) VALUES ( %s, %s )'
+ for x in qa_error:
+ params = (x, build_id)
+ cursor.execute(sqlQ, params)
+ ebuild_i = ebuild_i +1
+ self.conn.commit()
+
+ def get_arch_db(self):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT keyword FROM keywords WHERE keyword = %s'
+ cursor.execute(sqlQ, ('ppc',))
+ return cursor.fetchone()
- def add_new_arch_db(self, arch_list):
- cursor = self.conn.cursor()
- sqlQ = 'INSERT INTO keywords (keyword) VALUES ( %s )'
- for arch in arch_list:
- cursor.execute(sqlQ, (arch,))
- self.conn.commit()
-
- def closeconnection(self):
- self.conn.close()
+ def add_new_arch_db(self, arch_list):
+ cursor = self.conn.cursor()
+ sqlQ = 'INSERT INTO keywords (keyword) VALUES ( %s )'
+ for arch in arch_list:
+ cursor.execute(sqlQ, (arch,))
+ self.conn.commit()
+
+ def closeconnection(self):
+ self.conn.close()
class PgSQLPackageDB(SQLPackageDatabase):
"""override for MySQL backend"""
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-12-27 23:09 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-12-27 23:09 UTC (permalink / raw
To: gentoo-commits
commit: 6505a65933a1091252d93d206d9a570dae82cc5b
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Thu Dec 27 22:51:11 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Thu Dec 27 22:51:11 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=6505a659
Code change for the host/config split
---
gobs/bin/gobs_guest_jobs | 7 ++++---
gobs/bin/gobs_host_jobs | 7 ++++---
gobs/pym/build_log.py | 15 +++++++++------
gobs/pym/mysql_querys.py | 18 +++++++++---------
gobs/pym/package.py | 13 ++++++-------
gobs/pym/readconf.py | 2 ++
gobs/pym/sync.py | 15 ++++++++++-----
gobs/pym/updatedb.py | 12 +++++++-----
8 files changed, 51 insertions(+), 38 deletions(-)
diff --git a/gobs/bin/gobs_guest_jobs b/gobs/bin/gobs_guest_jobs
index 2333d62..b97e7ed 100755
--- a/gobs/bin/gobs_guest_jobs
+++ b/gobs/bin/gobs_guest_jobs
@@ -19,16 +19,17 @@ def main():
repeat = True
reader = get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
- config_profile = gobs_settings_dict['gobs_config']
+ config = gobs_settings_dict['gobs_config']
+ hostname = gobs_settings_dict['hostname']
CM=connectionManager()
conn = CM.newConnection()
- config_id = get_config_id(conn, config_profile)
+ config_id = get_config_id(conn, config, hostname)
add_gobs_logs(conn, "Job and build deamon started.", "info", config_id)
init_build_job = build_job_action(conn, config_id)
while repeat:
if not conn.is_connected() is True:
conn.reconnect(attempts=2, delay=1)
- jobs_main(conn, config_profile)
+ jobs_main(conn, config)
if check_configure_guest(conn, config_id) is not True:
time.sleep(6)
continue
diff --git a/gobs/bin/gobs_host_jobs b/gobs/bin/gobs_host_jobs
index 162ac43..905d8af 100755
--- a/gobs/bin/gobs_host_jobs
+++ b/gobs/bin/gobs_host_jobs
@@ -13,14 +13,15 @@ def main():
# Main
reader = get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
- config_profile = gobs_settings_dict['gobs_config']
+ config = gobs_settings_dict['gobs_config']
+ hostname = gobs_settings_dict['hostname']
CM=connectionManager()
conn = CM.newConnection()
- config_id = get_config_id(conn, config_profile)
+ config_id = get_config_id(conn, config, hostname)
add_gobs_logs(conn, "Job deamon started", "info", config_id)
repeat = True
while repeat:
- jobs_main(conn, config_profile)
+ jobs_main(conn, config_id)
repeat = False
time.sleep(60)
conn.close
diff --git a/gobs/pym/build_log.py b/gobs/pym/build_log.py
index 7b2fac4..3582406 100644
--- a/gobs/pym/build_log.py
+++ b/gobs/pym/build_log.py
@@ -216,8 +216,10 @@ def add_buildlog_process(settings, pkg):
conn.reconnect(attempts=2, delay=1)
reader=get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
- config_profile = gobs_settings_dict['gobs_config']
- config_id = get_config_id(conn, config_profile)
+ config = gobs_settings_dict['gobs_config']
+ hostname = gobs_settings_dict['hostname']
+ host_config = hostname + "/" + config
+ config_id = get_config_id(conn, config, hostname)
build_dict = get_build_dict_db(conn, config_id, settings, pkg)
if build_dict is None:
log_msg = "Package %s:%s is NOT logged." % (pkg.cpv, pkg.repo,)
@@ -236,7 +238,7 @@ def add_buildlog_process(settings, pkg):
log_hash.update(log_line)
build_log_dict['build_error'] = build_error
build_log_dict['log_hash'] = log_hash.hexdigest()
- build_log_dict['logfilename'] = settings.get("PORTAGE_LOG_FILE").split(config_profile)[1]
+ build_log_dict['logfilename'] = settings.get("PORTAGE_LOG_FILE").split(host_config)[1]
log_msg = "Logfile name: %s" % (settings.get("PORTAGE_LOG_FILE"),)
add_gobs_logs(conn, log_msg, "info", config_id)
log_id = add_new_buildlog(conn, build_dict, build_log_dict)
@@ -244,7 +246,7 @@ def add_buildlog_process(settings, pkg):
msg = ""
# emerge_info_logfilename = settings.get("PORTAGE_LOG_FILE")[:-3] + "emerge_log.log"
if log_id is None:
- os.chmod(settings.get("PORTAGE_LOG_FILE"), 0o664)
+ # os.chmod(settings.get("PORTAGE_LOG_FILE"), 0o664)
log_msg = "Package %s:%s is NOT logged." % (pkg.cpv, pkg.repo,)
add_gobs_logs(conn, log_msg, "info", config_id)
else:
@@ -317,8 +319,9 @@ def log_fail_queru(conn, build_dict, settings):
else:
build_dict['build_useflags'] = None
if settings.get("PORTAGE_LOG_FILE") is not None:
- config_profile = get_config(conn, config_id)
- build_log_dict['logfilename'] = settings.get("PORTAGE_LOG_FILE").split(config_profile)[1]
+ hostname, config = get_config(conn, config_id)
+ host_config = hostname +"/" + config
+ build_log_dict['logfilename'] = settings.get("PORTAGE_LOG_FILE").split(host_config)[1]
os.chmod(settings.get("PORTAGE_LOG_FILE"), 0o224)
else:
build_log_dict['logfilename'] = ""
diff --git a/gobs/pym/mysql_querys.py b/gobs/pym/mysql_querys.py
index f179d26..3d9d495 100644
--- a/gobs/pym/mysql_querys.py
+++ b/gobs/pym/mysql_querys.py
@@ -1,10 +1,10 @@
from __future__ import print_function
# Queryes to add the logs
-def get_config_id(connection, config):
+def get_config_id(connection, config, host):
cursor = connection.cursor()
- sqlQ = 'SELECT config_id FROM configs WHERE config = %s'
- cursor.execute(sqlQ,(config,))
+ sqlQ = 'SELECT config_id FROM configs WHERE config = %s AND hostname = %s'
+ cursor.execute(sqlQ,(config, host,))
entries = cursor.fetchone()
cursor.close()
if not entries is None:
@@ -69,11 +69,11 @@ def get_config_list_all(connection):
def get_config(connection, config_id):
cursor = connection.cursor()
- sqlQ ='SELECT config FROM configs WHERE config_id = %s'
+ sqlQ ='SELECT host, config FROM configs WHERE config_id = %s'
cursor.execute(sqlQ, (config_id,))
- config = cursor.fetchone()
+ hostname, config = cursor.fetchone()
cursor.close()
- return config[0]
+ return hostname, config
def update_make_conf(connection, configsDict):
cursor = connection.cursor()
@@ -85,11 +85,11 @@ def update_make_conf(connection, configsDict):
def get_default_config(connection):
cursor = connection.cursor()
- sqlQ = "SELECT config FROM configs WHERE default_config = 'True'"
+ sqlQ = "SELECT host, config FROM configs WHERE default_config = 'True'"
cursor.execute(sqlQ)
- entries = cursor.fetchone()
+ hostname, config = cursor.fetchone()
cursor.close()
- return entries[0]
+ return hostname, config
def get_repo_id(connection, repo):
cursor = connection.cursor()
diff --git a/gobs/pym/package.py b/gobs/pym/package.py
index 03b6c21..a86d512 100644
--- a/gobs/pym/package.py
+++ b/gobs/pym/package.py
@@ -12,7 +12,6 @@ from gobs.mysql_querys import get_config, get_config_id, add_gobs_logs, get_defa
from gobs.readconf import get_conf_settings
reader=get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
-config_profile = gobs_settings_dict['gobs_config']
class gobs_package(object):
@@ -22,9 +21,9 @@ class gobs_package(object):
self._myportdb = myportdb
self._config_id = get_config_id(conn, config_profile)
- def change_config(self, config_setup):
+ def change_config(self, host_config):
# Change config_root config_setup = table config
- my_new_setup = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_setup + "/"
+ my_new_setup = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + host_config + "/"
mysettings_setup = portage.config(config_root = my_new_setup)
return mysettings_setup
@@ -35,8 +34,8 @@ class gobs_package(object):
for config_id in config_id_list:
# Change config/setup
- config_setup = get_config(self._conn, config_id)
- mysettings_setup = self.change_config(config_setup)
+ hostname, config = get_config(self._conn, config_id)
+ mysettings_setup = self.change_config(hostname + "/" + config)
myportdb_setup = portage.portdbapi(mysettings=mysettings_setup)
# Get the latest cpv from portage with the config that we can build
@@ -155,10 +154,10 @@ class gobs_package(object):
add_new_build_job(self._conn, ebuild_id, config_id, use_flagsDict)
# B = Build cpv use-flags config
- config_setup = get_config(self._conn, config_id)
+ hostname, config = get_config(self._conn, config_id)
# FIXME log_msg need a fix to log the use flags corect.
- log_msg = "B %s:%s USE: %s %s" % (k, v['repo'], use_flagsDict, config_setup,)
+ log_msg = "B %s:%s USE: %s %s:%s" % (k, v['repo'], use_flagsDict, hostname, config,)
add_gobs_logs(self._conn, log_msg, "info", self._config_id)
i = i +1
diff --git a/gobs/pym/readconf.py b/gobs/pym/readconf.py
index 1a1c036..45ed10a 100644
--- a/gobs/pym/readconf.py
+++ b/gobs/pym/readconf.py
@@ -1,6 +1,7 @@
import os
import sys
import re
+import socket
class get_conf_settings(object):
# open the /etc/buildhost/buildhost.conf file and get the needed
@@ -45,5 +46,6 @@ class get_conf_settings(object):
gobs_settings_dict['sql_passwd'] = get_sql_passwd.rstrip('\n')
gobs_settings_dict['gobs_gitreponame'] = get_gobs_gitreponame.rstrip('\n')
gobs_settings_dict['gobs_config'] = get_gobs_config.rstrip('\n')
+ gobs_settings_dict['hostname'] = socket.gethostname()
# gobs_settings_dict['gobs_logfile'] = get_gobs_logfile.rstrip('\n')
return gobs_settings_dict
diff --git a/gobs/pym/sync.py b/gobs/pym/sync.py
index 085973a..3e25878 100644
--- a/gobs/pym/sync.py
+++ b/gobs/pym/sync.py
@@ -3,17 +3,21 @@ import portage
import os
import errno
import sys
+# Do not support python 3.*
from git import *
+
from _emerge.main import emerge_main
from gobs.readconf import get_conf_settings
from gobs.mysql_querys import get_config_id, add_gobs_logs, get_default_config
reader=get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
-config_profile = gobs_settings_dict['gobs_config']
+_config = gobs_settings_dict['gobs_config']
+_hostname = gobs_settings_dict['hostname']
def git_pull(conn):
- config_id = get_config_id(conn, config_profile)
+ #FIXME: Use git direct so we can use python 3.*
+ config_id = get_config_id(conn, _config, _hostname)
log_msg = "Git pull"
add_gobs_logs(conn, log_msg, "info", config_id)
repo = Repo("/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/")
@@ -27,9 +31,10 @@ def git_pull(conn):
return True
def sync_tree(conn):
- config_id = get_config_id(conn, config_profile)
- config_setup = get_default_config(conn) # HostConfigDir = table configs id
- default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_setup + "/"
+ config_id = get_config_id(conn, _config, _hostname)
+ hostname, config = get_default_config(conn) # HostConfigDir = table configs id
+ host_config = hostname +"/" + config
+ default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + host_config + "/"
mysettings = portage.config(config_root = default_config_root)
tmpcmdline = []
tmpcmdline.append("--sync")
diff --git a/gobs/pym/updatedb.py b/gobs/pym/updatedb.py
old mode 100755
new mode 100644
index b33eb03..cc5681c
--- a/gobs/pym/updatedb.py
+++ b/gobs/pym/updatedb.py
@@ -17,7 +17,8 @@ from gobs.package import gobs_package
from gobs.readconf import get_conf_settings
reader = get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
-config_profile = gobs_settings_dict['gobs_config']
+_config = gobs_settings_dict['gobs_config']
+_hostname = gobs_settings_dict['hostname']
def init_portage_settings(conn, config_id):
# check config setup
@@ -26,12 +27,13 @@ def init_portage_settings(conn, config_id):
add_gobs_logs(conn, log_msg, "info", config_id)
# Get default config from the configs table and default_config=1
- config_setup = get_default_config(conn) # HostConfigDir = table configs id
- default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_setup + "/"
+ hostname, config = get_default_config(conn) # HostConfigDir = table configs id
+ host_config = hostname +"/" + config
+ default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + host_config + "/"
# Set config_root (PORTAGE_CONFIGROOT) to default_config_root
mysettings = portage.config(config_root = default_config_root)
- log_msg = "Setting default config to: %s" % (config_setup,)
+ log_msg = "Setting default config to: %s" % (host_config,)
add_gobs_logs(conn, log_msg, "info", config_id)
return mysettings
@@ -116,7 +118,7 @@ def update_db_main(conn):
# Main
# Logging
- config_id = get_config_id(conn, config_profile)
+ config_id = get_config_id(conn, _config, _hostname)
log_msg = "Update db started."
add_gobs_logs(conn, log_msg, "info", config_id)
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-12-23 17:09 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-12-23 17:09 UTC (permalink / raw
To: gentoo-commits
commit: 568ff94daf38bbdfbafb0b296f935a74424e6744
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sun Dec 23 17:08:48 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sun Dec 23 17:08:48 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=568ff94d
We use the Event scheduler in the mysql for the cron stuff
---
gobs/bin/gobs_cron | 25 -------------------------
gobs/pym/cron.py | 13 -------------
gobs/pym/mysql_querys.py | 42 ------------------------------------------
3 files changed, 0 insertions(+), 80 deletions(-)
diff --git a/gobs/bin/gobs_cron b/gobs/bin/gobs_cron
deleted file mode 100755
index 773700b..0000000
--- a/gobs/bin/gobs_cron
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/python
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-
-from gobs.readconf import get_conf_settings
-from gobs.cron import cron_main
-from gobs.ConnectionManager import connectionManager
-from gobs.mysql_querys import add_gobs_logs, get_config_id
-
-def main():
- # Main
- reader = get_conf_settings()
- gobs_settings_dict=reader.read_gobs_settings_all()
- config_profile = gobs_settings_dict['gobs_config']
- CM=connectionManager()
- conn = CM.newConnection()
- config_id = get_config_id(conn, config_profile)
- add_gobs_logs(conn, "Cron job started", "info", config_id)
- cron_main(conn, config_id)
- add_gobs_logs(conn, "Cron job done.", "info", config_id)
- conn.close
-
-if __name__ == "__main__":
- main()
\ No newline at end of file
diff --git a/gobs/pym/cron.py b/gobs/pym/cron.py
deleted file mode 100644
index 706719f..0000000
--- a/gobs/pym/cron.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from __future__ import print_function
-
-from gobs.mysql_querys import get_cron_jobs, get_cron_info, check_jobs, \
- add_new_job, add_gobs_logs
-
-def cron_main(conn, config_id):
- cron_id_list = get_cron_jobs(conn, config_id)
- for cron_id in cron_id_list:
- job_type_id, run_config_id = get_cron_info(conn, cron_id)
- if not check_jobs(conn, job_type_id, config_id, run_config_id):
- log_msg = "Job: %s added to job list on %s for %s" % (job_type_id, config_id, run_config_id,)
- add_gobs_logs(conn, log_msg, "info", config_id)
- add_new_job(conn, cron_id, job_type_id, config_id, run_config_id)
diff --git a/gobs/pym/mysql_querys.py b/gobs/pym/mysql_querys.py
index 66c21f0..f179d26 100644
--- a/gobs/pym/mysql_querys.py
+++ b/gobs/pym/mysql_querys.py
@@ -55,48 +55,6 @@ def update_job_list(connection, status, job_id):
connection.commit()
cursor.close()
-def get_cron_jobs(connection, config_id):
- cursor = connection.cursor()
- sqlQ = 'SELECT cron_id FROM cron WHERE config_id = %s AND TIMESTAMPDIFF(MINUTES, time_stamp, NOW()) >= time_when'
- cursor.execute(sqlQ, (config_id,))
- entries = cursor.fetchall()
- cursor.close()
- if entries is None:
- return None
- cron_id_list = []
- for cron_id in entries:
- cron_id_list.append(cron_id[0])
- return sorted(cron_id_list)
-
-def check_jobs(connection, job_type_id, config_id, run_config_id):
- cursor = connection.cursor()
- sqlQ = "SELECT job_id FROM jobs WHERE job_type_id = %s AND config_id = %s AND run_config_id = %s AND (status = 'Runing' OR status = 'Waiting')"
- cursor.execute(sqlQ)
- entries = cursor.fetchone()
- cursor.close()
- if entries is None:
- return False
- return True
-
-def get_cron_info(connection, cron_id):
- cursor = connection.cursor()
- sqlQ = 'SELECT job_type_id, run_config_id FROM cron WHERE cron_id = %s'
- cursor.execute(sqlQ)
- entries = cursor.fetchone()
- cursor.close()
- job_type_id = entries[0]
- run_config_id = entries[1]
- return job_type_id, run_config_id
-
-def add_new_job(connection, cron_id, job_type_id, config_id, run_config_id):
- cursor = connection.cursor()
- sqlQ1 = "INSERT INTO jobs (job_type_id, user, config_id, run_config_id) VALUES ( %s, 'cron', %s, %s)"
- sqlQ2 = 'UPDATE cron SET time_stamp = NOW() WHERE cron_id = %s'
- cursor.execute(sqlQ1, (job_type_id, config_id, run_config_id,))
- cursor.execute(sqlQ2(cron_id,))
- connection.commit()
- cursor.close()
-
# Queryes to handel the configs* tables
def get_config_list_all(connection):
cursor = connection.cursor()
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-12-22 2:59 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-12-22 2:59 UTC (permalink / raw
To: gentoo-commits
commit: 2cbdb964e162582123ad5e4d0fe33c178df9d2a2
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sat Dec 22 02:59:16 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sat Dec 22 02:59:16 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=2cbdb964
add gobs cron
---
gobs/bin/gobs_cron | 25 +++++++++++++++++++++++++
gobs/pym/cron.py | 13 +++++++++++++
gobs/pym/mysql_querys.py | 42 ++++++++++++++++++++++++++++++++++++++++++
3 files changed, 80 insertions(+), 0 deletions(-)
diff --git a/gobs/bin/gobs_cron b/gobs/bin/gobs_cron
new file mode 100644
index 0000000..773700b
--- /dev/null
+++ b/gobs/bin/gobs_cron
@@ -0,0 +1,25 @@
+#!/usr/bin/python
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from gobs.readconf import get_conf_settings
+from gobs.cron import cron_main
+from gobs.ConnectionManager import connectionManager
+from gobs.mysql_querys import add_gobs_logs, get_config_id
+
+def main():
+ # Main
+ reader = get_conf_settings()
+ gobs_settings_dict=reader.read_gobs_settings_all()
+ config_profile = gobs_settings_dict['gobs_config']
+ CM=connectionManager()
+ conn = CM.newConnection()
+ config_id = get_config_id(conn, config_profile)
+ add_gobs_logs(conn, "Cron job started", "info", config_id)
+ cron_main(conn, config_id)
+ add_gobs_logs(conn, "Cron job done.", "info", config_id)
+ conn.close
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/gobs/pym/cron.py b/gobs/pym/cron.py
new file mode 100644
index 0000000..706719f
--- /dev/null
+++ b/gobs/pym/cron.py
@@ -0,0 +1,13 @@
+from __future__ import print_function
+
+from gobs.mysql_querys import get_cron_jobs, get_cron_info, check_jobs, \
+ add_new_job, add_gobs_logs
+
+def cron_main(conn, config_id):
+ cron_id_list = get_cron_jobs(conn, config_id)
+ for cron_id in cron_id_list:
+ job_type_id, run_config_id = get_cron_info(conn, cron_id)
+ if not check_jobs(conn, job_type_id, config_id, run_config_id):
+ log_msg = "Job: %s added to job list on %s for %s" % (job_type_id, config_id, run_config_id,)
+ add_gobs_logs(conn, log_msg, "info", config_id)
+ add_new_job(conn, cron_id, job_type_id, config_id, run_config_id)
diff --git a/gobs/pym/mysql_querys.py b/gobs/pym/mysql_querys.py
index d9bbcc3..8c531cb 100644
--- a/gobs/pym/mysql_querys.py
+++ b/gobs/pym/mysql_querys.py
@@ -48,6 +48,48 @@ def update_job_list(connection, status, job_id):
connection.commit()
cursor.close()
+def get_cron_jobs(connection, config_id):
+ cursor = connection.cursor()
+ sqlQ = 'SELECT cron_id FROM cron WHERE config_id = %s'
+ cursor.execute(sqlQ, (config_id,))
+ entries = cursor.fetchall()
+ cursor.close()
+ if entries is None:
+ return None
+ cron_id_list = []
+ for cron_id in entries:
+ cron_id_list.append(cron_id[0])
+ return sorted(cron_id_list)
+
+def check_jobs(connection, job_type_id, config_id, run_config_id):
+ cursor = connection.cursor()
+ sqlQ = "SELECT job_id FROM jobs WHERE job_type_id = %s AND config_id = %s AND run_config_id = %s AND (status = 'Runing' OR status = 'Waiting')"
+ cursor.execute(sqlQ, (job_type_id, config_id, run_config_id,))
+ entries = cursor.fetchone()
+ cursor.close()
+ if entries is None:
+ return False
+ return True
+
+def get_cron_info(connection, cron_id):
+ cursor = connection.cursor()
+ sqlQ = 'SELECT job_type_id, run_config_id FROM cron WHERE cron_id = %s'
+ cursor.execute(sqlQ, (cron_id,))
+ entries = cursor.fetchone()
+ cursor.close()
+ job_type_id = entries[0]
+ run_config_id = entries[1]
+ return job_type_id, run_config_id
+
+def add_new_job(connection, cron_id, job_type_id, config_id, run_config_id):
+ cursor = connection.cursor()
+ sqlQ1 = "INSERT INTO jobs (job_type_id, user, config_id, run_config_id) VALUES ( %s, 'cron', %s, %s)"
+ sqlQ2 = 'UPDATE cron SET time_stamp = NOW() WHERE cron_id = %s'
+ cursor.execute(sqlQ1, (job_type_id, config_id, run_config_id,))
+ cursor.execute(sqlQ2, (cron_id,))
+ connection.commit()
+ cursor.close()
+
# Queryes to handel the configs* tables
def get_config_list_all(connection):
cursor = connection.cursor()
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-12-21 1:44 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-12-21 1:44 UTC (permalink / raw
To: gentoo-commits
commit: 5abc1defb1546604228643d138e3819d32b9d949
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Fri Dec 21 01:35:46 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Fri Dec 21 01:35:46 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=5abc1def
Update for mysql support
---
gobs/bin/gobs_guest_jobs | 48 ++---
gobs/bin/gobs_host_jobs | 4 +-
gobs/pym/ConnectionManager.py | 54 +----
gobs/pym/Scheduler.py | 8 +-
gobs/pym/actions.py | 9 +-
gobs/pym/build_job.py | 165 +++++++++++++
gobs/pym/check_setup.py | 20 +-
gobs/pym/jobs.py | 38 ++--
gobs/pym/mysql_querys.py | 261 ++++++++++++++--------
gobs/pym/package.py | 129 +++++------
gobs/pym/pgsql_querys.py | 508 -----------------------------------------
gobs/pym/sync.py | 22 ++-
gobs/pym/updatedb.py | 85 ++++----
13 files changed, 515 insertions(+), 836 deletions(-)
diff --git a/gobs/bin/gobs_guest_jobs b/gobs/bin/gobs_guest_jobs
index 7c20734..5494b83 100755
--- a/gobs/bin/gobs_guest_jobs
+++ b/gobs/bin/gobs_guest_jobs
@@ -3,51 +3,37 @@ from __future__ import print_function
# Get the options from the config file set in gobs.readconf
from gobs.readconf import get_conf_settings
-reader=get_conf_settings()
-gobs_settings_dict=reader.read_gobs_settings_all()
-
from gobs.ConnectionManager import connectionManager
-CM=connectionManager(gobs_settings_dict)
-#selectively import the pgsql/mysql querys
-if CM.getName()=='pgsql':
- from gobs.pgsql_querys import *
-
+from gobs.mysql_querys import add_gobs_logs, get_config_id
from gobs.check_setup import check_configure_guest
-from gobs.build_queru import queruaction
+from gobs.build_job import build_job_action
from gobs.jobs import jobs_main
import portage
import sys
import os
import time
-from multiprocessing import Process
-def main_loop(config_profile):
+def main():
repeat = True
- conn=CM.getConnection()
- add_gobs_logs(conn, "Job and build deamon started.", "info", config_profile)
- CM.putConnection(conn)
+ reader = get_conf_settings()
+ gobs_settings_dict=reader.read_gobs_settings_all()
+ config_profile = gobs_settings_dict['gobs_config']
+ CM=connectionManager()
+ conn = CM.getConnection()
+ config_id = get_config_id(conn, config_profile)
+ add_gobs_logs(conn, "Job and build deamon started.", "info", config_id)
+ init_build_job = build_job_action(conn, config_id)
while repeat:
- jobs_main(config_profile)
- if check_configure_guest(config_profile) is not True:
+ jobs_main(conn, config_profile)
+ if check_configure_guest(conn, config_id) is not True:
time.sleep(6)
continue
else:
- init_queru = queruaction(config_profile)
- init_queru.procces_qureru()
- # p = Process(target=init_queru.procces_qureru)
- # p.start()
- # p.join()
- repeat = True
- time.sleep(6)
- return
-
-def main():
- # Main
- config_profile = gobs_settings_dict['gobs_config']
- #we provide the main_loop with the ConnectionManager so we can hand out connections from within the loop
- main_loop(config_profile)
- CM.closeAllConnections()
+ procces_build_jobs()
+ repeat = True
+ time.sleep(6)
+ CM.putConnection(conn)
if __name__ == "__main__":
main()
diff --git a/gobs/bin/gobs_host_jobs b/gobs/bin/gobs_host_jobs
index 5dac511..5273430 100755
--- a/gobs/bin/gobs_host_jobs
+++ b/gobs/bin/gobs_host_jobs
@@ -6,6 +6,7 @@ from __future__ import print_function
from gobs.readconf import get_conf_settings
from gobs.jobs import jobs_main
from gobs.ConnectionManager import connectionManager
+from gobs.mysql_querys import add_gobs_logs, get_config_id
import time
def main():
@@ -15,7 +16,8 @@ def main():
config_profile = gobs_settings_dict['gobs_config']
CM=connectionManager()
conn = CM.getConnection()
- add_gobs_logs(conn, "Job deamon started", "info", config_profile)
+ config_id = get_config_id(conn, config_profile)
+ add_gobs_logs(conn, "Job deamon started", "info", config_id)
repeat = True
while repeat:
jobs_main(conn, config_profile)
diff --git a/gobs/pym/ConnectionManager.py b/gobs/pym/ConnectionManager.py
index 9d345c1..14f875f 100644
--- a/gobs/pym/ConnectionManager.py
+++ b/gobs/pym/ConnectionManager.py
@@ -1,21 +1,13 @@
-#a simple CM build around sie singleton so there can only be 1 CM but you can call the class in different place with out caring about it.
-#when the first object is created of this class, the SQL settings are read from the file and stored in the class for later reuse by the next object and so on.
-#(maybe later add support for connection pools)
+#
from __future__ import print_function
from gobs.readconf import get_conf_settings
reader = get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
-if settings_dict['sql_backend'] == 'pgsql':
- from gobs.pgsql_querys import *
-if settings_dict['sql_backend'] == 'mysql':
- from gobs.mysql_querys import *
-
class connectionManager(object):
_instance = None
- #size of the connection Pool
- def __new__(cls, numberOfconnections=20, *args, **kwargs):
+ def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(connectionManager, cls).__new__(cls, *args, **kwargs)
#read the sql user/host etc and store it in the local object
@@ -25,16 +17,6 @@ class connectionManager(object):
cls._password=settings_dict['sql_passwd']
cls._database=settings_dict['sql_db']
#shouldnt we include port also?
- if cls._backend == 'pgsql':
- try:
- from psycopg2 import pool, extensions
- except ImportError:
- print("Please install a recent version of dev-python/psycopg for Python")
- sys.exit(1)
- #setup connection pool
- cls._connectionNumber=numberOfconnections
- #always create 1 connection
- cls._pool=pool.ThreadedConnectionPool(1,cls._connectionNumber,host=cls._host,database=cls._database,user=cls._user,password=cls._password)
if cls._backend == 'mysql':
try:
import mysql.connector
@@ -59,34 +41,6 @@ class connectionManager(object):
print(err)
return cls._instance
- ## returns the name of the database pgsql/mysql etc
- def getName(self):
- return self._backend
-
- def getConnection(self):
- if self._backend == 'pgsql':
- return self._pool.getconn()
- if self._backend == 'mysql':
- return self._cnx
-
- def putConnection(self, connection):
- if self._backend == 'pgsql':
- self._pool.putconn(connection , key=None, close=True)
- if self._backend == 'mysql':
- self._cnx.close()
-
- def closeAllConnections(self):
- self._pool.closeall()
+ def newConnection(self):
+ return self._cnx
-##how to use this class
-#get a instance of the class (there can only be 1 instance but many pointers (single ton))
-#get the connection
-#conn=cm.getConnection()
-#get a cursor
-#cur=conn.cursor()
-#do stuff
-#cur.execute(stuff)
-#"close a connection" temporarily put it away for reuse
-#cm.putConnection(conn)
-#kill all connections, should only be used just before the program terminates
-#cm.closeAllConnections()
diff --git a/gobs/pym/Scheduler.py b/gobs/pym/Scheduler.py
index 8e5c6ba..6c446cb 100644
--- a/gobs/pym/Scheduler.py
+++ b/gobs/pym/Scheduler.py
@@ -1264,7 +1264,7 @@ class Scheduler(PollScheduler):
if not self._terminated_tasks:
self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
self._status_display.failed = len(self._failed_pkgs)
- add_buildlog_main(settings, pkg, trees)
+ add_buildlog_main(settings, pkg)
return
self._task_complete(pkg)
@@ -1283,7 +1283,7 @@ class Scheduler(PollScheduler):
self._pkg_cache.pop(pkg_to_replace, None)
if pkg.installed:
- add_buildlog_main(settings, pkg, trees)
+ add_buildlog_main(settings, pkg)
return
# Call mtimedb.commit() after each merge so that
@@ -1294,7 +1294,7 @@ class Scheduler(PollScheduler):
if not mtimedb["resume"]["mergelist"]:
del mtimedb["resume"]
mtimedb.commit()
- add_buildlog_main(settings, pkg, trees)
+ add_buildlog_main(settings, pkg)
def _build_exit(self, build):
self._running_tasks.pop(id(build), None)
@@ -1332,7 +1332,7 @@ class Scheduler(PollScheduler):
self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
self._status_display.failed = len(self._failed_pkgs)
self._deallocate_config(build.settings)
- add_buildlog_main(settings, pkg, trees)
+ add_buildlog_main(settings, pkg)
self._jobs -= 1
self._status_display.running = self._jobs
self._schedule()
diff --git a/gobs/pym/actions.py b/gobs/pym/actions.py
index 6842ff3..9ca6b12 100644
--- a/gobs/pym/actions.py
+++ b/gobs/pym/actions.py
@@ -82,6 +82,7 @@ from _emerge.UseFlagDisplay import pkg_use_display
from _emerge.userquery import userquery
from gobs.build_queru import log_fail_queru
+from gobs.ConnectionManager import connectionManager
if sys.hexversion >= 0x3000000:
long = int
@@ -144,6 +145,9 @@ def build_mydepgraph(settings, trees, mtimedb, myopts, myparams, myaction, myfil
def action_build(settings, trees, mtimedb,
myopts, myaction, myfiles, spinner, build_dict):
+ CM2=connectionManager()
+ conn2 = CM2.newConnection()
+
if '--usepkgonly' not in myopts:
old_tree_timestamp_warn(settings['PORTDIR'], settings)
@@ -367,7 +371,10 @@ def action_build(settings, trees, mtimedb,
mydepgraph.display_problems()
if build_dict['check_fail'] is True:
- log_fail_queru(build_dict, settings)
+ if not conn2.is_connected() is True:
+ conn2.reconnect(attempts=2, delay=1)
+ log_fail_queru(conn2, build_dict, settings)
+		conn2.close()
return 1
if "--pretend" not in myopts and \
diff --git a/gobs/pym/build_job.py b/gobs/pym/build_job.py
new file mode 100644
index 0000000..7562b3c
--- /dev/null
+++ b/gobs/pym/build_job.py
@@ -0,0 +1,165 @@
+# Get the options from the config file set in gobs.readconf
+from __future__ import print_function
+import portage
+import os
+import re
+import sys
+import signal
+import multiprocessing
+
+from gobs.manifest import gobs_manifest
+from gobs.depclean import main_depclean
+from gobs.flags import gobs_use_flags
+from portage import _encodings
+from portage import _unicode_decode
+from portage.versions import cpv_getkey
+from portage.dep import check_required_use
+from gobs.main import emerge_main
+from gobs.build_log import log_fail_queru
+from gobs.actions import load_emerge_config
+from gobs.mysql_querys import add_gobs_logs, get_cp_repo_from_package_id, get_packages_to_build
+
+class build_job_action(object):
+
+ def __init__(self, conn, config_id):
+ self._mysettings = portage.config(config_root = "/")
+ self._config_id = config_id
+ self._myportdb = portage.portdb
+ self._conn = conn
+
+ def make_build_list(self, build_dict, settings, portdb):
+ package_id = build_dict['package_id']
+ cp, repo = get_cp_repo_from_package_id(self._conn, package_id)
+ element = cp.split('/')
+ package = element[1]
+ cpv = cp + "-" + build_dict['ebuild_version']
+ pkgdir = self._myportdb.getRepositoryPath(repo) + "/" + cp
+ init_manifest = gobs_manifest(settings, pkgdir)
+ try:
+ ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + build_dict['ebuild_version'] + ".ebuild")[0]
+ except:
+ ebuild_version_checksum_tree = None
+ if ebuild_version_checksum_tree == build_dict['checksum']:
+ init_flags = gobs_use_flags(settings, portdb, cpv)
+ build_use_flags_list = init_flags.comper_useflags(build_dict)
+ log_msg = "build_use_flags_list %s" % (build_use_flags_list,)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
+ manifest_error = init_manifest.check_file_in_manifest(portdb, cpv, build_use_flags_list, repo)
+ if manifest_error is None:
+ build_dict['check_fail'] = False
+ build_cpv_dict = {}
+ build_cpv_dict[cpv] = build_use_flags_list
+ log_msg = "build_cpv_dict: %s" % (build_cpv_dict,)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
+ return build_cpv_dict
+ else:
+ build_dict['type_fail'] = "Manifest error"
+ build_dict['check_fail'] = True
+				log_msg = "Manifest error: %s:%s" % (cpv, manifest_error,)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
+ else:
+ build_dict['type_fail'] = "Wrong ebuild checksum"
+ build_dict['check_fail'] = True
+ if build_dict['check_fail'] is True:
+			log_fail_queru(self._conn, build_dict, settings)
+ return None
+ return build_cpv_dict
+
+ def build_procces(self, buildqueru_cpv_dict, build_dict, settings, portdb):
+ build_cpv_list = []
+ depclean_fail = True
+ for k, build_use_flags_list in buildqueru_cpv_dict.iteritems():
+ build_cpv_list.append("=" + k)
+ if not build_use_flags_list == None:
+ build_use_flags = ""
+ for flags in build_use_flags_list:
+ build_use_flags = build_use_flags + flags + " "
+ filetext = '=' + k + ' ' + build_use_flags
+ log_msg = "filetext: %s" % filetext
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
+ with open("/etc/portage/package.use/99_autounmask", "a") as f:
+ f.write(filetext)
+ f.write('\n')
+ f.close
+ log_msg = "build_cpv_list: %s" % (build_cpv_list,)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
+
+ argscmd = []
+ for emerge_option in build_dict['emerge_options']:
+ if emerge_option == '--depclean':
+ pass
+ elif emerge_option == '--nodepclean':
+ pass
+ elif emerge_option == '--nooneshot':
+ pass
+ else:
+ if not emerge_option in argscmd:
+ argscmd.append(emerge_option)
+ for build_cpv in build_cpv_list:
+ argscmd.append(build_cpv)
+ print("Emerge options: %s" % argscmd)
+ log_msg = "argscmd: %s" % (argscmd,)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
+
+ # close the db for the multiprocessing pool will make new ones
+ # and we don't need this one for some time.
+		self._conn.close()
+
+ # Call main_emerge to build the package in build_cpv_list
+ print("Build: %s" % build_dict)
+ build_fail = emerge_main(argscmd, build_dict)
+ # Run depclean
+ if '--depclean' in build_dict['emerge_options'] and not '--nodepclean' in build_dict['emerge_options']:
+ depclean_fail = main_depclean()
+ try:
+ os.remove("/etc/portage/package.use/99_autounmask")
+ with open("/etc/portage/package.use/99_autounmask", "a") as f:
+ f.close
+ except:
+ pass
+
+ # reconnect to the db if needed.
+		if not self._conn.is_connected() is True:
+			self._conn.reconnect(attempts=2, delay=1)
+
+ build_dict2 = {}
+ build_dict2 = get_packages_to_build(self._conn, self._config_id)
+ if build_dict['build_job_id'] == build_dict2['build_job_id']:
+ log_msg = "build_job %s was not removed" % (build_dict['build_job_id'],)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
+ print("qurery was not removed")
+ if build_fail is True:
+ build_dict['type_fail'] = "Emerge faild"
+ build_dict['check_fail'] = True
+ log_msg = "Emerge faild!"
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
+ else:
+ build_dict['type_fail'] = "Querey was not removed"
+ build_dict['check_fail'] = True
+			log_fail_queru(self._conn, build_dict, settings)
+ if build_fail is True:
+ return True
+ return False
+
+ def procces_build_jobs(self):
+ build_dict = {}
+		build_dict = get_packages_to_build(self._conn, self._config_id)
+ settings, trees, mtimedb = load_emerge_config()
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ if build_dict is None:
+ return
+ log_msg = "build_dict: %s" % (build_dict,)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
+ if not build_dict['ebuild_id'] is None and build_dict['checksum'] is not None:
+ buildqueru_cpv_dict = self.make_build_list(build_dict, settings, portdb)
+ log_msg = "buildqueru_cpv_dict: %s" % (buildqueru_cpv_dict,)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
+ if buildqueru_cpv_dict is None:
+ return
+ fail_build_procces = self.build_procces(buildqueru_cpv_dict, build_dict, settings, portdb)
+ return
+		if not build_dict['emerge_options'] == [] and build_dict['ebuild_id'] is None:
+ return
+ if not build_dict['ebuild_id'] is None and build_dict['emerge_options'] is None:
+ pass
+ # del_old_queue(self._conn, build_dict['queue_id'])
diff --git a/gobs/pym/check_setup.py b/gobs/pym/check_setup.py
index 417a137..4997d0f 100644
--- a/gobs/pym/check_setup.py
+++ b/gobs/pym/check_setup.py
@@ -6,15 +6,18 @@ import errno
from portage.exception import DigestException, FileNotFound, ParseError, PermissionDenied
from gobs.text import get_file_text
from gobs.readconf import get_conf_settings
+from gobs.mysql_querys import get_config_id, get_config_list_all, add_gobs_logs, get_config, update_make_conf
+
reader=get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
config_profile = gobs_settings_dict['gobs_config']
def check_make_conf(conn):
+ _config_id = get_config_id(conn, config_profile)
# Get the config list
config_id_list_all = get_config_list_all(conn)
log_msg = "Checking configs for changes and errors"
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", _config_id)
configsDict = {}
for config_id in config_id_list_all:
attDict={}
@@ -35,21 +38,20 @@ def check_make_conf(conn):
attDict['config_error'] = str(e)
attDict['active'] = 'False'
log_msg = "%s FAIL!" % (config,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", _config_id)
else:
attDict['config_error'] = ''
attDict['active'] = 'True'
log_msg = "%s PASS" % (config,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", _config_id)
attDict['make_conf_text'] = get_file_text(make_conf_file)
attDict['make_conf_checksum_tree'] = make_conf_checksum_tree
configsDict[config_id]=attDict
update_make_conf(conn, configsDict)
log_msg = "Checking configs for changes and errors ... Done"
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", _config_id)
-def check_make_conf_guest(conn, config_profile):
- print('config_profile', config_profile)
+def check_make_conf_guest(conn, config_id):
make_conf_file = "/etc/portage/make.conf"
# Check if we can open the file and close it
# Check if we have some error in the file (portage.util.getconfig)
@@ -62,7 +64,7 @@ def check_make_conf_guest(conn, config_profile):
# With errors we return false
except Exception as e:
return False
- make_conf_checksum_db = get_profile_checksum(conn, config_profile)
+ make_conf_checksum_db = get_profile_checksum(conn, config_id)
if make_conf_checksum_db is None:
return False
print('make_conf_checksum_tree', make_conf_checksum_tree)
@@ -71,7 +73,7 @@ def check_make_conf_guest(conn, config_profile):
return False
return True
-def check_configure_guest(conn, config_profile):
- pass_make_conf = check_make_conf_guest(conn, config_profile)
+def check_configure_guest(conn, config_id):
+ pass_make_conf = check_make_conf_guest(conn, config_id)
print(pass_make_conf)
return pass_make_conf
\ No newline at end of file
diff --git a/gobs/pym/jobs.py b/gobs/pym/jobs.py
index 504c951..164de06 100644
--- a/gobs/pym/jobs.py
+++ b/gobs/pym/jobs.py
@@ -3,28 +3,30 @@ from __future__ import print_function
from gobs.sync import git_pull, sync_tree
from gobs.buildquerydb import add_buildquery_main, del_buildquery_main
from gobs.updatedb import update_db_main
+from gobs.mysql_querys import get_config_id, add_gobs_logs, get_jobs_id, get_job, update_job_list
def jobs_main(conn, config_profile):
- jobs_id = get_jobs_id(conn, config_profile)
+ config_id = get_config_id(conn, config_profile)
+ jobs_id = get_jobs_id(conn, config_id)
if jobs_id is None:
return
for job_id in jobs_id:
- job, config_id = get_job(conn, job_id)
+ job, run_config_id = get_job(conn, job_id)
log_msg = "Job: %s Type: %s" % (job_id, job,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
if job == "addbuildquery":
update_job_list(conn, "Runing", job_id)
log_msg = "Job %s is runing." % (job_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- result = add_buildquery_main(config_id)
+ add_gobs_logs(conn, log_msg, "info", config_id)
+ result = add_buildquery_main(run_config_id)
if result is True:
update_job_list(conn, "Done", job_id)
log_msg = "Job %s is done.." % (job_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
else:
update_job_list(conn, "Fail", job_id)
log_msg = "Job %s did fail." % (job_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
elif job == "delbuildquery":
update_job_list(conn, "Runing", job_id)
log_msg = "Job %s is runing." % (job_id,)
@@ -33,48 +35,48 @@ def jobs_main(conn, config_profile):
if result is True:
update_job_list(conn, "Done", job_id)
log_msg = "Job %s is done.." % (job_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
else:
update_job_list(conn, "Fail", job_id)
log_msg = "Job %s did fail." % (job_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
elif job == "gsync":
update_job_list(conn, "Runing", job_id)
log_msg = "Job %s is runing." % (job_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
result = git_pull(conn)
if result is True:
update_job_list(conn, "Done", job_id)
log_msg = "Job %s is done.." % (job[1],)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
else:
update_job_list(conn, "Fail", job_id)
log_msg = "Job %s did fail." % (job_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
elif job == "esync":
update_job_list(conn, "Runing", job_id)
log_msg = "Job %s is runing." % (job_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
result = sync_tree(conn)
if result is True:
update_job_list(conn, "Done", job_id)
log_msg = "Job %s is done.." % (job_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
else:
update_job_list(conn, "Fail", job_id)
log_msg = "Job %s did fail." % (job_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
elif job == "updatedb":
update_job_list(conn, "Runing", job_id)
log_msg = "Job %s is runing." % (job_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
result = update_db_main(conn)
if result is True:
update_job_list(conn, "Done", job_id)
log_msg = "Job %s is done.." % (job_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
else:
update_job_list(conn, "Fail", job_id)
log_msg = "Job %s did fail." % (job_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
return
diff --git a/gobs/pym/mysql_querys.py b/gobs/pym/mysql_querys.py
index 45a622f..bf2e20c 100644
--- a/gobs/pym/mysql_querys.py
+++ b/gobs/pym/mysql_querys.py
@@ -1,19 +1,27 @@
-#every function takes a connection as a parameter that is provided by the CM
from __future__ import print_function
# Queryes to add the logs
+def get_config_id(connection, config):
+ cursor = connection.cursor()
+ sqlQ = 'SELECT config_id FROM configs WHERE config = %s'
+ cursor.execute(sqlQ,(config,))
+ entries = cursor.fetchone()
+ cursor.close()
+ if not entries is None:
+ return entries[0]
+
def add_gobs_logs(connection, log_msg, log_type, config):
cursor = connection.cursor()
- sqlQ = 'INSERT INTO logs (config_id, log_type, msg) VALUES ( (SELECT config_id FROM configs WHERE config = %s), %s, %s )'
+ sqlQ = 'INSERT INTO logs (config_id, log_type, msg) VALUES ( %s, %s, %s )'
cursor.execute(sqlQ, (config, log_type, log_msg))
connection.commit()
cursor.close()
# Queryes to handel the jobs table
-def get_jobs_id(connection, config):
+def get_jobs_id(connection, config_id):
cursor = connection.cursor()
- sqlQ = "SELECT job_id FROM jobs WHERE status = 'Waiting' AND config_id = (SELECT config_id FROM configs WHERE config = %s)"
- cursor.execute(sqlQ, (config,))
+ sqlQ = "SELECT job_id FROM jobs WHERE status = 'Waiting' AND config_id = %s"
+ cursor.execute(sqlQ, (config_id,))
entries = cursor.fetchall()
cursor.close()
if entries is None:
@@ -28,6 +36,7 @@ def get_job(connection, job_id):
sqlQ ='SELECT job, config_id2 FROM jobs WHERE job_id = %s'
cursor.execute(sqlQ, (job_id,))
entries = cursor.fetchone()
+ cursor.close()
job = entries[0]
config_id = entries[1]
return job, config_id
@@ -37,6 +46,7 @@ def update_job_list(connection, status, job_id):
sqlQ = 'UPDATE jobs SET status = %s WHERE job_id = %s'
cursor.execute(sqlQ, (status, job_id,))
connection.commit()
+ cursor.close()
# Queryes to handel the configs* tables
def get_config_list_all(connection):
@@ -44,21 +54,27 @@ def get_config_list_all(connection):
sqlQ = 'SELECT config_id FROM configs'
cursor.execute(sqlQ)
entries = cursor.fetchall()
- return entries
+ cursor.close()
+ config_id_list = []
+ for config_id in entries:
+ config_id_list.append(config_id[0])
+ return config_id_list
def get_config(connection, config_id):
cursor = connection.cursor()
sqlQ ='SELECT config FROM configs WHERE config_id = %s'
cursor.execute(sqlQ, (config_id,))
config = cursor.fetchone()
+ cursor.close()
return config[0]
def update_make_conf(connection, configsDict):
cursor = connection.cursor()
- sqlQ = 'UPDATE configs_metadata SET checksum = %s, make_conf_text = %s, active = %s, config_error = %s WHERE config_id = %s'
+ sqlQ = 'UPDATE configs_metadata SET checksum = %s, make_conf_text = %s, active = %s, config_error_text = %s WHERE config_id = %s'
for k, v in configsDict.iteritems():
cursor.execute(sqlQ, (v['make_conf_checksum_tree'], v['make_conf_text'], v['active'], v['config_error'], k,))
connection.commit()
+ cursor.close()
def get_default_config(connection):
cursor = connection.cursor()
@@ -67,45 +83,65 @@ def get_default_config(connection):
entries = cursor.fetchone()
return entries
+def get_repo_id(connection, repo):
+ cursor = connection.cursor()
+ sqlQ ='SELECT repo_id FROM repos WHERE repo = %s'
+ cursor.execute(sqlQ, (repo,))
+ entries = cursor.fetchone()
+ cursor.close()
+ if not entries is None:
+ return entries[0]
+
def update_repo_db(connection, repo_list):
cursor = connection.cursor()
- sqlQ1 = 'SELECT repo_id FROM repos WHERE repo = %s'
- sqlQ2 = 'INSERT INTO repos (repo) VALUES ( %s )'
+ sqlQ = 'INSERT INTO repos (repo) VALUES ( %s )'
for repo in repo_list:
- cursor.execute(sqlQ1, (repo,))
- entries = cursor.fetchone()
- if entries is None:
- cursor.execute(sqlQ2, (repo,))
+ repo_id = get_repo_id(connection, repo)
+ if repo_id is None:
+ cursor.execute(sqlQ, (repo,))
connection.commit()
- return
+ cursor.close()
-def get_package_id(connection, categories, package, repo):
+def get_category_id(connection, categories):
cursor = connection.cursor()
- sqlQ ='SELECT package_id FROM packages WHERE category = %s AND package = %s AND repo_id = (SELECT repo_id FROM repos WHERE repo = %s)'
- params = categories, package, repo
- cursor.execute(sqlQ, params)
+ sqlQ = "SELECT category_id FROM categories WHERE category = %s AND active = 'True'"
+ cursor.execute(sqlQ, (categories,))
entries = cursor.fetchone()
- if entries is None:
- return None
- return entries[0]
+ cursor.close()
+ if not entries is None:
+ return entries[0]
+
+def update_categories_db(connection, categories):
+ cursor = connection.cursor()
+ sqlQ = 'INSERT INTO categories (category) VALUES ( %s )'
+ category_id = get_category_id(connection, categories)
+ if category_id is None:
+ cursor.execute(sqlQ, (categories,))
+ connection.commit()
+ cursor.close()
-# Add new info to the packages table
-def get_repo_id(connection, repo):
+def get_package_id(connection, categories, package, repo):
cursor = connection.cursor()
- sqlQ ='SELECT repo_id FROM repos WHERE repo = %s'
- cursor.execute(sqlQ, (repo,))
+ sqlQ ='SELECT package_id FROM packages WHERE category_id = %s AND package = %s AND repo_id = %s'
+ category_id = get_category_id(connection, categories)
+ repo_id = get_repo_id(connection, repo)
+ cursor.execute(sqlQ, (category_id, package, repo_id,))
entries = cursor.fetchone()
- if entries is None:
- return None
- return entries[0]
+ cursor.close()
+ if not entries is None:
+ return entries[0]
def add_new_manifest_sql(connection, categories, package, repo, manifest_checksum_tree):
cursor = connection.cursor()
- sqlQ = "INSERT INTO packages (category, package, repo_id, checksum, active) VALUES (%s, %s, %s, %s, 'True') RETURNING package_id"
+ sqlQ1 = "INSERT INTO packages (category_id, package, repo_id, checksum, active) VALUES (%s, %s, %s, %s, 'True')"
+ sqlQ2 = 'SELECT LAST_INSERT_ID()'
repo_id = get_repo_id(connection, repo)
- cursor.execute(sqlQ, (categories, package, repo_id, manifest_checksum_tree,))
+ category_id = get_category_id(connection, categories)
+ cursor.execute(sqlQ1, (category_id, package, repo_id, manifest_checksum_tree,))
+ cursor.execute(sqlQ2)
package_id = cursor.fetchone()[0]
connection.commit()
+ cursor.close()
return package_id
def get_restriction_id(connection, restriction):
@@ -113,41 +149,43 @@ def get_restriction_id(connection, restriction):
sqlQ ='SELECT restriction_id FROM restrictions WHERE restriction = %s'
cursor.execute(sqlQ, (restriction,))
entries = cursor.fetchone()
- if entries is None:
- return None
- return entries[0]
+ cursor.close()
+ if not entries is None:
+ return entries[0]
def get_use_id(connection, use_flag):
cursor = connection.cursor()
sqlQ ='SELECT use_id FROM uses WHERE flag = %s'
cursor.execute(sqlQ, (use_flag,))
entries = cursor.fetchone()
- if entries is None:
- return None
- return entries[0]
+ cursor.close()
+ if not entries is None:
+ return entries[0]
def get_keyword_id(connection, keyword):
cursor = connection.cursor()
sqlQ ='SELECT keyword_id FROM keywords WHERE keyword = %s'
cursor.execute(sqlQ, (keyword,))
entries = cursor.fetchone()
- if entries is None:
- return None
- return entries[0]
+ cursor.close()
+ if not entries is None:
+ return entries[0]
def add_new_ebuild_metadata_sql(connection, ebuild_id, keywords, restrictions, iuse_list):
cursor = connection.cursor()
- sqlQ1 = 'INSERT INTO keywords (keyword) VALUES ( %s ) RETURNING keyword_id'
- sqlQ3 = 'INSERT INTO restrictions (restriction) VALUES ( %s ) RETURNING restriction_id'
+ sqlQ1 = 'INSERT INTO keywords (keyword) VALUES ( %s )'
+ sqlQ3 = 'INSERT INTO restrictions (restriction) VALUES ( %s )'
sqlQ4 = 'INSERT INTO ebuilds_restrictions (ebuild_id, restriction_id) VALUES ( %s, %s )'
- sqlQ5 = 'INSERT INTO uses (flag) VALUES ( %s ) RETURNING use_id'
+ sqlQ5 = 'INSERT INTO uses (flag) VALUES ( %s )'
sqlQ6 = 'INSERT INTO ebuilds_iuse (ebuild_id, use_id, status) VALUES ( %s, %s, %s)'
sqlQ7 = 'INSERT INTO ebuilds_keywords (ebuild_id, keyword_id, status) VALUES ( %s, %s, %s)'
+ sqlQ8 = 'SELECT LAST_INSERT_ID()'
# FIXME restriction need some filter as iuse and keyword have.
for restriction in restrictions:
restriction_id = get_restriction_id(connection, restriction)
if restriction_id is None:
cursor.execute(sqlQ3, (restriction,))
+ cursor.execute(sqlQ8)
restriction_id = cursor.fetchone()[0]
cursor.execute(sqlQ4, (ebuild_id, restriction_id,))
for iuse in iuse_list:
@@ -160,6 +198,7 @@ def add_new_ebuild_metadata_sql(connection, ebuild_id, keywords, restrictions, i
use_id = get_use_id(connection, iuse)
if use_id is None:
cursor.execute(sqlQ5, (iuse,))
+ cursor.execute(sqlQ8)
use_id = cursor.fetchone()[0]
cursor.execute(sqlQ6, (ebuild_id, use_id, set_iuse,))
for keyword in keywords:
@@ -173,22 +212,26 @@ def add_new_ebuild_metadata_sql(connection, ebuild_id, keywords, restrictions, i
keyword_id = get_keyword_id(connection, keyword)
if keyword_id is None:
cursor.execute(sqlQ1, (keyword,))
+ cursor.execute(sqlQ8)
keyword_id = cursor.fetchone()[0]
cursor.execute(sqlQ7, (ebuild_id, keyword_id, set_keyword,))
connection.commit()
+ cursor.close()
def add_new_ebuild_sql(connection, package_id, ebuildDict):
cursor = connection.cursor()
sqlQ1 = 'SELECT repo_id FROM packages WHERE package_id = %s'
- sqlQ2 = "INSERT INTO ebuilds (package_id, version, checksum, active) VALUES (%s, %s, %s, 'True') RETURNING ebuild_id"
- sqlQ4 = "INSERT INTO ebuilds_metadata (ebuild_id, revision) VALUES (%s, %s)"
+ sqlQ2 = "INSERT INTO ebuilds (package_id, version, checksum, active) VALUES (%s, %s, %s, 'True')"
+ sqlQ3 = "INSERT INTO ebuilds_metadata (ebuild_id, revision) VALUES (%s, %s)"
+ sqlQ4 = 'SELECT LAST_INSERT_ID()'
ebuild_id_list = []
cursor.execute(sqlQ1, (package_id,))
repo_id = cursor.fetchone()[0]
for k, v in ebuildDict.iteritems():
cursor.execute(sqlQ2, (package_id, v['ebuild_version_tree'], v['ebuild_version_checksum_tree'],))
+ cursor.execute(sqlQ4)
ebuild_id = cursor.fetchone()[0]
- cursor.execute(sqlQ4, (ebuild_id, v['ebuild_version_revision_tree'],))
+ cursor.execute(sqlQ3, (ebuild_id, v['ebuild_version_revision_tree'],))
ebuild_id_list.append(ebuild_id)
restrictions = []
keywords = []
@@ -201,6 +244,7 @@ def add_new_ebuild_sql(connection, package_id, ebuildDict):
iuse.append(i)
add_new_ebuild_metadata_sql(connection, ebuild_id, keywords, restrictions, iuse)
connection.commit()
+ cursor.close()
return ebuild_id_list
def get_config_id_list(connection):
@@ -208,6 +252,7 @@ def get_config_id_list(connection):
sqlQ = "SELECT configs.config_id FROM configs, configs_metadata WHERE configs.default_config = 'False' AND configs_metadata.active = 'True' AND configs.config_id = configs_metadata.config_id"
cursor.execute(sqlQ)
entries = cursor.fetchall()
+ cursor.close()
if entries == ():
return None
else:
@@ -216,58 +261,72 @@ def get_config_id_list(connection):
config_id_list.append(config_id[0])
return config_id_list
-def get_config_id(connection, config):
- cursor = connection.cursor()
- sqlQ = 'SELECT config_id FROM configs WHERE config = %s'
- cursor.execute(sqlQ,(config,))
- entries = cursor.fetchone()
- if entries is None:
- return None
- return entries[0]
-
-def add_new_package_buildqueue(connection, ebuild_id, config_id, use_flagsDict):
+def add_new_build_job(connection, ebuild_id, config_id, use_flagsDict):
cursor = connection.cursor()
- sqlQ1 = 'INSERT INTO build_jobs (ebuild_id, config_id) VALUES (%s, %s) RETURNING build_job_id'
- sqlQ3 = 'INSERT INTO build_jobs_use (build_job_id, use_id, status) VALUES (%s, (SELECT use_id FROM uses WHERE flag = %s), %s)'
+ sqlQ1 = 'INSERT INTO build_jobs (ebuild_id, config_id) VALUES (%s, %s)'
+ sqlQ2 = 'INSERT INTO build_jobs_use (build_job_id, use_id, status) VALUES (%s, %s, %s)'
+ sqlQ3 = 'SELECT LAST_INSERT_ID()'
+ sqlQ4 = 'INSERT INTO uses (flag) VALUES ( %s )'
cursor.execute(sqlQ1, (ebuild_id, config_id,))
+ cursor.execute(sqlQ3)
build_job_id = cursor.fetchone()[0]
for k, v in use_flagsDict.iteritems():
- cursor.execute(sqlQ3, (build_job_id, k, v,))
+ use_id = get_use_id(connection, k)
+ if use_id is None:
+ cursor.execute(sqlQ4, (k,))
+ cursor.execute(sqlQ3)
+ use_id = cursor.fetchone()[0]
+ cursor.execute(sqlQ2, (build_job_id, use_id, v,))
connection.commit()
+ cursor.close()
def get_manifest_db(connection, package_id):
cursor = connection.cursor()
sqlQ = 'SELECT checksum FROM packages WHERE package_id = %s'
cursor.execute(sqlQ, (package_id,))
entries = cursor.fetchone()
- if entries is None:
- return None
- # If entries is not None we need [0]
- return entries[0]
+ cursor.close()
+ if not entries is None:
+ return entries[0]
+
+def get_category(connection, category_id):
+ cursor = connection.cursor()
+ sqlQ = "SELECT category FROM categories WHERE category_id = %s AND active = 'True'"
+ cursor.execute(sqlQ, (category_id,))
+ entries = cursor.fetchone()
+ cursor.close()
+ if not entries is None:
+ return entries[0]
def get_cp_from_package_id(connection, package_id):
cursor = connection.cursor()
- sqlQ = "SELECT ARRAY_TO_STRING(ARRAY[category, package] , '/') AS cp FROM packages WHERE package_id = %s"
+ sqlQ = "SELECT category_id, package FROM packages WHERE package_id = %s"
cursor.execute(sqlQ, (package_id,))
- return cursor.fetchone()
+ entries = cursor.fetchone()
+ cursor.close()
+ category_id = entries[0]
+ package = entries[1]
+ category = get_category(connection, category_id)
+ cp = category + '/' + package
+ return cp
-def get_cp_repo_from_package_id(connection, package_id):
+def get_cp_repo_from_package_id(connection, package_id):
cursor =connection.cursor()
sqlQ = 'SELECT repos.repo FROM repos, packages WHERE repos.repo_id = packages.repo_id AND packages.package_id = %s'
cp = get_cp_from_package_id(connection, package_id)
cursor.execute(sqlQ, (package_id,))
repo = cursor.fetchone()
- return cp[0], repo[0]
+ cursor.close()
+ return cp, repo[0]
def get_ebuild_checksum(connection, package_id, ebuild_version_tree):
cursor = connection.cursor()
sqlQ = "SELECT checksum FROM ebuilds WHERE package_id = %s AND version = %s AND active = 'True'"
cursor.execute(sqlQ, (package_id, ebuild_version_tree))
entries = cursor.fetchone()
- if entries is None:
- return None
- # If entries is not None we need [0]
- return entries[0]
+ cursor.close()
+ if not entries is None:
+ return entries[0]
def add_old_ebuild(connection, package_id, old_ebuild_list):
cursor = connection.cursor()
@@ -289,24 +348,28 @@ def add_old_ebuild(connection, package_id, old_ebuild_list):
cursor.execute(sqlQ5, (build_job_id))
cursor.execute(sqlQ1, (package_id, old_ebuild[0]))
connection.commit()
+ cursor.close()
def update_active_ebuild_to_fales(connection, package_id, ebuild_version_tree):
cursor = connection.cursor()
sqlQ ="UPDATE ebuilds SET active = 'False' WHERE package_id = %s AND version = %s AND active = 'True'"
cursor.execute(sqlQ, (package_id, ebuild_version_tree))
connection.commit()
+ cursor.close()
def update_manifest_sql(connection, package_id, manifest_checksum_tree):
cursor = connection.cursor()
sqlQ = 'UPDATE packages SET checksum = %s WHERE package_id = %s'
cursor.execute(sqlQ, (manifest_checksum_tree, package_id,))
connection.commit()
+ cursor.close()
def get_build_jobs_id_list_config(connection, config_id):
cursor = connection.cursor()
sqlQ = 'SELECT build_job_id FROM build_jobs WHERE config_id = %s'
cursor.execute(sqlQ, (config_id,))
entries = cursor.fetchall()
+ cursor.close()
build_jobs_id_list = []
if not entries == []:
for build_job_id_id in entries:
@@ -326,32 +389,34 @@ def del_old_build_jobs(connection, build_job_id):
cursor.execute(sqlQ4, (build_job_id,))
cursor.execute(sqlQ3, (build_job_id,))
connection.commit()
+ cursor.close()
-def get_profile_checksum(connection, config_profile):
+def get_profile_checksum(connection, config_id):
cursor = connection.cursor()
- sqlQ = "SELECT checksum FROM configs_metadata WHERE active = 'True' AND config_id = (SELECT config_id FROM configs WHERE config = %s) AND auto = 'True'"
- cursor.execute(sqlQ, (config_profile,))
+ sqlQ = "SELECT checksum FROM configs_metadata WHERE active = 'True' AND config_id = %s AND auto = 'True'"
+ cursor.execute(sqlQ, (config_id,))
entries = cursor.fetchone()
- if entries is None:
- return
- return entries[0]
+ cursor.close()
+ if not entries is None:
+ return entries[0]
-def get_packages_to_build(connection, config):
+def get_packages_to_build(connection, config_id):
cursor =connection.cursor()
- sqlQ1 = "SELECT build_jobs.build_job_id, build_jobs.ebuild_id, ebuilds.package_id FROM build_jobs, ebuilds WHERE build_jobs.config_id = (SELECT config_id FROM configs WHERE config = %s) AND build_jobs.ebuild_id = ebuilds.ebuild_id AND ebuilds.active = 'True' AND extract(epoch from (NOW()) - build_jobs.time_stamp) > 7200 ORDER BY build_jobs.build_job_id LIMIT 1"
+ sqlQ1 = "SELECT build_jobs.build_job_id, build_jobs.ebuild_id, ebuilds.package_id FROM build_jobs, ebuilds WHERE build_jobs.config_id = %s AND build_jobs.ebuild_id = ebuilds.ebuild_id AND ebuilds.active = 'True' AND TIMESTAMPDIFF(HOUR, build_jobs.time_stamp, NOW()) > 1 ORDER BY build_jobs.build_job_id LIMIT 1"
sqlQ2 = 'SELECT version, checksum FROM ebuilds WHERE ebuild_id = %s'
sqlQ3 = 'SELECT uses.flag, build_jobs_use.status FROM build_jobs_use, uses WHERE build_jobs_use.build_job_id = %s AND build_jobs_use.use_id = uses.use_id'
- sqlQ4 = "SELECT build_jobs.build_job_id, build_jobs.ebuild_id, ebuilds.package_id FROM build_jobs, ebuilds WHERE build_jobs.config_id = (SELECT config_id FROM configs WHERE config = %s) AND build_jobs.ebuild_id = ebuilds.ebuild_id AND ebuilds.active = 'True' AND build_jobs.status = 'Now' LIMIT 1"
- sqlQ5 = 'SELECT emerge_options.option FROM configs_emerge_options, emerge_options WHERE configs_emerge_options.config_id = (SELECT config_id FROM configs WHERE config = %s) AND configs_emerge_options.options_id = emerge_options.options_id'
+ sqlQ4 = "SELECT build_jobs.build_job_id, build_jobs.ebuild_id, ebuilds.package_id FROM build_jobs, ebuilds WHERE build_jobs.config_id = %s AND build_jobs.ebuild_id = ebuilds.ebuild_id AND ebuilds.active = 'True' AND build_jobs.status = 'Now' LIMIT 1"
+ sqlQ5 = 'SELECT emerge_options.option FROM configs_emerge_options, emerge_options WHERE configs_emerge_options.config_id = %s AND configs_emerge_options.options_id = emerge_options.options_id'
sqlQ6 = 'SELECT emerge_options.option FROM build_jobs_emerge_options, emerge_options WHERE build_jobs_emerge_options.build_job_id = %s AND build_jobs_emerge_options.options_id = emerge_options.options_id'
- cursor.execute(sqlQ4, (config,))
+ cursor.execute(sqlQ4, (config_id,))
entries = cursor.fetchone()
if entries is None:
- cursor.execute(sqlQ1, (config,))
+ cursor.execute(sqlQ1, (config_id,))
entries = cursor.fetchone()
if entries is None:
return
build_dict={}
+ build_dict['config_id'] = config_id
build_dict['build_job_id'] = entries[0]
build_dict['ebuild_id']= entries[1]
build_dict['package_id'] = entries[2]
@@ -366,12 +431,13 @@ def get_packages_to_build(connection, config):
uses[ row[0] ] = row[1]
build_dict['build_useflags']=uses
emerge_options_list = []
- cursor.execute(sqlQ5, (config,))
+ cursor.execute(sqlQ5, (config_id,))
entries = cursor.fetchall()
for option in entries:
emerge_options_list.append(option[0])
cursor.execute(sqlQ6, (build_dict['build_job_id'],))
entries = cursor.fetchall()
+ cursor.close()
for option in entries:
emerge_options_list.append(option[0])
if emerge_options_list == []:
@@ -386,6 +452,7 @@ def update_fail_times(connection, fail_querue_dict):
cursor.execute(sqlQ1, (fail_querue_dict['fail_times'], fail_querue_dict['build_job_id'], fail_querue_dict['fail_type'],))
cursor.execute(sqlQ2, (fail_querue_dict['build_job_id'],))
connection.commit()
+ cursor.close()
def get_fail_querue_dict(connection, build_dict):
cursor = connection.cursor()
@@ -393,10 +460,10 @@ def get_fail_querue_dict(connection, build_dict):
sqlQ = 'SELECT fail_times FROM build_jobs_retest WHERE build_job_id = %s AND fail_type = %s'
cursor.execute(sqlQ, (build_dict['build_job_id'], build_dict['type_fail'],))
entries = cursor.fetchone()
- if entries is None:
- return None
- fail_querue_dict['fail_times'] = entries[0]
- return fail_querue_dict
+ cursor.close()
+ if not entries is None:
+ fail_querue_dict['fail_times'] = entries[0]
+ return fail_querue_dict
def add_fail_querue_dict(connection, fail_querue_dict):
cursor = connection.cursor()
@@ -405,15 +472,16 @@ def add_fail_querue_dict(connection, fail_querue_dict):
cursor.execute(sqlQ1, (fail_querue_dict['build_job_id'],fail_querue_dict['fail_type'], fail_querue_dict['fail_times']))
cursor.execute(sqlQ2, (fail_querue_dict['build_job_id'],))
connection.commit()
+ cursor.close()
def get_ebuild_id_db_checksum(connection, build_dict):
cursor = connection.cursor()
sqlQ = 'SELECT ebuild_id FROM ebuilds WHERE version = %s AND checksum = %s AND package_id = %s'
cursor.execute(sqlQ, (build_dict['ebuild_version'], build_dict['checksum'], build_dict['package_id']))
ebuild_id = cursor.fetchone()
- if ebuild_id is None:
- return
- return ebuild_id[0]
+ cursor.close()
+ if not ebuild_id is None:
+ return ebuild_id[0]
def get_build_job_id(connection, build_dict):
cursor = connection.cursor()
@@ -433,12 +501,14 @@ def get_build_job_id(connection, build_dict):
for x in entries:
useflagsdict[x[0]] = x[1]
if useflagsdict == build_dict['build_useflags']:
+ cursor.close()
return build_job_id[0]
+ cursor.close()
def add_new_buildlog(connection, build_dict, build_log_dict):
cursor = connection.cursor()
sqlQ1 = 'SELECT build_log_id FROM build_logs WHERE ebuild_id = %s'
- sqlQ2 = 'INSERT INTO build_logs (ebuild_id) VALUES (%s) RETURNING build_log_id'
+ sqlQ2 = 'INSERT INTO build_logs (ebuild_id) VALUES (%s)'
sqlQ3 = "UPDATE build_logs SET fail = 'true', summery = %s, log_hash = %s WHERE build_log_id = %s"
sqlQ4 = 'INSERT INTO build_logs_config (build_log_id, config_id, logname) VALUES (%s, %s, %s)'
sqlQ6 = 'INSERT INTO build_logs_use (build_log_id, use_id, status) VALUES (%s, %s, %s)'
@@ -446,6 +516,7 @@ def add_new_buildlog(connection, build_dict, build_log_dict):
sqlQ8 = 'SELECT use_id, status FROM build_logs_use WHERE build_log_id = %s'
sqlQ9 = 'SELECT config_id FROM build_logs_config WHERE build_log_id = %s'
sqlQ10 = "UPDATE build_logs SET fail = false, log_hash = %s WHERE build_log_id = %s"
+ sqlQ11 = 'SELECT LAST_INSERT_ID()'
build_log_id_list = []
cursor.execute(sqlQ1, (build_dict['ebuild_id'],))
entries = cursor.fetchall()
@@ -480,6 +551,7 @@ def add_new_buildlog(connection, build_dict, build_log_dict):
def build_log_id_no_match(build_dict, build_log_dict):
cursor.execute(sqlQ2, (build_dict['ebuild_id'],))
+ cursor.execute(sqlQ11)
build_log_id = cursor.fetchone()[0]
if 'True' in build_log_dict['summary_error_list']:
cursor.execute(sqlQ3, (build_log_dict['build_error'], build_log_dict['log_hash'], build_log_id,))
@@ -492,19 +564,24 @@ def add_new_buildlog(connection, build_dict, build_log_dict):
return build_log_id
if build_dict['build_job_id'] is None and build_log_id_list is None:
- return build_log_id_no_match(build_dict, build_log_dict)
+ build_log_id = build_log_id_no_match(build_dict, build_log_dict)
+ cursor.close()
+ return build_log_id
elif build_dict['build_job_id'] is None and not build_log_id_list is None:
build_log_id, match = build_log_id_match(build_log_id_list, build_dict, build_log_dict)
if not match:
build_log_id = build_log_id_no_match(build_dict, build_log_dict)
+ cursor.close()
return build_log_id
elif not build_dict['build_job_id'] is None and not build_log_id_list is None:
build_log_id, match = build_log_id_match(build_log_id_list, build_dict, build_log_dict)
if not match:
build_log_id = build_log_id_no_match(build_dict, build_log_dict)
del_old_build_jobs(connection, build_dict['build_job_id'])
+ cursor.close()
return build_log_id
elif not build_dict['build_job_id'] is None and build_log_id_list is None:
build_log_id = build_log_id_no_match(build_dict, build_log_dict)
del_old_build_jobs(connection, build_dict['build_job_id'])
+ cursor.close()
return build_log_id
diff --git a/gobs/pym/package.py b/gobs/pym/package.py
index 246b5f8..5257516 100644
--- a/gobs/pym/package.py
+++ b/gobs/pym/package.py
@@ -4,25 +4,23 @@ from gobs.flags import gobs_use_flags
from gobs.repoman_gobs import gobs_repoman
from gobs.manifest import gobs_manifest
from gobs.text import get_ebuild_cvs_revision
-from gobs.readconf import get_conf_settings
from gobs.flags import gobs_use_flags
+from gobs.mysql_querys import get_config, get_config_id, add_gobs_logs, get_default_config, \
+ add_new_build_job, get_config_id_list, update_manifest_sql, add_new_manifest_sql, \
+ add_new_ebuild_sql, update_active_ebuild_to_fales, add_old_ebuild, \
+ get_ebuild_checksum, get_manifest_db, get_cp_repo_from_package_id
+from gobs.readconf import get_conf_settings
reader=get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
config_profile = gobs_settings_dict['gobs_config']
-# make a CM
-from gobs.ConnectionManager import connectionManager
-CM=connectionManager(gobs_settings_dict)
-#selectively import the pgsql/mysql querys
-if CM.getName()=='pgsql':
- from gobs.pgsql_querys import *
-if CM.getName()=='mysql':
- from gobs.mysql_querys import *
class gobs_package(object):
- def __init__(self, mysettings, myportdb):
+ def __init__(self, conn, mysettings, myportdb):
+ self._conn = conn
self._mysettings = mysettings
self._myportdb = myportdb
+ self._config_id = get_config_id(conn, config_profile)
def change_config(self, config_setup):
# Change config_root config_setup = table config
@@ -34,11 +32,10 @@ class gobs_package(object):
config_cpv_listDict ={}
if config_id_list == []:
return config_cpv_listDict
- conn=CM.getConnection()
for config_id in config_id_list:
# Change config/setup
- config_setup = get_config(conn, config_id)
+ config_setup = get_config(self._conn, config_id)
mysettings_setup = self.change_config(config_setup)
myportdb_setup = portage.portdbapi(mysettings=mysettings_setup)
@@ -65,7 +62,6 @@ class gobs_package(object):
# Clean some cache
myportdb_setup.close_caches()
portage.portdbapi.portdbapi_instances.remove(myportdb_setup)
- CM.putConnection(conn)
return config_cpv_listDict
def get_ebuild_metadata(self, cpv, repo):
@@ -81,7 +77,6 @@ class gobs_package(object):
return ebuild_auxdb_list
def get_packageDict(self, pkgdir, cpv, repo):
- conn=CM.getConnection()
#Get categories, package and version from cpv
ebuild_version_tree = portage.versions.cpv_getversion(cpv)
@@ -95,9 +90,9 @@ class gobs_package(object):
except:
ebuild_version_checksum_tree = "0"
log_msg = "QA: Can't checksum the ebuild file. %s on repo %s" % (cpv, repo,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
log_msg = "C %s:%s ... Fail." % (cpv, repo)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
ebuild_version_cvs_revision_tree = '0'
else:
ebuild_version_cvs_revision_tree = get_ebuild_cvs_revision(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")
@@ -107,7 +102,7 @@ class gobs_package(object):
#repoman_error = init_repoman.check_repoman(pkgdir, cpv, config_id)
#if repoman_error != []:
# log_msg = "Repoman: %s have errors on repo %s" % (cpv, repo,)
- # add_gobs_logs(conn, log_msg, "info", config_profile)
+ # add_gobs_logs(self._conn, log_msg, "info", self._config_id)
repoman_error = []
# Get the ebuild metadata
@@ -117,7 +112,7 @@ class gobs_package(object):
# so it can be updated next time we update the db
if ebuild_version_metadata_tree == []:
log_msg = " QA: %s have broken metadata on repo %s" % (cpv, repo)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
ebuild_version_metadata_tree = ['','','','','','','','','','','','','','','','','','','','','','','','','']
ebuild_version_checksum_tree = '0'
@@ -130,10 +125,9 @@ class gobs_package(object):
#attDict['ebuild_version_text_tree'] = ebuild_version_text_tree[0]
attDict['ebuild_version_revision_tree'] = ebuild_version_cvs_revision_tree
attDict['ebuild_error'] = repoman_error
- CM.putConnection(conn)
return attDict
- def add_new_ebuild_buildquery_db(self, ebuild_id_list, packageDict, config_cpv_listDict):
+ def add_new_build_job_db(self, ebuild_id_list, packageDict, config_cpv_listDict):
conn=CM.getConnection()
# Get the needed info from packageDict and config_cpv_listDict and put that in buildqueue
# Only add it if ebuild_version in packageDict and config_cpv_listDict match
@@ -158,16 +152,15 @@ class gobs_package(object):
# Comper and add the cpv to buildqueue
if build_cpv == k:
- add_new_package_buildqueue(conn, ebuild_id, config_id, use_flagsDict)
+ add_new_build_job(self._conn, ebuild_id, config_id, use_flagsDict)
# B = Build cpv use-flags config
- config_setup = get_config(conn, config_id)
+ config_setup = get_config(self._conn, config_id)
# FIXME log_msg need a fix to log the use flags corect.
log_msg = "B %s:%s USE: %s %s" % (k, v['repo'], use_flagsDict, config_setup,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
i = i +1
- CM.putConnection(conn)
def get_package_metadataDict(self, pkgdir, package):
# Make package_metadataDict
@@ -185,14 +178,13 @@ class gobs_package(object):
return package_metadataDict
def add_new_package_db(self, categories, package, repo):
- conn=CM.getConnection()
# Add new categories package ebuild to tables package and ebuilds
# C = Checking
# N = New Package
log_msg = "C %s/%s:%s" % (categories, package, repo)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
log_msg = "N %s/%s:%s" % (categories, package, repo)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
pkgdir = self._myportdb.getRepositoryPath(repo) + "/" + categories + "/" + package # Get RepoDIR + cp
# Get the cp manifest file checksum.
@@ -201,12 +193,11 @@ class gobs_package(object):
except:
manifest_checksum_tree = "0"
log_msg = "QA: Can't checksum the Manifest file. %s/%s:%s" % (categories, package, repo,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
log_msg = "C %s/%s:%s ... Fail." % (categories, package, repo)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
return
- package_id = add_new_manifest_sql(conn, categories, package, repo, manifest_checksum_tree)
+ package_id = add_new_manifest_sql(self._conn, categories, package, repo, manifest_checksum_tree)
# Get the ebuild list for cp
mytree = []
@@ -214,14 +205,13 @@ class gobs_package(object):
ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=mytree)
if ebuild_list_tree == []:
log_msg = "QA: Can't get the ebuilds list. %s/%s:%s" % (categories, package, repo,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
log_msg = "C %s/%s:%s ... Fail." % (categories, package, repo)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
return
# set config to default config
- default_config = get_default_config(conn)
+ default_config = get_default_config(self._conn)
# Make the needed packageDict with ebuild infos so we can add it later to the db.
packageDict ={}
@@ -230,29 +220,28 @@ class gobs_package(object):
packageDict[cpv] = self.get_packageDict(pkgdir, cpv, repo)
# Add new ebuilds to the db
- ebuild_id_list = add_new_ebuild_sql(conn, package_id, packageDict)
+ ebuild_id_list = add_new_ebuild_sql(self._conn, package_id, packageDict)
# Get the best cpv for the configs and add it to config_cpv_listDict
- configs_id_list = get_config_id_list(conn)
+ configs_id_list = get_config_id_list(self._conn)
config_cpv_listDict = self.config_match_ebuild(categories + "/" + package, configs_id_list)
# Add the ebuild to the buildquery table if needed
- self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
+ self.add_new_build_job_db(ebuild_id_list, packageDict, config_cpv_listDict)
log_msg = "C %s/%s:%s ... Done." % (categories, package, repo)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
print(categories, package, repo)
CM.putConnection(conn)
def update_package_db(self, package_id):
- conn=CM.getConnection()
# Update the categories and package with new info
# C = Checking
- cp, repo = get_cp_repo_from_package_id(conn, package_id)
+ cp, repo = get_cp_repo_from_package_id(self._conn, package_id)
element = cp.split('/')
package = element[1]
log_msg = "C %s:%s" % (cp, repo)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
pkgdir = self._myportdb.getRepositoryPath(repo) + "/" + cp # Get RepoDIR + cp
# Get the cp mainfest file checksum
@@ -261,18 +250,17 @@ class gobs_package(object):
except:
manifest_checksum_tree = "0"
log_msg = "QA: Can't checksum the Manifest file. %s:%s" % (cp, repo,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
log_msg = "C %s:%s ... Fail." % (cp, repo)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
return
# if we NOT have the same checksum in the db update the package
- if manifest_checksum_tree != get_manifest_db(conn, package_id):
+ if manifest_checksum_tree != get_manifest_db(self._conn, package_id):
# U = Update
log_msg = "U %s:%s" % (cp, repo)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
# Get the ebuild list for cp
mytree = []
@@ -280,10 +268,9 @@ class gobs_package(object):
ebuild_list_tree = self._myportdb.cp_list(cp, use_cache=1, mytree=mytree)
if ebuild_list_tree == []:
log_msg = "QA: Can't get the ebuilds list. %s:%s" % (cp, repo,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
log_msg = "C %s:%s ... Fail." % (cp, repo)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
return
packageDict ={}
for cpv in sorted(ebuild_list_tree):
@@ -297,38 +284,36 @@ class gobs_package(object):
# Get the checksum of the ebuild in tree and db
ebuild_version_checksum_tree = packageDict[cpv]['ebuild_version_checksum_tree']
- ebuild_version_manifest_checksum_db = get_ebuild_checksum(conn, package_id, ebuild_version_tree)
+ ebuild_version_manifest_checksum_db = get_ebuild_checksum(self._conn, package_id, ebuild_version_tree)
# Check if the checksum have change
- if ebuild_version_manifest_checksum_db is None or ebuild_version_checksum_tree != ebuild_version_manifest_checksum_db:
-
- if ebuild_version_manifest_checksum_db is None:
- # N = New ebuild
- log_msg = "N %s:%s" % (cpv, repo,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- else:
- # U = Updated ebuild
- log_msg = "U %s:%s" % (cpv, repo,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
-
- # Fix so we can use add_new_ebuild_sql() to update the ebuilds
- old_ebuild_list.append(ebuild_version_tree)
- add_old_ebuild(conn, package_id, old_ebuild_list)
- update_active_ebuild_to_fales(conn, package_id, ebuild_version_tree)
+ if ebuild_version_manifest_checksum_db is None:
+ # N = New ebuild
+ log_msg = "N %s:%s" % (cpv, repo,)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
+ elif ebuild_version_checksum_tree != ebuild_version_manifest_checksum_db:
+ # U = Updated ebuild
+ log_msg = "U %s:%s" % (cpv, repo,)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
+
+ # Fix so we can use add_new_ebuild_sql() to update the ebuilds
+ old_ebuild_list.append(ebuild_version_tree)
+ add_old_ebuild(self._conn, package_id, old_ebuild_list)
+ update_active_ebuild_to_fales(self._conn, package_id, ebuild_version_tree)
+
# Use packageDict and to update the db
# Add new ebuilds to the db
- ebuild_id_list = add_new_ebuild_sql(conn, package_id, packageDict)
+ ebuild_id_list = add_new_ebuild_sql(self._conn, package_id, packageDict)
# update the cp manifest checksum
- update_manifest_sql(conn, package_id, manifest_checksum_tree)
+ update_manifest_sql(self._conn, package_id, manifest_checksum_tree)
# Get the best cpv for the configs and add it to config_cpv_listDict
- configs_id_list = get_config_id_list(conn)
+ configs_id_list = get_config_id_list(self._conn)
config_cpv_listDict = self.config_match_ebuild(cp, configs_id_list)
# Add the ebuild to the buildqueru table if needed
- self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
+ self.add_new_build_job_db(ebuild_id_list, packageDict, config_cpv_listDict)
log_msg = "C %s:%s ... Done." % (cp, repo)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
+ add_gobs_logs(self._conn, log_msg, "info", self._config_id)
diff --git a/gobs/pym/pgsql_querys.py b/gobs/pym/pgsql_querys.py
deleted file mode 100644
index b7b5b8a..0000000
--- a/gobs/pym/pgsql_querys.py
+++ /dev/null
@@ -1,508 +0,0 @@
-#every function takes a connection as a parameter that is provided by the CM
-from __future__ import print_function
-
-# Queryes to add the logs
-def add_gobs_logs(connection, log_msg, log_type, config):
- cursor = connection.cursor()
- sqlQ = 'INSERT INTO logs (config_id, type, msg) VALUES ( (SELECT config_id FROM configs WHERE config = %s), %s, %s )'
- cursor.execute(sqlQ, (config, log_type, log_msg))
- connection.commit()
-
-# Queryes to handel the jobs table
-def get_jobs_id(connection, config):
- cursor = connection.cursor()
- sqlQ = "SELECT job_id FROM jobs WHERE status = 'Waiting' AND config_id = (SELECT config_id FROM configs WHERE config = %s)"
- cursor.execute(sqlQ, (config,))
- entries = cursor.fetchall()
- if entries is None:
- return None
- jobs_id = []
- for job_id in entries:
- jobs_id.append(job_id[0])
- return sorted(jobs_id)
-
-def get_job(connection, job_id):
- cursor = connection.cursor()
- sqlQ ='SELECT job, config_id2 FROM jobs WHERE job_id = %s'
- cursor.execute(sqlQ, (job_id,))
- entries = cursor.fetchone()
- job = entries[0]
- config_id = entries[1]
- return job, config_id
-
-def update_job_list(connection, status, job_id):
- cursor = connection.cursor()
- sqlQ = 'UPDATE jobs SET status = %s WHERE job_id = %s'
- cursor.execute(sqlQ, (status, job_id,))
- connection.commit()
-
-# Queryes to handel the configs* tables
-def get_config_list_all(connection):
- cursor = connection.cursor()
- sqlQ = 'SELECT config_id FROM configs'
- cursor.execute(sqlQ)
- entries = cursor.fetchall()
- return entries
-
-def get_config(connection, config_id):
- cursor = connection.cursor()
- sqlQ ='SELECT config FROM configs WHERE config_id = %s'
- cursor.execute(sqlQ, (config_id,))
- config = cursor.fetchone()
- return config[0]
-
-def update_make_conf(connection, configsDict):
- cursor = connection.cursor()
- sqlQ = 'UPDATE configs_metadata SET checksum = %s, make_conf_text = %s, active = %s, config_error = %s WHERE config_id = %s'
- for k, v in configsDict.iteritems():
- cursor.execute(sqlQ, (v['make_conf_checksum_tree'], v['make_conf_text'], v['active'], v['config_error'], k,))
- connection.commit()
-
-def get_default_config(connection):
- cursor = connection.cursor()
- sqlQ = "SELECT config FROM configs WHERE default_config = 'True'"
- cursor.execute(sqlQ)
- entries = cursor.fetchone()
- return entries
-
-def update_repo_db(connection, repo_list):
- cursor = connection.cursor()
- sqlQ1 = 'SELECT repo_id FROM repos WHERE repo = %s'
- sqlQ2 = 'INSERT INTO repos (repo) VALUES ( %s )'
- for repo in repo_list:
- cursor.execute(sqlQ1, (repo,))
- entries = cursor.fetchone()
- if entries is None:
- cursor.execute(sqlQ2, (repo,))
- connection.commit()
- return
-
-def get_package_id(connection, categories, package, repo):
- cursor = connection.cursor()
- sqlQ ='SELECT package_id FROM packages WHERE category = %s AND package = %s AND repo_id = (SELECT repo_id FROM repos WHERE repo = %s)'
- params = categories, package, repo
- cursor.execute(sqlQ, params)
- entries = cursor.fetchone()
- if entries is None:
- return None
- return entries[0]
-
-# Add new info to the packages table
-def get_repo_id(connection, repo):
- cursor = connection.cursor()
- sqlQ ='SELECT repo_id FROM repos WHERE repo = %s'
- cursor.execute(sqlQ, (repo,))
- entries = cursor.fetchone()
- if entries is None:
- return None
- return entries[0]
-
-def add_new_manifest_sql(connection, categories, package, repo, manifest_checksum_tree):
- cursor = connection.cursor()
- sqlQ = "INSERT INTO packages (category, package, repo_id, checksum, active) VALUES (%s, %s, %s, %s, 'True') RETURNING package_id"
- repo_id = get_repo_id(connection, repo)
- cursor.execute(sqlQ, (categories, package, repo_id, manifest_checksum_tree,))
- package_id = cursor.fetchone()[0]
- connection.commit()
- return package_id
-
-def get_restriction_id(connection, restriction):
- cursor = connection.cursor()
- sqlQ ='SELECT restriction_id FROM restrictions WHERE restriction = %s'
- cursor.execute(sqlQ, (restriction,))
- entries = cursor.fetchone()
- if entries is None:
- return None
- return entries[0]
-
-def get_use_id(connection, use_flag):
- cursor = connection.cursor()
- sqlQ ='SELECT use_id FROM uses WHERE flag = %s'
- cursor.execute(sqlQ, (use_flag,))
- entries = cursor.fetchone()
- if entries is None:
- return None
- return entries[0]
-
-def get_keyword_id(connection, keyword):
- cursor = connection.cursor()
- sqlQ ='SELECT keyword_id FROM keywords WHERE keyword = %s'
- cursor.execute(sqlQ, (keyword,))
- entries = cursor.fetchone()
- if entries is None:
- return None
- return entries[0]
-
-def add_new_ebuild_metadata_sql(connection, ebuild_id, keywords, restrictions, iuse_list):
- cursor = connection.cursor()
- sqlQ1 = 'INSERT INTO keywords (keyword) VALUES ( %s ) RETURNING keyword_id'
- sqlQ3 = 'INSERT INTO restrictions (restriction) VALUES ( %s ) RETURNING restriction_id'
- sqlQ4 = 'INSERT INTO ebuilds_restrictions (ebuild_id, restriction_id) VALUES ( %s, %s )'
- sqlQ5 = 'INSERT INTO uses (flag) VALUES ( %s ) RETURNING use_id'
- sqlQ6 = 'INSERT INTO ebuilds_iuse (ebuild_id, use_id, status) VALUES ( %s, %s, %s)'
- sqlQ7 = 'INSERT INTO ebuilds_keywords (ebuild_id, keyword_id, status) VALUES ( %s, %s, %s)'
- # FIXME restriction need some filter as iuse and keyword have.
- for restriction in restrictions:
- restriction_id = get_restriction_id(connection, restriction)
- if restriction_id is None:
- cursor.execute(sqlQ3, (restriction,))
- restriction_id = cursor.fetchone()[0]
- cursor.execute(sqlQ4, (ebuild_id, restriction_id,))
- for iuse in iuse_list:
- set_iuse = 'disable'
- if iuse[0] in ["+"]:
- iuse = iuse[1:]
- set_iuse = 'enable'
- elif iuse[0] in ["-"]:
- iuse = iuse[1:]
- use_id = get_use_id(connection, iuse)
- if use_id is None:
- cursor.execute(sqlQ5, (iuse,))
- use_id = cursor.fetchone()[0]
- cursor.execute(sqlQ6, (ebuild_id, use_id, set_iuse,))
- for keyword in keywords:
- set_keyword = 'stable'
- if keyword[0] in ["~"]:
- keyword = keyword[1:]
- set_keyword = 'unstable'
- elif keyword[0] in ["-"]:
- keyword = keyword[1:]
- set_keyword = 'testing'
- keyword_id = get_keyword_id(connection, keyword)
- if keyword_id is None:
- cursor.execute(sqlQ1, (keyword,))
- keyword_id = cursor.fetchone()[0]
- cursor.execute(sqlQ7, (ebuild_id, keyword_id, set_keyword,))
- connection.commit()
-
-def add_new_ebuild_sql(connection, package_id, ebuildDict):
- cursor = connection.cursor()
- sqlQ1 = 'SELECT repo_id FROM packages WHERE package_id = %s'
- sqlQ2 = "INSERT INTO ebuilds (package_id, version, checksum, active) VALUES (%s, %s, %s, 'True') RETURNING ebuild_id"
- sqlQ4 = "INSERT INTO ebuilds_metadata (ebuild_id, revision) VALUES (%s, %s)"
- ebuild_id_list = []
- cursor.execute(sqlQ1, (package_id,))
- repo_id = cursor.fetchone()[0]
- for k, v in ebuildDict.iteritems():
- cursor.execute(sqlQ2, (package_id, v['ebuild_version_tree'], v['ebuild_version_checksum_tree'],))
- ebuild_id = cursor.fetchone()[0]
- cursor.execute(sqlQ4, (ebuild_id, v['ebuild_version_revision_tree'],))
- ebuild_id_list.append(ebuild_id)
- restrictions = []
- keywords = []
- iuse = []
- for i in v['ebuild_version_metadata_tree'][4].split():
- restrictions.append(i)
- for i in v['ebuild_version_metadata_tree'][8].split():
- keywords.append(i)
- for i in v['ebuild_version_metadata_tree'][10].split():
- iuse.append(i)
- add_new_ebuild_metadata_sql(connection, ebuild_id, keywords, restrictions, iuse)
- connection.commit()
- return ebuild_id_list
-
-def get_config_id_list(connection):
- cursor = connection.cursor()
- sqlQ = "SELECT configs.config_id FROM configs, configs_metadata WHERE configs.default_config = 'False' AND configs_metadata.active = 'True' AND configs.config_id = configs_metadata.config_id"
- cursor.execute(sqlQ)
- entries = cursor.fetchall()
- if entries == ():
- return None
- else:
- config_id_list = []
- for config_id in entries:
- config_id_list.append(config_id[0])
- return config_id_list
-
-def get_config_id(connection, config):
- cursor = connection.cursor()
- sqlQ = 'SELECT config_id FROM configs WHERE config = %s'
- cursor.execute(sqlQ,(config,))
- entries = cursor.fetchone()
- if entries is None:
- return None
- return entries[0]
-
-def add_new_package_buildqueue(connection, ebuild_id, config_id, use_flagsDict):
- cursor = connection.cursor()
- sqlQ1 = 'INSERT INTO build_jobs (ebuild_id, config_id) VALUES (%s, %s) RETURNING build_job_id'
- sqlQ3 = 'INSERT INTO build_jobs_use (build_job_id, use_id, status) VALUES (%s, (SELECT use_id FROM uses WHERE flag = %s), %s)'
- cursor.execute(sqlQ1, (ebuild_id, config_id,))
- build_job_id = cursor.fetchone()[0]
- for k, v in use_flagsDict.iteritems():
- cursor.execute(sqlQ3, (build_job_id, k, v,))
- connection.commit()
-
-def get_manifest_db(connection, package_id):
- cursor = connection.cursor()
- sqlQ = 'SELECT checksum FROM packages WHERE package_id = %s'
- cursor.execute(sqlQ, (package_id,))
- entries = cursor.fetchone()
- if entries is None:
- return None
- # If entries is not None we need [0]
- return entries[0]
-
-def get_cp_from_package_id(connection, package_id):
- cursor = connection.cursor()
- sqlQ = "SELECT ARRAY_TO_STRING(ARRAY[category, package] , '/') AS cp FROM packages WHERE package_id = %s"
- cursor.execute(sqlQ, (package_id,))
- return cursor.fetchone()
-
-def get_cp_repo_from_package_id(connection, package_id):
- cursor =connection.cursor()
- sqlQ = 'SELECT repos.repo FROM repos, packages WHERE repos.repo_id = packages.repo_id AND packages.package_id = %s'
- cp = get_cp_from_package_id(connection, package_id)
- cursor.execute(sqlQ, (package_id,))
- repo = cursor.fetchone()
- return cp[0], repo[0]
-
-def get_ebuild_checksum(connection, package_id, ebuild_version_tree):
- cursor = connection.cursor()
- sqlQ = "SELECT checksum FROM ebuilds WHERE package_id = %s AND version = %s AND active = 'True'"
- cursor.execute(sqlQ, (package_id, ebuild_version_tree))
- entries = cursor.fetchone()
- if entries is None:
- return None
- # If entries is not None we need [0]
- return entries[0]
-
-def add_old_ebuild(connection, package_id, old_ebuild_list):
- cursor = connection.cursor()
- sqlQ1 = "UPDATE ebuilds SET active = 'False' WHERE package_id = %s AND version = %s"
- sqlQ2 = "SELECT ebuild_id FROM ebuilds WHERE package_id = %s AND version = %s AND active = 'True'"
- sqlQ3 = "SELECT build_job_id FROM build_jobs WHERE ebuild_id = %s"
- sqlQ4 = 'DELETE FROM build_jobs_use WHERE build_job_id = %s'
- sqlQ5 = 'DELETE FROM build_jobs WHERE build_job_id = %s'
- for old_ebuild in old_ebuild_list:
- cursor.execute(sqlQ2, (package_id, old_ebuild[0]))
- ebuild_id_list = cursor.fetchall()
- if ebuild_id_list is not None:
- for ebuild_id in ebuild_id_list:
- cursor.execute(sqlQ3, (ebuild_id))
- build_job_id_list = cursor.fetchall()
- if build_job_id_list is not None:
- for build_job_id in build_job_id_list:
- cursor.execute(sqlQ4, (build_job_id))
- cursor.execute(sqlQ5, (build_job_id))
- cursor.execute(sqlQ1, (package_id, old_ebuild[0]))
- connection.commit()
-
-def update_active_ebuild_to_fales(connection, package_id, ebuild_version_tree):
- cursor = connection.cursor()
- sqlQ ="UPDATE ebuilds SET active = 'False' WHERE package_id = %s AND version = %s AND active = 'True'"
- cursor.execute(sqlQ, (package_id, ebuild_version_tree))
- connection.commit()
-
-def update_manifest_sql(connection, package_id, manifest_checksum_tree):
- cursor = connection.cursor()
- sqlQ = 'UPDATE packages SET checksum = %s WHERE package_id = %s'
- cursor.execute(sqlQ, (manifest_checksum_tree, package_id,))
- connection.commit()
-
-def get_build_jobs_id_list_config(connection, config_id):
- cursor = connection.cursor()
- sqlQ = 'SELECT build_job_id FROM build_jobs WHERE config_id = %s'
- cursor.execute(sqlQ, (config_id,))
- entries = cursor.fetchall()
- build_jobs_id_list = []
- if not entries == []:
- for build_job_id_id in entries:
- build_jobs_id_list.append(build_job_id[0])
- else:
- build_log_id_list = None
- return build_jobs_id_list
-
-def del_old_build_jobs(connection, build_job_id):
- cursor = connection.cursor()
- sqlQ1 = 'DELETE FROM build_jobs_use WHERE build_job_id = %s'
- sqlQ2 = 'DELETE FROM build_jobs_retest WHERE build_job_id = %s'
- sqlQ3 = 'DELETE FROM build_jobs WHERE build_job_id = %s'
- sqlQ4 = 'DELETE FROM build_jobs_emerge_options WHERE build_job_id = %s'
- cursor.execute(sqlQ1, (build_job_id,))
- cursor.execute(sqlQ2, (build_job_id,))
- cursor.execute(sqlQ4, (build_job_id,))
- cursor.execute(sqlQ3, (build_job_id,))
- connection.commit()
-
-def get_profile_checksum(connection, config_profile):
- cursor = connection.cursor()
- sqlQ = "SELECT checksum FROM configs_metadata WHERE active = 'True' AND config_id = (SELECT config_id FROM configs WHERE config = %s) AND auto = 'True'"
- cursor.execute(sqlQ, (config_profile,))
- entries = cursor.fetchone()
- if entries is None:
- return
- return entries[0]
-
-def get_packages_to_build(connection, config):
- cursor =connection.cursor()
- sqlQ1 = "SELECT build_jobs.build_job_id, build_jobs.ebuild_id, ebuilds.package_id FROM build_jobs, ebuilds WHERE build_jobs.config_id = (SELECT config_id FROM configs WHERE config = %s) AND build_jobs.ebuild_id = ebuilds.ebuild_id AND ebuilds.active = 'True' AND extract(epoch from (NOW()) - build_jobs.time_stamp) > 7200 ORDER BY build_jobs.build_job_id LIMIT 1"
- sqlQ2 = 'SELECT version, checksum FROM ebuilds WHERE ebuild_id = %s'
- sqlQ3 = 'SELECT uses.flag, build_jobs_use.status FROM build_jobs_use, uses WHERE build_jobs_use.build_job_id = %s AND build_jobs_use.use_id = uses.use_id'
- sqlQ4 = "SELECT build_jobs.build_job_id, build_jobs.ebuild_id, ebuilds.package_id FROM build_jobs, ebuilds WHERE build_jobs.config_id = (SELECT config_id FROM configs WHERE config = %s) AND build_jobs.ebuild_id = ebuilds.ebuild_id AND ebuilds.active = 'True' AND build_jobs.status = 'Now' LIMIT 1"
- sqlQ5 = 'SELECT emerge_options.option FROM configs_emerge_options, emerge_options WHERE configs_emerge_options.config_id = (SELECT config_id FROM configs WHERE config = %s) AND configs_emerge_options.options_id = emerge_options.options_id'
- sqlQ6 = 'SELECT emerge_options.option FROM build_jobs_emerge_options, emerge_options WHERE build_jobs_emerge_options.build_job_id = %s AND build_jobs_emerge_options.options_id = emerge_options.options_id'
- cursor.execute(sqlQ4, (config,))
- entries = cursor.fetchone()
- if entries is None:
- cursor.execute(sqlQ1, (config,))
- entries = cursor.fetchone()
- if entries is None:
- return
- build_dict={}
- build_dict['build_job_id'] = entries[0]
- build_dict['ebuild_id']= entries[1]
- build_dict['package_id'] = entries[2]
- cursor.execute(sqlQ2, (entries[1],))
- entries = cursor.fetchone()
- build_dict['ebuild_version'] = entries[0]
- build_dict['checksum']=entries[1]
- #add a enabled and disabled list to the objects in the item list
- cursor.execute(sqlQ3, (build_dict['build_job_id'],))
- uses={}
- for row in cursor.fetchall():
- uses[ row[0] ] = row[1]
- build_dict['build_useflags']=uses
- emerge_options_list = []
- cursor.execute(sqlQ5, (config,))
- entries = cursor.fetchall()
- for option in entries:
- emerge_options_list.append(option[0])
- cursor.execute(sqlQ6, (build_dict['build_job_id'],))
- entries = cursor.fetchall()
- for option in entries:
- emerge_options_list.append(option[0])
- if emerge_options_list == []:
- emerge_options_list = None
- build_dict['emerge_options'] = emerge_options_list
- return build_dict
-
-def update_fail_times(connection, fail_querue_dict):
- cursor = connection.cursor()
- sqlQ1 = 'UPDATE build_jobs_retest SET fail_times = %s WHERE build_job_id = %s AND fail_type = %s'
- sqlQ2 = 'UPDATE build_jobs SET time_stamp = NOW() WHERE build_job_id = %s'
- cursor.execute(sqlQ1, (fail_querue_dict['fail_times'], fail_querue_dict['build_job_id'], fail_querue_dict['fail_type'],))
- cursor.execute(sqlQ2, (fail_querue_dict['build_job_id'],))
- connection.commit()
-
-def get_fail_querue_dict(connection, build_dict):
- cursor = connection.cursor()
- fail_querue_dict = {}
- sqlQ = 'SELECT fail_times FROM build_jobs_retest WHERE build_job_id = %s AND fail_type = %s'
- cursor.execute(sqlQ, (build_dict['build_job_id'], build_dict['type_fail'],))
- entries = cursor.fetchone()
- if entries is None:
- return None
- fail_querue_dict['fail_times'] = entries[0]
- return fail_querue_dict
-
-def add_fail_querue_dict(connection, fail_querue_dict):
- cursor = connection.cursor()
- sqlQ1 = 'INSERT INTO build_jobs_retest (build_job_id, fail_type, fail_times) VALUES ( %s, %s, %s)'
- sqlQ2 = 'UPDATE build_jobs SET time_stamp = NOW() WHERE build_job_id = %s'
- cursor.execute(sqlQ1, (fail_querue_dict['build_job_id'],fail_querue_dict['fail_type'], fail_querue_dict['fail_times']))
- cursor.execute(sqlQ2, (fail_querue_dict['build_job_id'],))
- connection.commit()
-
-def get_ebuild_id_db_checksum(connection, build_dict):
- cursor = connection.cursor()
- sqlQ = 'SELECT ebuild_id FROM ebuilds WHERE version = %s AND checksum = %s AND package_id = %s'
- cursor.execute(sqlQ, (build_dict['ebuild_version'], build_dict['checksum'], build_dict['package_id']))
- ebuild_id = cursor.fetchone()
- if ebuild_id is None:
- return
- return ebuild_id[0]
-
-def get_build_job_id(connection, build_dict):
- cursor = connection.cursor()
- sqlQ1 = 'SELECT build_job_id FROM build_jobs WHERE ebuild_id = %s AND config_id = %s'
- sqlQ2 = 'SELECT use_id, status FROM build_jobs_use WHERE build_job_id = %s'
- cursor.execute(sqlQ1, (build_dict['ebuild_id'], build_dict['config_id']))
- build_job_id_list = cursor.fetchall()
- if build_job_id_list == []:
- return
- for build_job_id in build_job_id_list:
- cursor.execute(sqlQ2, (build_job_id[0],))
- entries = cursor.fetchall()
- useflagsdict = {}
- if entries == []:
- useflagsdict = None
- else:
- for x in entries:
- useflagsdict[x[0]] = x[1]
- if useflagsdict == build_dict['build_useflags']:
- return build_job_id[0]
-
-def add_new_buildlog(connection, build_dict, build_log_dict):
- cursor = connection.cursor()
- sqlQ1 = 'SELECT build_log_id FROM build_logs WHERE ebuild_id = %s'
- sqlQ2 = 'INSERT INTO build_logs (ebuild_id) VALUES (%s) RETURNING build_log_id'
- sqlQ3 = "UPDATE build_logs SET fail = 'true', summery = %s, log_hash = %s WHERE build_log_id = %s"
- sqlQ4 = 'INSERT INTO build_logs_config (build_log_id, config_id, logname) VALUES (%s, %s, %s)'
- sqlQ6 = 'INSERT INTO build_logs_use (build_log_id, use_id, status) VALUES (%s, %s, %s)'
- sqlQ7 = 'SELECT log_hash FROM build_logs WHERE build_log_id = %s'
- sqlQ8 = 'SELECT use_id, status FROM build_logs_use WHERE build_log_id = %s'
- sqlQ9 = 'SELECT config_id FROM build_logs_config WHERE build_log_id = %s'
- sqlQ10 = "UPDATE build_logs SET fail = false, log_hash = %s WHERE build_log_id = %s"
- build_log_id_list = []
- cursor.execute(sqlQ1, (build_dict['ebuild_id'],))
- entries = cursor.fetchall()
- if not entries == []:
- for build_log_id in entries:
- build_log_id_list.append(build_log_id[0])
- else:
- build_log_id_list = None
-
- def build_log_id_match(build_log_id_list, build_dict, build_log_dict):
- for build_log_id in build_log_id_list:
- cursor.execute(sqlQ7, (build_log_id,))
- log_hash = cursor.fetchone()
- cursor.execute(sqlQ8, (build_log_id,))
- entries = cursor.fetchall()
- useflagsdict = {}
- if entries == []:
- useflagsdict = None
- else:
- for x in entries:
- useflagsdict[x[0]] = x[1]
- if log_hash[0] == build_log_dict['log_hash'] and build_dict['build_useflags'] == useflagsdict:
- cursor.execute(sqlQ9, (build_log_id,))
- config_id_list = []
- for config_id in cursor.fetchall():
- config_id_list.append(config_id[0])
- if build_dict['config_id'] in config_id_list:
- return None, True
- cursor.execute(sqlQ4, (build_log_id, build_dict['config_id'], build_log_dict['logfilename'],))
- return build_log_id, True
- return None, False
-
- def build_log_id_no_match(build_dict, build_log_dict):
- cursor.execute(sqlQ2, (build_dict['ebuild_id'],))
- build_log_id = cursor.fetchone()[0]
- if 'True' in build_log_dict['summary_error_list']:
- cursor.execute(sqlQ3, (build_log_dict['build_error'], build_log_dict['log_hash'], build_log_id,))
- else:
- cursor.execute(sqlQ10, (build_log_dict['log_hash'], build_log_id,))
- cursor.execute(sqlQ4, (build_log_id, build_dict['config_id'], build_log_dict['logfilename'],))
- if not build_dict['build_useflags'] is None:
- for use_id, status in build_dict['build_useflags'].iteritems():
- cursor.execute(sqlQ6, (build_log_id, use_id, status))
- return build_log_id
-
- if build_dict['build_job_id'] is None and build_log_id_list is None:
- return build_log_id_no_match(build_dict, build_log_dict)
- elif build_dict['build_job_id'] is None and not build_log_id_list is None:
- build_log_id, match = build_log_id_match(build_log_id_list, build_dict, build_log_dict)
- if not match:
- build_log_id = build_log_id_no_match(build_dict, build_log_dict)
- return build_log_id
- elif not build_dict['build_job_id'] is None and not build_log_id_list is None:
- build_log_id, match = build_log_id_match(build_log_id_list, build_dict, build_log_dict)
- if not match:
- build_log_id = build_log_id_no_match(build_dict, build_log_dict)
- del_old_build_jobs(connection, build_dict['build_job_id'])
- return build_log_id
- elif not build_dict['build_job_id'] is None and build_log_id_list is None:
- build_log_id = build_log_id_no_match(build_dict, build_log_dict)
- del_old_build_jobs(connection, build_dict['build_job_id'])
- return build_log_id
diff --git a/gobs/pym/sync.py b/gobs/pym/sync.py
index 4ee1a4d..085973a 100644
--- a/gobs/pym/sync.py
+++ b/gobs/pym/sync.py
@@ -5,38 +5,44 @@ import errno
import sys
from git import *
from _emerge.main import emerge_main
+from gobs.readconf import get_conf_settings
+from gobs.mysql_querys import get_config_id, add_gobs_logs, get_default_config
reader=get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
config_profile = gobs_settings_dict['gobs_config']
def git_pull(conn):
+ config_id = get_config_id(conn, config_profile)
log_msg = "Git pull"
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
repo = Repo("/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/")
repo_remote = repo.remotes.origin
repo_remote.pull()
master = repo.head.reference
log_msg = "Git log: %s" % (master.log(),)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
log_msg = "Git pull ... Done"
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
return True
def sync_tree(conn):
- config_id = get_default_config(conn) # HostConfigDir = table configs id
- default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id[0] + "/"
+ config_id = get_config_id(conn, config_profile)
+ config_setup = get_default_config(conn) # HostConfigDir = table configs id
+ default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_setup + "/"
mysettings = portage.config(config_root = default_config_root)
tmpcmdline = []
tmpcmdline.append("--sync")
tmpcmdline.append("--quiet")
tmpcmdline.append("--config-root=" + default_config_root)
log_msg = "Emerge --sync"
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
fail_sync = emerge_main(args=tmpcmdline)
+ if not conn.is_connected() is True:
+ conn.reconnect(attempts=2, delay=1)
if fail_sync is True:
log_msg = "Emerge --sync fail!"
- add_gobs_logs(conn, log_msg, "error", config_profile)
+ add_gobs_logs(conn, log_msg, "error", config_id)
return False
else:
# Need to add a config dir so we can use profiles/base for reading the tree.
@@ -49,5 +55,5 @@ def sync_tree(conn):
except:
pass
log_msg = "Emerge --sync ... Done."
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
return True
diff --git a/gobs/pym/updatedb.py b/gobs/pym/updatedb.py
index ba76d53..b33eb03 100755
--- a/gobs/pym/updatedb.py
+++ b/gobs/pym/updatedb.py
@@ -6,55 +6,51 @@ from __future__ import print_function
import sys
import os
import multiprocessing
+import portage
+from gobs.ConnectionManager import connectionManager
+from gobs.mysql_querys import get_config_id, add_gobs_logs, get_default_config, get_package_id, update_repo_db, \
+ update_categories_db
+from gobs.check_setup import check_make_conf
+from gobs.package import gobs_package
# Get the options from the config file set in gobs.readconf
from gobs.readconf import get_conf_settings
reader = get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
config_profile = gobs_settings_dict['gobs_config']
-# make a CM
-from gobs.ConnectionManager import connectionManager
-CM=connectionManager(gobs_settings_dict)
-#selectively import the pgsql/mysql querys
-if CM.getName()=='pgsql':
- from gobs.pgsql_querys import *
-if CM.getName()=='mysql':
- from gobs.mysql_querys import *
-
-from gobs.check_setup import check_make_conf
-from gobs.package import gobs_package
-import portage
-
-def init_portage_settings():
+def init_portage_settings(conn, config_id):
# check config setup
- conn=CM.getConnection()
- check_make_conf()
+ check_make_conf(conn)
log_msg = "Check configs done"
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
# Get default config from the configs table and default_config=1
- config = get_default_config(conn)[0] # HostConfigDir = table configs id
- default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config + "/"
+ config_setup = get_default_config(conn) # HostConfigDir = table configs id
+ default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_setup + "/"
# Set config_root (PORTAGE_CONFIGROOT) to default_config_root
mysettings = portage.config(config_root = default_config_root)
- log_msg = "Setting default config to: %s" % (config,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
+ log_msg = "Setting default config to: %s" % (config_setup,)
+ add_gobs_logs(conn, log_msg, "info", config_id)
return mysettings
def update_cpv_db_pool(mysettings, myportdb, cp, repo):
- conn=CM.getConnection()
- init_package = gobs_package(mysettings, myportdb)
+ CM2=connectionManager()
+ conn2 = CM2.newConnection()
+ if not conn2.is_connected() is True:
+ conn2.reconnect(attempts=2, delay=1)
+ init_package = gobs_package(conn2, mysettings, myportdb)
# split the cp to categories and package
element = cp.split('/')
categories = element[0]
package = element[1]
+ # update the categories table
+ update_categories_db(conn2, categories)
+
# Check if we don't have the cp in the package table
- package_id = get_package_id(conn, categories, package, repo)
- CM.putConnection(conn)
+ package_id = get_package_id(conn2, categories, package, repo)
if package_id is None:
# Add new package with ebuilds
@@ -65,13 +61,12 @@ def update_cpv_db_pool(mysettings, myportdb, cp, repo):
# Update the packages with ebuilds
init_package.update_package_db(package_id)
- return
+ conn2.close
-def update_cpv_db():
- conn=CM.getConnection()
- mysettings = init_portage_settings()
+def update_cpv_db(conn, config_id):
+ mysettings = init_portage_settings(conn, config_id)
log_msg = "Checking categories, package, ebuilds"
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
# Setup portdb, package
myportdb = portage.portdbapi(mysettings=mysettings)
@@ -87,7 +82,10 @@ def update_cpv_db():
# Get the repos and update the repos db
repo_list = myportdb.getRepositories()
update_repo_db(conn, repo_list)
- CM.putConnection(conn)
+
+ # Close the db connection; the multiprocessing pool workers will open
+ # their own, and we don't need this one for some time.
+ conn.close()
# Get the rootdirs for the repos
repo_trees_list = myportdb.porttrees
@@ -102,25 +100,28 @@ def update_cpv_db():
# Run the update package for all package in the list and in a multiprocessing pool
for cp in sorted(package_list_tree):
pool.apply_async(update_cpv_db_pool, (mysettings, myportdb, cp, repo,))
- # update_cpv_db_pool(mysettings, myportdb, init_package, package_line, repo)
+ # update_cpv_db_pool(mysettings, myportdb, cp, repo)
+
+ # close and join the multiprocessing pools
pool.close()
pool.join()
- conn=CM.getConnection()
+
+ # reconnect to the db if needed.
+ if not conn.is_connected() is True:
+ conn.reconnect(attempts=2, delay=1)
log_msg = "Checking categories, package and ebuilds ... done"
- add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
+ add_gobs_logs(conn, log_msg, "info", config_id)
-def update_db_main():
+def update_db_main(conn):
# Main
- conn=CM.getConnection()
# Logging
+ config_id = get_config_id(conn, config_profile)
log_msg = "Update db started."
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ add_gobs_logs(conn, log_msg, "info", config_id)
# Update the cpv db
- update_cpv_db()
+ update_cpv_db(conn, config_id)
log_msg = "Update db ... Done."
- add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
+ add_gobs_logs(conn, log_msg, "info", config_id)
return True
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-12-19 2:11 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-12-19 2:11 UTC (permalink / raw
To: gentoo-commits
commit: fec9f69cfcc8ea1028d77cb90168cd8845b58ee7
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 19 02:10:57 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Wed Dec 19 02:10:57 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=fec9f69c
Add mysql support
---
gobs/bin/gobs_host_jobs | 20 +--
gobs/pym/ConnectionManager.py | 106 ++++++---
gobs/pym/check_setup.py | 21 +--
gobs/pym/jobs.py | 27 +--
gobs/pym/mysql_querys.py | 510 +++++++++++++++++++++++++++++++++++++++++
gobs/pym/sync.py | 18 +--
6 files changed, 601 insertions(+), 101 deletions(-)
diff --git a/gobs/bin/gobs_host_jobs b/gobs/bin/gobs_host_jobs
index 72683c9..5dac511 100755
--- a/gobs/bin/gobs_host_jobs
+++ b/gobs/bin/gobs_host_jobs
@@ -5,33 +5,23 @@ from __future__ import print_function
from gobs.readconf import get_conf_settings
from gobs.jobs import jobs_main
-
-reader = get_conf_settings()
-gobs_settings_dict=reader.read_gobs_settings_all()
-
-# make a CM
from gobs.ConnectionManager import connectionManager
-CM=connectionManager(gobs_settings_dict)
-#selectively import the pgsql/mysql querys
-if CM.getName()=='pgsql':
- from gobs.pgsql_querys import add_gobs_logs
-
-import logging
import time
def main():
# Main
+ reader = get_conf_settings()
+ gobs_settings_dict=reader.read_gobs_settings_all()
config_profile = gobs_settings_dict['gobs_config']
- # Logging
+ CM=connectionManager()
conn = CM.getConnection()
add_gobs_logs(conn, "Job deamon started", "info", config_profile)
- CM.putConnection(conn)
repeat = True
while repeat:
- jobs_main(config_profile)
+ jobs_main(conn, config_profile)
repeat = False
time.sleep(60)
- CM.closeAllConnections()
+ CM.putConnection(conn)
if __name__ == "__main__":
main()
\ No newline at end of file
diff --git a/gobs/pym/ConnectionManager.py b/gobs/pym/ConnectionManager.py
index 5e8a763..a06e2cd 100644
--- a/gobs/pym/ConnectionManager.py
+++ b/gobs/pym/ConnectionManager.py
@@ -2,45 +2,83 @@
#when the first object is created of this class, the SQL settings are read from the file and stored in the class for later reuse by the next object and so on.
#(maybe later add support for connection pools)
from __future__ import print_function
+from gobs.readconf import get_conf_settings
+reader = get_conf_settings()
+gobs_settings_dict=reader.read_gobs_settings_all()
+
+if settings_dict['sql_backend']=='pgsql':
+ from gobs.pgsql_querys import *
+if settings_dict['sql_backend']=='mysql':
+ from gobs.mysql_querys import *
class connectionManager(object):
- _instance = None
+ _instance = None
- #size of the connection Pool
- def __new__(cls, settings_dict, numberOfconnections=20, *args, **kwargs):
- if not cls._instance:
- cls._instance = super(connectionManager, cls).__new__(cls, *args, **kwargs)
- #read the sql user/host etc and store it in the local object
- cls._backend=settings_dict['sql_backend']
- cls._host=settings_dict['sql_host']
- cls._user=settings_dict['sql_user']
- cls._password=settings_dict['sql_passwd']
- cls._database=settings_dict['sql_db']
- #shouldnt we include port also?
- try:
- from psycopg2 import pool, extensions
- cls._connectionNumber=numberOfconnections
- #always create 1 connection
- cls._pool=pool.ThreadedConnectionPool(1,cls._connectionNumber,host=cls._host,database=cls._database,user=cls._user,password=cls._password)
- cls._name=cls._backend
- except ImportError:
- print("Please install a recent version of dev-python/psycopg for Python")
- sys.exit(1)
- #setup connection pool
- return cls._instance
-
- ## returns the name of the database pgsql/mysql etc
- def getName(self):
- return self._name
-
- def getConnection(self):
- return self._pool.getconn()
+ #size of the connection Pool
+ def __new__(cls, numberOfconnections=20, *args, **kwargs):
+ if not cls._instance:
+ cls._instance = super(connectionManager, cls).__new__(cls, *args, **kwargs)
+ #read the sql user/host etc and store it in the local object
+ cls._backend=settings_dict['sql_backend']
+ cls._host=settings_dict['sql_host']
+ cls._user=settings_dict['sql_user']
+ cls._password=settings_dict['sql_passwd']
+ cls._database=settings_dict['sql_db']
+ #shouldnt we include port also?
+ if cls._backend == 'pgsql'
+ try:
+ from psycopg2 import pool, extensions
+ except ImportError:
+ print("Please install a recent version of dev-python/psycopg for Python")
+ sys.exit(1)
+ #setup connection pool
+ cls._connectionNumber=numberOfconnections
+ #always create 1 connection
+ cls._pool=pool.ThreadedConnectionPool(1,cls._connectionNumber,host=cls._host,database=cls._database,user=cls._user,password=cls._password)
+ cls._name=cls._backend
+ if cls._backend == 'mysql'
+ try:
+ import mysql.connector
+ from mysql.connector import errorcode
+ except ImportError:
+ print("Please install a recent version of dev-python/mysql-connector-python for Python")
+ sys.exit(1)
+ db_config = {}
+ db_config['user'] = cls._user
+ db_config['password'] = cls._password
+ db_config['host'] = cls._host
+ db_config['database'] = cls._database
+ db_config['raise_on_warnings'] = True
+ try:
+ cls._cnx = mysql.connector.connect(**db_config)
+ except mysql.connector.Error as err:
+ if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
+ print("Something is wrong your username or password")
+ elif err.errno == errorcode.ER_BAD_DB_ERROR:
+ print("Database does not exists")
+ else:
+ print(err)
+ cls._name=cls._backend
+ return cls._instance
+
+ ## returns the name of the database pgsql/mysql etc
+ def getName(self):
+ return self._name
+
+ def getConnection(self):
+ if self._name == 'pgsql'
+ return self._pool.getconn()
+ if self._name == 'mysql'
+ return self._cnx
- def putConnection(self, connection):
- self._pool.putconn(connection , key=None, close=True)
+ def putConnection(self, connection):
+ if self._name == 'pgsql'
+ self._pool.putconn(connection , key=None, close=True)
+ if self._name == 'mysql'
+ return self._cnx.close()
- def closeAllConnections(self):
- self._pool.closeall()
+ def closeAllConnections(self):
+ self._pool.closeall()
##how to use this class
#get a instance of the class (there can only be 1 instance but many pointers (single ton))
diff --git a/gobs/pym/check_setup.py b/gobs/pym/check_setup.py
index a97d639..417a137 100644
--- a/gobs/pym/check_setup.py
+++ b/gobs/pym/check_setup.py
@@ -9,16 +9,9 @@ from gobs.readconf import get_conf_settings
reader=get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
config_profile = gobs_settings_dict['gobs_config']
-# make a CM
-from gobs.ConnectionManager import connectionManager
-CM=connectionManager(gobs_settings_dict)
-#selectively import the pgsql/mysql querys
-if CM.getName()=='pgsql':
- from gobs.pgsql_querys import *
-def check_make_conf():
+def check_make_conf(conn):
# Get the config list
- conn=CM.getConnection()
config_id_list_all = get_config_list_all(conn)
log_msg = "Checking configs for changes and errors"
add_gobs_logs(conn, log_msg, "info", config_profile)
@@ -54,10 +47,8 @@ def check_make_conf():
update_make_conf(conn, configsDict)
log_msg = "Checking configs for changes and errors ... Done"
add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
-def check_make_conf_guest(config_profile):
- conn=CM.getConnection()
+def check_make_conf_guest(conn, config_profile):
print('config_profile', config_profile)
make_conf_file = "/etc/portage/make.conf"
# Check if we can open the file and close it
@@ -70,21 +61,17 @@ def check_make_conf_guest(config_profile):
mysettings.validate()
# With errors we return false
except Exception as e:
- CM.putConnection(conn)
return False
make_conf_checksum_db = get_profile_checksum(conn, config_profile)
if make_conf_checksum_db is None:
- CM.putConnection(conn)
return False
print('make_conf_checksum_tree', make_conf_checksum_tree)
print('make_conf_checksum_db', make_conf_checksum_db)
if make_conf_checksum_tree != make_conf_checksum_db:
- CM.putConnection(conn)
return False
- CM.putConnection(conn)
return True
-def check_configure_guest(config_profile):
- pass_make_conf = check_make_conf_guest(config_profile)
+def check_configure_guest(conn, config_profile):
+ pass_make_conf = check_make_conf_guest(conn, config_profile)
print(pass_make_conf)
return pass_make_conf
\ No newline at end of file
diff --git a/gobs/pym/jobs.py b/gobs/pym/jobs.py
index d68cff2..504c951 100644
--- a/gobs/pym/jobs.py
+++ b/gobs/pym/jobs.py
@@ -1,25 +1,12 @@
from __future__ import print_function
-# Get the options from the config file set in gobs.readconf
-from gobs.readconf import get_conf_settings
-reader=get_conf_settings()
-gobs_settings_dict=reader.read_gobs_settings_all()
-
-from gobs.ConnectionManager import connectionManager
-CM=connectionManager(gobs_settings_dict)
-#selectively import the pgsql/mysql querys
-if CM.getName()=='pgsql':
- from gobs.pgsql_querys import *
-
from gobs.sync import git_pull, sync_tree
from gobs.buildquerydb import add_buildquery_main, del_buildquery_main
from gobs.updatedb import update_db_main
-def jobs_main(config_profile):
- conn = CM.getConnection()
+def jobs_main(conn, config_profile):
jobs_id = get_jobs_id(conn, config_profile)
if jobs_id is None:
- CM.putConnection(conn)
return
for job_id in jobs_id:
job, config_id = get_job(conn, job_id)
@@ -51,11 +38,11 @@ def jobs_main(config_profile):
update_job_list(conn, "Fail", job_id)
log_msg = "Job %s did fail." % (job_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
- elif job == "gitsync":
+ elif job == "gsync":
update_job_list(conn, "Runing", job_id)
log_msg = "Job %s is runing." % (job_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
- result = git_pull()
+ result = git_pull(conn)
if result is True:
update_job_list(conn, "Done", job_id)
log_msg = "Job %s is done.." % (job[1],)
@@ -64,11 +51,11 @@ def jobs_main(config_profile):
update_job_list(conn, "Fail", job_id)
log_msg = "Job %s did fail." % (job_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
- elif job == "emergesync":
+ elif job == "esync":
update_job_list(conn, "Runing", job_id)
log_msg = "Job %s is runing." % (job_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
- result = sync_tree()
+ result = sync_tree(conn)
if result is True:
update_job_list(conn, "Done", job_id)
log_msg = "Job %s is done.." % (job_id,)
@@ -81,7 +68,7 @@ def jobs_main(config_profile):
update_job_list(conn, "Runing", job_id)
log_msg = "Job %s is runing." % (job_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
- result = update_db_main()
+ result = update_db_main(conn)
if result is True:
update_job_list(conn, "Done", job_id)
log_msg = "Job %s is done.." % (job_id,)
@@ -90,4 +77,4 @@ def jobs_main(config_profile):
update_job_list(conn, "Fail", job_id)
log_msg = "Job %s did fail." % (job_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
- return
\ No newline at end of file
+ return
diff --git a/gobs/pym/mysql_querys.py b/gobs/pym/mysql_querys.py
new file mode 100644
index 0000000..45a622f
--- /dev/null
+++ b/gobs/pym/mysql_querys.py
@@ -0,0 +1,510 @@
+#every function takes a connection as a parameter that is provided by the CM
+from __future__ import print_function
+
# Queries to add the logs
def add_gobs_logs(connection, log_msg, log_type, config):
	"""Insert one row into the logs table.

	The config *name* is resolved to its config_id by the nested SELECT,
	so callers pass the textual config, not an id.
	"""
	cursor = connection.cursor()
	sqlQ = 'INSERT INTO logs (config_id, log_type, msg) VALUES ( (SELECT config_id FROM configs WHERE config = %s), %s, %s )'
	cursor.execute(sqlQ, (config, log_type, log_msg))
	connection.commit()
	cursor.close()
+
# Queries to handle the jobs table
def get_jobs_id(connection, config):
	"""Return a sorted list of waiting job ids for *config*, or None.

	None is returned when no job is waiting, matching how callers
	(e.g. jobs_main) test the result with `is None`.
	"""
	cursor = connection.cursor()
	sqlQ = "SELECT job_id FROM jobs WHERE status = 'Waiting' AND config_id = (SELECT config_id FROM configs WHERE config = %s)"
	cursor.execute(sqlQ, (config,))
	entries = cursor.fetchall()
	cursor.close()
	# fetchall() returns an empty sequence, never None, when there are no
	# rows -- the old `entries is None` test could not detect that case.
	if not entries:
		return None
	jobs_id = []
	for job_id in entries:
		jobs_id.append(job_id[0])
	return sorted(jobs_id)
+
def get_job(connection, job_id):
	"""Look up one job row and return its (job, config_id2) pair."""
	sqlQ = 'SELECT job, config_id2 FROM jobs WHERE job_id = %s'
	cursor = connection.cursor()
	cursor.execute(sqlQ, (job_id,))
	row = cursor.fetchone()
	# row layout: (job name, target config id)
	return row[0], row[1]
+
def update_job_list(connection, status, job_id):
	"""Set the status column of one job row.

	Callers in jobs.py use the values 'Runing', 'Done' and 'Fail';
	get_jobs_id() selects on 'Waiting'.
	"""
	cursor = connection.cursor()
	sqlQ = 'UPDATE jobs SET status = %s WHERE job_id = %s'
	cursor.execute(sqlQ, (status, job_id,))
	connection.commit()
+
# Queries to handle the configs* tables
def get_config_list_all(connection):
	"""Return the raw result rows of every config_id in configs."""
	cursor = connection.cursor()
	cursor.execute('SELECT config_id FROM configs')
	return cursor.fetchall()
+
def get_config(connection, config_id):
	"""Return the config name stored for *config_id*."""
	cursor = connection.cursor()
	cursor.execute('SELECT config FROM configs WHERE config_id = %s', (config_id,))
	# Unwrap the single-column row.
	return cursor.fetchone()[0]
+
def update_make_conf(connection, configsDict):
	"""Write make.conf metadata for each config in configsDict.

	configsDict maps config_id -> dict with keys 'make_conf_checksum_tree',
	'make_conf_text', 'active' and 'config_error'.
	"""
	cursor = connection.cursor()
	sqlQ = 'UPDATE configs_metadata SET checksum = %s, make_conf_text = %s, active = %s, config_error = %s WHERE config_id = %s'
	# dict.iteritems(): this module targets Python 2.
	for k, v in configsDict.iteritems():
		cursor.execute(sqlQ, (v['make_conf_checksum_tree'], v['make_conf_text'], v['active'], v['config_error'], k,))
	connection.commit()
+
def get_default_config(connection):
	"""Return the row of the config flagged default_config = 'True'.

	Returns the raw one-column row (not the bare string); callers such as
	sync_tree() index it with [0].
	"""
	cursor = connection.cursor()
	sqlQ = "SELECT config FROM configs WHERE default_config = 'True'"
	cursor.execute(sqlQ)
	entries = cursor.fetchone()
	return entries
+
def update_repo_db(connection, repo_list):
	"""Insert any repo names from repo_list missing from the repos table."""
	cursor = connection.cursor()
	sqlQ1 = 'SELECT repo_id FROM repos WHERE repo = %s'
	sqlQ2 = 'INSERT INTO repos (repo) VALUES ( %s )'
	for repo in repo_list:
		cursor.execute(sqlQ1, (repo,))
		entries = cursor.fetchone()
		# Insert only when the lookup found no existing row.
		if entries is None:
			cursor.execute(sqlQ2, (repo,))
	connection.commit()
	return
+
def get_package_id(connection, categories, package, repo):
	"""Return the package_id of category/package in *repo*, or None."""
	sqlQ = 'SELECT package_id FROM packages WHERE category = %s AND package = %s AND repo_id = (SELECT repo_id FROM repos WHERE repo = %s)'
	cursor = connection.cursor()
	cursor.execute(sqlQ, (categories, package, repo))
	row = cursor.fetchone()
	# No row means the package is not yet registered.
	return None if row is None else row[0]
+
# Queries to add new info to the packages table
def get_repo_id(connection, repo):
	"""Return the repo_id for *repo*, or None when the repo is unknown."""
	cursor = connection.cursor()
	sqlQ ='SELECT repo_id FROM repos WHERE repo = %s'
	cursor.execute(sqlQ, (repo,))
	entries = cursor.fetchone()
	if entries is None:
		return None
	return entries[0]
+
def add_new_manifest_sql(connection, categories, package, repo, manifest_checksum_tree):
	"""Insert a new packages row (active, with Manifest checksum) and
	return the generated package_id.

	NOTE(review): the RETURNING clause is PostgreSQL syntax; this file is
	named mysql_querys.py -- confirm the target server accepts it (MySQL
	would need cursor.lastrowid instead).
	"""
	cursor = connection.cursor()
	sqlQ = "INSERT INTO packages (category, package, repo_id, checksum, active) VALUES (%s, %s, %s, %s, 'True') RETURNING package_id"
	repo_id = get_repo_id(connection, repo)
	cursor.execute(sqlQ, (categories, package, repo_id, manifest_checksum_tree,))
	package_id = cursor.fetchone()[0]
	connection.commit()
	return package_id
+
def get_restriction_id(connection, restriction):
	"""Return the restriction_id for *restriction*, or None if unknown."""
	cursor = connection.cursor()
	sqlQ ='SELECT restriction_id FROM restrictions WHERE restriction = %s'
	cursor.execute(sqlQ, (restriction,))
	entries = cursor.fetchone()
	if entries is None:
		return None
	return entries[0]
+
def get_use_id(connection, use_flag):
	"""Return the use_id for *use_flag*, or None if the flag is unknown."""
	cursor = connection.cursor()
	sqlQ ='SELECT use_id FROM uses WHERE flag = %s'
	cursor.execute(sqlQ, (use_flag,))
	entries = cursor.fetchone()
	if entries is None:
		return None
	return entries[0]
+
def get_keyword_id(connection, keyword):
	"""Return the keyword_id for *keyword*, or None if unknown."""
	cursor = connection.cursor()
	sqlQ ='SELECT keyword_id FROM keywords WHERE keyword = %s'
	cursor.execute(sqlQ, (keyword,))
	entries = cursor.fetchone()
	if entries is None:
		return None
	return entries[0]
+
def add_new_ebuild_metadata_sql(connection, ebuild_id, keywords, restrictions, iuse_list):
	"""Store keyword, restriction and IUSE metadata rows for one ebuild.

	Unknown keywords/restrictions/flags are first inserted into their
	lookup tables; each link-table row then records a status derived from
	the item's leading marker character.
	"""
	cursor = connection.cursor()
	sqlQ1 = 'INSERT INTO keywords (keyword) VALUES ( %s ) RETURNING keyword_id'
	sqlQ3 = 'INSERT INTO restrictions (restriction) VALUES ( %s ) RETURNING restriction_id'
	sqlQ4 = 'INSERT INTO ebuilds_restrictions (ebuild_id, restriction_id) VALUES ( %s, %s )'
	sqlQ5 = 'INSERT INTO uses (flag) VALUES ( %s ) RETURNING use_id'
	sqlQ6 = 'INSERT INTO ebuilds_iuse (ebuild_id, use_id, status) VALUES ( %s, %s, %s)'
	sqlQ7 = 'INSERT INTO ebuilds_keywords (ebuild_id, keyword_id, status) VALUES ( %s, %s, %s)'
	# FIXME restriction need some filter as iuse and keyword have.
	for restriction in restrictions:
		restriction_id = get_restriction_id(connection, restriction)
		if restriction_id is None:
			cursor.execute(sqlQ3, (restriction,))
			restriction_id = cursor.fetchone()[0]
		cursor.execute(sqlQ4, (ebuild_id, restriction_id,))
	for iuse in iuse_list:
		# '+flag' -> 'enable'; '-flag' or a bare flag -> 'disable'.
		set_iuse = 'disable'
		if iuse[0] in ["+"]:
			iuse = iuse[1:]
			set_iuse = 'enable'
		elif iuse[0] in ["-"]:
			iuse = iuse[1:]
		use_id = get_use_id(connection, iuse)
		if use_id is None:
			cursor.execute(sqlQ5, (iuse,))
			use_id = cursor.fetchone()[0]
		cursor.execute(sqlQ6, (ebuild_id, use_id, set_iuse,))
	for keyword in keywords:
		# bare arch -> 'stable'; '~arch' -> 'unstable'; '-arch' -> 'testing'.
		set_keyword = 'stable'
		if keyword[0] in ["~"]:
			keyword = keyword[1:]
			set_keyword = 'unstable'
		elif keyword[0] in ["-"]:
			keyword = keyword[1:]
			set_keyword = 'testing'
		keyword_id = get_keyword_id(connection, keyword)
		if keyword_id is None:
			cursor.execute(sqlQ1, (keyword,))
			keyword_id = cursor.fetchone()[0]
		cursor.execute(sqlQ7, (ebuild_id, keyword_id, set_keyword,))
	connection.commit()
+
def add_new_ebuild_sql(connection, package_id, ebuildDict):
	"""Insert one ebuilds row per entry of ebuildDict; return the new ids.

	Each ebuildDict value carries 'ebuild_version_tree',
	'ebuild_version_checksum_tree', 'ebuild_version_revision_tree' and
	'ebuild_version_metadata_tree' (index 4 is split as restrictions,
	8 as keywords, 10 as iuse -- TODO confirm layout against the caller).
	"""
	cursor = connection.cursor()
	sqlQ1 = 'SELECT repo_id FROM packages WHERE package_id = %s'
	sqlQ2 = "INSERT INTO ebuilds (package_id, version, checksum, active) VALUES (%s, %s, %s, 'True') RETURNING ebuild_id"
	sqlQ4 = "INSERT INTO ebuilds_metadata (ebuild_id, revision) VALUES (%s, %s)"
	ebuild_id_list = []
	# NOTE(review): repo_id is fetched here but never used below.
	cursor.execute(sqlQ1, (package_id,))
	repo_id = cursor.fetchone()[0]
	for k, v in ebuildDict.iteritems():
		cursor.execute(sqlQ2, (package_id, v['ebuild_version_tree'], v['ebuild_version_checksum_tree'],))
		ebuild_id = cursor.fetchone()[0]
		cursor.execute(sqlQ4, (ebuild_id, v['ebuild_version_revision_tree'],))
		ebuild_id_list.append(ebuild_id)
		restrictions = []
		keywords = []
		iuse = []
		for i in v['ebuild_version_metadata_tree'][4].split():
			restrictions.append(i)
		for i in v['ebuild_version_metadata_tree'][8].split():
			keywords.append(i)
		for i in v['ebuild_version_metadata_tree'][10].split():
			iuse.append(i)
		add_new_ebuild_metadata_sql(connection, ebuild_id, keywords, restrictions, iuse)
	connection.commit()
	return ebuild_id_list
+
def get_config_id_list(connection):
	"""Return config ids of active, non-default configs, or None when none."""
	cursor = connection.cursor()
	sqlQ = "SELECT configs.config_id FROM configs, configs_metadata WHERE configs.default_config = 'False' AND configs_metadata.active = 'True' AND configs.config_id = configs_metadata.config_id"
	cursor.execute(sqlQ)
	entries = cursor.fetchall()
	# fetchall() yields a list, so the old `entries == ()` comparison never
	# matched an empty result; test emptiness with truthiness instead.
	if not entries:
		return None
	config_id_list = []
	for config_id in entries:
		config_id_list.append(config_id[0])
	return config_id_list
+
def get_config_id(connection, config):
	"""Return the config_id for the config named *config*, or None."""
	cursor = connection.cursor()
	sqlQ = 'SELECT config_id FROM configs WHERE config = %s'
	cursor.execute(sqlQ,(config,))
	entries = cursor.fetchone()
	if entries is None:
		return None
	return entries[0]
+
def add_new_package_buildqueue(connection, ebuild_id, config_id, use_flagsDict):
	"""Queue a build job for (ebuild_id, config_id) with its use-flag set.

	use_flagsDict maps a use-flag name to its status; the flag name is
	resolved to its use_id inside sqlQ3.
	"""
	cursor = connection.cursor()
	sqlQ1 = 'INSERT INTO build_jobs (ebuild_id, config_id) VALUES (%s, %s) RETURNING build_job_id'
	sqlQ3 = 'INSERT INTO build_jobs_use (build_job_id, use_id, status) VALUES (%s, (SELECT use_id FROM uses WHERE flag = %s), %s)'
	cursor.execute(sqlQ1, (ebuild_id, config_id,))
	build_job_id = cursor.fetchone()[0]
	# dict.iteritems(): this module targets Python 2.
	for k, v in use_flagsDict.iteritems():
		cursor.execute(sqlQ3, (build_job_id, k, v,))
	connection.commit()
+
def get_manifest_db(connection, package_id):
	"""Return the stored Manifest checksum for *package_id*, or None."""
	cursor = connection.cursor()
	cursor.execute('SELECT checksum FROM packages WHERE package_id = %s', (package_id,))
	row = cursor.fetchone()
	# A missing package yields None; otherwise unwrap the single column.
	return None if row is None else row[0]
+
def get_cp_from_package_id(connection, package_id):
	"""Return the one-column row ('category/package',) for *package_id*.

	The previous query used ARRAY_TO_STRING, which is PostgreSQL-only;
	this file is the MySQL query module, so build the cp string with
	CONCAT instead.  The raw row is returned (not the bare string) --
	callers such as get_cp_repo_from_package_id index it with [0].
	"""
	cursor = connection.cursor()
	sqlQ = "SELECT CONCAT(category, '/', package) AS cp FROM packages WHERE package_id = %s"
	cursor.execute(sqlQ, (package_id,))
	return cursor.fetchone()
+
def get_cp_repo_from_package_id(connection, package_id):
	"""Return ('category/package', repo name) for *package_id*."""
	cursor =connection.cursor()
	sqlQ = 'SELECT repos.repo FROM repos, packages WHERE repos.repo_id = packages.repo_id AND packages.package_id = %s'
	# get_cp_from_package_id returns the raw one-column row; unwrapped below.
	cp = get_cp_from_package_id(connection, package_id)
	cursor.execute(sqlQ, (package_id,))
	repo = cursor.fetchone()
	return cp[0], repo[0]
+
def get_ebuild_checksum(connection, package_id, ebuild_version_tree):
	"""Return the checksum of the active ebuild version, or None."""
	cursor = connection.cursor()
	sqlQ = "SELECT checksum FROM ebuilds WHERE package_id = %s AND version = %s AND active = 'True'"
	cursor.execute(sqlQ, (package_id, ebuild_version_tree))
	entries = cursor.fetchone()
	if entries is None:
		return None
	# If entries is not None we need [0]
	return entries[0]
+
def add_old_ebuild(connection, package_id, old_ebuild_list):
	"""Deactivate outdated ebuild versions and drop their queued build jobs.

	old_ebuild_list holds rows whose first column is the version string.
	For every version: delete the build_jobs_use rows and build_jobs rows
	of any still-active ebuild of that version, then mark it inactive.
	"""
	cursor = connection.cursor()
	sqlQ1 = "UPDATE ebuilds SET active = 'False' WHERE package_id = %s AND version = %s"
	sqlQ2 = "SELECT ebuild_id FROM ebuilds WHERE package_id = %s AND version = %s AND active = 'True'"
	sqlQ3 = "SELECT build_job_id FROM build_jobs WHERE ebuild_id = %s"
	sqlQ4 = 'DELETE FROM build_jobs_use WHERE build_job_id = %s'
	sqlQ5 = 'DELETE FROM build_jobs WHERE build_job_id = %s'
	for old_ebuild in old_ebuild_list:
		cursor.execute(sqlQ2, (package_id, old_ebuild[0]))
		# fetchall() returns [] (never None) on no match, so iterate directly;
		# the old `is not None` guard could never skip anything.  The row
		# tuples are unwrapped explicitly ([0]) instead of being passed as
		# the whole parameter tuple ("(ebuild_id)" had no comma and only
		# worked by accident for one-column rows).
		for ebuild_id in cursor.fetchall():
			cursor.execute(sqlQ3, (ebuild_id[0],))
			for build_job_id in cursor.fetchall():
				# Child rows first, then the job row itself.
				cursor.execute(sqlQ4, (build_job_id[0],))
				cursor.execute(sqlQ5, (build_job_id[0],))
		cursor.execute(sqlQ1, (package_id, old_ebuild[0]))
	connection.commit()
+
def update_active_ebuild_to_fales(connection, package_id, ebuild_version_tree):
	"""Mark one currently-active ebuild version as inactive.

	NOTE(review): 'fales' is a misspelling of 'false', but the name is the
	public interface callers import, so it is kept as-is.
	"""
	cursor = connection.cursor()
	sqlQ ="UPDATE ebuilds SET active = 'False' WHERE package_id = %s AND version = %s AND active = 'True'"
	cursor.execute(sqlQ, (package_id, ebuild_version_tree))
	connection.commit()
+
def update_manifest_sql(connection, package_id, manifest_checksum_tree):
	"""Store the tree's Manifest checksum on the package row."""
	cursor = connection.cursor()
	cursor.execute('UPDATE packages SET checksum = %s WHERE package_id = %s',
		(manifest_checksum_tree, package_id,))
	connection.commit()
+
def get_build_jobs_id_list_config(connection, config_id):
	"""Return the build_job ids queued for *config_id*, or None when none.

	The original loop iterated `build_job_id_id` but appended
	`build_job_id[0]` (a NameError on any non-empty result) and assigned
	the empty case to a misspelled dead variable (`build_log_id_list`);
	both are fixed here.
	"""
	cursor = connection.cursor()
	sqlQ = 'SELECT build_job_id FROM build_jobs WHERE config_id = %s'
	cursor.execute(sqlQ, (config_id,))
	entries = cursor.fetchall()
	if not entries:
		return None
	build_jobs_id_list = []
	for build_job_id in entries:
		build_jobs_id_list.append(build_job_id[0])
	return build_jobs_id_list
+
def del_old_build_jobs(connection, build_job_id):
	"""Delete one build job and its child rows (use flags, retries, options).

	The child tables are cleared before the build_jobs row itself so that
	foreign-key references stay satisfied.
	"""
	cursor = connection.cursor()
	sqlQ1 = 'DELETE FROM build_jobs_use WHERE build_job_id = %s'
	sqlQ2 = 'DELETE FROM build_jobs_retest WHERE build_job_id = %s'
	sqlQ3 = 'DELETE FROM build_jobs WHERE build_job_id = %s'
	sqlQ4 = 'DELETE FROM build_jobs_emerge_options WHERE build_job_id = %s'
	cursor.execute(sqlQ1, (build_job_id,))
	cursor.execute(sqlQ2, (build_job_id,))
	cursor.execute(sqlQ4, (build_job_id,))
	cursor.execute(sqlQ3, (build_job_id,))
	connection.commit()
+
def get_profile_checksum(connection, config_profile):
	"""Return the checksum of an active, auto-enabled config, or None."""
	cursor = connection.cursor()
	sqlQ = "SELECT checksum FROM configs_metadata WHERE active = 'True' AND config_id = (SELECT config_id FROM configs WHERE config = %s) AND auto = 'True'"
	cursor.execute(sqlQ, (config_profile,))
	entries = cursor.fetchone()
	if entries is None:
		return
	return entries[0]
+
def get_packages_to_build(connection, config):
	"""Pick the next build job for *config* and return it as a dict.

	Jobs with status = 'Now' are taken first (sqlQ4); otherwise the oldest
	job whose time_stamp is more than 7200 s old (sqlQ1).  Returns None
	when nothing is eligible, else a dict with build_job_id, ebuild_id,
	package_id, ebuild_version, checksum, build_useflags (flag -> status)
	and emerge_options (list, or None when empty).
	"""
	cursor =connection.cursor()
	sqlQ1 = "SELECT build_jobs.build_job_id, build_jobs.ebuild_id, ebuilds.package_id FROM build_jobs, ebuilds WHERE build_jobs.config_id = (SELECT config_id FROM configs WHERE config = %s) AND build_jobs.ebuild_id = ebuilds.ebuild_id AND ebuilds.active = 'True' AND extract(epoch from (NOW()) - build_jobs.time_stamp) > 7200 ORDER BY build_jobs.build_job_id LIMIT 1"
	sqlQ2 = 'SELECT version, checksum FROM ebuilds WHERE ebuild_id = %s'
	sqlQ3 = 'SELECT uses.flag, build_jobs_use.status FROM build_jobs_use, uses WHERE build_jobs_use.build_job_id = %s AND build_jobs_use.use_id = uses.use_id'
	sqlQ4 = "SELECT build_jobs.build_job_id, build_jobs.ebuild_id, ebuilds.package_id FROM build_jobs, ebuilds WHERE build_jobs.config_id = (SELECT config_id FROM configs WHERE config = %s) AND build_jobs.ebuild_id = ebuilds.ebuild_id AND ebuilds.active = 'True' AND build_jobs.status = 'Now' LIMIT 1"
	sqlQ5 = 'SELECT emerge_options.option FROM configs_emerge_options, emerge_options WHERE configs_emerge_options.config_id = (SELECT config_id FROM configs WHERE config = %s) AND configs_emerge_options.options_id = emerge_options.options_id'
	sqlQ6 = 'SELECT emerge_options.option FROM build_jobs_emerge_options, emerge_options WHERE build_jobs_emerge_options.build_job_id = %s AND build_jobs_emerge_options.options_id = emerge_options.options_id'
	# Prefer a job explicitly flagged 'Now'; fall back to the oldest one.
	cursor.execute(sqlQ4, (config,))
	entries = cursor.fetchone()
	if entries is None:
		cursor.execute(sqlQ1, (config,))
		entries = cursor.fetchone()
		if entries is None:
			return
	build_dict={}
	build_dict['build_job_id'] = entries[0]
	build_dict['ebuild_id']= entries[1]
	build_dict['package_id'] = entries[2]
	cursor.execute(sqlQ2, (entries[1],))
	entries = cursor.fetchone()
	build_dict['ebuild_version'] = entries[0]
	build_dict['checksum']=entries[1]
	#add a enabled and disabled list to the objects in the item list
	cursor.execute(sqlQ3, (build_dict['build_job_id'],))
	uses={}
	for row in cursor.fetchall():
		uses[ row[0] ] = row[1]
	build_dict['build_useflags']=uses
	# Emerge options come from the config (sqlQ5) plus the job (sqlQ6).
	emerge_options_list = []
	cursor.execute(sqlQ5, (config,))
	entries = cursor.fetchall()
	for option in entries:
		emerge_options_list.append(option[0])
	cursor.execute(sqlQ6, (build_dict['build_job_id'],))
	entries = cursor.fetchall()
	for option in entries:
		emerge_options_list.append(option[0])
	if emerge_options_list == []:
		emerge_options_list = None
	build_dict['emerge_options'] = emerge_options_list
	return build_dict
+
def update_fail_times(connection, fail_querue_dict):
	"""Store the bumped fail counter for one (build_job_id, fail_type)
	pair and reset the job's time stamp so it is retried later."""
	cursor = connection.cursor()
	sqlQ1 = 'UPDATE build_jobs_retest SET fail_times = %s WHERE build_job_id = %s AND fail_type = %s'
	sqlQ2 = 'UPDATE build_jobs SET time_stamp = NOW() WHERE build_job_id = %s'
	cursor.execute(sqlQ1, (fail_querue_dict['fail_times'], fail_querue_dict['build_job_id'], fail_querue_dict['fail_type'],))
	cursor.execute(sqlQ2, (fail_querue_dict['build_job_id'],))
	connection.commit()
+
def get_fail_querue_dict(connection, build_dict):
	"""Return {'fail_times': n} for the job's current fail type, or None
	when this job has not yet failed that way."""
	cursor = connection.cursor()
	fail_querue_dict = {}
	sqlQ = 'SELECT fail_times FROM build_jobs_retest WHERE build_job_id = %s AND fail_type = %s'
	cursor.execute(sqlQ, (build_dict['build_job_id'], build_dict['type_fail'],))
	entries = cursor.fetchone()
	if entries is None:
		return None
	fail_querue_dict['fail_times'] = entries[0]
	return fail_querue_dict
+
def add_fail_querue_dict(connection, fail_querue_dict):
	"""Record a first failure of this type for a build job and reset the
	job's time stamp so it is retried later."""
	cursor = connection.cursor()
	sqlQ1 = 'INSERT INTO build_jobs_retest (build_job_id, fail_type, fail_times) VALUES ( %s, %s, %s)'
	sqlQ2 = 'UPDATE build_jobs SET time_stamp = NOW() WHERE build_job_id = %s'
	cursor.execute(sqlQ1, (fail_querue_dict['build_job_id'],fail_querue_dict['fail_type'], fail_querue_dict['fail_times']))
	cursor.execute(sqlQ2, (fail_querue_dict['build_job_id'],))
	connection.commit()
+
def get_ebuild_id_db_checksum(connection, build_dict):
	"""Return the ebuild_id matching the dict's version, checksum and
	package_id, or None when no such ebuild exists."""
	cursor = connection.cursor()
	sqlQ = 'SELECT ebuild_id FROM ebuilds WHERE version = %s AND checksum = %s AND package_id = %s'
	cursor.execute(sqlQ, (build_dict['ebuild_version'], build_dict['checksum'], build_dict['package_id']))
	ebuild_id = cursor.fetchone()
	if ebuild_id is None:
		return
	return ebuild_id[0]
+
def get_build_job_id(connection, build_dict):
	"""Return the build_job_id whose use-flag set matches build_dict, or None.

	All jobs for the (ebuild_id, config_id) pair are scanned; a job
	matches when its build_jobs_use rows (use_id -> status) equal
	build_dict['build_useflags'] (None when the job has no such rows).
	"""
	cursor = connection.cursor()
	sqlQ1 = 'SELECT build_job_id FROM build_jobs WHERE ebuild_id = %s AND config_id = %s'
	sqlQ2 = 'SELECT use_id, status FROM build_jobs_use WHERE build_job_id = %s'
	cursor.execute(sqlQ1, (build_dict['ebuild_id'], build_dict['config_id']))
	build_job_id_list = cursor.fetchall()
	if build_job_id_list == []:
		return
	for build_job_id in build_job_id_list:
		cursor.execute(sqlQ2, (build_job_id[0],))
		entries = cursor.fetchall()
		useflagsdict = {}
		if entries == []:
			# No use rows: represent as None to compare against the dict.
			useflagsdict = None
		else:
			for x in entries:
				useflagsdict[x[0]] = x[1]
		if useflagsdict == build_dict['build_useflags']:
			return build_job_id[0]
+
def add_new_buildlog(connection, build_dict, build_log_dict):
	"""Record a finished build in build_logs and return its build_log_id.

	An existing log row is reused when both its log hash and its use-flag
	set match the new build (build_log_id_match); otherwise a fresh row is
	inserted (build_log_id_no_match).  When the build came from a queued
	job (build_dict['build_job_id'] not None) that job is deleted
	afterwards.  Returns None when an identical log is already registered
	for this config.
	"""
	cursor = connection.cursor()
	sqlQ1 = 'SELECT build_log_id FROM build_logs WHERE ebuild_id = %s'
	sqlQ2 = 'INSERT INTO build_logs (ebuild_id) VALUES (%s) RETURNING build_log_id'
	sqlQ3 = "UPDATE build_logs SET fail = 'true', summery = %s, log_hash = %s WHERE build_log_id = %s"
	sqlQ4 = 'INSERT INTO build_logs_config (build_log_id, config_id, logname) VALUES (%s, %s, %s)'
	sqlQ6 = 'INSERT INTO build_logs_use (build_log_id, use_id, status) VALUES (%s, %s, %s)'
	sqlQ7 = 'SELECT log_hash FROM build_logs WHERE build_log_id = %s'
	sqlQ8 = 'SELECT use_id, status FROM build_logs_use WHERE build_log_id = %s'
	sqlQ9 = 'SELECT config_id FROM build_logs_config WHERE build_log_id = %s'
	sqlQ10 = "UPDATE build_logs SET fail = false, log_hash = %s WHERE build_log_id = %s"
	# Collect any existing log rows for this ebuild (None when there are none).
	build_log_id_list = []
	cursor.execute(sqlQ1, (build_dict['ebuild_id'],))
	entries = cursor.fetchall()
	if not entries == []:
		for build_log_id in entries:
			build_log_id_list.append(build_log_id[0])
	else:
		build_log_id_list = None

	def build_log_id_match(build_log_id_list, build_dict, build_log_dict):
		# Try to find an existing log with the same hash and use-flag set.
		# Returns (build_log_id, True) on a match (None when this config is
		# already attached), or (None, False) when nothing matched.
		for build_log_id in build_log_id_list:
			cursor.execute(sqlQ7, (build_log_id,))
			log_hash = cursor.fetchone()
			cursor.execute(sqlQ8, (build_log_id,))
			entries = cursor.fetchall()
			useflagsdict = {}
			if entries == []:
				useflagsdict = None
			else:
				for x in entries:
					useflagsdict[x[0]] = x[1]
			if log_hash[0] == build_log_dict['log_hash'] and build_dict['build_useflags'] == useflagsdict:
				cursor.execute(sqlQ9, (build_log_id,))
				config_id_list = []
				for config_id in cursor.fetchall():
					config_id_list.append(config_id[0])
				if build_dict['config_id'] in config_id_list:
					# This config already has this exact log registered.
					return None, True
				cursor.execute(sqlQ4, (build_log_id, build_dict['config_id'], build_log_dict['logfilename'],))
				return build_log_id, True
		return None, False

	def build_log_id_no_match(build_dict, build_log_dict):
		# Insert a fresh build_logs row plus its config and use-flag rows.
		cursor.execute(sqlQ2, (build_dict['ebuild_id'],))
		build_log_id = cursor.fetchone()[0]
		if 'True' in build_log_dict['summary_error_list']:
			cursor.execute(sqlQ3, (build_log_dict['build_error'], build_log_dict['log_hash'], build_log_id,))
		else:
			cursor.execute(sqlQ10, (build_log_dict['log_hash'], build_log_id,))
		cursor.execute(sqlQ4, (build_log_id, build_dict['config_id'], build_log_dict['logfilename'],))
		if not build_dict['build_useflags'] is None:
			for use_id, status in build_dict['build_useflags'].iteritems():
				cursor.execute(sqlQ6, (build_log_id, use_id, status))
		return build_log_id

	# Four cases: with/without a queued job x with/without existing logs.
	if build_dict['build_job_id'] is None and build_log_id_list is None:
		return build_log_id_no_match(build_dict, build_log_dict)
	elif build_dict['build_job_id'] is None and not build_log_id_list is None:
		build_log_id, match = build_log_id_match(build_log_id_list, build_dict, build_log_dict)
		if not match:
			build_log_id = build_log_id_no_match(build_dict, build_log_dict)
		return build_log_id
	elif not build_dict['build_job_id'] is None and not build_log_id_list is None:
		build_log_id, match = build_log_id_match(build_log_id_list, build_dict, build_log_dict)
		if not match:
			build_log_id = build_log_id_no_match(build_dict, build_log_dict)
		del_old_build_jobs(connection, build_dict['build_job_id'])
		return build_log_id
	elif not build_dict['build_job_id'] is None and build_log_id_list is None:
		build_log_id = build_log_id_no_match(build_dict, build_log_dict)
		del_old_build_jobs(connection, build_dict['build_job_id'])
		return build_log_id
diff --git a/gobs/pym/sync.py b/gobs/pym/sync.py
index bfff592..4ee1a4d 100644
--- a/gobs/pym/sync.py
+++ b/gobs/pym/sync.py
@@ -6,19 +6,11 @@ import sys
from git import *
from _emerge.main import emerge_main
-from gobs.readconf import get_conf_settings
reader=get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
-from gobs.ConnectionManager import connectionManager
-CM=connectionManager(gobs_settings_dict)
-#selectively import the pgsql/mysql querys
-if CM.getName()=='pgsql':
- from gobs.pgsql_querys import *
-
config_profile = gobs_settings_dict['gobs_config']
-def git_pull():
- conn=CM.getConnection()
+def git_pull(conn):
log_msg = "Git pull"
add_gobs_logs(conn, log_msg, "info", config_profile)
repo = Repo("/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/")
@@ -29,11 +21,9 @@ def git_pull():
add_gobs_logs(conn, log_msg, "info", config_profile)
log_msg = "Git pull ... Done"
add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
return True
-def sync_tree():
- conn=CM.getConnection()
+def sync_tree(conn):
config_id = get_default_config(conn) # HostConfigDir = table configs id
default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id[0] + "/"
mysettings = portage.config(config_root = default_config_root)
@@ -46,8 +36,7 @@ def sync_tree():
fail_sync = emerge_main(args=tmpcmdline)
if fail_sync is True:
log_msg = "Emerge --sync fail!"
- add_gobs_logs(conn, log_msg, "warning", config_profile)
- CM.putConnection(conn)
+ add_gobs_logs(conn, log_msg, "error", config_profile)
return False
else:
# Need to add a config dir so we can use profiles/base for reading the tree.
@@ -61,5 +50,4 @@ def sync_tree():
pass
log_msg = "Emerge --sync ... Done."
add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
return True
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-12-07 14:07 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-12-07 14:07 UTC (permalink / raw
To: gentoo-commits
commit: 1ab0a8e7383a9482aa412d7d240ed89d9706172d
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Fri Dec 7 14:07:33 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Fri Dec 7 14:07:33 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=1ab0a8e7
disable Process
---
gobs/bin/gobs_guest_jobs | 7 ++++---
gobs/pym/build_queru.py | 4 +++-
gobs/pym/manifest.py | 9 ++++++---
3 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/gobs/bin/gobs_guest_jobs b/gobs/bin/gobs_guest_jobs
index 43d0028..7c20734 100755
--- a/gobs/bin/gobs_guest_jobs
+++ b/gobs/bin/gobs_guest_jobs
@@ -34,9 +34,10 @@ def main_loop(config_profile):
continue
else:
init_queru = queruaction(config_profile)
- p = Process(target=init_queru.procces_qureru)
- p.start()
- p.join()
+ init_queru.procces_qureru()
+ # p = Process(target=init_queru.procces_qureru)
+ # p.start()
+ # p.join()
repeat = True
time.sleep(6)
return
diff --git a/gobs/pym/build_queru.py b/gobs/pym/build_queru.py
index d434d33..9eace00 100644
--- a/gobs/pym/build_queru.py
+++ b/gobs/pym/build_queru.py
@@ -54,7 +54,7 @@ class queruaction(object):
build_use_flags_list = init_flags.comper_useflags(build_dict)
log_msg = "build_use_flags_list %s" % (build_use_flags_list,)
add_gobs_logs(conn, log_msg, "info", self._config_profile)
- manifest_error = init_manifest.check_file_in_manifest(portdb, cpv, build_dict, build_use_flags_list)
+ manifest_error = init_manifest.check_file_in_manifest(portdb, cpv, build_use_flags_list, repo)
if manifest_error is None:
build_dict['check_fail'] = False
build_cpv_dict = {}
@@ -66,6 +66,8 @@ class queruaction(object):
else:
build_dict['type_fail'] = "Manifest error"
build_dict['check_fail'] = True
+ log_msg = "Manifest error: %s:%s" % cpv, manifest_error
+ add_gobs_logs(conn, log_msg, "info", self._config_profile)
else:
build_dict['type_fail'] = "Wrong ebuild checksum"
build_dict['check_fail'] = True
diff --git a/gobs/pym/manifest.py b/gobs/pym/manifest.py
index fb29f0a..74b3e90 100644
--- a/gobs/pym/manifest.py
+++ b/gobs/pym/manifest.py
@@ -100,12 +100,15 @@ class gobs_manifest(object):
% os.path.join(filesdir, f)
return None
- def check_file_in_manifest(self, portdb, cpv, build_dict, build_use_flags_list):
+ def check_file_in_manifest(self, portdb, cpv, build_use_flags_list):
myfetchlistdict = portage.FetchlistDict(self._pkgdir, self._mysettings, portdb)
my_manifest = portage.Manifest(self._pkgdir, self._mysettings['DISTDIR'], fetchlist_dict=myfetchlistdict, manifest1_compat=False, from_scratch=False)
- if my_manifest.findFile(build_dict['package'] + "-" + build_dict['ebuild_version'] + ".ebuild") is None:
+ ebuild_version = portage.versions.cpv_getversion(cpv)
+ package = portage.versions.cpv_getkey(cpv).split("/")[1]
+ if my_manifest.findFile(package + "-" + ebuild_version + ".ebuild") is None:
return "Ebuild file not found."
- cpv_fetchmap = portdb.getFetchMap(cpv, useflags=build_use_flags_list, mytree=None)
+ tree = portdb.getRepositoryPath(repo)
+ cpv_fetchmap = portdb.getFetchMap(cpv, useflags=build_use_flags_list, mytree=tree)
self._mysettings.unlock()
try:
portage.fetch(cpv_fetchmap, self._mysettings, listonly=0, fetchonly=0, locks_in_subdir='.locks', use_locks=1, try_mirrors=1)
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-12-05 23:56 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-12-05 23:56 UTC (permalink / raw
To: gentoo-commits
commit: 85cb8ba66d5e4589f8197fb5cff9f235b39a8a5d
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Wed Dec 5 23:55:51 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Wed Dec 5 23:55:51 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=85cb8ba6
first commit for the guest db change
---
gobs/bin/gobs_guest_jobs | 2 +-
gobs/pym/build_queru.py | 22 +++++++++++-------
gobs/pym/pgsql_querys.py | 54 ++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 68 insertions(+), 10 deletions(-)
diff --git a/gobs/bin/gobs_guest_jobs b/gobs/bin/gobs_guest_jobs
index c3bd6ec..43d0028 100755
--- a/gobs/bin/gobs_guest_jobs
+++ b/gobs/bin/gobs_guest_jobs
@@ -10,7 +10,7 @@ from gobs.ConnectionManager import connectionManager
CM=connectionManager(gobs_settings_dict)
#selectively import the pgsql/mysql querys
if CM.getName()=='pgsql':
- from gobs.pgsql import *
+ from gobs.pgsql_querys import *
from gobs.check_setup import check_configure_guest
from gobs.build_queru import queruaction
diff --git a/gobs/pym/build_queru.py b/gobs/pym/build_queru.py
index d74208c..498a6d7 100644
--- a/gobs/pym/build_queru.py
+++ b/gobs/pym/build_queru.py
@@ -8,7 +8,7 @@ from gobs.ConnectionManager import connectionManager
CM=connectionManager(gobs_settings_dict)
#selectively import the pgsql/mysql querys
if CM.getName()=='pgsql':
- from gobs.pgsql import *
+ from gobs.pgsql_querys import *
import portage
import os
@@ -67,7 +67,7 @@ class queruaction(object):
print('fail_querue_dict', fail_querue_dict)
if fail_querue_dict is None:
fail_querue_dict = {}
- fail_querue_dict['querue_id'] = build_dict['queue_id']
+ fail_querue_dict['build_job_id'] = build_dict['build_job_id']
fail_querue_dict['fail_type'] = build_dict['type_fail']
fail_querue_dict['fail_times'] = 1
print('fail_querue_dict', fail_querue_dict)
@@ -75,7 +75,7 @@ class queruaction(object):
else:
if fail_querue_dict['fail_times'][0] < 6:
fail_querue_dict['fail_times'] = fail_querue_dict['fail_times'][0] + 1
- fail_querue_dict['querue_id'] = build_dict['queue_id']
+ fail_querue_dict['build_job_id'] = build_dict['build_job_id']
fail_querue_dict['fail_type'] = build_dict['type_fail']
update_fail_times(conn, fail_querue_dict)
CM.putConnection(conn)
@@ -109,7 +109,7 @@ class queruaction(object):
os.chmod(settings.get("PORTAGE_LOG_FILE"), 0o224)
else:
build_log_dict['logfilename'] = ""
- move_queru_buildlog(conn, build_dict['queue_id'], build_error, summary_error, build_log_dict)
+ move_queru_buildlog(conn, build_dict['build_job_id'], build_error, summary_error, build_log_dict)
CM.putConnection(conn)
def action_build(self, settings, trees, mtimedb, myopts, myaction, myfiles, spinner, build_dict):
@@ -626,11 +626,15 @@ class queruaction(object):
def make_build_list(self, build_dict, settings, portdb):
conn=CM.getConnection()
- cpv = build_dict['category']+'/'+build_dict['package']+'-'+build_dict['ebuild_version']
- pkgdir = os.path.join(settings['PORTDIR'], build_dict['category'] + "/" + build_dict['package'])
+ package_id = build_dict['package_id']
+ cp, repo = get_cp_repo_from_package_id(conn, package_id)
+ element = cp.split('/')
+ package = element[1]
+ cpv = cp + "-" + build_dict['ebuild_version']
+ pkgdir = self._myportdb.getRepositoryPath(repo) + "/" + cp
init_manifest = gobs_manifest(settings, pkgdir)
try:
- ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir+ "/" + build_dict['package'] + "-" + build_dict['ebuild_version'] + ".ebuild")[0]
+ ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + build_dict['ebuild_version'] + ".ebuild")[0]
except:
ebuild_version_checksum_tree = None
if ebuild_version_checksum_tree == build_dict['checksum']:
@@ -704,8 +708,8 @@ class queruaction(object):
pass
build_dict2 = {}
build_dict2 = get_packages_to_build(conn, self._config_profile)
- if build_dict['queue_id'] == build_dict2['queue_id']:
- log_msg = "qurery %s was not removed" % (build_dict['queue_id'],)
+ if build_dict['build_job_id'] == build_dict2['build_job_id']:
+ log_msg = "qurery %s was not removed" % (build_dict['build_job_id'],)
add_gobs_logs(conn, log_msg, "info", self._config_profile)
print("qurery was not removed")
build_dict['type_fail'] = "Querey was not removed"
diff --git a/gobs/pym/pgsql_querys.py b/gobs/pym/pgsql_querys.py
index 3c0813d..6f75e53 100644
--- a/gobs/pym/pgsql_querys.py
+++ b/gobs/pym/pgsql_querys.py
@@ -314,3 +314,57 @@ def del_old_build_jobs(connection, queue_id):
cursor.execute(sqlQ2, (build_job_id,))
cursor.execute(sqlQ3, (build_job_id,))
connection.commit()
+
def get_packages_to_build(connection, config):
	"""Return a dict describing the oldest eligible pending build job for *config*.

	Looks up one build job (older than 2 hours, ebuild still active) for the
	given config, then resolves its ebuild version/checksum and its use-flag
	settings.

	Returns None when no eligible build job exists, otherwise a dict with keys:
	build_job_id, ebuild_id, package_id, ebuild_version, checksum,
	build_useflags (flag name -> enabled/disabled status).
	"""
	cursor = connection.cursor()
	# NOTE(review): as committed, sqlQ1 was an unterminated single-quoted
	# string spanning three source lines (a SyntaxError) and ended in
	# "ORDER BY LIMIT 1" (invalid SQL).  Rebuilt here as one concatenated
	# string; ordering by the job timestamp is assumed from the 2-hour age
	# filter -- TODO confirm the intended ORDER BY column.
	sqlQ1 = ("SELECT build_job_id.build_jobs, ebuild_id.build_jobs, package_id.ebuilds "
		"FROM build_jobs, ebuilds "
		"WHERE config_id.build_jobs = (SELECT config_id FROM configs WHERE config = %s) "
		"AND extract(epoch from (NOW()) - timestamp.build_jobs) > 7200 "
		"AND ebuild_id.build_jobs = ebuild_id.ebuilds "
		"AND ebuilds.active = 'True' ORDER BY timestamp.build_jobs LIMIT 1")
	sqlQ2 = 'SELECT version, checksum FROM ebuilds WHERE ebuild_id = %s'
	# NOTE(review): the committed sqlQ3 was missing the AND between its two
	# WHERE conditions; added here.
	sqlQ3 = ('SELECT flag.uses, status.build_jobs_use '
		'FROM build_jobs_use, uses '
		'WHERE build_job_id.build_jobs_use = %s '
		'AND use_id.build_jobs_use = use_id.uses')
	cursor.execute(sqlQ1, (config,))
	entries = cursor.fetchone()
	if entries is None:
		# No pending build job for this config.
		return None
	build_dict = {}
	build_dict['build_job_id'] = entries[0]
	build_dict['ebuild_id'] = entries[1]
	build_dict['package_id'] = entries[2]
	cursor.execute(sqlQ2, (build_dict['ebuild_id'],))
	entries = cursor.fetchone()
	build_dict['ebuild_version'] = entries[0]
	build_dict['checksum'] = entries[1]
	# Map each use flag name to its enabled/disabled status for this job.
	cursor.execute(sqlQ3, (build_dict['build_job_id'],))
	uses = {}
	for row in cursor.fetchall():
		uses[row[0]] = row[1]
	build_dict['build_useflags'] = uses
	return build_dict
+
def update_fail_times(connection, fail_querue_dict):
	"""Persist an updated fail count for a build job and refresh its timestamp.

	Expects fail_querue_dict to carry 'fail_times', 'build_job_id' and
	'fail_type'.  Commits the transaction before returning.
	"""
	sqlQ1 = 'UPDATE build_jobs_retest SET fail_times = %s WHERE build_job_id = %s AND fail_type = %s'
	sqlQ2 = 'UPDATE build_jobs SET time_stamp = NOW() WHERE build_job_id = %s'
	build_job_id = fail_querue_dict['build_job_id']
	cur = connection.cursor()
	cur.execute(sqlQ1, (fail_querue_dict['fail_times'], build_job_id, fail_querue_dict['fail_type'],))
	cur.execute(sqlQ2, (build_job_id,))
	connection.commit()
+
def get_fail_querue_dict(connection, build_dict):
	"""Look up the recorded fail count for this build job and failure type.

	Returns None when no retest row exists; otherwise a dict whose
	'fail_times' value is the raw fetched row (callers index it with [0]).
	"""
	sqlQ = 'SELECT fail_times FROM build_jobs_retest WHERE build_job_id = %s AND fail_type = %s'
	cur = connection.cursor()
	cur.execute(sqlQ, (build_dict['build_job_id'], build_dict['type_fail'],))
	row = cur.fetchone()
	if row is None:
		return None
	return {'fail_times': row}
+
def add_fail_querue_dict(connection, fail_querue_dict):
	"""Insert a new retest row for a build job and refresh its timestamp.

	Expects fail_querue_dict to carry 'build_job_id', 'fail_type' and
	'fail_times'.  Commits the transaction before returning.
	"""
	sqlQ1 = 'INSERT INTO build_jobs_retest (build_job_id, fail_type, fail_times) VALUES ( %s, %s, %s)'
	sqlQ2 = 'UPDATE build_jobs SET time_stamp = NOW() WHERE build_job_id = %s'
	build_job_id = fail_querue_dict['build_job_id']
	cur = connection.cursor()
	cur.execute(sqlQ1, (build_job_id, fail_querue_dict['fail_type'], fail_querue_dict['fail_times']))
	cur.execute(sqlQ2, (build_job_id,))
	connection.commit()
\ No newline at end of file
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-11-29 22:22 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-11-29 22:22 UTC (permalink / raw
To: gentoo-commits
commit: dbdc835ba201fb580985101955725e85f1b83586
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Thu Nov 29 22:10:06 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Thu Nov 29 22:10:06 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=dbdc835b
Rework on the db and some code
---
gobs/bin/gobs_host_jobs | 3 +-
gobs/pym/ConnectionManager.py | 3 +-
gobs/pym/buildquerydb.py | 2 +-
gobs/pym/categories.py | 30 -
gobs/pym/depgraph.py | 1637 +++++++++++++++++++++++------------------
gobs/pym/jobs.py | 136 ++--
gobs/pym/old_cpv.py | 14 +-
gobs/pym/package.py | 561 ++++++++-------
gobs/pym/pgsql.py | 8 +-
gobs/pym/pgsql_querys.py | 308 ++++++++
gobs/pym/repoman_gobs.py | 10 +-
gobs/pym/text.py | 4 +-
gobs/pym/updatedb.py | 129 ++--
13 files changed, 1673 insertions(+), 1172 deletions(-)
diff --git a/gobs/bin/gobs_host_jobs b/gobs/bin/gobs_host_jobs
index 5ece453..72683c9 100755
--- a/gobs/bin/gobs_host_jobs
+++ b/gobs/bin/gobs_host_jobs
@@ -1,5 +1,4 @@
#!/usr/bin/python
-# Copyright 2006-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -15,7 +14,7 @@ from gobs.ConnectionManager import connectionManager
CM=connectionManager(gobs_settings_dict)
#selectively import the pgsql/mysql querys
if CM.getName()=='pgsql':
- from gobs.pgsql import *
+ from gobs.pgsql_querys import add_gobs_logs
import logging
import time
diff --git a/gobs/pym/ConnectionManager.py b/gobs/pym/ConnectionManager.py
index ff0e07f..5e8a763 100644
--- a/gobs/pym/ConnectionManager.py
+++ b/gobs/pym/ConnectionManager.py
@@ -11,6 +11,7 @@ class connectionManager(object):
if not cls._instance:
cls._instance = super(connectionManager, cls).__new__(cls, *args, **kwargs)
#read the sql user/host etc and store it in the local object
+ cls._backend=settings_dict['sql_backend']
cls._host=settings_dict['sql_host']
cls._user=settings_dict['sql_user']
cls._password=settings_dict['sql_passwd']
@@ -21,7 +22,7 @@ class connectionManager(object):
cls._connectionNumber=numberOfconnections
#always create 1 connection
cls._pool=pool.ThreadedConnectionPool(1,cls._connectionNumber,host=cls._host,database=cls._database,user=cls._user,password=cls._password)
- cls._name='pgsql'
+ cls._name=cls._backend
except ImportError:
print("Please install a recent version of dev-python/psycopg for Python")
sys.exit(1)
diff --git a/gobs/pym/buildquerydb.py b/gobs/pym/buildquerydb.py
index e0745e2..abd5ea9 100644
--- a/gobs/pym/buildquerydb.py
+++ b/gobs/pym/buildquerydb.py
@@ -68,7 +68,7 @@ def add_buildquery_main(config_id):
log_msg = "Check configs done"
add_gobs_logs(conn, log_msg, "info", config_profile)
# Get default config from the configs table and default_config=1
- default_config_root = "/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id + "/"
+ default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id + "/"
# Set config_root (PORTAGE_CONFIGROOT) to default_config_root
mysettings = portage.config(config_root = default_config_root)
myportdb = portage.portdbapi(mysettings=mysettings)
diff --git a/gobs/pym/categories.py b/gobs/pym/categories.py
deleted file mode 100644
index dae1207..0000000
--- a/gobs/pym/categories.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#from gobs.text import gobs_text
-from gobs.text import get_file_text
-import portage
-from gobs.readconf import get_conf_settings
-reader=get_conf_settings()
-gobs_settings_dict=reader.read_gobs_settings_all()
-# make a CM
-from gobs.ConnectionManager import connectionManager
-CM=connectionManager(gobs_settings_dict)
-#selectively import the pgsql/mysql querys
-if CM.getName()=='pgsql':
- from gobs.pgsql import *
-
-class gobs_categories(object):
-
- def __init__(self, mysettings):
- self._mysettings = mysettings
-
- def update_categories_db(self, categories):
- conn=CM.getConnection()
- # Update categories_meta in the db
- categories_dir = self._mysettings['PORTDIR'] + "/" + categories + "/"
- categories_metadata_xml_checksum_tree = portage.checksum.sha256hash(categories_dir + "metadata.xml")[0]
- categories_metadata_xml_text_tree = get_file_text(categories_dir + "metadata.xml")
- categories_metadata_xml_checksum_db = get_categories_checksum_db(conn, categories)
- if categories_metadata_xml_checksum_db is None:
- add_new_categories_meta_sql(conn,categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree)
- elif categories_metadata_xml_checksum_db != categories_metadata_xml_checksum_tree:
- update_categories_meta_sql(conn,categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree)
- CM.putConnection(conn)
diff --git a/gobs/pym/depgraph.py b/gobs/pym/depgraph.py
index 7c46cde..0a6afc8 100644
--- a/gobs/pym/depgraph.py
+++ b/gobs/pym/depgraph.py
@@ -1,6 +1,5 @@
# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# Copy of ../pym/_emerge/depgraph.py from Portage
from __future__ import print_function
@@ -19,14 +18,19 @@ from portage import os, OrderedDict
from portage import _unicode_decode, _unicode_encode, _encodings
from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
from portage.dbapi import dbapi
+from portage.dbapi.dep_expand import dep_expand
from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
- check_required_use, human_readable_required_use, _repo_separator, \
- _RequiredUseBranch, _RequiredUseLeaf
-from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
-from portage.exception import InvalidAtom, InvalidDependString, PortageException
+ check_required_use, human_readable_required_use, match_from_list, \
+ _repo_separator
+from portage.dep._slot_operator import ignore_built_slot_operator_deps
+from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
+ _get_eapi_attrs
+from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
+ PackageNotFound, PortageException)
from portage.output import colorize, create_color_func, \
darkgreen, green
bad = create_color_func("BAD")
+from portage.package.ebuild.config import _get_feature_flags
from portage.package.ebuild.getmaskingstatus import \
_getmaskingstatus, _MaskReason
from portage._sets import SETPREFIX
@@ -73,6 +77,9 @@ from _emerge.resolver.output import Display
if sys.hexversion >= 0x3000000:
basestring = str
long = int
+ _unicode = str
+else:
+ _unicode = unicode
class _scheduler_graph_config(object):
def __init__(self, trees, pkg_cache, graph, mergelist):
@@ -85,9 +92,9 @@ def _wildcard_set(atoms):
pkgs = InternalPackageSet(allow_wildcard=True)
for x in atoms:
try:
- x = Atom(x, allow_wildcard=True)
+ x = Atom(x, allow_wildcard=True, allow_repo=False)
except portage.exception.InvalidAtom:
- x = Atom("*/" + x, allow_wildcard=True)
+ x = Atom("*/" + x, allow_wildcard=True, allow_repo=False)
pkgs.add(x)
return pkgs
@@ -110,6 +117,8 @@ class _frozen_depgraph_config(object):
self._pkg_cache = {}
self._highest_license_masked = {}
dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
+ ignore_built_slot_operator_deps = myopts.get(
+ "--ignore-built-slot-operator-deps", "n") == "y"
for myroot in trees:
self.trees[myroot] = {}
# Create a RootConfig instance that references
@@ -124,7 +133,8 @@ class _frozen_depgraph_config(object):
FakeVartree(trees[myroot]["root_config"],
pkg_cache=self._pkg_cache,
pkg_root_config=self.roots[myroot],
- dynamic_deps=dynamic_deps)
+ dynamic_deps=dynamic_deps,
+ ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
self.pkgsettings[myroot] = portage.config(
clone=self.trees[myroot]["vartree"].settings)
@@ -366,12 +376,15 @@ class _dynamic_depgraph_config(object):
# This use used to check if we have accounted for blockers
# relevant to a package.
self._traversed_pkg_deps = set()
- self._slot_collision_info = {}
+ # This should be ordered such that the backtracker will
+ # attempt to solve conflicts which occurred earlier first,
+ # since an earlier conflict can be the cause of a conflict
+ # which occurs later.
+ self._slot_collision_info = OrderedDict()
# Slot collision nodes are not allowed to block other packages since
# blocker validation is only able to account for one package per slot.
self._slot_collision_nodes = set()
self._parent_atoms = {}
- self._slot_conflict_parent_atoms = set()
self._slot_conflict_handler = None
self._circular_dependency_handler = None
self._serialized_tasks_cache = None
@@ -400,7 +413,9 @@ class _dynamic_depgraph_config(object):
self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
self._needed_license_changes = backtrack_parameters.needed_license_changes
self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
+ self._needed_required_use_config_changes = backtrack_parameters.needed_required_use_config_changes
self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
+ self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
self._need_restart = False
# For conditions that always require user intervention, such as
# unsatisfied REQUIRED_USE (currently has no autounmask support).
@@ -410,6 +425,8 @@ class _dynamic_depgraph_config(object):
self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
self._success_without_autounmask = False
self._traverse_ignored_deps = False
+ self._complete_mode = False
+ self._slot_operator_deps = {}
for myroot in depgraph._frozen_config.trees:
self.sets[myroot] = _depgraph_sets()
@@ -487,8 +504,6 @@ class _dynamic_depgraph_config(object):
class depgraph(object):
pkg_tree_map = RootConfig.pkg_tree_map
-
- _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
def __init__(self, settings, trees, myopts, myparams, spinner,
frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
@@ -521,10 +536,6 @@ class depgraph(object):
preload_installed_pkgs = \
"--nodeps" not in self._frozen_config.myopts
- if self._frozen_config.myopts.get("--root-deps") is not None and \
- myroot != self._frozen_config.target_root:
- continue
-
fake_vartree = self._frozen_config.trees[myroot]["vartree"]
if not fake_vartree.dbapi:
# This needs to be called for the first depgraph, but not for
@@ -599,11 +610,17 @@ class depgraph(object):
"due to non matching USE:\n\n", noiselevel=-1)
for pkg, flags in self._dynamic_config.ignored_binaries.items():
- writemsg(" =%s" % pkg.cpv, noiselevel=-1)
+ flag_display = []
+ for flag in sorted(flags):
+ if flag not in pkg.use.enabled:
+ flag = "-" + flag
+ flag_display.append(flag)
+ flag_display = " ".join(flag_display)
+ # The user can paste this line into package.use
+ writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
if pkg.root_config.settings["ROOT"] != "/":
- writemsg(" for %s" % (pkg.root,), noiselevel=-1)
- writemsg("\n use flag(s): %s\n" % ", ".join(sorted(flags)),
- noiselevel=-1)
+ writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
msg = [
"",
@@ -615,7 +632,7 @@ class depgraph(object):
for line in msg:
if line:
line = colorize("INFORM", line)
- writemsg_stdout(line + "\n", noiselevel=-1)
+ writemsg(line + "\n", noiselevel=-1)
def _show_missed_update(self):
@@ -801,37 +818,400 @@ class depgraph(object):
def _process_slot_conflicts(self):
"""
+ If there are any slot conflicts and backtracking is enabled,
+ _complete_graph should complete the graph before this method
+ is called, so that all relevant reverse dependencies are
+ available for use in backtracking decisions.
+ """
+ for (slot_atom, root), slot_nodes in \
+ self._dynamic_config._slot_collision_info.items():
+ self._process_slot_conflict(root, slot_atom, slot_nodes)
+
+ def _process_slot_conflict(self, root, slot_atom, slot_nodes):
+ """
Process slot conflict data to identify specific atoms which
lead to conflict. These atoms only match a subset of the
packages that have been pulled into a given slot.
"""
- for (slot_atom, root), slot_nodes \
- in self._dynamic_config._slot_collision_info.items():
- all_parent_atoms = set()
- for pkg in slot_nodes:
- parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
- if not parent_atoms:
+ debug = "--debug" in self._frozen_config.myopts
+
+ slot_parent_atoms = set()
+ for pkg in slot_nodes:
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if not parent_atoms:
+ continue
+ slot_parent_atoms.update(parent_atoms)
+
+ conflict_pkgs = []
+ conflict_atoms = {}
+ for pkg in slot_nodes:
+
+ if self._dynamic_config._allow_backtracking and \
+ pkg in self._dynamic_config._runtime_pkg_mask:
+ if debug:
+ writemsg_level(
+ "!!! backtracking loop detected: %s %s\n" % \
+ (pkg,
+ self._dynamic_config._runtime_pkg_mask[pkg]),
+ level=logging.DEBUG, noiselevel=-1)
+
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if parent_atoms is None:
+ parent_atoms = set()
+ self._dynamic_config._parent_atoms[pkg] = parent_atoms
+
+ all_match = True
+ for parent_atom in slot_parent_atoms:
+ if parent_atom in parent_atoms:
continue
- all_parent_atoms.update(parent_atoms)
+ # Use package set for matching since it will match via
+ # PROVIDE when necessary, while match_from_list does not.
+ parent, atom = parent_atom
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom,), allow_repo=True)
+ if atom_set.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ parent_atoms.add(parent_atom)
+ else:
+ all_match = False
+ conflict_atoms.setdefault(parent_atom, set()).add(pkg)
- for pkg in slot_nodes:
- parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
- if parent_atoms is None:
- parent_atoms = set()
- self._dynamic_config._parent_atoms[pkg] = parent_atoms
- for parent_atom in all_parent_atoms:
- if parent_atom in parent_atoms:
+ if not all_match:
+ conflict_pkgs.append(pkg)
+
+ if conflict_pkgs and \
+ self._dynamic_config._allow_backtracking and \
+ not self._accept_blocker_conflicts():
+ remaining = []
+ for pkg in conflict_pkgs:
+ if self._slot_conflict_backtrack_abi(pkg,
+ slot_nodes, conflict_atoms):
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+ config.setdefault("slot_conflict_abi", set()).add(pkg)
+ else:
+ remaining.append(pkg)
+ if remaining:
+ self._slot_confict_backtrack(root, slot_atom,
+ slot_parent_atoms, remaining)
+
+ def _slot_confict_backtrack(self, root, slot_atom,
+ all_parents, conflict_pkgs):
+
+ debug = "--debug" in self._frozen_config.myopts
+ existing_node = self._dynamic_config._slot_pkg_map[root][slot_atom]
+ backtrack_data = []
+ # The ordering of backtrack_data can make
+ # a difference here, because both mask actions may lead
+ # to valid, but different, solutions and the one with
+ # 'existing_node' masked is usually the better one. Because
+ # of that, we choose an order such that
+ # the backtracker will first explore the choice with
+ # existing_node masked. The backtracker reverses the
+ # order, so the order it uses is the reverse of the
+ # order shown here. See bug #339606.
+ if existing_node in conflict_pkgs and \
+ existing_node is not conflict_pkgs[-1]:
+ conflict_pkgs.remove(existing_node)
+ conflict_pkgs.append(existing_node)
+ for to_be_masked in conflict_pkgs:
+ # For missed update messages, find out which
+ # atoms matched to_be_selected that did not
+ # match to_be_masked.
+ parent_atoms = \
+ self._dynamic_config._parent_atoms.get(to_be_masked, set())
+ conflict_atoms = set(parent_atom for parent_atom in all_parents \
+ if parent_atom not in parent_atoms)
+ backtrack_data.append((to_be_masked, conflict_atoms))
+
+ if len(backtrack_data) > 1:
+ # NOTE: Generally, we prefer to mask the higher
+ # version since this solves common cases in which a
+ # lower version is needed so that all dependencies
+ # will be satisfied (bug #337178). However, if
+ # existing_node happens to be installed then we
+ # mask that since this is a common case that is
+ # triggered when --update is not enabled.
+ if existing_node.installed:
+ pass
+ elif any(pkg > existing_node for pkg in conflict_pkgs):
+ backtrack_data.reverse()
+
+ to_be_masked = backtrack_data[-1][0]
+
+ self._dynamic_config._backtrack_infos.setdefault(
+ "slot conflict", []).append(backtrack_data)
+ self._dynamic_config._need_restart = True
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to slot conflict:")
+ msg.append(" first package: %s" % existing_node)
+ msg.append(" package to mask: %s" % to_be_masked)
+ msg.append(" slot: %s" % slot_atom)
+ msg.append(" parents: %s" % ", ".join( \
+ "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
+ msg.append("")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
+ """
+ If one or more conflict atoms have a slot/sub-slot dep that can be resolved
+ by rebuilding the parent package, then schedule the rebuild via
+ backtracking, and return True. Otherwise, return False.
+ """
+
+ found_update = False
+ for parent_atom, conflict_pkgs in conflict_atoms.items():
+ parent, atom = parent_atom
+ if atom.slot_operator != "=" or not parent.built:
+ continue
+
+ if pkg not in conflict_pkgs:
+ continue
+
+ for other_pkg in slot_nodes:
+ if other_pkg in conflict_pkgs:
+ continue
+
+ dep = Dependency(atom=atom, child=other_pkg,
+ parent=parent, root=pkg.root)
+
+ if self._slot_operator_update_probe(dep):
+ self._slot_operator_update_backtrack(dep)
+ found_update = True
+
+ return found_update
+
+ def _slot_operator_update_backtrack(self, dep, new_child_slot=None):
+ if new_child_slot is None:
+ child = dep.child
+ else:
+ child = new_child_slot
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to missed slot abi update:")
+ msg.append(" child package: %s" % child)
+ if new_child_slot is not None:
+ msg.append(" new child slot package: %s" % new_child_slot)
+ msg.append(" parent package: %s" % dep.parent)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+
+ # mask unwanted binary packages if necessary
+ abi_masks = {}
+ if new_child_slot is None:
+ if not child.installed:
+ abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
+ if not dep.parent.installed:
+ abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
+ if abi_masks:
+ config.setdefault("slot_operator_mask_built", {}).update(abi_masks)
+
+ # trigger replacement of installed packages if necessary
+ abi_reinstalls = set()
+ if dep.parent.installed:
+ abi_reinstalls.add((dep.parent.root, dep.parent.slot_atom))
+ if new_child_slot is None and child.installed:
+ abi_reinstalls.add((child.root, child.slot_atom))
+ if abi_reinstalls:
+ config.setdefault("slot_operator_replace_installed",
+ set()).update(abi_reinstalls)
+
+ self._dynamic_config._need_restart = True
+
+ def _slot_operator_update_probe(self, dep, new_child_slot=False):
+ """
+ slot/sub-slot := operators tend to prevent updates from getting pulled in,
+ since installed packages pull in packages with the slot/sub-slot that they
+ were built against. Detect this case so that we can schedule rebuilds
+ and reinstalls when appropriate.
+ NOTE: This function only searches for updates that involve upgrades
+ to higher versions, since the logic required to detect when a
+ downgrade would be desirable is not implemented.
+ """
+
+ if dep.child.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child,
+ modified_use=self._pkg_use_enabled(dep.child)):
+ return None
+
+ if dep.parent.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
+ modified_use=self._pkg_use_enabled(dep.parent)):
+ return None
+
+ debug = "--debug" in self._frozen_config.myopts
+ want_downgrade = None
+
+ for replacement_parent in self._iter_similar_available(dep.parent,
+ dep.parent.slot_atom):
+
+ for atom in replacement_parent.validated_atoms:
+ if not atom.slot_operator == "=" or \
+ atom.blocker or \
+ atom.cp != dep.atom.cp:
+ continue
+
+ # Discard USE deps, we're only searching for an approximate
+ # pattern, and dealing with USE states is too complex for
+ # this purpose.
+ atom = atom.without_use
+
+ if replacement_parent.built and \
+ portage.dep._match_slot(atom, dep.child):
+ # Our selected replacement_parent appears to be built
+ # for the existing child selection. So, discard this
+ # parent and search for another.
+ break
+
+ for pkg in self._iter_similar_available(
+ dep.child, atom):
+ if pkg.slot == dep.child.slot and \
+ pkg.sub_slot == dep.child.sub_slot:
+ # If slot/sub-slot is identical, then there's
+ # no point in updating.
continue
- # Use package set for matching since it will match via
- # PROVIDE when necessary, while match_from_list does not.
- parent, atom = parent_atom
- atom_set = InternalPackageSet(
- initial_atoms=(atom,), allow_repo=True)
- if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
- parent_atoms.add(parent_atom)
+ if new_child_slot:
+ if pkg.slot == dep.child.slot:
+ continue
+ if pkg < dep.child:
+ # the new slot only matters if the
+ # package version is higher
+ continue
else:
- self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
+ if pkg.slot != dep.child.slot:
+ continue
+ if pkg < dep.child:
+ if want_downgrade is None:
+ want_downgrade = self._downgrade_probe(dep.child)
+ # be careful not to trigger a rebuild when
+ # the only version available with a
+ # different slot_operator is an older version
+ if not want_downgrade:
+ continue
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_update_probe:")
+ msg.append(" existing child package: %s" % dep.child)
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" new child package: %s" % pkg)
+ msg.append(" new parent package: %s" % replacement_parent)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return pkg
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_update_probe:")
+ msg.append(" existing child package: %s" % dep.child)
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" new child package: %s" % None)
+ msg.append(" new parent package: %s" % None)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return None
+
+ def _downgrade_probe(self, pkg):
+ """
+ Detect cases where a downgrade of the given package is considered
+ desirable due to the current version being masked or unavailable.
+ """
+ available_pkg = None
+ for available_pkg in self._iter_similar_available(pkg,
+ pkg.slot_atom):
+ if available_pkg >= pkg:
+ # There's an available package of the same or higher
+ # version, so downgrade seems undesirable.
+ return False
+
+ return available_pkg is not None
+
+ def _iter_similar_available(self, graph_pkg, atom):
+ """
+ Given a package that's in the graph, do a rough check to
+ see if a similar package is available to install. The given
+ graph_pkg itself may be yielded only if it's not installed.
+ """
+
+ usepkgonly = "--usepkgonly" in self._frozen_config.myopts
+ useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
+ use_ebuild_visibility = self._frozen_config.myopts.get(
+ '--use-ebuild-visibility', 'n') != 'n'
+
+ for pkg in self._iter_match_pkgs_any(
+ graph_pkg.root_config, atom):
+ if pkg.cp != graph_pkg.cp:
+ # discard old-style virtual match
+ continue
+ if pkg.installed:
+ continue
+ if pkg in self._dynamic_config._runtime_pkg_mask:
+ continue
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+ if not self._pkg_visibility_check(pkg):
+ continue
+ if pkg.built:
+ if self._equiv_binary_installed(pkg):
+ continue
+ if not (not use_ebuild_visibility and
+ (usepkgonly or useoldpkg_atoms.findAtomForPackage(
+ pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
+ not self._equiv_ebuild_visible(pkg):
+ continue
+ yield pkg
+
+ def _slot_operator_trigger_reinstalls(self):
+ """
+ Search for packages with slot-operator deps on older slots, and schedule
+ rebuilds if they can link to a newer slot that's in the graph.
+ """
+
+ rebuild_if_new_slot = self._dynamic_config.myparams.get(
+ "rebuild_if_new_slot", "y") == "y"
+
+ for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():
+
+ for dep in slot_info:
+ if not (dep.child.built and dep.parent and
+ isinstance(dep.parent, Package) and dep.parent.built):
+ continue
+
+ # Check for slot update first, since we don't want to
+ # trigger reinstall of the child package when a newer
+ # slot will be used instead.
+ if rebuild_if_new_slot:
+ new_child = self._slot_operator_update_probe(dep,
+ new_child_slot=True)
+ if new_child:
+ self._slot_operator_update_backtrack(dep,
+ new_child_slot=new_child)
+ break
+
+ if dep.want_update:
+ if self._slot_operator_update_probe(dep):
+ self._slot_operator_update_backtrack(dep)
+ break
def _reinstall_for_flags(self, pkg, forced_flags,
orig_use, orig_iuse, cur_use, cur_iuse):
@@ -845,18 +1225,22 @@ class depgraph(object):
in ("y", "auto"))
newuse = "--newuse" in self._frozen_config.myopts
changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
+ feature_flags = _get_feature_flags(
+ _get_eapi_attrs(pkg.metadata["EAPI"]))
if newuse or (binpkg_respect_use and not changed_use):
flags = set(orig_iuse.symmetric_difference(
cur_iuse).difference(forced_flags))
flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use)))
+ flags.difference_update(feature_flags)
if flags:
return flags
elif changed_use or binpkg_respect_use:
- flags = orig_iuse.intersection(orig_use).symmetric_difference(
- cur_iuse.intersection(cur_use))
+ flags = set(orig_iuse.intersection(orig_use).symmetric_difference(
+ cur_iuse.intersection(cur_use)))
+ flags.difference_update(feature_flags)
if flags:
return flags
return None
@@ -1100,12 +1484,13 @@ class depgraph(object):
# package selection, since we want to prompt the user
# for USE adjustment rather than have REQUIRED_USE
# affect package selection and || dep choices.
- """if not pkg.built and pkg.metadata.get("REQUIRED_USE") and \
+ if not pkg.built and pkg.metadata.get("REQUIRED_USE") and \
eapi_has_required_use(pkg.metadata["EAPI"]):
required_use_is_sat = check_required_use(
pkg.metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag)
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.metadata["EAPI"])
if not required_use_is_sat:
if dep.atom is not None and dep.parent is not None:
self._add_parent_atom(pkg, (dep.parent, dep.atom))
@@ -1119,9 +1504,10 @@ class depgraph(object):
if atom is None:
atom = Atom("=" + pkg.cpv)
self._dynamic_config._unsatisfied_deps_for_display.append(
- ((pkg.root, atom), {"myparent":dep.parent}))
+ ((pkg.root, atom),
+ {"myparent" : dep.parent, "show_req_use" : pkg}))
self._dynamic_config._skip_restart = True
- return 0"""
+ return 0
if not pkg.onlydeps:
@@ -1150,121 +1536,7 @@ class depgraph(object):
(dep.parent, dep.atom))
return 1
else:
- # A slot conflict has occurred.
- # The existing node should not already be in
- # runtime_pkg_mask, since that would trigger an
- # infinite backtracking loop.
- if self._dynamic_config._allow_backtracking and \
- existing_node in \
- self._dynamic_config._runtime_pkg_mask:
- if "--debug" in self._frozen_config.myopts:
- writemsg(
- "!!! backtracking loop detected: %s %s\n" % \
- (existing_node,
- self._dynamic_config._runtime_pkg_mask[
- existing_node]), noiselevel=-1)
- elif self._dynamic_config._allow_backtracking and \
- not self._accept_blocker_conflicts() and \
- not self.need_restart():
-
- self._add_slot_conflict(pkg)
- if dep.atom is not None and dep.parent is not None:
- self._add_parent_atom(pkg, (dep.parent, dep.atom))
-
- if arg_atoms:
- for parent_atom in arg_atoms:
- parent, atom = parent_atom
- self._add_parent_atom(pkg, parent_atom)
- self._process_slot_conflicts()
-
- backtrack_data = []
- fallback_data = []
- all_parents = set()
- # The ordering of backtrack_data can make
- # a difference here, because both mask actions may lead
- # to valid, but different, solutions and the one with
- # 'existing_node' masked is usually the better one. Because
- # of that, we choose an order such that
- # the backtracker will first explore the choice with
- # existing_node masked. The backtracker reverses the
- # order, so the order it uses is the reverse of the
- # order shown here. See bug #339606.
- for to_be_selected, to_be_masked in (existing_node, pkg), (pkg, existing_node):
- # For missed update messages, find out which
- # atoms matched to_be_selected that did not
- # match to_be_masked.
- parent_atoms = \
- self._dynamic_config._parent_atoms.get(to_be_selected, set())
- if parent_atoms:
- conflict_atoms = self._dynamic_config._slot_conflict_parent_atoms.intersection(parent_atoms)
- if conflict_atoms:
- parent_atoms = conflict_atoms
-
- all_parents.update(parent_atoms)
-
- all_match = True
- for parent, atom in parent_atoms:
- i = InternalPackageSet(initial_atoms=(atom,),
- allow_repo=True)
- if not i.findAtomForPackage(to_be_masked):
- all_match = False
- break
-
- fallback_data.append((to_be_masked, parent_atoms))
-
- if all_match:
- # 'to_be_masked' does not violate any parent atom, which means
- # there is no point in masking it.
- pass
- else:
- backtrack_data.append((to_be_masked, parent_atoms))
-
- if not backtrack_data:
- # This shouldn't happen, but fall back to the old
- # behavior if this gets triggered somehow.
- backtrack_data = fallback_data
-
- if len(backtrack_data) > 1:
- # NOTE: Generally, we prefer to mask the higher
- # version since this solves common cases in which a
- # lower version is needed so that all dependencies
- # will be satisfied (bug #337178). However, if
- # existing_node happens to be installed then we
- # mask that since this is a common case that is
- # triggered when --update is not enabled.
- if existing_node.installed:
- pass
- elif pkg > existing_node:
- backtrack_data.reverse()
-
- to_be_masked = backtrack_data[-1][0]
-
- self._dynamic_config._backtrack_infos["slot conflict"] = backtrack_data
- self._dynamic_config._need_restart = True
- if "--debug" in self._frozen_config.myopts:
- msg = []
- msg.append("")
- msg.append("")
- msg.append("backtracking due to slot conflict:")
- if backtrack_data is fallback_data:
- msg.append("!!! backtrack_data fallback")
- msg.append(" first package: %s" % existing_node)
- msg.append(" second package: %s" % pkg)
- msg.append(" package to mask: %s" % to_be_masked)
- msg.append(" slot: %s" % pkg.slot_atom)
- msg.append(" parents: %s" % ", ".join( \
- "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
- msg.append("")
- writemsg_level("".join("%s\n" % l for l in msg),
- noiselevel=-1, level=logging.DEBUG)
- return 0
-
- # A slot collision has occurred. Sometimes this coincides
- # with unresolvable blockers, so the slot collision will be
- # shown later if there are no unresolvable blockers.
self._add_slot_conflict(pkg)
- slot_collision = True
-
if debug:
writemsg_level(
"%s%s %s\n" % ("Slot Conflict:".ljust(15),
@@ -1273,6 +1545,8 @@ class depgraph(object):
modified_use=self._pkg_use_enabled(existing_node))),
level=logging.DEBUG, noiselevel=-1)
+ slot_collision = True
+
if slot_collision:
# Now add this node to the graph so that self.display()
# can show use flags and --tree portage.output. This node is
@@ -1329,10 +1603,27 @@ class depgraph(object):
# Installing package A, we need to make sure package A's deps are met.
# emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
# If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
- if arg_atoms:
- depth = 0
+ if arg_atoms and depth > 0:
+ for parent, atom in arg_atoms:
+ if parent.reset_depth:
+ depth = 0
+ break
+
+ if previously_added and pkg.depth is not None:
+ depth = min(pkg.depth, depth)
pkg.depth = depth
deep = self._dynamic_config.myparams.get("deep", 0)
+ update = "--update" in self._frozen_config.myopts
+
+ dep.want_update = (not self._dynamic_config._complete_mode and
+ (arg_atoms or update) and
+ not (deep is not True and depth > deep))
+
+ dep.child = pkg
+ if (not pkg.onlydeps and pkg.built and
+ dep.atom and dep.atom.slot_operator_built):
+ self._add_slot_operator_dep(dep)
+
recurse = deep is True or depth + 1 <= deep
dep_stack = self._dynamic_config._dep_stack
if "recurse" not in self._dynamic_config.myparams:
@@ -1364,6 +1655,14 @@ class depgraph(object):
self._dynamic_config._parent_atoms[pkg] = parent_atoms
parent_atoms.add(parent_atom)
+ def _add_slot_operator_dep(self, dep):
+ slot_key = (dep.root, dep.child.slot_atom)
+ slot_info = self._dynamic_config._slot_operator_deps.get(slot_key)
+ if slot_info is None:
+ slot_info = []
+ self._dynamic_config._slot_operator_deps[slot_key] = slot_info
+ slot_info.append(dep)
+
def _add_slot_conflict(self, pkg):
self._dynamic_config._slot_collision_nodes.add(pkg)
slot_key = (pkg.slot_atom, pkg.root)
@@ -1379,10 +1678,10 @@ class depgraph(object):
myroot = pkg.root
metadata = pkg.metadata
removal_action = "remove" in self._dynamic_config.myparams
+ eapi_attrs = _get_eapi_attrs(pkg.metadata["EAPI"])
edepend={}
- depkeys = ["DEPEND","RDEPEND","PDEPEND"]
- for k in depkeys:
+ for k in Package._dep_keys:
edepend[k] = metadata[k]
if not pkg.built and \
@@ -1409,31 +1708,44 @@ class depgraph(object):
# Removal actions never traverse ignored buildtime
# dependencies, so it's safe to discard them early.
edepend["DEPEND"] = ""
+ edepend["HDEPEND"] = ""
ignore_build_time_deps = True
+ ignore_depend_deps = ignore_build_time_deps
+ ignore_hdepend_deps = ignore_build_time_deps
+
if removal_action:
depend_root = myroot
else:
- depend_root = self._frozen_config._running_root.root
- root_deps = self._frozen_config.myopts.get("--root-deps")
- if root_deps is not None:
- if root_deps is True:
- depend_root = myroot
- elif root_deps == "rdeps":
- ignore_build_time_deps = True
+ if eapi_attrs.hdepend:
+ depend_root = myroot
+ else:
+ depend_root = self._frozen_config._running_root.root
+ root_deps = self._frozen_config.myopts.get("--root-deps")
+ if root_deps is not None:
+ if root_deps is True:
+ depend_root = myroot
+ elif root_deps == "rdeps":
+ ignore_depend_deps = True
# If rebuild mode is not enabled, it's safe to discard ignored
# build-time dependencies. If you want these deps to be traversed
# in "complete" mode then you need to specify --with-bdeps=y.
- if ignore_build_time_deps and \
- not self._rebuild.rebuild:
- edepend["DEPEND"] = ""
+ if not self._rebuild.rebuild:
+ if ignore_depend_deps:
+ edepend["DEPEND"] = ""
+ if ignore_hdepend_deps:
+ edepend["HDEPEND"] = ""
deps = (
(depend_root, edepend["DEPEND"],
self._priority(buildtime=True,
- optional=(pkg.built or ignore_build_time_deps),
- ignored=ignore_build_time_deps)),
+ optional=(pkg.built or ignore_depend_deps),
+ ignored=ignore_depend_deps)),
+ (self._frozen_config._running_root.root, edepend["HDEPEND"],
+ self._priority(buildtime=True,
+ optional=(pkg.built or ignore_hdepend_deps),
+ ignored=ignore_hdepend_deps)),
(myroot, edepend["RDEPEND"],
self._priority(runtime=True)),
(myroot, edepend["PDEPEND"],
@@ -1455,7 +1767,10 @@ class depgraph(object):
try:
dep_string = portage.dep.use_reduce(dep_string,
- uselist=self._pkg_use_enabled(pkg), is_valid_flag=pkg.iuse.is_valid_flag)
+ uselist=self._pkg_use_enabled(pkg),
+ is_valid_flag=pkg.iuse.is_valid_flag,
+ opconvert=True, token_class=Atom,
+ eapi=pkg.metadata['EAPI'])
except portage.exception.InvalidDependString as e:
if not pkg.installed:
# should have been masked before it was selected
@@ -1467,7 +1782,9 @@ class depgraph(object):
# practical to ignore this issue for installed packages.
try:
dep_string = portage.dep.use_reduce(dep_string,
- uselist=self._pkg_use_enabled(pkg))
+ uselist=self._pkg_use_enabled(pkg),
+ opconvert=True, token_class=Atom,
+ eapi=pkg.metadata['EAPI'])
except portage.exception.InvalidDependString as e:
self._dynamic_config._masked_installed.add(pkg)
del e
@@ -1488,9 +1805,6 @@ class depgraph(object):
if not dep_string:
continue
- dep_string = portage.dep.paren_enclose(dep_string,
- unevaluated_atom=True)
-
if not self._add_pkg_dep_string(
pkg, dep_root, dep_priority, dep_string,
allow_unsatisfied):
@@ -1524,7 +1838,9 @@ class depgraph(object):
if debug:
writemsg_level("\nParent: %s\n" % (pkg,),
noiselevel=-1, level=logging.DEBUG)
- writemsg_level("Depstring: %s\n" % (dep_string,),
+ dep_repr = portage.dep.paren_enclose(dep_string,
+ unevaluated_atom=True, opconvert=True)
+ writemsg_level("Depstring: %s\n" % (dep_repr,),
noiselevel=-1, level=logging.DEBUG)
writemsg_level("Priority: %s\n" % (dep_priority,),
noiselevel=-1, level=logging.DEBUG)
@@ -1602,16 +1918,11 @@ class depgraph(object):
self._dynamic_config._slot_pkg_map[dep.child.root].get(
dep.child.slot_atom) is None:
myarg = None
- if dep.root == self._frozen_config.target_root:
- try:
- myarg = next(self._iter_atoms_for_pkg(dep.child))
- except StopIteration:
- pass
- except InvalidDependString:
- if not dep.child.installed:
- # This shouldn't happen since the package
- # should have been masked.
- raise
+ try:
+ myarg = next(self._iter_atoms_for_pkg(dep.child), None)
+ except InvalidDependString:
+ if not dep.child.installed:
+ raise
if myarg is None:
# Existing child selection may not be valid unless
@@ -1717,14 +2028,11 @@ class depgraph(object):
self._dynamic_config._slot_pkg_map[dep.child.root].get(
dep.child.slot_atom) is None:
myarg = None
- if dep.root == self._frozen_config.target_root:
- try:
- myarg = next(self._iter_atoms_for_pkg(dep.child))
- except StopIteration:
- pass
- except InvalidDependString:
- if not dep.child.installed:
- raise
+ try:
+ myarg = next(self._iter_atoms_for_pkg(dep.child), None)
+ except InvalidDependString:
+ if not dep.child.installed:
+ raise
if myarg is None:
ignored = True
@@ -1818,9 +2126,14 @@ class depgraph(object):
# Yield ~, =*, < and <= atoms first, since those are more likely to
# cause slot conflicts, and we want those atoms to be displayed
# in the resulting slot conflict message (see bug #291142).
+ # Give similar treatment to slot/sub-slot atoms.
conflict_atoms = []
normal_atoms = []
+ abi_atoms = []
for atom in cp_atoms:
+ if atom.slot_operator_built:
+ abi_atoms.append(atom)
+ continue
conflict = False
for child_pkg in atom_pkg_graph.child_nodes(atom):
existing_node, matches = \
@@ -1833,7 +2146,7 @@ class depgraph(object):
else:
normal_atoms.append(atom)
- for atom in chain(conflict_atoms, normal_atoms):
+ for atom in chain(abi_atoms, conflict_atoms, normal_atoms):
child_pkgs = atom_pkg_graph.child_nodes(atom)
# if more than one child, yield highest version
if len(child_pkgs) > 1:
@@ -1846,34 +2159,22 @@ class depgraph(object):
Yields non-disjunctive deps. Raises InvalidDependString when
necessary.
"""
- i = 0
- while i < len(dep_struct):
- x = dep_struct[i]
+ for x in dep_struct:
if isinstance(x, list):
- for y in self._queue_disjunctive_deps(
- pkg, dep_root, dep_priority, x):
- yield y
- elif x == "||":
- self._queue_disjunction(pkg, dep_root, dep_priority,
- [ x, dep_struct[ i + 1 ] ] )
- i += 1
+ if x and x[0] == "||":
+ self._queue_disjunction(pkg, dep_root, dep_priority, [x])
+ else:
+ for y in self._queue_disjunctive_deps(
+ pkg, dep_root, dep_priority, x):
+ yield y
else:
- try:
- x = portage.dep.Atom(x, eapi=pkg.metadata["EAPI"])
- except portage.exception.InvalidAtom:
- if not pkg.installed:
- raise portage.exception.InvalidDependString(
- "invalid atom: '%s'" % x)
+ # Note: Eventually this will check for PROPERTIES=virtual
+ # or whatever other metadata gets implemented for this
+ # purpose.
+ if x.cp.startswith('virtual/'):
+ self._queue_disjunction(pkg, dep_root, dep_priority, [x])
else:
- # Note: Eventually this will check for PROPERTIES=virtual
- # or whatever other metadata gets implemented for this
- # purpose.
- if x.cp.startswith('virtual/'):
- self._queue_disjunction( pkg, dep_root,
- dep_priority, [ str(x) ] )
- else:
- yield str(x)
- i += 1
+ yield x
def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
self._dynamic_config._dep_disjunctive_stack.append(
@@ -1886,10 +2187,8 @@ class depgraph(object):
"""
pkg, dep_root, dep_priority, dep_struct = \
self._dynamic_config._dep_disjunctive_stack.pop()
- dep_string = portage.dep.paren_enclose(dep_struct,
- unevaluated_atom=True)
if not self._add_pkg_dep_string(
- pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
+ pkg, dep_root, dep_priority, dep_struct, allow_unsatisfied):
return 0
return 1
@@ -1999,8 +2298,18 @@ class depgraph(object):
writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
return 0, myfavorites
mytbz2=portage.xpak.tbz2(x)
- mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
- if os.path.realpath(x) != \
+ mykey = None
+ cat = mytbz2.getfile("CATEGORY")
+ if cat is not None:
+ cat = _unicode_decode(cat.strip(),
+ encoding=_encodings['repo.content'])
+ mykey = cat + "/" + os.path.basename(x)[:-5]
+
+ if mykey is None:
+ writemsg(colorize("BAD", "\n*** Package is missing CATEGORY metadata: %s.\n\n" % x), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+ elif os.path.realpath(x) != \
os.path.realpath(bindb.bintree.getname(mykey)):
writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
self._dynamic_config._skip_restart = True
@@ -2193,13 +2502,8 @@ class depgraph(object):
return 0, []
for cpv in owners:
- slot = vardb.aux_get(cpv, ["SLOT"])[0]
- if not slot:
- # portage now masks packages with missing slot, but it's
- # possible that one was installed by an older version
- atom = Atom(portage.cpv_getkey(cpv))
- else:
- atom = Atom("%s:%s" % (portage.cpv_getkey(cpv), slot))
+ pkg = vardb._pkg_str(cpv, None)
+ atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
args.append(AtomArg(arg=atom, atom=atom,
root_config=root_config))
@@ -2247,6 +2551,7 @@ class depgraph(object):
args = revised_greedy_args
del revised_greedy_args
+ args.extend(self._gen_reinstall_sets())
self._set_args(args)
myfavorites = set(myfavorites)
@@ -2254,7 +2559,8 @@ class depgraph(object):
if isinstance(arg, (AtomArg, PackageArg)):
myfavorites.add(arg.atom)
elif isinstance(arg, SetArg):
- myfavorites.add(arg.arg)
+ if not arg.internal:
+ myfavorites.add(arg.arg)
myfavorites = list(myfavorites)
if debug:
@@ -2264,7 +2570,33 @@ class depgraph(object):
self._dynamic_config._initial_arg_list = args[:]
return self._resolve(myfavorites)
-
+
+ def _gen_reinstall_sets(self):
+
+ atom_list = []
+ for root, atom in self._rebuild.rebuild_list:
+ atom_list.append((root, '__auto_rebuild__', atom))
+ for root, atom in self._rebuild.reinstall_list:
+ atom_list.append((root, '__auto_reinstall__', atom))
+ for root, atom in self._dynamic_config._slot_operator_replace_installed:
+ atom_list.append((root, '__auto_slot_operator_replace_installed__', atom))
+
+ set_dict = {}
+ for root, set_name, atom in atom_list:
+ set_dict.setdefault((root, set_name), []).append(atom)
+
+ for (root, set_name), atoms in set_dict.items():
+ yield SetArg(arg=(SETPREFIX + set_name),
+ # Set reset_depth=False here, since we don't want these
+ # special sets to interact with depth calculations (see
+ # the emerge --deep=DEPTH option), though we want them
+ # to behave like normal arguments in most other respects.
+ pset=InternalPackageSet(initial_atoms=atoms),
+ force_reinstall=True,
+ internal=True,
+ reset_depth=False,
+ root_config=self._frozen_config.roots[root])
+
def _resolve(self, myfavorites):
"""Given self._dynamic_config._initial_arg_list, pull in the root nodes,
call self._creategraph to process theier deps and return
@@ -2276,10 +2608,7 @@ class depgraph(object):
pprovideddict = pkgsettings.pprovideddict
virtuals = pkgsettings.getvirtuals()
args = self._dynamic_config._initial_arg_list[:]
- for root, atom in chain(self._rebuild.rebuild_list,
- self._rebuild.reinstall_list):
- args.append(AtomArg(arg=atom, atom=atom,
- root_config=self._frozen_config.roots[root]))
+
for arg in self._expand_set_args(args, add_to_digraph=True):
for atom in arg.pset.getAtoms():
self._spinner_update()
@@ -2389,22 +2718,12 @@ class depgraph(object):
except self._unknown_internal_error:
return False, myfavorites
- digraph_set = frozenset(self._dynamic_config.digraph)
-
- if digraph_set.intersection(
- self._dynamic_config._needed_unstable_keywords) or \
- digraph_set.intersection(
- self._dynamic_config._needed_p_mask_changes) or \
- digraph_set.intersection(
- self._dynamic_config._needed_use_config_changes) or \
- digraph_set.intersection(
- self._dynamic_config._needed_license_changes) :
- #We failed if the user needs to change the configuration
- self._dynamic_config._success_without_autounmask = True
+ if (self._dynamic_config._slot_collision_info and
+ not self._accept_blocker_conflicts()) or \
+ (self._dynamic_config._allow_backtracking and
+ "slot conflict" in self._dynamic_config._backtrack_infos):
return False, myfavorites
- digraph_set = None
-
if self._rebuild.trigger_rebuilds():
backtrack_infos = self._dynamic_config._backtrack_infos
config = backtrack_infos.setdefault("config", {})
@@ -2413,6 +2732,32 @@ class depgraph(object):
self._dynamic_config._need_restart = True
return False, myfavorites
+ if "config" in self._dynamic_config._backtrack_infos and \
+ ("slot_operator_mask_built" in self._dynamic_config._backtrack_infos["config"] or
+ "slot_operator_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
+ self.need_restart():
+ return False, myfavorites
+
+ # Any failures except those due to autounmask *alone* should return
+ # before this point, since the success_without_autounmask flag that's
+ # set below is reserved for cases where there are *zero* other
+ # problems. For reference, see backtrack_depgraph, where it skips the
+ # get_best_run() call when success_without_autounmask is True.
+
+ digraph_nodes = self._dynamic_config.digraph.nodes
+
+ if any(x in digraph_nodes for x in
+ self._dynamic_config._needed_unstable_keywords) or \
+ any(x in digraph_nodes for x in
+ self._dynamic_config._needed_p_mask_changes) or \
+ any(x in digraph_nodes for x in
+ self._dynamic_config._needed_use_config_changes) or \
+ any(x in digraph_nodes for x in
+ self._dynamic_config._needed_license_changes) :
+ #We failed if the user needs to change the configuration
+ self._dynamic_config._success_without_autounmask = True
+ return False, myfavorites
+
# We're true here unless we are missing binaries.
return (True, myfavorites)
@@ -2485,14 +2830,15 @@ class depgraph(object):
slots = set()
for cpv in vardb.match(atom):
# don't mix new virtuals with old virtuals
- if portage.cpv_getkey(cpv) == highest_pkg.cp:
- slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
+ pkg = vardb._pkg_str(cpv, None)
+ if pkg.cp == highest_pkg.cp:
+ slots.add(pkg.slot)
- slots.add(highest_pkg.metadata["SLOT"])
+ slots.add(highest_pkg.slot)
if len(slots) == 1:
return []
greedy_pkgs = []
- slots.remove(highest_pkg.metadata["SLOT"])
+ slots.remove(highest_pkg.slot)
while slots:
slot = slots.pop()
slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
@@ -2506,7 +2852,7 @@ class depgraph(object):
return [pkg.slot_atom for pkg in greedy_pkgs]
blockers = {}
- blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
+ blocker_dep_keys = Package._dep_keys
for pkg in greedy_pkgs + [highest_pkg]:
dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
try:
@@ -2567,6 +2913,22 @@ class depgraph(object):
"""This will raise InvalidDependString if necessary. If trees is
None then self._dynamic_config._filtered_trees is used."""
+ if not isinstance(depstring, list):
+ eapi = None
+ is_valid_flag = None
+ if parent is not None:
+ eapi = parent.metadata['EAPI']
+ if not parent.installed:
+ is_valid_flag = parent.iuse.is_valid_flag
+ depstring = portage.dep.use_reduce(depstring,
+ uselist=myuse, opconvert=True, token_class=Atom,
+ is_valid_flag=is_valid_flag, eapi=eapi)
+
+ if (self._dynamic_config.myparams.get(
+ "ignore_built_slot_operator_deps", "n") == "y" and
+ parent and parent.built):
+ ignore_built_slot_operator_deps(depstring)
+
pkgsettings = self._frozen_config.pkgsettings[root]
if trees is None:
trees = self._dynamic_config._filtered_trees
@@ -2751,7 +3113,7 @@ class depgraph(object):
if target_atom is not None and isinstance(node, Package):
affecting_use = set()
- for dep_str in "DEPEND", "RDEPEND", "PDEPEND":
+ for dep_str in Package._dep_keys:
try:
affecting_use.update(extract_affecting_use(
node.metadata[dep_str], target_atom,
@@ -2832,13 +3194,13 @@ class depgraph(object):
if priorities is None:
# This edge comes from _parent_atoms and was not added to
# the graph, and _parent_atoms does not contain priorities.
- dep_strings.add(node.metadata["DEPEND"])
- dep_strings.add(node.metadata["RDEPEND"])
- dep_strings.add(node.metadata["PDEPEND"])
+ for k in Package._dep_keys:
+ dep_strings.add(node.metadata[k])
else:
for priority in priorities:
if priority.buildtime:
- dep_strings.add(node.metadata["DEPEND"])
+ for k in Package._buildtime_keys:
+ dep_strings.add(node.metadata[k])
if priority.runtime:
dep_strings.add(node.metadata["RDEPEND"])
if priority.runtime_post:
@@ -2930,7 +3292,7 @@ class depgraph(object):
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
- check_backtrack=False, check_autounmask_breakage=False):
+ check_backtrack=False, check_autounmask_breakage=False, show_req_use=None):
"""
When check_backtrack=True, no output is produced and
the method either returns or raises _backtrack_mask if
@@ -3037,7 +3399,8 @@ class depgraph(object):
if not check_required_use(
pkg.metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag):
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.metadata["EAPI"]):
required_use_unsatisfied.append(pkg)
continue
root_slot = (pkg.root, pkg.slot_atom)
@@ -3082,7 +3445,7 @@ class depgraph(object):
untouchable_flags = \
frozenset(chain(pkg.use.mask, pkg.use.force))
- if untouchable_flags.intersection(
+ if any(x in untouchable_flags for x in
chain(need_enable, need_disable)):
continue
@@ -3096,8 +3459,10 @@ class depgraph(object):
new_use.add(flag)
for flag in need_disable:
new_use.discard(flag)
- if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
+ if check_required_use(required_use, old_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]) \
+ and not check_required_use(required_use, new_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]):
required_use_warning = ", this change violates use flag constraints " + \
"defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
@@ -3132,7 +3497,7 @@ class depgraph(object):
untouchable_flags = \
frozenset(chain(myparent.use.mask, myparent.use.force))
- if untouchable_flags.intersection(involved_flags):
+ if any(x in untouchable_flags for x in involved_flags):
continue
required_use = myparent.metadata.get("REQUIRED_USE")
@@ -3145,8 +3510,12 @@ class depgraph(object):
new_use.discard(flag)
else:
new_use.add(flag)
- if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
+ if check_required_use(required_use, old_use,
+ myparent.iuse.is_valid_flag,
+ eapi=myparent.metadata["EAPI"]) and \
+ not check_required_use(required_use, new_use,
+ myparent.iuse.is_valid_flag,
+ eapi=myparent.metadata["EAPI"]):
required_use_warning = ", this change violates use flag constraints " + \
"defined by %s: '%s'" % (myparent.cpv, \
human_readable_required_use(required_use))
@@ -3211,62 +3580,72 @@ class depgraph(object):
mask_docs = False
- if required_use_unsatisfied:
+ if show_req_use is None and required_use_unsatisfied:
# We have an unmasked package that only requires USE adjustment
# in order to satisfy REQUIRED_USE, and nothing more. We assume
# that the user wants the latest version, so only the first
# instance is displayed.
- pkg = required_use_unsatisfied[0]
+ show_req_use = required_use_unsatisfied[0]
+ self._dynamic_config._needed_required_use_config_changes[pkg] = (new_use, new_changes)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ backtrack_infos.setdefault("config", {})
+ backtrack_infos["config"].setdefault("needed_required_use_config_changes", [])
+ backtrack_infos["config"]["needed_required_use_config_changes"].append((pkg, (new_use, new_changes)))
+
+ if show_req_use is not None:
+
+ pkg = show_req_use
output_cpv = pkg.cpv + _repo_separator + pkg.repo
- writemsg_stdout("\n!!! " + \
+ writemsg("\n!!! " + \
colorize("BAD", "The ebuild selected to satisfy ") + \
colorize("INFORM", xinfo) + \
colorize("BAD", " has unmet requirements.") + "\n",
noiselevel=-1)
use_display = pkg_use_display(pkg, self._frozen_config.myopts)
- writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
+ writemsg("- %s %s\n" % (output_cpv, use_display),
noiselevel=-1)
- writemsg_stdout("\n The following REQUIRED_USE flag constraints " + \
+ writemsg("\n The following REQUIRED_USE flag constraints " + \
"are unsatisfied:\n", noiselevel=-1)
reduced_noise = check_required_use(
pkg.metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag).tounicode()
- writemsg_stdout(" %s\n" % \
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.metadata["EAPI"]).tounicode()
+ writemsg(" %s\n" % \
human_readable_required_use(reduced_noise),
noiselevel=-1)
normalized_required_use = \
" ".join(pkg.metadata["REQUIRED_USE"].split())
if reduced_noise != normalized_required_use:
- writemsg_stdout("\n The above constraints " + \
+ writemsg("\n The above constraints " + \
"are a subset of the following complete expression:\n",
noiselevel=-1)
- writemsg_stdout(" %s\n" % \
+ writemsg(" %s\n" % \
human_readable_required_use(normalized_required_use),
noiselevel=-1)
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
elif show_missing_use:
- writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
- writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
+ writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+ writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
for pkg, mreasons in show_missing_use:
- writemsg_stdout("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
+ writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
elif masked_packages:
- writemsg_stdout("\n!!! " + \
+ writemsg("\n!!! " + \
colorize("BAD", "All ebuilds that could satisfy ") + \
colorize("INFORM", xinfo) + \
colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
- writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
+ writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
have_eapi_mask = show_masked_packages(masked_packages)
if have_eapi_mask:
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
msg = ("The current version of portage supports " + \
"EAPI '%s'. You must upgrade to a newer version" + \
" of portage before EAPI masked packages can" + \
" be installed.") % portage.const.EAPI
- writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
mask_docs = True
else:
cp_exists = False
@@ -3276,7 +3655,7 @@ class depgraph(object):
cp_exists = True
break
- writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+ writemsg("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
if isinstance(myparent, AtomArg) and \
not cp_exists and \
self._frozen_config.myopts.get(
@@ -3286,7 +3665,7 @@ class depgraph(object):
if cat == "null":
cat = None
- writemsg_stdout("\nemerge: searching for similar names..."
+ writemsg("\nemerge: searching for similar names..."
, noiselevel=-1)
all_cp = set()
@@ -3334,16 +3713,16 @@ class depgraph(object):
matches = matches_orig_case
if len(matches) == 1:
- writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
+ writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
, noiselevel=-1)
elif len(matches) > 1:
- writemsg_stdout(
+ writemsg(
"\nemerge: Maybe you meant any of these: %s?\n" % \
(", ".join(matches),), noiselevel=-1)
else:
# Generally, this would only happen if
# all dbapis are empty.
- writemsg_stdout(" nothing similar found.\n"
+ writemsg(" nothing similar found.\n"
, noiselevel=-1)
msg = []
if not isinstance(myparent, AtomArg):
@@ -3356,12 +3735,12 @@ class depgraph(object):
(node)), node_type))
if msg:
- writemsg_stdout("\n".join(msg), noiselevel=-1)
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n".join(msg), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
if mask_docs:
show_mask_docs()
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
for db, pkg_type, built, installed, db_keys in \
@@ -3379,60 +3758,12 @@ class depgraph(object):
"""
db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
-
- if hasattr(db, "xmatch"):
- # For portdbapi we match only against the cpv, in order
- # to bypass unnecessary cache access for things like IUSE
- # and SLOT. Later, we cache the metadata in a Package
- # instance, and use that for further matching. This
- # optimization is especially relevant since
- # pordbapi.aux_get() does not cache calls that have
- # myrepo or mytree arguments.
- cpv_list = db.xmatch("match-all-cpv-only", atom)
- else:
- cpv_list = db.match(atom)
-
- # USE=multislot can make an installed package appear as if
- # it doesn't satisfy a slot dependency. Rebuilding the ebuild
- # won't do any good as long as USE=multislot is enabled since
- # the newly built package still won't have the expected slot.
- # Therefore, assume that such SLOT dependencies are already
- # satisfied rather than forcing a rebuild.
+ atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
+ cp_list = db.cp_list(atom_exp.cp)
+ matched_something = False
installed = pkg_type == 'installed'
- if installed and not cpv_list and atom.slot:
-
- if "remove" in self._dynamic_config.myparams:
- # We need to search the portdbapi, which is not in our
- # normal dbs list, in order to find the real SLOT.
- portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
- db_keys = list(portdb._aux_cache_keys)
- dbs = [(portdb, "ebuild", False, False, db_keys)]
- else:
- dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
- for cpv in db.match(atom.cp):
- slot_available = False
- for other_db, other_type, other_built, \
- other_installed, other_keys in dbs:
- try:
- if atom.slot == \
- other_db.aux_get(cpv, ["SLOT"])[0]:
- slot_available = True
- break
- except KeyError:
- pass
- if not slot_available:
- continue
- inst_pkg = self._pkg(cpv, "installed",
- root_config, installed=installed, myrepo = atom.repo)
- # Remove the slot from the atom and verify that
- # the package matches the resulting atom.
- if portage.match_from_list(
- atom.without_slot, [inst_pkg]):
- yield inst_pkg
- return
-
- if cpv_list:
+ if cp_list:
atom_set = InternalPackageSet(initial_atoms=(atom,),
allow_repo=True)
if atom.repo is None and hasattr(db, "getRepositories"):
@@ -3441,8 +3772,13 @@ class depgraph(object):
repo_list = [atom.repo]
# descending order
- cpv_list.reverse()
- for cpv in cpv_list:
+ cp_list.reverse()
+ for cpv in cp_list:
+ # Call match_from_list on one cpv at a time, in order
+ # to avoid unnecessary match_from_list comparisons on
+ # versions that are never yielded from this method.
+ if not match_from_list(atom_exp, [cpv]):
+ continue
for repo in repo_list:
try:
@@ -3459,26 +3795,65 @@ class depgraph(object):
# Make sure that cpv from the current repo satisfies the atom.
# This might not be the case if there are several repos with
# the same cpv, but different metadata keys, like SLOT.
- # Also, for portdbapi, parts of the match that require
- # metadata access are deferred until we have cached the
- # metadata in a Package instance.
+ # Also, parts of the match that require metadata access
+ # are deferred until we have cached the metadata in a
+ # Package instance.
if not atom_set.findAtomForPackage(pkg,
modified_use=self._pkg_use_enabled(pkg)):
continue
+ matched_something = True
yield pkg
+ # USE=multislot can make an installed package appear as if
+ # it doesn't satisfy a slot dependency. Rebuilding the ebuild
+ # won't do any good as long as USE=multislot is enabled since
+ # the newly built package still won't have the expected slot.
+ # Therefore, assume that such SLOT dependencies are already
+ # satisfied rather than forcing a rebuild.
+ if not matched_something and installed and atom.slot is not None:
+
+ if "remove" in self._dynamic_config.myparams:
+ # We need to search the portdbapi, which is not in our
+ # normal dbs list, in order to find the real SLOT.
+ portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
+ db_keys = list(portdb._aux_cache_keys)
+ dbs = [(portdb, "ebuild", False, False, db_keys)]
+ else:
+ dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
+
+ cp_list = db.cp_list(atom_exp.cp)
+ if cp_list:
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom.without_slot,), allow_repo=True)
+ atom_exp_without_slot = atom_exp.without_slot
+ cp_list.reverse()
+ for cpv in cp_list:
+ if not match_from_list(atom_exp_without_slot, [cpv]):
+ continue
+ slot_available = False
+ for other_db, other_type, other_built, \
+ other_installed, other_keys in dbs:
+ try:
+ if atom.slot == \
+ other_db._pkg_str(_unicode(cpv), None).slot:
+ slot_available = True
+ break
+ except (KeyError, InvalidData):
+ pass
+ if not slot_available:
+ continue
+ inst_pkg = self._pkg(cpv, "installed",
+ root_config, installed=installed, myrepo=atom.repo)
+ # Remove the slot from the atom and verify that
+ # the package matches the resulting atom.
+ if atom_set.findAtomForPackage(inst_pkg):
+ yield inst_pkg
+ return
+
def _select_pkg_highest_available(self, root, atom, onlydeps=False):
- cache_key = (root, atom, atom.unevaluated_atom, onlydeps)
+ cache_key = (root, atom, atom.unevaluated_atom, onlydeps, self._dynamic_config._autounmask)
ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
if ret is not None:
- pkg, existing = ret
- if pkg and not existing:
- existing = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
- if existing and existing == pkg:
- # Update the cache to reflect that the
- # package has been added to the graph.
- ret = pkg, pkg
- self._dynamic_config._highest_pkg_cache[cache_key] = ret
return ret
ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
self._dynamic_config._highest_pkg_cache[cache_key] = ret
@@ -3495,21 +3870,55 @@ class depgraph(object):
True if the user has not explicitly requested for this package
to be replaced (typically via an atom on the command line).
"""
- if "selective" not in self._dynamic_config.myparams and \
- pkg.root == self._frozen_config.target_root:
- if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
- modified_use=self._pkg_use_enabled(pkg)):
- return True
- try:
- next(self._iter_atoms_for_pkg(pkg))
- except StopIteration:
- pass
- except portage.exception.InvalidDependString:
- pass
- else:
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ return True
+
+ arg = False
+ try:
+ for arg, atom in self._iter_atoms_for_pkg(pkg):
+ if arg.force_reinstall:
+ return False
+ except InvalidDependString:
+ pass
+
+ if "selective" in self._dynamic_config.myparams:
+ return True
+
+ return not arg
+
+ def _equiv_ebuild_visible(self, pkg, autounmask_level=None):
+ try:
+ pkg_eb = self._pkg(
+ pkg.cpv, "ebuild", pkg.root_config, myrepo=pkg.repo)
+ except portage.exception.PackageNotFound:
+ pkg_eb_visible = False
+ for pkg_eb in self._iter_match_pkgs(pkg.root_config,
+ "ebuild", Atom("=%s" % (pkg.cpv,))):
+ if self._pkg_visibility_check(pkg_eb, autounmask_level):
+ pkg_eb_visible = True
+ break
+ if not pkg_eb_visible:
return False
+ else:
+ if not self._pkg_visibility_check(pkg_eb, autounmask_level):
+ return False
+
return True
+ def _equiv_binary_installed(self, pkg):
+ build_time = pkg.metadata.get('BUILD_TIME')
+ if not build_time:
+ return False
+
+ try:
+ inst_pkg = self._pkg(pkg.cpv, "installed",
+ pkg.root_config, installed=True)
+ except PackageNotFound:
+ return False
+
+ return build_time == inst_pkg.metadata.get('BUILD_TIME')
+
class _AutounmaskLevel(object):
__slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
"allow_missing_keywords", "allow_unmasks")
@@ -3525,7 +3934,8 @@ class depgraph(object):
"""
Iterate over the different allowed things to unmask.
- 1. USE
+ 0. USE
+ 1. USE + license
2. USE + ~arch + license
3. USE + ~arch + license + missing keywords
4. USE + ~arch + license + masks
@@ -3544,8 +3954,12 @@ class depgraph(object):
autounmask_level = self._AutounmaskLevel()
autounmask_level.allow_use_changes = True
+ yield autounmask_level
- for only_use_changes in (True, False):
+ autounmask_level.allow_license_changes = True
+ yield autounmask_level
+
+ for only_use_changes in (False,):
autounmask_level.allow_unstable_keywords = (not only_use_changes)
autounmask_level.allow_license_changes = (not only_use_changes)
@@ -3573,7 +3987,7 @@ class depgraph(object):
pkg = None
if self._dynamic_config._autounmask is True:
- pkg = None
+ reset_pkg(pkg)
for autounmask_level in self._autounmask_levels():
if pkg is not None:
@@ -3734,7 +4148,7 @@ class depgraph(object):
if pkg not in self._dynamic_config.digraph.nodes:
return False
- for key in "DEPEND", "RDEPEND", "PDEPEND", "LICENSE":
+ for key in Package._dep_keys + ("LICENSE",):
dep = pkg.metadata[key]
old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
@@ -3749,7 +4163,7 @@ class depgraph(object):
new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
for ppkg, atom in parent_atoms:
if not atom.use or \
- not atom.use.required.intersection(changes):
+ not any(x in atom.use.required for x in changes):
continue
else:
return True
@@ -3759,12 +4173,14 @@ class depgraph(object):
if new_changes != old_changes:
#Don't do the change if it violates REQUIRED_USE.
required_use = pkg.metadata.get("REQUIRED_USE")
- if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
+ if required_use and check_required_use(required_use, old_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]) and \
+ not check_required_use(required_use, new_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.metadata["EAPI"]):
return old_use
- if pkg.use.mask.intersection(new_changes) or \
- pkg.use.force.intersection(new_changes):
+ if any(x in pkg.use.mask for x in new_changes) or \
+ any(x in pkg.use.force for x in new_changes):
return old_use
self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
@@ -3776,199 +4192,6 @@ class depgraph(object):
self._dynamic_config._need_restart = True
return new_use
- def change_required_use(self, pkg):
- """
- Checks if the use flags listed in 'use' satisfy all
- constraints specified in 'constraints'.
-
- @param required_use: REQUIRED_USE string
- @type required_use: String
- @param use: Enabled use flags
- @param use: List
- @param iuse_match: Callable that takes a single flag argument and returns
- True if the flag is matched, false otherwise,
- @param iuse_match: Callable
- @rtype: Bool
- @return: Indicates if REQUIRED_USE constraints are satisfied
- """
-
- required_use = pkg.metadata["REQUIRED_USE"]
- use =self._pkg_use_enabled(pkg)
- iuse_match = pkg.iuse.is_valid_flag
-
- def is_active(token):
- if token.startswith("!"):
- flag = token[1:]
- is_negated = True
- else:
- flag = token
- is_negated = False
-
- if not flag or not iuse_match(flag):
- msg = _("USE flag '%s' is not in IUSE") \
- % (flag,)
- e = InvalidData(msg, category='IUSE.missing')
- raise InvalidDependString(msg, errors=(e,))
-
- return (flag in use and not is_negated) or \
- (flag not in use and is_negated)
-
- def is_satisfied(operator, argument):
- if not argument:
- #|| ( ) -> True
- return True
-
- if operator == "||":
- return (True in argument)
- elif operator == "^^":
- return (argument.count(True) == 1)
- elif operator[-1] == "?":
- return (False not in argument)
-
- mysplit = required_use.split()
- level = 0
- stack = [[]]
- tree = portage.dep._RequiredUseBranch()
- node = tree
- need_bracket = False
- target_use = {}
-
- for token in mysplit:
- if token == "(":
- if not need_bracket:
- child = portage.dep._RequiredUseBranch(parent=node)
- node._children.append(child)
- node = child
-
- need_bracket = False
- stack.append([])
- level += 1
- elif token == ")":
- if need_bracket:
- raise InvalidDependString(
- _("malformed syntax: '%s'") % required_use)
- if level > 0:
- level -= 1
- l = stack.pop()
- op = None
- if stack[level]:
- if stack[level][-1] in ("||", "^^"):
- op = stack[level].pop()
- satisfied = is_satisfied(op, l)
- stack[level].append(satisfied)
- node._satisfied = satisfied
-
- elif not isinstance(stack[level][-1], bool) and \
- stack[level][-1][-1] == "?":
- op = stack[level].pop()
- if is_active(op[:-1]):
- satisfied = is_satisfied(op, l)
- stack[level].append(satisfied)
- node._satisfied = satisfied
- else:
- node._satisfied = True
- last_node = node._parent._children.pop()
- if last_node is not node:
- raise AssertionError(
- "node is not last child of parent")
- node = node._parent
- continue
-
- if op is None:
- satisfied = False not in l
- node._satisfied = satisfied
- if l:
- stack[level].append(satisfied)
-
- if len(node._children) <= 1 or \
- node._parent._operator not in ("||", "^^"):
- last_node = node._parent._children.pop()
- if last_node is not node:
- raise AssertionError(
- "node is not last child of parent")
- for child in node._children:
- node._parent._children.append(child)
- if isinstance(child, portage.dep._RequiredUseBranch):
- child._parent = node._parent
-
- elif not node._children:
- last_node = node._parent._children.pop()
- if last_node is not node:
- raise AssertionError(
- "node is not last child of parent")
-
- elif len(node._children) == 1 and op in ("||", "^^"):
- last_node = node._parent._children.pop()
- if last_node is not node:
- raise AssertionError(
- "node is not last child of parent")
- node._parent._children.append(node._children[0])
- if isinstance(node._children[0], portage.dep._RequiredUseBranch):
- node._children[0]._parent = node._parent
- node = node._children[0]
- if node._operator is None and \
- node._parent._operator not in ("||", "^^"):
- last_node = node._parent._children.pop()
- if last_node is not node:
- raise AssertionError(
- "node is not last child of parent")
- for child in node._children:
- node._parent._children.append(child)
- if isinstance(child, portage.dep._RequiredUseBranch):
- child._parent = node._parent
-
- node = node._parent
- else:
- raise InvalidDependString(
- _("malformed syntax: '%s'") % required_use)
- elif token in ("||", "^^"):
- if need_bracket:
- raise InvalidDependString(
- _("malformed syntax: '%s'") % required_use)
- need_bracket = True
- stack[level].append(token)
- child = portage.dep._RequiredUseBranch(operator=token, parent=node)
- node._children.append(child)
- node = child
- else:
- if need_bracket or "(" in token or ")" in token or \
- "|" in token or "^" in token:
- raise InvalidDependString(
- _("malformed syntax: '%s'") % required_use)
-
- if token[-1] == "?":
- need_bracket = True
- stack[level].append(token)
- child = portage.dep._RequiredUseBranch(operator=token, parent=node)
- node._children.append(child)
- node = child
- else:
- satisfied = is_active(token)
- stack[level].append(satisfied)
- node._children.append(portage.dep._RequiredUseLeaf(token, satisfied))
- print("satisfied:", satisfied)
- if satisfied is False:
- new_changes = {}
- new_changes[token] = True
- print("new_changes:", new_changes)
- if pkg.use.mask.intersection(new_changes) or \
- pkg.use.force.intersection(new_changes):
- print("mask or force")
- else:
- print("new_changes2:", new_changes)
- if token in pkg.use.enabled:
- target_use[token] = False
- elif not token in pkg.use.enabled:
- target_use[token] = True
- return target_use
-
- if level != 0 or need_bracket:
- raise InvalidDependString(
- _("malformed syntax: '%s'") % required_use)
-
- tree._satisfied = False not in stack[0]
- return target_use
-
def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
root_config = self._frozen_config.roots[root]
pkgsettings = self._frozen_config.pkgsettings[root]
@@ -4128,37 +4351,24 @@ class depgraph(object):
if not use_ebuild_visibility and (usepkgonly or useoldpkg):
if pkg.installed and pkg.masks:
continue
- else:
- try:
- pkg_eb = self._pkg(
- pkg.cpv, "ebuild", root_config, myrepo=pkg.repo)
- except portage.exception.PackageNotFound:
- pkg_eb_visible = False
- for pkg_eb in self._iter_match_pkgs(pkg.root_config,
- "ebuild", Atom("=%s" % (pkg.cpv,))):
- if self._pkg_visibility_check(pkg_eb, autounmask_level):
- pkg_eb_visible = True
- break
- if not pkg_eb_visible:
- continue
- else:
- if not self._pkg_visibility_check(pkg_eb, autounmask_level):
- continue
+ elif not self._equiv_ebuild_visible(pkg,
+ autounmask_level=autounmask_level):
+ continue
# Calculation of USE for unbuilt ebuilds is relatively
# expensive, so it is only performed lazily, after the
# above visibility checks are complete.
myarg = None
- if root == self._frozen_config.target_root:
- try:
- myarg = next(self._iter_atoms_for_pkg(pkg))
- except StopIteration:
- pass
- except portage.exception.InvalidDependString:
- if not installed:
- # masked by corruption
- continue
+ try:
+ for myarg, myarg_atom in self._iter_atoms_for_pkg(pkg):
+ if myarg.force_reinstall:
+ reinstall = True
+ break
+ except InvalidDependString:
+ if not installed:
+ # masked by corruption
+ continue
if not installed and myarg:
found_available_arg = True
@@ -4169,17 +4379,8 @@ class depgraph(object):
# since IUSE cannot be adjusted by the user.
continue
- if pkg.metadata.get("REQUIRED_USE") and eapi_has_required_use(pkg.metadata["EAPI"]):
- required_use_is_sat = check_required_use(pkg.metadata["REQUIRED_USE"],
- self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag)
- if not required_use_is_sat:
- if autounmask_level and autounmask_level.allow_use_changes \
- and not pkg.built:
- target_use = self.change_required_use(pkg)
- if not target_use is None:
- use = self._pkg_use_enabled(pkg, target_use)
-
if atom.use:
+
matched_pkgs_ignore_use.append(pkg)
if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
target_use = {}
@@ -4197,7 +4398,7 @@ class depgraph(object):
missing_disabled = atom.use.missing_disabled.difference(pkg.iuse.all)
if atom.use.enabled:
- if atom.use.enabled.intersection(missing_disabled):
+ if any(x in atom.use.enabled for x in missing_disabled):
use_match = False
can_adjust_use = False
need_enabled = atom.use.enabled.difference(use)
@@ -4206,11 +4407,11 @@ class depgraph(object):
if need_enabled:
use_match = False
if can_adjust_use:
- if pkg.use.mask.intersection(need_enabled):
+ if any(x in pkg.use.mask for x in need_enabled):
can_adjust_use = False
if atom.use.disabled:
- if atom.use.disabled.intersection(missing_enabled):
+ if any(x in atom.use.disabled for x in missing_enabled):
use_match = False
can_adjust_use = False
need_disabled = atom.use.disabled.intersection(use)
@@ -4219,8 +4420,8 @@ class depgraph(object):
if need_disabled:
use_match = False
if can_adjust_use:
- if pkg.use.force.difference(
- pkg.use.mask).intersection(need_disabled):
+ if any(x in pkg.use.force and x not in
+ pkg.use.mask for x in need_disabled):
can_adjust_use = False
if not use_match:
@@ -4486,9 +4687,19 @@ class depgraph(object):
"recurse" not in self._dynamic_config.myparams:
return 1
+ complete_if_new_use = self._dynamic_config.myparams.get(
+ "complete_if_new_use", "y") == "y"
+ complete_if_new_ver = self._dynamic_config.myparams.get(
+ "complete_if_new_ver", "y") == "y"
+ rebuild_if_new_slot = self._dynamic_config.myparams.get(
+ "rebuild_if_new_slot", "y") == "y"
+ complete_if_new_slot = rebuild_if_new_slot
+
if "complete" not in self._dynamic_config.myparams and \
- self._dynamic_config.myparams.get("complete_if_new_ver", "y") == "y":
- # Enable complete mode if an installed package version will change.
+ (complete_if_new_use or
+ complete_if_new_ver or complete_if_new_slot):
+ # Enable complete mode if an installed package will change somehow.
+ use_change = False
version_change = False
for node in self._dynamic_config.digraph:
if not isinstance(node, Package) or \
@@ -4496,12 +4707,35 @@ class depgraph(object):
continue
vardb = self._frozen_config.roots[
node.root].trees["vartree"].dbapi
- inst_pkg = vardb.match_pkgs(node.slot_atom)
- if inst_pkg and (inst_pkg[0] > node or inst_pkg[0] < node):
- version_change = True
- break
- if version_change:
+ if complete_if_new_use or complete_if_new_ver:
+ inst_pkg = vardb.match_pkgs(node.slot_atom)
+ if inst_pkg and inst_pkg[0].cp == node.cp:
+ inst_pkg = inst_pkg[0]
+ if complete_if_new_ver and \
+ (inst_pkg < node or node < inst_pkg):
+ version_change = True
+ break
+
+ # Intersect enabled USE with IUSE, in order to
+ # ignore forced USE from implicit IUSE flags, since
+ # they're probably irrelevant and they are sensitive
+ # to use.mask/force changes in the profile.
+ if complete_if_new_use and \
+ (node.iuse.all != inst_pkg.iuse.all or
+ self._pkg_use_enabled(node).intersection(node.iuse.all) !=
+ self._pkg_use_enabled(inst_pkg).intersection(inst_pkg.iuse.all)):
+ use_change = True
+ break
+
+ if complete_if_new_slot:
+ cp_list = vardb.match_pkgs(Atom(node.cp))
+ if (cp_list and cp_list[0].cp == node.cp and
+ not any(node.slot == pkg.slot for pkg in cp_list)):
+ version_change = True
+ break
+
+ if use_change or version_change:
self._dynamic_config.myparams["complete"] = True
if "complete" not in self._dynamic_config.myparams:
@@ -4515,6 +4749,7 @@ class depgraph(object):
# scheduled for replacement. Also, toggle the "deep"
# parameter so that all dependencies are traversed and
# accounted for.
+ self._dynamic_config._complete_mode = True
self._select_atoms = self._select_atoms_from_graph
if "remove" in self._dynamic_config.myparams:
self._select_package = self._select_pkg_from_installed
@@ -4673,7 +4908,7 @@ class depgraph(object):
# For installed packages, always ignore blockers from DEPEND since
# only runtime dependencies should be relevant for packages that
# are already built.
- dep_keys = ["RDEPEND", "PDEPEND"]
+ dep_keys = Package._runtime_keys
for myroot in self._frozen_config.trees:
if self._frozen_config.myopts.get("--root-deps") is not None and \
@@ -5130,16 +5365,24 @@ class depgraph(object):
root_config.root]["root_config"] = root_config
def _resolve_conflicts(self):
+
+ if "complete" not in self._dynamic_config.myparams and \
+ self._dynamic_config._allow_backtracking and \
+ self._dynamic_config._slot_collision_nodes and \
+ not self._accept_blocker_conflicts():
+ self._dynamic_config.myparams["complete"] = True
+
if not self._complete_graph():
raise self._unknown_internal_error()
+ self._process_slot_conflicts()
+
+ self._slot_operator_trigger_reinstalls()
+
if not self._validate_blockers():
self._dynamic_config._skip_restart = True
raise self._unknown_internal_error()
- if self._dynamic_config._slot_collision_info:
- self._process_slot_conflicts()
-
def _serialize_tasks(self):
debug = "--debug" in self._frozen_config.myopts
@@ -5434,7 +5677,7 @@ class depgraph(object):
for node in nodes:
parents = mygraph.parent_nodes(node,
ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
- if parents and set(parents).intersection(asap_nodes):
+ if any(x in asap_nodes for x in parents):
selected_nodes = [node]
break
else:
@@ -6119,7 +6362,7 @@ class depgraph(object):
if is_latest:
unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
elif is_latest_in_slot:
- unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
+ unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, keyword))
else:
unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
else:
@@ -6159,7 +6402,7 @@ class depgraph(object):
if is_latest:
p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
elif is_latest_in_slot:
- p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
+ p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.slot))
else:
p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
else:
@@ -6184,7 +6427,7 @@ class depgraph(object):
if is_latest:
use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
elif is_latest_in_slot:
- use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(adjustments)))
+ use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(adjustments)))
else:
use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
@@ -6201,7 +6444,7 @@ class depgraph(object):
if is_latest:
license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
elif is_latest_in_slot:
- license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], " ".join(sorted(missing_licenses))))
+ license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(sorted(missing_licenses))))
else:
license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
@@ -6308,27 +6551,27 @@ class depgraph(object):
settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
if len(roots) > 1:
- writemsg_stdout("\nFor %s:\n" % abs_user_config, noiselevel=-1)
+ writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
if root in unstable_keyword_msg:
- writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
+ writemsg("\nThe following " + colorize("BAD", "keyword changes") + \
" are necessary to proceed:\n", noiselevel=-1)
- writemsg_stdout(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
+ writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
if root in p_mask_change_msg:
- writemsg_stdout("\nThe following " + colorize("BAD", "mask changes") + \
+ writemsg("\nThe following " + colorize("BAD", "mask changes") + \
" are necessary to proceed:\n", noiselevel=-1)
- writemsg_stdout(format_msg(p_mask_change_msg[root]), noiselevel=-1)
+ writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
if root in use_changes_msg:
- writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
+ writemsg("\nThe following " + colorize("BAD", "USE changes") + \
" are necessary to proceed:\n", noiselevel=-1)
- writemsg_stdout(format_msg(use_changes_msg[root]), noiselevel=-1)
+ writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
if root in license_msg:
- writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
+ writemsg("\nThe following " + colorize("BAD", "license changes") + \
" are necessary to proceed:\n", noiselevel=-1)
- writemsg_stdout(format_msg(license_msg[root]), noiselevel=-1)
+ writemsg(format_msg(license_msg[root]), noiselevel=-1)
protect_obj = {}
if write_to_file:
@@ -6375,7 +6618,7 @@ class depgraph(object):
for line in msg:
if line:
line = colorize("INFORM", line)
- writemsg_stdout(line + "\n", noiselevel=-1)
+ writemsg(line + "\n", noiselevel=-1)
if ask and write_to_file and file_to_write_to:
prompt = "\nWould you like to add these " + \
@@ -6407,14 +6650,14 @@ class depgraph(object):
file_to_write_to.get((abs_user_config, "package.license")))
if problems:
- writemsg_stdout("\nThe following problems occurred while writing autounmask changes:\n", \
+ writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
noiselevel=-1)
- writemsg_stdout("".join(problems), noiselevel=-1)
+ writemsg("".join(problems), noiselevel=-1)
elif write_to_file and roots:
- writemsg_stdout("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
+ writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
noiselevel=-1)
elif not pretend and not autounmask_write and roots:
- writemsg_stdout("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
+ writemsg("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
noiselevel=-1)
@@ -6425,35 +6668,8 @@ class depgraph(object):
the merge list where it is most likely to be seen, but if display()
is not going to be called then this method should be called explicitly
to ensure that the user is notified of problems with the graph.
-
- All output goes to stderr, except for unsatisfied dependencies which
- go to stdout for parsing by programs such as autounmask.
"""
- # Note that show_masked_packages() sends its output to
- # stdout, and some programs such as autounmask parse the
- # output in cases when emerge bails out. However, when
- # show_masked_packages() is called for installed packages
- # here, the message is a warning that is more appropriate
- # to send to stderr, so temporarily redirect stdout to
- # stderr. TODO: Fix output code so there's a cleaner way
- # to redirect everything to stderr.
- sys.stdout.flush()
- sys.stderr.flush()
- stdout = sys.stdout
- try:
- sys.stdout = sys.stderr
- self._display_problems()
- finally:
- sys.stdout = stdout
- sys.stdout.flush()
- sys.stderr.flush()
-
- # This goes to stdout for parsing by programs like autounmask.
- for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
- self._show_unsatisfied_dep(*pargs, **kwargs)
-
- def _display_problems(self):
if self._dynamic_config._circular_deps_for_display is not None:
self._show_circular_deps(
self._dynamic_config._circular_deps_for_display)
@@ -6572,6 +6788,9 @@ class depgraph(object):
show_mask_docs()
writemsg("\n", noiselevel=-1)
+ for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
+ self._show_unsatisfied_dep(*pargs, **kwargs)
+
def saveNomergeFavorites(self):
"""Find atoms in favorites that are not in the mergelist and add them
to the world file if necessary."""
@@ -6618,6 +6837,9 @@ class depgraph(object):
continue
if arg.root_config.root != root_config.root:
continue
+ if arg.internal:
+ # __auto_* sets
+ continue
k = arg.name
if k in ("selected", "world") or \
not root_config.sets[k].world_candidate:
@@ -6629,9 +6851,13 @@ class depgraph(object):
all_added.extend(added_favorites)
all_added.sort()
for a in all_added:
+ if a.startswith(SETPREFIX):
+ filename = "world_sets"
+ else:
+ filename = "world"
writemsg_stdout(
- ">>> Recording %s in \"world\" favorites file...\n" % \
- colorize("INFORM", str(a)), noiselevel=-1)
+ ">>> Recording %s in \"%s\" favorites file...\n" %
+ (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
if all_added:
world_set.update(all_added)
@@ -7010,13 +7236,8 @@ class _dep_check_composite_db(dbapi):
return ret[:]
def _visible(self, pkg):
- if pkg.installed and "selective" not in self._depgraph._dynamic_config.myparams:
- try:
- arg = next(self._depgraph._iter_atoms_for_pkg(pkg))
- except (StopIteration, portage.exception.InvalidDependString):
- arg = None
- if arg:
- return False
+ if pkg.installed and not self._depgraph._want_installed_pkg(pkg):
+ return False
if pkg.installed and \
(pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
# Account for packages with masks (like KEYWORDS masks)
@@ -7032,24 +7253,8 @@ class _dep_check_composite_db(dbapi):
if not avoid_update:
if not use_ebuild_visibility and usepkgonly:
return False
- else:
- try:
- pkg_eb = self._depgraph._pkg(
- pkg.cpv, "ebuild", pkg.root_config,
- myrepo=pkg.repo)
- except portage.exception.PackageNotFound:
- pkg_eb_visible = False
- for pkg_eb in self._depgraph._iter_match_pkgs(
- pkg.root_config, "ebuild",
- Atom("=%s" % (pkg.cpv,))):
- if self._depgraph._pkg_visibility_check(pkg_eb):
- pkg_eb_visible = True
- break
- if not pkg_eb_visible:
- return False
- else:
- if not self._depgraph._pkg_visibility_check(pkg_eb):
- return False
+ elif not self._depgraph._equiv_ebuild_visible(pkg):
+ return False
in_graph = self._depgraph._dynamic_config._slot_pkg_map[
self._root].get(pkg.slot_atom)
@@ -7329,8 +7534,6 @@ def get_mask_info(root_config, cpv, pkgsettings,
mreasons = ["corruption"]
else:
eapi = metadata['EAPI']
- if eapi[:1] == '-':
- eapi = eapi[1:]
if not portage.eapi_is_supported(eapi):
mreasons = ['EAPI %s' % eapi]
else:
@@ -7387,10 +7590,11 @@ def show_masked_packages(masked_packages):
# above via mreasons.
pass
- writemsg_stdout("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)
+ writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
+ noiselevel=-1)
if comment and comment not in shown_comments:
- writemsg_stdout(filename + ":\n" + comment + "\n",
+ writemsg(filename + ":\n" + comment + "\n",
noiselevel=-1)
shown_comments.add(comment)
portdb = root_config.trees["porttree"].dbapi
@@ -7400,13 +7604,14 @@ def show_masked_packages(masked_packages):
continue
msg = ("A copy of the '%s' license" + \
" is located at '%s'.\n\n") % (l, l_path)
- writemsg_stdout(msg, noiselevel=-1)
+ writemsg(msg, noiselevel=-1)
shown_licenses.add(l)
return have_eapi_mask
def show_mask_docs():
- writemsg_stdout("For more information, see the MASKED PACKAGES section in the emerge\n", noiselevel=-1)
- writemsg_stdout("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
+ writemsg("For more information, see the MASKED PACKAGES "
+ "section in the emerge\n", noiselevel=-1)
+ writemsg("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
def show_blocker_docs_link():
writemsg("\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n", noiselevel=-1)
diff --git a/gobs/pym/jobs.py b/gobs/pym/jobs.py
index 292d57d..6b5340b 100644
--- a/gobs/pym/jobs.py
+++ b/gobs/pym/jobs.py
@@ -9,7 +9,7 @@ from gobs.ConnectionManager import connectionManager
CM=connectionManager(gobs_settings_dict)
#selectively import the pgsql/mysql querys
if CM.getName()=='pgsql':
- from gobs.pgsql import *
+ from gobs.pgsql_querys import *
from gobs.sync import git_pull, sync_tree
from gobs.buildquerydb import add_buildquery_main, del_buildquery_main
@@ -17,75 +17,77 @@ from gobs.updatedb import update_db_main
def jobs_main(config_profile):
conn = CM.getConnection()
- job = check_job_list(conn, config_profile)
- if job is None:
+ jobs_id = get_jobs_id(conn, config_profile)
+ if jobs_id is None:
CM.putConnection(conn)
- return
- log_msg = "Job: %s Type: %s" % (job[1], job[0],)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- if job[0] == "addbuildquery":
- update_job_list(conn, "Runing", job[1])
- log_msg = "Job %s is runing." % (job[1],)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- result = add_buildquery_main(config_profile)
- if result is True:
- update_job_list(conn, "Done", job[1])
- log_msg = "Job %s is done.." % (job[1],)
+ return
+ for job_id in jobs_id:
+ job = get_job(conn, job_id)
+ log_msg = "Job: %s Type: %s" % (job_id, job,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ if job == "addbuildquery":
+ update_job_list(conn, "Runing", job_id)
+ log_msg = "Job %s is runing." % (job_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
- else:
- update_job_list(conn, "Fail", job[1])
- log_msg = "Job %s did fail." % (job[1],)
+ result = add_buildquery_main(config_profile)
+ if result is True:
+ update_job_list(conn, "Done", job_id)
+ log_msg = "Job %s is done.." % (job_id,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ else:
+ update_job_list(conn, "Fail", job_id)
+ log_msg = "Job %s did fail." % (job_id,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ elif job == "delbuildquery":
+ update_job_list(conn, "Runing", job_id)
+ log_msg = "Job %s is runing." % (job_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
- elif job[0] == "delbuildquery":
- update_job_list(conn, "Runing", job[1])
- log_msg = "Job %s is runing." % (job[1],)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- result = del_buildquery_main(config_profile)
- if result is True:
- update_job_list(conn, "Done", job[1])
- log_msg = "Job %s is done.." % (job[1],)
+ result = del_buildquery_main(config_profile)
+ if result is True:
+ update_job_list(conn, "Done", job_id)
+ log_msg = "Job %s is done.." % (job_id,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ else:
+ update_job_list(conn, "Fail", job_id)
+ log_msg = "Job %s did fail." % (job_id,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ elif job == "gitsync":
+ update_job_list(conn, "Runing", job_id)
+ log_msg = "Job %s is runing." % (job_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
- else:
- update_job_list(conn, "Fail", job[1])
- log_msg = "Job %s did fail." % (job[1],)
+ result = git_pull()
+ if result is True:
+ update_job_list(conn, "Done", job_id)
+ log_msg = "Job %s is done.." % (job_id,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ else:
+ update_job_list(conn, "Fail", job_id)
+ log_msg = "Job %s did fail." % (job_id,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ elif job == "emergesync":
+ update_job_list(conn, "Runing", job_id)
+ log_msg = "Job %s is runing." % (job_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
- elif job[0] == "gitsync":
- update_job_list(conn, "Runing", job[1])
- log_msg = "Job %s is runing." % (job[1],)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- result = git_pull()
- if result is True:
- update_job_list(conn, "Done", job[1])
- log_msg = "Job %s is done.." % (job[1],)
+ result = sync_tree()
+ if result is True:
+ update_job_list(conn, "Done", job_id)
+ log_msg = "Job %s is done.." % (job_id,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ else:
+ update_job_list(conn, "Fail", job_id)
+ log_msg = "Job %s did fail." % (job_id,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ elif job == "updatedb":
+ update_job_list(conn, "Runing", job_id)
+ log_msg = "Job %s is runing." % (job_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
- else:
- update_job_list(conn, "Fail", job[1])
- log_msg = "Job %s did fail." % (job[1],)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- elif job[0] == "emergesync":
- update_job_list(conn, "Runing", job[1])
- log_msg = "Job %s is runing." % (job[1],)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- result = sync_tree()
- if result is True:
- update_job_list(conn, "Done", job[1])
- log_msg = "Job %s is done.." % (job[1],)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- else:
- update_job_list(conn, "Fail", job[1])
- log_msg = "Job %s did fail." % (job[1],)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- elif job[0] == "updatedb":
- update_job_list(conn, "Runing", job[1])
- log_msg = "Job %s is runing." % (job[1],)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- result = update_db_main()
- if result is True:
- update_job_list(conn, "Done", job[1])
- log_msg = "Job %s is done.." % (job[1],)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- else:
- update_job_list(conn, "Fail", job[1])
- log_msg = "Job %s did fail." % (job[1],)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- return
\ No newline at end of file
+ result = update_db_main()
+ if result is True:
+ update_job_list(conn, "Done", job_id)
+ log_msg = "Job %s is done.." % (job_id,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ else:
+ update_job_list(conn, "Fail", job_id)
+ log_msg = "Job %s did fail." % (job_id,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ return
\ No newline at end of file
diff --git a/gobs/pym/old_cpv.py b/gobs/pym/old_cpv.py
index 93af29f..3d47c50 100644
--- a/gobs/pym/old_cpv.py
+++ b/gobs/pym/old_cpv.py
@@ -16,17 +16,21 @@ class gobs_old_cpv(object):
self._mysettings = mysettings
self._myportdb = myportdb
- def mark_old_ebuild_db(self, categories, package, package_id):
+ def mark_old_ebuild_db(self, package_id):
conn=CM.getConnection()
- ebuild_list_tree = sorted(self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None))
+ # Get the ebuild list for cp
+ cp, repo = get_cp_repo_from_package_id(conn, package_id)
+ mytree = []
+ mytree.append(self._myportdb.getRepositoryPath(repo))
+ ebuild_list_tree = self._myportdb.cp_list(cp, use_cache=1, mytree=mytree)
# Get ebuild list on categories, package in the db
- ebuild_list_db = cp_list_db(conn,package_id)
+ ebuild_list_db = cp_list_db(conn, package_id)
# Check if don't have the ebuild in the tree
# Add it to the no active list
old_ebuild_list = []
for ebuild_line in ebuild_list_db:
- ebuild_line_db = categories + "/" + package + "-" + ebuild_line[0]
- if not ebuild_line_db in ebuild_list_tree:
+ cpv_db = cp + "-" + ebuild_line[0]
+ if not cpv_db in ebuild_list_tree:
old_ebuild_list.append(ebuild_line)
# Set no active on ebuilds in the db that no longer in tree
if old_ebuild_list != []:
diff --git a/gobs/pym/package.py b/gobs/pym/package.py
index 771572f..cb6a1f1 100644
--- a/gobs/pym/package.py
+++ b/gobs/pym/package.py
@@ -14,8 +14,10 @@ config_profile = gobs_settings_dict['gobs_config']
from gobs.ConnectionManager import connectionManager
CM=connectionManager(gobs_settings_dict)
#selectively import the pgsql/mysql querys
-if CM.getName()=='pgsql':
- from gobs.pgsql import *
+if CM.getName()=='pgsql':
+ from gobs.pgsql_querys import *
+if CM.getName()=='mysql':
+ from gobs.mysql_querys import *
class gobs_package(object):
@@ -23,113 +25,123 @@ class gobs_package(object):
self._mysettings = mysettings
self._myportdb = myportdb
- def change_config(self, config_id):
- # Change config_root config_id = table configs.id
- my_new_setup = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id + "/"
- mysettings_setup = portage.config(config_root = my_new_setup)
- return mysettings_setup
+ def change_config(self, config_setup):
+ # Change config_root config_setup = table config
+ my_new_setup = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_setup + "/"
+ mysettings_setup = portage.config(config_root = my_new_setup)
+ return mysettings_setup
- def config_match_ebuild(self, categories, package, config_list):
+ def config_match_ebuild(self, cp, config_id_list):
config_cpv_listDict ={}
- if config_list == []:
- return config_cpv_listDict
- for config_id in config_list:
+ if config_id_list == []:
+ return config_cpv_listDict
+ conn=CM.getConnection()
+ for config_id in config_id_list:
# Change config/setup
- mysettings_setup = self.change_config(config_id)
- myportdb_setup = portage.portdbapi(mysettings=mysettings_setup)
- # Get latest cpv from portage with the config
- latest_ebuild = myportdb_setup.xmatch('bestmatch-visible', categories + "/" + package)
- latest_ebuild_version = unicode("")
- # Check if could get cpv from portage
- if latest_ebuild != "":
- # Get the version of cpv
- latest_ebuild_version = portage.versions.cpv_getversion(latest_ebuild)
- # Get the iuse and use flags for that config/setup
- init_useflags = gobs_use_flags(mysettings_setup, myportdb_setup, latest_ebuild)
- iuse_flags_list, final_use_list = init_useflags.get_flags()
- iuse_flags_list2 = []
- for iuse_line in iuse_flags_list:
- iuse_flags_list2.append( init_useflags.reduce_flag(iuse_line))
- # Dic the needed info
- attDict = {}
- attDict['ebuild_version'] = latest_ebuild_version
- attDict['useflags'] = final_use_list
- attDict['iuse'] = iuse_flags_list2
- attDict['package'] = package
- attDict['categories'] = categories
- config_cpv_listDict[config_id] = attDict
- # Clean some cache
- myportdb_setup.close_caches()
- portage.portdbapi.portdbapi_instances.remove(myportdb_setup)
- return config_cpv_listDict
-
- def get_ebuild_metadata(self, ebuild_line):
- # Get the auxdbkeys infos for the ebuild
- try:
- ebuild_auxdb_list = self._myportdb.aux_get(ebuild_line, portage.auxdbkeys)
- except:
- ebuild_auxdb_list = []
- else:
- for i in range(len(ebuild_auxdb_list)):
- if ebuild_auxdb_list[i] == '':
- ebuild_auxdb_list[i] = ''
- return ebuild_auxdb_list
-
- def get_packageDict(self, pkgdir, ebuild_line, categories, package, config_id):
- attDict = {}
- ebuild_version_tree = portage.versions.cpv_getversion(ebuild_line)
- ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")[0]
- ebuild_version_text = get_ebuild_text(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")
- init_repoman = gobs_repoman(self._mysettings, self._myportdb)
- repoman_error = init_repoman.check_repoman(categories, package, ebuild_version_tree, config_id)
- ebuild_version_metadata_tree = self.get_ebuild_metadata(ebuild_line)
- # if there some error to get the metadata we add rubish to the
- # ebuild_version_metadata_tree and set ebuild_version_checksum_tree to 0
- # so it can be updated next time we update the db
- if ebuild_version_metadata_tree == []:
- log_msg = " QA: %s Have broken metadata" % (ebuild_line,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- ebuild_version_metadata_tree = ['','','','','','','','','','','','','','','','','','','','','','','','','']
- ebuild_version_checksum_tree = ['0']
- # add the ebuild to the dict packages
- attDict['categories'] = categories
- attDict['package'] = package
- attDict['ebuild_version_tree'] = ebuild_version_tree
- attDict['ebuild_version_checksum_tree']= ebuild_version_checksum_tree
- attDict['ebuild_version_metadata_tree'] = ebuild_version_metadata_tree
- attDict['ebuild_version_text'] = ebuild_version_text[0]
- attDict['ebuild_version_revision'] = ebuild_version_text[1]
- attDict['ebuild_error'] = repoman_error
- return attDict
-
- def get_metadataDict(self, packageDict, ebuild_id_list):
- # Make the metadataDict from packageDict
- ebuild_i = 0
- metadataDict ={}
- for k, v in packageDict.iteritems():
- attDict = {}
- metadata_restrictions = []
- for i in v['ebuild_version_metadata_tree'][4].split():
- metadata_restrictions.append(i)
- metadata_keyword = []
- for i in v['ebuild_version_metadata_tree'][8].split():
- metadata_keyword.append(i)
- metadata_iuse = []
- for i in v['ebuild_version_metadata_tree'][10].split():
- metadata_iuse.append(i)
- attDict['restrictions'] = metadata_restrictions
- attDict['keyword'] = metadata_keyword
- attDict['iuse'] = metadata_iuse
- metadataDict[ebuild_id_list[ebuild_i]] = attDict
- ebuild_i = ebuild_i +1
- return metadataDict
+ for config_id in config_id_list:
+
+ # Change config/setup
+ config_setup = get_config_db(conn, config_id)
+ mysettings_setup = self.change_config(config_setup)
+ myportdb_setup = portage.portdbapi(mysettings=mysettings_setup)
+
+ # Get the latest cpv from portage with the config that we can build
+ build_cpv = myportdb_setup.xmatch('bestmatch-visible', cp)
+
+ # Check if could get cpv from portage and add it to the config_cpv_listDict.
+ if build_cpv != "":
+
+ # Get the iuse and use flags for that config/setup and cpv
+ init_useflags = gobs_use_flags(mysettings_setup, myportdb_setup, build_cpv)
+ iuse_flags_list, final_use_list = init_useflags.get_flags()
+ iuse_flags_list2 = []
+ for iuse_line in iuse_flags_list:
+ iuse_flags_list2.append( init_useflags.reduce_flag(iuse_line))
+
+ # Dict the needed info
+ attDict = {}
+ attDict['cpv'] = build_cpv
+ attDict['useflags'] = final_use_list
+ attDict['iuse'] = iuse_flags_list2
+ config_cpv_listDict[config_id] = attDict
+
+ # Clean some cache
+ myportdb_setup.close_caches()
+ portage.portdbapi.portdbapi_instances.remove(myportdb_setup)
+ CM.putConnection(conn)
+ return config_cpv_listDict
+
+ def get_ebuild_metadata(self, cpv, repo):
+ # Get the auxdbkeys infos for the ebuild
+ try:
+ ebuild_auxdb_list = self._myportdb.aux_get(cpv, portage.auxdbkeys, myrepo=repo)
+ except:
+ ebuild_auxdb_list = []
+ else:
+ for i in range(len(ebuild_auxdb_list)):
+ if ebuild_auxdb_list[i] == '':
+ ebuild_auxdb_list[i] = ''
+ return ebuild_auxdb_list
+
+ def get_packageDict(self, pkgdir, cpv, repo, config_id):
+ attDict = {}
+ conn=CM.getConnection()
+
+ #Get categories, package and version from cpv
+ ebuild_version_tree = portage.versions.cpv_getversion(cpv)
+ element = portage.versions.cpv_getkey(cpv).split('/')
+ categories = element[0]
+ package = element[1]
+
+ # Make a checksum of the ebuild
+ try:
+ ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")[0]
+ except:
+ ebuild_version_checksum_tree = "0"
+ log_msg = "QA: Can't checksum the ebuild file. %s on repo %s" % (cpv, repo,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ log_msg = "C %s:%s ... Fail." % (cpv, repo)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ ebuild_version_text_tree = '0'
+ else:
+ ebuild_version_text_tree = get_ebuild_text(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")
+
+ # run repoman on the ebuild
+ #init_repoman = gobs_repoman(self._mysettings, self._myportdb)
+ #repoman_error = init_repoman.check_repoman(pkgdir, cpv, config_id)
+ #if repoman_error != []:
+ # log_msg = "Repoman: %s have errors on repo %s" % (cpv, repo,)
+ # add_gobs_logs(conn, log_msg, "info", config_profile)
+ repoman_error = []
+
+ # Get the ebuild metadata
+ ebuild_version_metadata_tree = self.get_ebuild_metadata(cpv, repo)
+ # if there some error to get the metadata we add rubish to the
+ # ebuild_version_metadata_tree and set ebuild_version_checksum_tree to 0
+ # so it can be updated next time we update the db
+ if ebuild_version_metadata_tree == []:
+ log_msg = " QA: %s have broken metadata on repo %s" % (cpv, repo)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ ebuild_version_metadata_tree = ['','','','','','','','','','','','','','','','','','','','','','','','','']
+ ebuild_version_checksum_tree = '0'
+
+ # add the ebuild info to the dict packages
+ attDict['repo'] = repo
+ attDict['ebuild_version_tree'] = ebuild_version_tree
+ attDict['ebuild_version_checksum_tree']= ebuild_version_checksum_tree
+ attDict['ebuild_version_metadata_tree'] = ebuild_version_metadata_tree
+ #attDict['ebuild_version_text_tree'] = ebuild_version_text_tree[0]
+ attDict['ebuild_version_revision_tree'] = ebuild_version_text_tree[1]
+ attDict['ebuild_error'] = repoman_error
+ CM.putConnection(conn)
+ return attDict
def add_new_ebuild_buildquery_db(self, ebuild_id_list, packageDict, config_cpv_listDict):
conn=CM.getConnection()
# Get the needed info from packageDict and config_cpv_listDict and put that in buildqueue
# Only add it if ebuild_version in packageDict and config_cpv_listDict match
if config_cpv_listDict is not None:
- message = None
+ message = []
# Unpack config_cpv_listDict
for k, v in config_cpv_listDict.iteritems():
config_id = k
@@ -147,183 +159,198 @@ class gobs_package(object):
i = 0
for k, v in packageDict.iteritems():
ebuild_id = ebuild_id_list[i]
- use_flags_list = []
- use_enable_list = []
- for u, s in use_flagsDict.iteritems():
- use_flags_list.append(u)
- use_enable_list.append(s)
- # Comper ebuild_version and add the ebuild_version to buildqueue
- if portage.vercmp(v['ebuild_version_tree'], latest_ebuild_version) == 0:
- add_new_package_buildqueue(conn,ebuild_id, config_id, use_flags_list, use_enable_list, message)
- # B = Build cpv use-flags config
- log_msg = "B %s/%s-%s USE: %s %s" % \
- (v['categories'], v['package'], latest_ebuild_version, use_enable, config_id,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+
+ # Comper and add the cpv to buildqueue
+ if build_cpv == k:
+ add_new_package_buildqueue(conn, ebuild_id, config_id, use_flagsDict, message)
+
+ # B = Build cpv use-flags config
+ config_setup = get_config_db(conn, config_id)
+
+ # FIXME log_msg need a fix to log the use flags corect.
+ log_msg = "B %s:%s USE: %s %s" % \
+ (k, v['repo'], use_enable, config_setup,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
i = i +1
CM.putConnection(conn)
def get_package_metadataDict(self, pkgdir, package):
- # Make package_metadataDict
- attDict = {}
- package_metadataDict = {}
- changelog_checksum_tree = portage.checksum.sha256hash(pkgdir + "/ChangeLog")
- changelog_text_tree = get_file_text(pkgdir + "/ChangeLog")
- metadata_xml_checksum_tree = portage.checksum.sha256hash(pkgdir + "/metadata.xml")
- metadata_xml_text_tree = get_file_text(pkgdir + "/metadata.xml")
- attDict['changelog_checksum'] = changelog_checksum_tree[0]
- attDict['changelog_text'] = changelog_text_tree
- attDict['metadata_xml_checksum'] = metadata_xml_checksum_tree[0]
- attDict[' metadata_xml_text'] = metadata_xml_text_tree
- package_metadataDict[package] = attDict
- return package_metadataDict
-
- def add_new_package_db(self, categories, package):
+ # Make package_metadataDict
+ attDict = {}
+ package_metadataDict = {}
+ changelog_checksum_tree = portage.checksum.sha256hash(pkgdir + "/ChangeLog")
+ changelog_text_tree = get_file_text(pkgdir + "/ChangeLog")
+ metadata_xml_checksum_tree = portage.checksum.sha256hash(pkgdir + "/metadata.xml")
+ metadata_xml_text_tree = get_file_text(pkgdir + "/metadata.xml")
+ attDict['changelog_checksum'] = changelog_checksum_tree[0]
+ attDict['changelog_text'] = changelog_text_tree
+ attDict['metadata_xml_checksum'] = metadata_xml_checksum_tree[0]
+ attDict['metadata_xml_text'] = metadata_xml_text_tree
+ package_metadataDict[package] = attDict
+ return package_metadataDict
+
+ def add_new_package_db(self, categories, package, repo):
conn=CM.getConnection()
- # add new categories package ebuild to tables package and ebuilds
- # C = Checking
- # N = New Package
- log_msg = "C %s/%s" % (categories, package,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- log_msg = "N %s/%s" % (categories, package,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR + cp
- categories_dir = self._mysettings['PORTDIR'] + "/" + categories + "/"
- # Get the ebuild list for cp
- ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
- if ebuild_list_tree == []:
- CM.putConnection(conn)
- return
- config_list = get_config_list(conn)
- config_cpv_listDict = self.config_match_ebuild(categories, package, config_list)
- config_id = get_default_config(conn)
- packageDict ={}
- for ebuild_line in sorted(ebuild_list_tree):
- # Make the needed packageDict
- packageDict[ebuild_line] = self.get_packageDict(pkgdir, ebuild_line, categories, package, config_id)
- # Add the ebuild to db
- return_id = add_new_package_sql(conn,packageDict)
- ebuild_id_list = return_id[0]
- package_id_list = return_id[1]
- package_id = package_id_list[0]
- # Add metadataDict to db
- metadataDict = self.get_metadataDict(packageDict, ebuild_id_list)
- add_new_metadata(conn,metadataDict)
- # Add any qa and repoman erro for the ebuild to buildlog
- qa_error = []
- init_manifest = gobs_manifest(self._mysettings, pkgdir)
- manifest_error = init_manifest.digestcheck()
- if manifest_error is not None:
- qa_error.append(manifest_error)
- log_msg = "QA: %s/%s %s" % (categories, package, qa_error,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- add_qa_repoman(conn,ebuild_id_list, qa_error, packageDict, config_id)
- # Add the ebuild to the buildqueru table if needed
- self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
- # Add some checksum on some files
- package_metadataDict = self.get_package_metadataDict(pkgdir, package)
- add_new_package_metadata(conn,package_id, package_metadataDict)
- # Add the manifest file to db
- try:
- manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
- except:
- manifest_checksum_tree = "0"
- get_manifest_text = "0"
- log_msg = "QA: Can't checksum the Manifest file. %c/%s" % (categories, package,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- else:
- get_manifest_text = get_file_text(pkgdir + "/Manifest")
- add_new_manifest_sql(conn,package_id, get_manifest_text, manifest_checksum_tree)
- log_msg = "C %s/%s ... Done." % (categories, package)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
+ # Add new categories package ebuild to tables package and ebuilds
+ # C = Checking
+ # N = New Package
+ log_msg = "C %s/%s:%s" % (categories, package, repo)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ log_msg = "N %s/%s:%s" % (categories, package, repo)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ pkgdir = self._myportdb.getRepositoryPath(repo) + "/" + categories + "/" + package # Get RepoDIR + cp
+
+ # Get the cp manifest file checksum.
+ try:
+ manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
+ except:
+ manifest_checksum_tree = "0"
+ log_msg = "QA: Can't checksum the Manifest file. %s/%s:%s" % (categories, package, repo,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ log_msg = "C %s/%s:%s ... Fail." % (categories, package, repo)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ CM.putConnection(conn)
+ return
+ package_id = add_new_manifest_sql(conn, categories, package, repo, manifest_checksum_tree)
+
+ # Get the ebuild list for cp
+ mytree = []
+ mytree.append(self._myportdb.getRepositoryPath(repo))
+ ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=mytree)
+ if ebuild_list_tree == []:
+ log_msg = "QA: Can't get the ebuilds list. %s/%s:%s" % (categories, package, repo,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ log_msg = "C %s/%s:%s ... Fail." % (categories, package, repo)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ CM.putConnection(conn)
+ return
+
+ # set config to default config
+ default_config = get_default_config(conn)
- def update_package_db(self, categories, package, package_id):
+ # Make the needed packageDict with ebuild infos so we can add it later to the db.
+ packageDict ={}
+ ebuild_id_list = []
+ for cpv in sorted(ebuild_list_tree):
+ packageDict[cpv] = self.get_packageDict(pkgdir, cpv, repo, default_config)
+
+ # Add new ebuilds to the db
+ ebuild_id_list = add_new_ebuild_sql(conn, package_id, packageDict)
+
+ # Get the best cpv for the configs and add it to config_cpv_listDict
+ configs_id_list = get_config_id_list(conn)
+ config_cpv_listDict = self.config_match_ebuild(categories + "/" + package, configs_id_list)
+
+ # Add the ebuild to the buildquery table if needed
+ self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
+
+ log_msg = "C %s/%s:%s ... Done." % (categories, package, repo)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ print(categories, package, repo)
+ CM.putConnection(conn)
+
+ def update_package_db(self, package_id):
conn=CM.getConnection()
# Update the categories and package with new info
- # C = Checking
- log_msg = "C %s/%s" % (categories, package,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR with cp
- # Get the checksum from the Manifest file.
- try:
- manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
- except:
- # We did't fine any Manifest file
- manifest_checksum_tree = '0'
- ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
- if ebuild_list_tree == []:
- CM.putConnection(conn)
- log_msg = "QA: No Manifest file or ebuilds in %s/%s." % (categories, package,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- log_msg = "C %s/%s ... Done." % (categories, package,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- return
+ # C = Checking
+ cp, repo = get_cp_repo_from_package_id(conn, package_id)
+ element = cp.split('/')
+ package = element[1]
+ log_msg = "C %s:%s" % (cp, repo)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ pkgdir = self._myportdb.getRepositoryPath(repo) + "/" + cp # Get RepoDIR + cp
+
+ # Get the cp mainfest file checksum
+ try:
+ manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
+ except:
+ manifest_checksum_tree = "0"
+ log_msg = "QA: Can't checksum the Manifest file. %s:%s" % (cp, repo,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ log_msg = "C %s:%s ... Fail." % (cp, repo)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ CM.putConnection(conn)
+ return
+
# Get the checksum from the db in package table
- manifest_checksum_db = get_manifest_db(conn,package_id)
- # if we have the same checksum return else update the package
- ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
- if manifest_checksum_tree != manifest_checksum_db:
+ manifest_checksum_db = get_manifest_db(conn, package_id)
+
+ # if we have the same checksum return else update the package
+ if manifest_checksum_tree != manifest_checksum_db:
+
# U = Update
- log_msg = "U %s/%s" % (categories, package)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- # Get package_metadataDict and update the db with it
- package_metadataDict = self.get_package_metadataDict(pkgdir, package)
- update_new_package_metadata(conn,package_id, package_metadataDict)
- # Get config_cpv_listDict
- config_list = get_config_list(conn)
- config_cpv_listDict = self.config_match_ebuild(categories, package, config_list)
- config_id = get_default_config(conn)
- packageDict ={}
- for ebuild_line in sorted(ebuild_list_tree):
- old_ebuild_list = []
- # split out ebuild version
- ebuild_version_tree = portage.versions.cpv_getversion(ebuild_line)
- # Get the checksum of the ebuild in tree and db
- ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")[0]
- ebuild_version_manifest_checksum_db = get_ebuild_checksum(conn,package_id, ebuild_version_tree)
- # Check if the checksum have change
- if ebuild_version_manifest_checksum_db is None or ebuild_version_checksum_tree != ebuild_version_manifest_checksum_db:
- # Get packageDict for ebuild
- packageDict[ebuild_line] = self.get_packageDict(pkgdir, ebuild_line, categories, package, config_id)
- if ebuild_version_manifest_checksum_db is None:
- # N = New ebuild
- log_msg = "N %s/%s-%s" % (categories, package, ebuild_version_tree,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- else:
- # U = Updated ebuild
- log_msg = "U %s/%s-%s" % (categories, package, ebuild_version_tree,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- # Fix so we can use add_new_package_sql(packageDict) to update the ebuilds
- old_ebuild_list.append(ebuild_version_tree)
- add_old_ebuild(conn,package_id, old_ebuild_list)
- update_active_ebuild(conn,package_id, ebuild_version_tree)
- # Use packageDictand and metadataDict to update the db
- return_id = add_new_package_sql(conn,packageDict)
- ebuild_id_list = return_id[0]
- metadataDict = self.get_metadataDict(packageDict, ebuild_id_list)
- add_new_metadata(conn,metadataDict)
- # Get the text in Manifest and update it
- try:
- get_manifest_text = get_file_text(pkgdir + "/Manifest")
- except:
- get_manifest_text = "0"
- update_manifest_sql(conn,package_id, get_manifest_text, manifest_checksum_tree)
- # Add any qa and repoman erros to buildlog
- qa_error = []
- init_manifest = gobs_manifest(self._mysettings, pkgdir)
- manifest_error = init_manifest.digestcheck()
- if manifest_error is not None:
- qa_error.append(manifest_error)
- log_msg = "QA: %s/%s %s" % (categories, package, qa_error,)
- add_gobs_logs(conn, log_msg, "info", config_profile)
- add_qa_repoman(conn,ebuild_id_list, qa_error, packageDict, config_id)
- # Add the ebuild to the buildqueru table if needed
- self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
- # Mark or remove any old ebuilds
- init_old_cpv = gobs_old_cpv(self._myportdb, self._mysettings)
- init_old_cpv.mark_old_ebuild_db(categories, package, package_id)
- log_msg = "C %s/%s ... Done." % (categories, package)
- add_gobs_logs(conn, log_msg, "info", config_profile)
+ log_msg = "U %s:%s" % (cp, repo)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+
+ # Get the ebuild list for cp
+ mytree = []
+ mytree.append(self._myportdb.getRepositoryPath(repo))
+ ebuild_list_tree = self._myportdb.cp_list(cp, use_cache=1, mytree=mytree)
+ if ebuild_list_tree == []:
+ log_msg = "QA: Can't get the ebuilds list. %s:%s" % (cp, repo,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ log_msg = "C %s:%s ... Fail." % (cp, repo)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ CM.putConnection(conn)
+ return
+ packageDict ={}
+ for cpv in sorted(ebuild_list_tree):
+ old_ebuild_list = []
+
+ # split out ebuild version
+ ebuild_version_tree = portage.versions.cpv_getversion(cpv)
+
+ # Get the checksum of the ebuild in tree and db
+ # Make a checksum of the ebuild
+ try:
+ ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")[0]
+ except:
+ ebuild_version_checksum_tree = '0'
+ manifest_checksum_tree = '0'
+ log_msg = "QA: Can't checksum the ebuild file. %s on repo %s" % (cpv, repo,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ log_msg = "C %s:%s ... Fail." % (cpv, repo)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ ebuild_version_manifest_checksum_db = get_ebuild_checksum(conn, package_id, ebuild_version_tree)
+
+
+ # Check if the checksum have change
+ if ebuild_version_manifest_checksum_db is None or ebuild_version_checksum_tree != ebuild_version_manifest_checksum_db:
+
+ # set config to default config
+ default_config = get_default_config(conn)
+
+ # Get packageDict for ebuild
+ packageDict[cpv] = self.get_packageDict(pkgdir, cpv, repo, default_config)
+ if ebuild_version_manifest_checksum_db is None:
+ # N = New ebuild
+ log_msg = "N %s:%s" % (cpv, repo,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ else:
+ # U = Updated ebuild
+ log_msg = "U %s:%s" % (cpv, repo,)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+
+ # Fix so we can use add_new_ebuild_sql() to update the ebuilds
+ old_ebuild_list.append(ebuild_version_tree)
+ add_old_ebuild(conn, package_id, old_ebuild_list)
+ update_active_ebuild_to_fales(conn, package_id, ebuild_version_tree)
+ # Use packageDictand to update the db
+ # Add new ebuilds to the db
+ ebuild_id_list = add_new_ebuild_sql(conn, package_id, packageDict)
+
+ # update the cp manifest checksum
+ update_manifest_sql(conn, package_id, manifest_checksum_tree)
+
+ # Get the best cpv for the configs and add it to config_cpv_listDict
+ configs_id_list = get_config_id_list(conn)
+ config_cpv_listDict = self.config_match_ebuild(cp, configs_id_list)
+
+ # Add the ebuild to the buildqueru table if needed
+ self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
+
+ log_msg = "C %s:%s ... Done." % (cp, repo)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
CM.putConnection(conn)
def update_ebuild_db(self, build_dict):
diff --git a/gobs/pym/pgsql.py b/gobs/pym/pgsql.py
index be557d2..f8de5ff 100644
--- a/gobs/pym/pgsql.py
+++ b/gobs/pym/pgsql.py
@@ -225,7 +225,7 @@ def add_new_metadata(connection, metadataDict):
def add_new_package_sql(connection, packageDict):
#lets have a new cursor for each metod as per best practice
cursor = connection.cursor()
- sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
+ sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
ebuild_id_list = []
package_id_list = []
for k, v in packageDict.iteritems():
@@ -235,7 +235,7 @@ def add_new_package_sql(connection, packageDict):
v['ebuild_version_metadata_tree'][5],v['ebuild_version_metadata_tree'][6], v['ebuild_version_metadata_tree'][7],
v['ebuild_version_metadata_tree'][9], v['ebuild_version_metadata_tree'][11],
v['ebuild_version_metadata_tree'][13],v['ebuild_version_metadata_tree'][14], v['ebuild_version_metadata_tree'][15],
- v['ebuild_version_metadata_tree'][16]]
+ v['ebuild_version_metadata_tree'][16], v['ebuild_version_metadata_tree'][4]]
cursor.execute(sqlQ, params)
mid = cursor.fetchone()
mid=mid[0]
@@ -248,7 +248,7 @@ def add_new_package_sql(connection, packageDict):
def add_new_ebuild_sql(connection, packageDict, new_ebuild_list):
#lets have a new cursor for each metod as per best practice
cursor = connection.cursor()
- sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
+ sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
ebuild_id_list = []
package_id_list = []
for k, v in packageDict.iteritems():
@@ -260,7 +260,7 @@ def add_new_ebuild_sql(connection, packageDict, new_ebuild_list):
v['ebuild_version_metadata_tree'][5],v['ebuild_version_metadata_tree'][6], v['ebuild_version_metadata_tree'][7],
v['ebuild_version_metadata_tree'][9], v['ebuild_version_metadata_tree'][11],
v['ebuild_version_metadata_tree'][13],v['ebuild_version_metadata_tree'][14], v['ebuild_version_metadata_tree'][15],
- v['ebuild_version_metadata_tree'][16]]
+ v['ebuild_version_metadata_tree'][16], v['ebuild_version_metadata_tree'][4]]
cursor.execute(sqlQ, params)
mid = cursor.fetchone()
mid=mid[0]
diff --git a/gobs/pym/pgsql_querys.py b/gobs/pym/pgsql_querys.py
new file mode 100644
index 0000000..9184a20
--- /dev/null
+++ b/gobs/pym/pgsql_querys.py
@@ -0,0 +1,308 @@
+#every function takes a connection as a parameter that is provided by the CM
+from __future__ import print_function
+
+# Queries to add the logs
+def add_gobs_logs(connection, log_msg, log_type, config):
+ cursor = connection.cursor()
+ sqlQ = 'INSERT INTO logs (config_id, type, msg) VALUES ( (SELECT config_id FROM configs WHERE config = %s), %s, %s )'
+ cursor.execute(sqlQ, (config, log_type, log_msg))
+ connection.commit()
+
+# Queries to handle the jobs table
+def get_jobs_id(connection, config_profile):
+ cursor = connection.cursor()
+ sqlQ = "SELECT job_id FROM jobs WHERE status = 'Waiting' AND config_id = (SELECT config_id FROM configs WHERE config = %s)"
+ cursor.execute(sqlQ, (config_profile,))
+ entries = cursor.fetchall()
+ if entries is None:
+ return None
+ jobs_id = []
+ for job_id in entries:
+ jobs_id.append(job_id[0])
+ return sorted(jobs_id)
+
+def get_job(connection, job_id):
+ cursor = connection.cursor()
+ sqlQ ='SELECT job FROM jobs WHERE job_id = %s'
+ cursor.execute(sqlQ, (job_id,))
+ job = cursor.fetchone()
+ return job[0]
+
+def update_job_list(connection, status, job_id):
+ cursor = connection.cursor()
+ sqlQ = 'UPDATE jobs SET status = %s WHERE job_id = %s'
+ cursor.execute(sqlQ, (status, job_id,))
+ connection.commit()
+
+# Queries to handle the configs* tables
+def get_config_list_all(connection):
+ cursor = connection.cursor()
+ sqlQ = 'SELECT config FROM configs'
+ cursor.execute(sqlQ)
+ entries = cursor.fetchall()
+ return entries
+
+def update_make_conf(connection, configsDict):
+ cursor = connection.cursor()
+ sqlQ1 = 'UPDATE configs_metadata SET checksum = %s, make_conf_text = %s, active = %s, config_error = %s WHERE config_id = (SELECT config_id FROM configs WHERE config = %s)'
+ for k, v in configsDict.iteritems():
+ params = [v['make_conf_checksum_tree'], v['make_conf_text'], v['active'], v['config_error'], k]
+ cursor.execute(sqlQ1, params)
+ connection.commit()
+
+def get_default_config(connection):
+ cursor = connection.cursor()
+ sqlQ = "SELECT config FROM configs WHERE default_config = 'True'"
+ cursor.execute(sqlQ)
+ entries = cursor.fetchone()
+ return entries
+
+def update_repo_db(connection, repo_list):
+ cursor = connection.cursor()
+ sqlQ1 = 'SELECT repo_id FROM repos WHERE repo = %s'
+ sqlQ2 = 'INSERT INTO repos (repo) VALUES ( %s )'
+ for repo in repo_list:
+ cursor.execute(sqlQ1, (repo,))
+ entries = cursor.fetchone()
+ if entries is None:
+ cursor.execute(sqlQ2, (repo,))
+ connection.commit()
+ return
+def get_package_id(connection, categories, package, repo):
+ cursor = connection.cursor()
+ sqlQ ='SELECT package_id FROM packages WHERE category = %s AND package = %s AND repo_id = (SELECT repo_id FROM repos WHERE repo = %s)'
+ params = categories, package, repo
+ cursor.execute(sqlQ, params)
+ entries = cursor.fetchone()
+ if entries is None:
+ return None
+ return entries[0]
+
+# Add new info to the packages table
+
+def get_repo_id(connection, repo):
+ cursor = connection.cursor()
+ sqlQ ='SELECT repo_id FROM repos WHERE repo = %s'
+ cursor.execute(sqlQ, (repo,))
+ entries = cursor.fetchone()
+ if entries is None:
+ return None
+ return entries[0]
+
+def add_new_manifest_sql(connection, categories, package, repo, manifest_checksum_tree):
+ cursor = connection.cursor()
+ sqlQ = "INSERT INTO packages (category, package, repo_id, checksum, active) VALUES (%s, %s, %s, %s, 'True') RETURNING package_id"
+ repo_id = get_repo_id(connection, repo)
+ cursor.execute(sqlQ, (categories, package, repo_id, manifest_checksum_tree,))
+ package_id = cursor.fetchone()[0]
+ connection.commit()
+ return package_id
+
+def get_restriction_id(connection, restriction):
+ cursor = connection.cursor()
+ sqlQ ='SELECT restriction_id FROM restrictions WHERE restriction = %s'
+ cursor.execute(sqlQ, (restriction,))
+ entries = cursor.fetchone()
+ if entries is None:
+ return None
+ return entries[0]
+
+def get_use_id(connection, use_flag):
+ cursor = connection.cursor()
+ sqlQ ='SELECT use_id FROM uses WHERE flag = %s'
+ cursor.execute(sqlQ, (use_flag,))
+ entries = cursor.fetchone()
+ if entries is None:
+ return None
+ return entries[0]
+
+def get_keyword_id(connection, keyword):
+ cursor = connection.cursor()
+ sqlQ ='SELECT keyword_id FROM keywords WHERE keyword = %s'
+ cursor.execute(sqlQ, (keyword,))
+ entries = cursor.fetchone()
+ if entries is None:
+ return None
+ return entries[0]
+
+def add_new_ebuild_metadata_sql(connection, ebuild_id, keywords, restrictions, iuse_list):
+ cursor = connection.cursor()
+ sqlQ1 = 'INSERT INTO keywords (keyword) VALUES ( %s ) RETURNING keyword_id'
+ sqlQ3 = 'INSERT INTO restrictions (restriction) VALUES ( %s ) RETURNING restriction_id'
+ sqlQ4 = 'INSERT INTO ebuilds_restrictions (ebuild_id, restriction_id) VALUES ( %s, %s )'
+ sqlQ5 = 'INSERT INTO uses (flag) VALUES ( %s ) RETURNING use_id'
+ sqlQ6 = 'INSERT INTO ebuilds_iuse (ebuild_id, use_id, status) VALUES ( %s, %s, %s)'
+ sqlQ7 = 'INSERT INTO ebuilds_keywords (ebuild_id, keyword_id, status) VALUES ( %s, %s, %s)'
+ # FIXME restriction need some filter as iuse and keyword have.
+ for restriction in restrictions:
+ restriction_id = get_restriction_id(connection, restriction)
+ if restriction_id is None:
+ cursor.execute(sqlQ3, (restriction,))
+ restriction_id = cursor.fetchone()[0]
+ cursor.execute(sqlQ4, (ebuild_id, restriction_id,))
+ for iuse in iuse_list:
+ set_iuse = 'disable'
+ if iuse[0] in ["+"]:
+ iuse = iuse[1:]
+ set_iuse = 'enable'
+ elif iuse[0] in ["-"]:
+ iuse = iuse[1:]
+ use_id = get_use_id(connection, iuse)
+ if use_id is None:
+ cursor.execute(sqlQ5, (iuse,))
+ use_id = cursor.fetchone()[0]
+ cursor.execute(sqlQ6, (ebuild_id, use_id, set_iuse,))
+ for keyword in keywords:
+ set_keyword = 'stable'
+ if keyword[0] in ["~"]:
+ keyword = keyword[1:]
+ set_keyword = 'unstable'
+ elif keyword[0] in ["-"]:
+ keyword = keyword[1:]
+ set_keyword = 'testing'
+ keyword_id = get_keyword_id(connection, keyword)
+ if keyword_id is None:
+ cursor.execute(sqlQ1, (keyword,))
+ keyword_id = cursor.fetchone()[0]
+ cursor.execute(sqlQ7, (ebuild_id, keyword_id, set_keyword,))
+ connection.commit()
+
+def add_new_ebuild_sql(connection, package_id, ebuildDict):
+ cursor = connection.cursor()
+ sqlQ1 = 'SELECT repo_id FROM packages WHERE package_id = %s'
+ sqlQ2 = "INSERT INTO ebuilds (package_id, version, checksum, active) VALUES (%s, %s, %s, 'True') RETURNING ebuild_id"
+ sqlQ4 = "INSERT INTO ebuilds_metadata (ebuild_id, revision) VALUES (%s, %s)"
+ ebuild_id_list = []
+ cursor.execute(sqlQ1, (package_id,))
+ repo_id = cursor.fetchone()[0]
+ for k, v in ebuildDict.iteritems():
+ cursor.execute(sqlQ2, (package_id, v['ebuild_version_tree'], v['ebuild_version_checksum_tree'],))
+ ebuild_id = cursor.fetchone()[0]
+ cursor.execute(sqlQ4, (ebuild_id, v['ebuild_version_revision_tree'],))
+ ebuild_id_list.append(ebuild_id)
+ restrictions = []
+ keywords = []
+ iuse = []
+ for i in v['ebuild_version_metadata_tree'][4].split():
+ restrictions.append(i)
+ for i in v['ebuild_version_metadata_tree'][8].split():
+ keywords.append(i)
+ for i in v['ebuild_version_metadata_tree'][10].split():
+ iuse.append(i)
+ add_new_ebuild_metadata_sql(connection, ebuild_id, keywords, restrictions, iuse)
+ connection.commit()
+ return ebuild_id_list
+
+def get_config_id_list(connection):
+ cursor = connection.cursor()
+ sqlQ = "SELECT configs.config_id FROM configs, configs_metadata WHERE configs.default_config = 'False' AND configs_metadata.active = 'True' AND configs.config_id = configs_metadata.config_id"
+ cursor.execute(sqlQ)
+ entries = cursor.fetchall()
+ if entries == ():
+ return None
+ else:
+ config_id_list = []
+ for config_id in entries:
+ config_id_list.append(config_id[0])
+ return config_id_list
+
+def get_config_db(connection, config_id):
+ cursor = connection.cursor()
+ sqlQ = 'SELECT config FROM configs WHERE config_id = %s'
+ cursor.execute(sqlQ,(config_id,))
+ entries = cursor.fetchone()
+ if entries is None:
+ return None
+ return entries[0]
+
+def add_new_package_buildqueue(connection, ebuild_id, config_id, use_flagsDict, messages):
+ cursor = connection.cursor()
+ sqlQ1 = 'INSERT INTO build_jobs (ebuild_id, config_id) VALUES (%s, %s) RETURNING build_job_id'
+ sqlQ3 = 'INSERT INTO build_jobs_use (build_job_id, use_id, status) VALUES (%s, (SELECT use_id FROM uses WHERE flag = %s), %s)'
+ cursor.execute(sqlQ1, (ebuild_id, config_id,))
+ build_job_id = cursor.fetchone()[0]
+ for k, v in use_flagsDict.iteritems():
+ cursor.execute(sqlQ3, (build_job_id, k, v,))
+ connection.commit()
+
+def get_manifest_db(connection, package_id):
+ cursor = connection.cursor()
+ sqlQ = 'SELECT checksum FROM packages WHERE package_id = %s'
+ cursor.execute(sqlQ, (package_id,))
+ entries = cursor.fetchone()
+ if entries is None:
+ return None
+ # If entries is not None we need [0]
+ return entries[0]
+
+def get_cp_from_package_id(connection, package_id):
+ cursor = connection.cursor()
+ sqlQ = "SELECT ARRAY_TO_STRING(ARRAY[category, package] , '/') AS cp FROM packages WHERE package_id = %s"
+ cursor.execute(sqlQ, (package_id,))
+ return cursor.fetchone()
+
+def get_cp_repo_from_package_id(connection, package_id):
+ cursor =connection.cursor()
+ sqlQ = 'SELECT repos.repo FROM repos, packages WHERE repos.repo_id = packages.repo_id AND packages.package_id = %s'
+ cp = get_cp_from_package_id(connection, package_id)
+ cursor.execute(sqlQ, (package_id,))
+ repo = cursor.fetchone()
+ return cp[0], repo[0]
+
+def get_ebuild_checksum(connection, package_id, ebuild_version_tree):
+ cursor = connection.cursor()
+ sqlQ = "SELECT checksum FROM ebuilds WHERE package_id = %s AND version = %s AND active = 'True'"
+ cursor.execute(sqlQ, (package_id, ebuild_version_tree))
+ entries = cursor.fetchone()
+ if entries is None:
+ return None
+ # If entries is not None we need [0]
+ return entries[0]
+
+def add_old_ebuild(connection, package_id, old_ebuild_list):
+ cursor = connection.cursor()
+ sqlQ1 = "UPDATE ebuilds SET active = 'False' WHERE package_id = %s AND version = %s"
+ sqlQ2 = "SELECT ebuild_id FROM ebuilds WHERE package_id = %s AND version = %s AND active = 'True'"
+ sqlQ3 = "SELECT build_job_id FROM build_jobs WHERE ebuild_id = %s"
+ sqlQ4 = 'DELETE FROM build_jobs_use WHERE build_job_id = %s'
+ sqlQ5 = 'DELETE FROM build_jobs WHERE build_job_id = %s'
+ for old_ebuild in old_ebuild_list:
+ cursor.execute(sqlQ2, (package_id, old_ebuild[0]))
+ ebuild_id_list = cursor.fetchall()
+ if ebuild_id_list is not None:
+ for ebuild_id in ebuild_id_list:
+ cursor.execute(sqlQ3, (ebuild_id))
+ build_job_id_list = cursor.fetchall()
+ if build_job_id_list is not None:
+ for build_job_id in build_job_id_list:
+ cursor.execute(sqlQ4, (build_job_id))
+ cursor.execute(sqlQ5, (build_job_id))
+ cursor.execute(sqlQ1, (package_id, old_ebuild[0]))
+ connection.commit()
+
+def update_active_ebuild_to_fales(connection, package_id, ebuild_version_tree):
+ cursor = connection.cursor()
+ sqlQ ="UPDATE ebuilds SET active = 'False' WHERE package_id = %s AND version = %s AND active = 'True'"
+ cursor.execute(sqlQ, (package_id, ebuild_version_tree))
+ connection.commit()
+
+def update_manifest_sql(connection, package_id, manifest_checksum_tree):
+ cursor = connection.cursor()
+ sqlQ = 'UPDATE packages SET checksum = %s WHERE package_id = %s'
+ cursor.execute(sqlQ, (manifest_checksum_tree, package_id,))
+ connection.commit()
+
+def get_build_jobs_id_list_config(connection, config_id):
+ cursor = connection.cursor()
+ sqlQ = 'SELECT build_job_id FROM build_jobs WHERE config_id = %s'
+ cursor.execute(sqlQ, (config_id,))
+ entries = cursor.fetchall()
+ return entries
+
+def del_old_build_jobs(connection, build_job_id):
+ cursor = connection.cursor()
+ sqlQ1 = 'DELETE FROM build_jobs_use WHERE build_job_id = %s'
+ sqlQ2 = 'DELETE FROM build_jobs_retest WHERE build_job_id = %s'
+ sqlQ3 = 'DELETE FROM build_jobs WHERE build_job_id = %s'
+ cursor.execute(sqlQ1, (build_job_id,))
+ cursor.execute(sqlQ2, (build_job_id,))
+ cursor.execute(sqlQ3, (build_job_id,))
+ connection.commit()
diff --git a/gobs/pym/repoman_gobs.py b/gobs/pym/repoman_gobs.py
index ef10f9c..adb4466 100644
--- a/gobs/pym/repoman_gobs.py
+++ b/gobs/pym/repoman_gobs.py
@@ -15,11 +15,13 @@ class gobs_repoman(object):
self._mysettings = mysettings
self._myportdb = myportdb
- def check_repoman(self, categories, package, ebuild_version_tree, config_id):
+ def check_repoman(self, pkgdir, cpv, repo, config_id):
# We run repoman run_checks on the ebuild
- pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package
+ ebuild_version_tree = portage.versions.cpv_getversion(cpv)
+ element = portage.versions.cpv_getkey(cpv).split('/')
+ categories = element[0]
+ package = element[1]
full_path = pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild"
- cpv = categories + "/" + package + "-" + ebuild_version_tree
root = '/'
trees = {
root : {'porttree' : portage.portagetree(root, settings=self._mysettings)}
@@ -28,7 +30,7 @@ class gobs_repoman(object):
allvars = set(x for x in portage.auxdbkeys if not x.startswith("UNUSED_"))
allvars.update(Package.metadata_keys)
allvars = sorted(allvars)
- myaux = dict(zip(allvars, self._myportdb.aux_get(cpv, allvars)))
+ myaux = dict(zip(allvars, self._myportdb.aux_get(cpv, allvars, myrepo=repo)))
pkg = Package(cpv=cpv, metadata=myaux, root_config=root_config, type_name='ebuild')
fails = []
try:
diff --git a/gobs/pym/text.py b/gobs/pym/text.py
index 2f1f689..3f7b040 100644
--- a/gobs/pym/text.py
+++ b/gobs/pym/text.py
@@ -16,7 +16,7 @@ def get_file_text(filename):
textfile.close()
return text
-def get_ebuild_text(filename):
+def get_ebuild_cvs_revision(filename):
"""Return the ebuild contents"""
try:
ebuildfile = open(filename)
@@ -33,7 +33,7 @@ def get_ebuild_text(filename):
cvs_revision = field[3]
except:
cvs_revision = ''
- return text, cvs_revision
+ return cvs_revision
def get_log_text_list(filename):
"""Return the log contents as a list"""
diff --git a/gobs/pym/updatedb.py b/gobs/pym/updatedb.py
index e643fc8..fb185d2 100755
--- a/gobs/pym/updatedb.py
+++ b/gobs/pym/updatedb.py
@@ -18,35 +18,25 @@ from gobs.ConnectionManager import connectionManager
CM=connectionManager(gobs_settings_dict)
#selectively import the pgsql/mysql querys
if CM.getName()=='pgsql':
- from gobs.pgsql import *
+ from gobs.pgsql_querys import *
+if CM.getName()=='mysql':
+ from gobs.mysql_querys import *
from gobs.check_setup import check_make_conf
-from gobs.arch import gobs_arch
from gobs.package import gobs_package
-from gobs.categories import gobs_categories
-from gobs.old_cpv import gobs_old_cpv
-from gobs.categories import gobs_categories
-from gobs.sync import git_pull, sync_tree
import portage
def init_portage_settings():
-
- """ Get the BASE Setup/Config for portage.settings
- @type: module
- @module: The SQL Backend
- @type: dict
- @parms: config options from the config file (host_setup_root)
- @rtype: settings
- @returns new settings
- """
# check config setup
conn=CM.getConnection()
check_make_conf()
log_msg = "Check configs done"
add_gobs_logs(conn, log_msg, "info", config_profile)
+
# Get default config from the configs table and default_config=1
config_id = get_default_config(conn) # HostConfigDir = table configs id
default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id[0] + "/"
+
# Set config_root (PORTAGE_CONFIGROOT) to default_config_root
mysettings = portage.config(config_root = default_config_root)
log_msg = "Setting default config to: %s" % (config_id[0],)
@@ -54,59 +44,67 @@ def init_portage_settings():
CM.putConnection(conn)
return mysettings
-def update_cpv_db_pool(mysettings, myportdb, init_package, package_line):
+def update_cpv_db_pool(mysettings, myportdb, init_package, package_line, repo):
conn=CM.getConnection()
# split the cp to categories and package
element = package_line.split('/')
categories = element[0]
package = element[1]
+
# Check if we don't have the cp in the package table
- package_id = have_package_db(conn,categories, package)
+ package_id = get_package_id(conn, categories, package, repo)
CM.putConnection(conn)
if package_id is None:
+
# Add new package with ebuilds
init_package.add_new_package_db(categories, package)
+
# Ceck if we have the cp in the package table
elif package_id is not None:
+
# Update the packages with ebuilds
- init_package.update_package_db(categories, package, package_id)
- # Update the metadata for categories
- init_categories = gobs_categories(mysettings)
- init_categories.update_categories_db(categories)
+ init_package.update_package_db(package_id)
+ return
def update_cpv_db():
- """Code to update the cpv in the database.
- @type:settings
- @parms: portage.settings
- @type: module
- @module: The SQL Backend
- @type: dict
- @parms: config options from the config file
- """
conn=CM.getConnection()
mysettings = init_portage_settings()
log_msg = "Checking categories, package, ebuilds"
add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
+
# Setup portdb, package
myportdb = portage.portdbapi(mysettings=mysettings)
init_package = gobs_package(mysettings, myportdb)
- package_id_list_tree = []
- # Will run some update checks and update package if needed
- # Get categories/package list from portage
- package_list_tree = myportdb.cp_all()
- # Use all exept 2 cores when multiprocessing
- pool_cores= multiprocessing.cpu_count()
- if pool_cores >= 3:
- use_pool_cores = pool_cores - 2
- else:
- use_pool_cores = 1
- pool = multiprocessing.Pool(processes=use_pool_cores)
- # Run the update package for all package in the list in
- # a multiprocessing pool
- for package_line in sorted(package_list_tree):
- #update_cpv_db_pool(mysettings, package_line)
- pool.apply_async(update_cpv_db_pool, (mysettings, myportdb, init_package, package_line,))
+ repo_list = ()
+ repos_trees_list = []
+
+ # Use all cores when multiprocessing
+ pool_cores= multiprocessing.cpu_count()
+ pool = multiprocessing.Pool(processes=pool_cores)
+
+ # Will run some update checks and update package if needed
+ # Get categories/package list from portage and repos
+
+ # Get the repos and update the repos db
+ repo_list = myportdb.getRepositories()
+ update_repo_db(conn, repo_list)
+ CM.putConnection(conn)
+
+ # Get the rootdirs for the repos
+ repo_trees_list = myportdb.porttrees
+ for repo_dir in repo_trees_list:
+ repo = myportdb.getRepositoryName(repo_dir)
+ repo_dir_list = []
+ repo_dir_list.append(repo_dir)
+
+ # Get the package list from the repo
+ package_id_list_tree = []
+ package_list_tree = myportdb.cp_all(trees=repo_dir_list)
+
+ # Run the update package for all package in the list and in a multiprocessing pool
+ for package_line in sorted(package_list_tree):
+ pool.apply_async(update_cpv_db_pool, (mysettings, myportdb, init_package, package_line, repo,))
+ # update_cpv_db_pool(mysettings, myportdb, init_package, package_line, repo)
pool.close()
pool.join()
conn=CM.getConnection()
@@ -116,30 +114,15 @@ def update_cpv_db():
def update_db_main():
# Main
- conn=CM.getConnection()
- # Logging
- log_msg = "Update db started."
- add_gobs_logs(conn, log_msg, "info", config_profile)
- # Sync portage and profile/settings
- resutalt = git_pull()
- if resutalt is False:
- log_msg = "Update db ... Fail."
- add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
- return False
- resutalt = sync_tree()
- if resutalt is False:
- log_msg = "Update db ... Fail."
- add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
- return False
- # Init settings for the default config
- mysettings = init_portage_settings()
- init_arch = gobs_arch()
- init_arch.update_arch_db()
- # Update the cpv db
- update_cpv_db()
- log_msg = "Update db ... Done."
- add_gobs_logs(conn, log_msg, "info", config_profile)
- CM.putConnection(conn)
- return True
\ No newline at end of file
+ conn=CM.getConnection()
+
+ # Logging
+ log_msg = "Update db started."
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+
+ # Update the cpv db
+ update_cpv_db()
+ log_msg = "Update db ... Done."
+ add_gobs_logs(conn, log_msg, "info", config_profile)
+ CM.putConnection(conn)
+ return True
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-07-17 0:07 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-07-17 0:07 UTC (permalink / raw
To: gentoo-commits
commit: 816a06e04dc01b20dd03b8fb7d6864602bfb90d0
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Tue Jul 17 00:04:51 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Tue Jul 17 00:04:51 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=816a06e0
fix for depgraph
---
gobs/bin/gobs_host_jobs | 1 +
gobs/pym/build_queru.py | 9 +++++++++
2 files changed, 10 insertions(+), 0 deletions(-)
diff --git a/gobs/bin/gobs_host_jobs b/gobs/bin/gobs_host_jobs
index 2f1adc8..5ece453 100755
--- a/gobs/bin/gobs_host_jobs
+++ b/gobs/bin/gobs_host_jobs
@@ -27,6 +27,7 @@ def main():
conn = CM.getConnection()
add_gobs_logs(conn, "Job deamon started", "info", config_profile)
CM.putConnection(conn)
+ repeat = True
while repeat:
jobs_main(config_profile)
repeat = False
diff --git a/gobs/pym/build_queru.py b/gobs/pym/build_queru.py
index 7c6c680..d74208c 100644
--- a/gobs/pym/build_queru.py
+++ b/gobs/pym/build_queru.py
@@ -702,6 +702,15 @@ class queruaction(object):
f.close
except:
pass
+ build_dict2 = {}
+ build_dict2 = get_packages_to_build(conn, self._config_profile)
+ if build_dict['queue_id'] == build_dict2['queue_id']:
+ log_msg = "qurery %s was not removed" % (build_dict['queue_id'],)
+ add_gobs_logs(conn, log_msg, "info", self._config_profile)
+ print("qurery was not removed")
+ build_dict['type_fail'] = "Querey was not removed"
+ build_dict['check_fail'] = True
+ self.log_fail_queru(build_dict, settings)
if build_fail is False or depclean_fail is False:
CM.putConnection(conn)
return False
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-06-26 22:10 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-06-26 22:10 UTC (permalink / raw
To: gentoo-commits
commit: 2ad00baf0f966a594443cd140871af0ca5b59c4e
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Tue Jun 26 22:09:32 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Tue Jun 26 22:09:32 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=2ad00baf
fix for logging
---
gobs/bin/gobs_guest_jobs | 7 +++----
gobs/pym/build_log.py | 8 ++++----
gobs/pym/build_queru.py | 15 ++++++++-------
gobs/pym/buildquerydb.py | 15 ++++++++-------
gobs/pym/check_setup.py | 7 ++++---
gobs/pym/jobs.py | 32 ++++++++++++++++----------------
gobs/pym/package.py | 32 ++++++++++++++++----------------
gobs/pym/sync.py | 2 +-
gobs/pym/updatedb.py | 2 +-
9 files changed, 61 insertions(+), 59 deletions(-)
diff --git a/gobs/bin/gobs_guest_jobs b/gobs/bin/gobs_guest_jobs
index bcf77d1..c3bd6ec 100755
--- a/gobs/bin/gobs_guest_jobs
+++ b/gobs/bin/gobs_guest_jobs
@@ -21,11 +21,12 @@ import sys
import os
import time
from multiprocessing import Process
-import logging
def main_loop(config_profile):
repeat = True
- logging.info("Job and build deamon started.")
+ conn=CM.getConnection()
+ add_gobs_logs(conn, "Job and build deamon started.", "info", config_profile)
+ CM.putConnection(conn)
while repeat:
jobs_main(config_profile)
if check_configure_guest(config_profile) is not True:
@@ -42,8 +43,6 @@ def main_loop(config_profile):
def main():
# Main
- logging.basicConfig(filename=gobs_settings_dict['gobs_logfile'], \
- format='%(levelname)s: %(asctime)s %(message)s', level=logging.INFO)
config_profile = gobs_settings_dict['gobs_config']
#we provide the main_loop with the ConnectionManager so we can hand out connections from within the loop
main_loop(config_profile)
diff --git a/gobs/pym/build_log.py b/gobs/pym/build_log.py
index ec1fdd4..bf63ddc 100644
--- a/gobs/pym/build_log.py
+++ b/gobs/pym/build_log.py
@@ -47,7 +47,7 @@ class gobs_buildlog(object):
categories = cpvr_list[0]
package = cpvr_list[1]
ebuild_version = cpv_getversion(pkg.cpv)
- log_msg = "cpv: %s" % pkg.cpv
+ log_msg = "cpv: %s" % (pkg.cpv,)
add_gobs_logs(conn, log_msg, "info", self._config_profile)
init_package = gobs_package(settings, myportdb)
package_id = have_package_db(conn, categories, package)
@@ -550,7 +550,7 @@ class gobs_buildlog(object):
for sum_log_line in sum_build_log_list:
summary_error = summary_error + " " + sum_log_line
build_log_dict['logfilename'] = settings.get("PORTAGE_LOG_FILE").split(self._config_profile)[1]
- log_msg = "Logfile name: %s" % settings.get("PORTAGE_LOG_FILE")
+ log_msg = "Logfile name: %s" % (settings.get("PORTAGE_LOG_FILE"),)
add_gobs_logs(conn, log_msg, "info", self._config_profile)
if build_dict['queue_id'] is None:
build_id = self.add_new_ebuild_buildlog(settings, pkg, build_dict, build_error, summary_error, build_log_dict)
@@ -564,7 +564,7 @@ class gobs_buildlog(object):
self.write_msg_file(msg_line, emerge_info_logfilename)
os.chmod(settings.get("PORTAGE_LOG_FILE"), 0o664)
os.chmod(emerge_info_logfilename, 0o664)
- log_msg = "Package: %s logged to db." % pkg.cpv
+ log_msg = "Package: %s logged to db." % (pkg.cpv,)
add_gobs_logs(conn, log_msg, "info", self._config_profile)
else:
# FIXME Remove the log some way so
@@ -573,6 +573,6 @@ class gobs_buildlog(object):
# os.remove(settings.get("PORTAGE_LOG_FILE"))
#except:
# pass
- log_msg = "Package %s NOT logged to db." % pkg.cpv
+ log_msg = "Package %s NOT logged to db." % (pkg.cpv,)
add_gobs_logs(conn, log_msg, "info", self._config_profile)
CM.putConnection(conn)
diff --git a/gobs/pym/build_queru.py b/gobs/pym/build_queru.py
index 4ed6110..a800b96 100644
--- a/gobs/pym/build_queru.py
+++ b/gobs/pym/build_queru.py
@@ -634,7 +634,7 @@ class queruaction(object):
if ebuild_version_checksum_tree == build_dict['checksum']:
init_flags = gobs_use_flags(settings, portdb, cpv)
build_use_flags_list = init_flags.comper_useflags(build_dict)
- log_msg = "build_use_flags_list %s" % build_use_flags_list
+ log_msg = "build_use_flags_list %s" % (build_use_flags_list,)
add_gobs_logs(conn, log_msg, "info", self._config_profile)
manifest_error = init_manifest.check_file_in_manifest(portdb, cpv, build_dict, build_use_flags_list)
if manifest_error is None:
@@ -669,7 +669,7 @@ class queruaction(object):
with open("/etc/portage/package.use/gobs.use", "a") as f:
f.write(filetext)
f.write('\n')
- log_msg = "build_cpv_list: %s" % build_cpv_list
+ log_msg = "build_cpv_list: %s" % (build_cpv_list,)
add_gobs_logs(conn, log_msg, "info", self._config_profile)
argscmd = []
if not "nooneshort" in build_dict['post_message']:
@@ -678,12 +678,12 @@ class queruaction(object):
argscmd.append("--usepkg")
for build_cpv in build_cpv_list:
argscmd.append(build_cpv)
- log_msg = "argscmd: %s" % argscmd
+ log_msg = "argscmd: %s" % (argscmd,)
add_gobs_logs(conn, log_msg, "info", self._config_profile)
# Call main_emerge to build the package in build_cpv_list
build_fail = self.emerge_main(argscmd, build_dict)
# Run depclean
- log_msg = "build_fail: %s" % build_fail
+ log_msg = "build_fail: %s" % (build_fail,)
add_gobs_logs(conn, log_msg, "info", self._config_profile)
if not "noclean" in build_dict['post_message']:
depclean_fail = main_depclean()
@@ -706,11 +706,12 @@ class queruaction(object):
if build_dict is None:
CM.putConnection(conn)
return
- log_msg = "build_dict: %s" % build_dict
+ log_msg = "build_dict: %s" % (build_dict,)
add_gobs_logs(conn, log_msg, "info", self._config_profile)
if not build_dict['ebuild_id'] is None and build_dict['checksum'] is not None:
buildqueru_cpv_dict = self.make_build_list(build_dict, settings, portdb)
- logging.info('buildqueru_cpv_dict: %s', buildqueru_cpv_dict)
+ log_msg = "buildqueru_cpv_dict: %s" % (buildqueru_cpv_dict,)
+ add_gobs_logs(conn, log_msg, "info", self._config_profile)
if buildqueru_cpv_dict is None:
CM.putConnection(conn)
return
@@ -718,7 +719,7 @@ class queruaction(object):
CM.putConnection(conn)
return
if not build_dict['post_message'] is [] and build_dict['ebuild_id'] is None:
- CM.closeAllConnections()
+ CM.putConnection(conn)
return
if not build_dict['ebuild_id'] is None and build_dict['checksum'] is None:
del_old_queue(conn, build_dict['queue_id'])
diff --git a/gobs/pym/buildquerydb.py b/gobs/pym/buildquerydb.py
index 44f3373..e0745e2 100644
--- a/gobs/pym/buildquerydb.py
+++ b/gobs/pym/buildquerydb.py
@@ -35,7 +35,7 @@ def add_cpv_query_pool(mysettings, init_package, config_id, package_line):
element = package_line.split('/')
categories = element[0]
package = element[1]
- log_msg = "C %s/%s" % categories package
+ log_msg = "C %s/%s" % (categories, package,)
add_gobs_logs(conn, log_msg, "info", config_profile)
pkgdir = mysettings['PORTDIR'] + "/" + categories + "/" + package
config_id_list = []
@@ -55,14 +55,14 @@ def add_cpv_query_pool(mysettings, init_package, config_id, package_line):
if ebuild_id is not None:
ebuild_id_list.append(ebuild_id)
init_package.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
- log_msg = "C %s/%s ... Done." % categories package
+ log_msg = "C %s/%s ... Done." % (categories, package,)
add_gobs_logs(conn, log_msg, "info", config_profile)
CM.putConnection(conn)
return
def add_buildquery_main(config_id):
conn=CM.getConnection()
- log_msg = "Adding build querys for: %s" % config_id
+ log_msg = "Adding build querys for: %s" % (config_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
check_make_conf()
log_msg = "Check configs done"
@@ -75,7 +75,7 @@ def add_buildquery_main(config_id):
init_package = gobs_package(mysettings, myportdb)
# get the cp list
package_list_tree = package_list_tree = myportdb.cp_all()
- log_msg = "Setting default config to: %s" % config_id
+ log_msg = "Setting default config to: %s" % (config_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
# Use all exept 2 cores when multiprocessing
pool_cores= multiprocessing.cpu_count()
@@ -88,18 +88,19 @@ def add_buildquery_main(config_id):
pool.apply_async(add_cpv_query_pool, (mysettings, init_package, config_id, package_line,))
pool.close()
pool.join()
- log_msg = "Adding build querys for: %s ... Done." % config_id
+ log_msg = "Adding build querys for: %s ... Done." % (config_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
+ CM.putConnection(conn)
return True
def del_buildquery_main(config_id):
- log_msg = "Removeing build querys for: %s" % config_id
+ log_msg = "Removeing build querys for: %s" % (config_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
querue_id_list = get_queue_id_list_config(conn, config_id)
if querue_id_list is not None:
for querue_id in querue_id_list:
del_old_queue(conn, querue_id)
- log_msg = "Removeing build querys for: %s ... Done." % config_id
+ log_msg = "Removeing build querys for: %s ... Done." % (config_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
CM.putConnection(conn)
return True
diff --git a/gobs/pym/check_setup.py b/gobs/pym/check_setup.py
index 609b6f8..354ddfb 100644
--- a/gobs/pym/check_setup.py
+++ b/gobs/pym/check_setup.py
@@ -41,16 +41,17 @@ def check_make_conf():
except Exception as e:
attDict['config_error'] = e
attDict['active'] = 'False'
- log_msg = "%s FAIL!" % config_id[0]
+ log_msg = "%s FAIL!" % (config_id[0],)
add_gobs_logs(conn, log_msg, "info", config_profile)
else:
attDict['config_error'] = ''
attDict['active'] = 'True'
- log_msg = "%s PASS" % config_id[0]
+ log_msg = "%s PASS" % (config_id[0],)
add_gobs_logs(conn, log_msg, "info", config_profile)
# Get the checksum of make.conf
make_conf_checksum_tree = portage.checksum.sha256hash(make_conf_file)[0]
- logging.info("make.conf checksum is %s on %s", make_conf_checksum_tree, config_id[0])
+ log_msg = "make.conf checksum is %s on %s" % (make_conf_checksum_tree, config_id[0],)
+ add_gobs_logs(conn, log_msg, "info", config_profile)
attDict['make_conf_text'] = get_file_text(make_conf_file)
attDict['make_conf_checksum_tree'] = make_conf_checksum_tree
configsDict[config_id[0]]=attDict
diff --git a/gobs/pym/jobs.py b/gobs/pym/jobs.py
index adea5a1..292d57d 100644
--- a/gobs/pym/jobs.py
+++ b/gobs/pym/jobs.py
@@ -21,71 +21,71 @@ def jobs_main(config_profile):
if job is None:
CM.putConnection(conn)
return
- log_msg = "Job: %s Type: %s", % job[1], job[0]
+ log_msg = "Job: %s Type: %s" % (job[1], job[0],)
add_gobs_logs(conn, log_msg, "info", config_profile)
if job[0] == "addbuildquery":
update_job_list(conn, "Runing", job[1])
- log_msg = "Job %s is runing.", % job[1]
+ log_msg = "Job %s is runing." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
result = add_buildquery_main(config_profile)
if result is True:
update_job_list(conn, "Done", job[1])
- log_msg = "Job %s is done..", % job[1]
+ log_msg = "Job %s is done.." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
else:
update_job_list(conn, "Fail", job[1])
- log_msg = "Job %s did fail.", % job[1]
+ log_msg = "Job %s did fail." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
elif job[0] == "delbuildquery":
update_job_list(conn, "Runing", job[1])
- log_msg = "Job %s is runing.", % job[1]
+ log_msg = "Job %s is runing." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
result = del_buildquery_main(config_profile)
if result is True:
update_job_list(conn, "Done", job[1])
- log_msg = "Job %s is done..", % job[1]
+ log_msg = "Job %s is done.." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
else:
update_job_list(conn, "Fail", job[1])
- log_msg = "Job %s did fail.", % job[1]
+ log_msg = "Job %s did fail." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
elif job[0] == "gitsync":
update_job_list(conn, "Runing", job[1])
- log_msg = "Job %s is runing.", % job[1]
+ log_msg = "Job %s is runing." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
result = git_pull()
if result is True:
update_job_list(conn, "Done", job[1])
- log_msg = "Job %s is done..", % job[1]
+ log_msg = "Job %s is done.." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
else:
update_job_list(conn, "Fail", job[1])
- log_msg = "Job %s did fail.", % job[1]
+ log_msg = "Job %s did fail." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
elif job[0] == "emergesync":
update_job_list(conn, "Runing", job[1])
- log_msg = "Job %s is runing.", % job[1]
+ log_msg = "Job %s is runing." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
result = sync_tree()
if result is True:
update_job_list(conn, "Done", job[1])
- log_msg = "Job %s is done..", % job[1]
+ log_msg = "Job %s is done.." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
else:
update_job_list(conn, "Fail", job[1])
- log_msg = "Job %s did fail.", % job[1]
+ log_msg = "Job %s did fail." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
elif job[0] == "updatedb":
update_job_list(conn, "Runing", job[1])
- log_msg = "Job %s is runing.", % job[1]
+ log_msg = "Job %s is runing." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
result = update_db_main()
if result is True:
update_job_list(conn, "Done", job[1])
- log_msg = "Job %s is done..", % job[1]
+ log_msg = "Job %s is done.." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
else:
update_job_list(conn, "Fail", job[1])
- log_msg = "Job %s did fail.", % job[1]
+ log_msg = "Job %s did fail." % (job[1],)
add_gobs_logs(conn, log_msg, "info", config_profile)
return
\ No newline at end of file
diff --git a/gobs/pym/package.py b/gobs/pym/package.py
index c646cdb..b9b2a56 100644
--- a/gobs/pym/package.py
+++ b/gobs/pym/package.py
@@ -89,7 +89,7 @@ class gobs_package(object):
# ebuild_version_metadata_tree and set ebuild_version_checksum_tree to 0
# so it can be updated next time we update the db
if ebuild_version_metadata_tree == []:
- log_msg = " QA: %s Have broken metadata" % ebuild_line
+ log_msg = " QA: %s Have broken metadata" % (ebuild_line,)
add_gobs_logs(conn, log_msg, "info", config_profile)
ebuild_version_metadata_tree = ['','','','','','','','','','','','','','','','','','','','','','','','','']
ebuild_version_checksum_tree = ['0']
@@ -158,8 +158,8 @@ class gobs_package(object):
if portage.vercmp(v['ebuild_version_tree'], latest_ebuild_version) == 0:
add_new_package_buildqueue(conn,ebuild_id, config_id, use_flags_list, use_enable_list, message)
B = Build cpv use-flags config
- log_msg = ("B %s/%s-%s USE: %s %s" % v['categories'] v['package'] \
- latest_ebuild_version use_enable config_id
+ log_msg = ("B %s/%s-%s USE: %s %s" % (v['categories'], v['package'], \
+ latest_ebuild_version, use_enable, config_id,)
add_gobs_logs(conn, log_msg, "info", config_profile)
i = i +1
CM.putConnection(conn)
@@ -184,9 +184,9 @@ class gobs_package(object):
# add new categories package ebuild to tables package and ebuilds
# C = Checking
# N = New Package
- log_msg = ("C %s/%s" % categories package
+ log_msg = ("C %s/%s" % (categories, package,)
add_gobs_logs(conn, log_msg, "info", config_profile)
- log_msg = ("N %s/%s" % categories package
+ log_msg = ("N %s/%s" % (categories, package,)
add_gobs_logs(conn, log_msg, "info", config_profile)
pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR + cp
categories_dir = self._mysettings['PORTDIR'] + "/" + categories + "/"
@@ -216,7 +216,7 @@ class gobs_package(object):
manifest_error = init_manifest.digestcheck()
if manifest_error is not None:
qa_error.append(manifest_error)
- log_msg = ("QA: %s/%s %s" % categories package qa_error
+ log_msg = ("QA: %s/%s %s" % (categories, package, qa_error,)
add_gobs_logs(conn, log_msg, "info", config_profile)
add_qa_repoman(conn,ebuild_id_list, qa_error, packageDict, config_id)
# Add the ebuild to the buildqueru table if needed
@@ -230,20 +230,20 @@ class gobs_package(object):
except:
manifest_checksum_tree = "0"
get_manifest_text = "0"
- log_msg = "QA: Can't checksum the Manifest file. %c/%s" % categories package
+ log_msg = "QA: Can't checksum the Manifest file. %c/%s" % (categories, package,)
add_gobs_logs(conn, log_msg, "info", config_profile)
else:
get_manifest_text = get_file_text(pkgdir + "/Manifest")
add_new_manifest_sql(conn,package_id, get_manifest_text, manifest_checksum_tree)
CM.putConnection(conn)
- log_msg = "C %s/%s ... Done." % categories package
+ log_msg = "C %s/%s ... Done." % (categories, package)
add_gobs_logs(conn, log_msg, "info", config_profile)
def update_package_db(self, categories, package, package_id):
conn=CM.getConnection()
# Update the categories and package with new info
# C = Checking
- log_msg = "C %s/%s" % categories package
+ log_msg = "C %s/%s" % (categories, package,)
add_gobs_logs(conn, log_msg, "info", config_profile)
pkgdir = self._mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR with cp
# Get the checksum from the Manifest file.
@@ -255,9 +255,9 @@ class gobs_package(object):
ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
if ebuild_list_tree == []:
CM.putConnection(conn)
- log_msg = "QA: No Manifest file or ebuilds in %s/%s." % categories package
+ log_msg = "QA: No Manifest file or ebuilds in %s/%s." % (categories, package,)
add_gobs_logs(conn, log_msg, "info", config_profile)
- log_msg = "C %s/%s ... Done." % categories package
+ log_msg = "C %s/%s ... Done." % (categories, package,)
add_gobs_logs(conn, log_msg, "info", config_profile)
return
# Get the checksum from the db in package table
@@ -266,7 +266,7 @@ class gobs_package(object):
ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
if manifest_checksum_tree != manifest_checksum_db:
# U = Update
- log_msg = "U %s/%s" % categories package
+ log_msg = "U %s/%s" % (categories, package)
add_gobs_logs(conn, log_msg, "info", config_profile)
# Get package_metadataDict and update the db with it
package_metadataDict = self.get_package_metadataDict(pkgdir, package)
@@ -289,11 +289,11 @@ class gobs_package(object):
packageDict[ebuild_line] = self.get_packageDict(pkgdir, ebuild_line, categories, package, config_id)
if ebuild_version_manifest_checksum_db is None:
# N = New ebuild
- log_msg = "N %s/%s-%s" % categories package ebuild_version_tree
+ log_msg = "N %s/%s-%s" % (categories, package, ebuild_version_tree,)
add_gobs_logs(conn, log_msg, "info", config_profile)
else:
# U = Updated ebuild
- log_msg = "U %s/%s-%s" % categories package ebuild_version_tree
+ log_msg = "U %s/%s-%s" % (categories, package, ebuild_version_tree,)
add_gobs_logs(conn, log_msg, "info", config_profile)
# Fix so we can use add_new_package_sql(packageDict) to update the ebuilds
old_ebuild_list.append(ebuild_version_tree)
@@ -316,7 +316,7 @@ class gobs_package(object):
manifest_error = init_manifest.digestcheck()
if manifest_error is not None:
qa_error.append(manifest_error)
- log_msg = "QA: %s/%s %s" % categories, package, qa_error
+ log_msg = "QA: %s/%s %s" % (categories, package, qa_error,)
add_gobs_logs(conn, log_msg, "info", config_profile)
add_qa_repoman(conn,ebuild_id_list, qa_error, packageDict, config_id)
# Add the ebuild to the buildqueru table if needed
@@ -325,7 +325,7 @@ class gobs_package(object):
init_old_cpv = gobs_old_cpv(self._myportdb, self._mysettings)
init_old_cpv.mark_old_ebuild_db(categories, package, package_id)
CM.putConnection(conn)
- log_msg = "C %s/%s ... Done." % categories package
+ log_msg = "C %s/%s ... Done." % (categories, package)
add_gobs_logs(conn, log_msg, "info", config_profile)
def update_ebuild_db(self, build_dict):
diff --git a/gobs/pym/sync.py b/gobs/pym/sync.py
index 953d379..4bd4883 100644
--- a/gobs/pym/sync.py
+++ b/gobs/pym/sync.py
@@ -24,7 +24,7 @@ def git_pull():
repo_remote = repo.remotes.origin
repo_remote.pull()
master = repo.head.reference
- log_msg = "Git log: %s" % master.log()
+ log_msg = "Git log: %s" % (master.log(),9
add_gobs_logs(conn, log_msg, "info", config_profile)
log_msg = "Git pull ... Done"
add_gobs_logs(conn, log_msg, "info", config_profile)
diff --git a/gobs/pym/updatedb.py b/gobs/pym/updatedb.py
index b522c24..6bfc5ef 100755
--- a/gobs/pym/updatedb.py
+++ b/gobs/pym/updatedb.py
@@ -50,7 +50,7 @@ def init_portage_settings():
default_config_root = "/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id[0] + "/"
# Set config_root (PORTAGE_CONFIGROOT) to default_config_root
mysettings = portage.config(config_root = default_config_root)
- log_msg = "Setting default config to: %s" % config_id[0]
+ log_msg = "Setting default config to: %s" % (config_id[0],)
add_gobs_logs(conn, log_msg, "info", config_profile)
return mysettings
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-05-13 17:59 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-05-13 17:59 UTC (permalink / raw
To: gentoo-commits
commit: 74d67a9f83eab62b8a30e073c43cc83dddb49dae
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sun May 13 17:59:08 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sun May 13 17:59:08 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=74d67a9f
a lot of fixes
---
gobs/bin/gobs_guest_jobs | 10 ++++------
gobs/pym/ConnectionManager.py | 4 ++--
gobs/pym/depclean.py | 6 ++++++
3 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/gobs/bin/gobs_guest_jobs b/gobs/bin/gobs_guest_jobs
index c490366..bcf77d1 100755
--- a/gobs/bin/gobs_guest_jobs
+++ b/gobs/bin/gobs_guest_jobs
@@ -29,17 +29,15 @@ def main_loop(config_profile):
while repeat:
jobs_main(config_profile)
if check_configure_guest(config_profile) is not True:
- time.sleep(60)
+ time.sleep(6)
continue
else:
init_queru = queruaction(config_profile)
p = Process(target=init_queru.procces_qureru)
p.start()
- p.join()
- CM.closeAllConnections()
- continue
- repeat = False
- time.sleep(60)
+ p.join()
+ repeat = True
+ time.sleep(6)
return
def main():
diff --git a/gobs/pym/ConnectionManager.py b/gobs/pym/ConnectionManager.py
index 404e62f..ff0e07f 100644
--- a/gobs/pym/ConnectionManager.py
+++ b/gobs/pym/ConnectionManager.py
@@ -17,7 +17,7 @@ class connectionManager(object):
cls._database=settings_dict['sql_db']
#shouldnt we include port also?
try:
- from psycopg2 import pool
+ from psycopg2 import pool, extensions
cls._connectionNumber=numberOfconnections
#always create 1 connection
cls._pool=pool.ThreadedConnectionPool(1,cls._connectionNumber,host=cls._host,database=cls._database,user=cls._user,password=cls._password)
@@ -36,7 +36,7 @@ class connectionManager(object):
return self._pool.getconn()
def putConnection(self, connection):
- self._pool.putconn(connection)
+ self._pool.putconn(connection , key=None, close=True)
def closeAllConnections(self):
self._pool.closeall()
diff --git a/gobs/pym/depclean.py b/gobs/pym/depclean.py
index 1df844a..ad75a48 100644
--- a/gobs/pym/depclean.py
+++ b/gobs/pym/depclean.py
@@ -1,6 +1,7 @@
from __future__ import print_function
import errno
import logging
+import textwrap
import portage
from portage._sets.base import InternalPackageSet
from _emerge.main import parse_opts
@@ -14,6 +15,11 @@ from _emerge.unmerge import unmerge
from portage.util import cmp_sort_key, writemsg, \
writemsg_level, writemsg_stdout
from portage.util.digraph import digraph
+from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
+red, yellow
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
+warn = create_color_func("WARN")
def main_depclean():
mysettings, mytrees, mtimedb = load_emerge_config()
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-05-09 23:12 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-05-09 23:12 UTC (permalink / raw
To: gentoo-commits
commit: af97e220cade1aea00741eea082e5274abf61bb5
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Wed May 9 23:12:12 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Wed May 9 23:12:12 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=af97e220
testing use flags autounmask
---
gobs/bin/gobs_guest_jobs | 1 +
gobs/pym/build_queru.py | 350 +---------------------------------------------
2 files changed, 3 insertions(+), 348 deletions(-)
diff --git a/gobs/bin/gobs_guest_jobs b/gobs/bin/gobs_guest_jobs
index 59c9da8..c490366 100755
--- a/gobs/bin/gobs_guest_jobs
+++ b/gobs/bin/gobs_guest_jobs
@@ -36,6 +36,7 @@ def main_loop(config_profile):
p = Process(target=init_queru.procces_qureru)
p.start()
p.join()
+ CM.closeAllConnections()
continue
repeat = False
time.sleep(60)
diff --git a/gobs/pym/build_queru.py b/gobs/pym/build_queru.py
index bdaa064..004afbb 100644
--- a/gobs/pym/build_queru.py
+++ b/gobs/pym/build_queru.py
@@ -21,7 +21,6 @@ from gobs.manifest import gobs_manifest
from gobs.depclean import main_depclean
from gobs.flags import gobs_use_flags
from _emerge.depgraph import depgraph, backtrack_depgraph
-
from portage import _encodings
from portage import _unicode_decode
from portage.versions import cpv_getkey
@@ -113,352 +112,6 @@ class queruaction(object):
move_queru_buildlog(conn, build_dict['queue_id'], build_error, summary_error, build_log_dict)
CM.putConnection(conn)
- # copy of ../pym/_emerge/depgraph.py from portage
- def _show_unsatisfied_dep(self, mydepgraph, root, atom, myparent=None, arg=None,
- check_backtrack=False, check_autounmask_breakage=False):
- """
- When check_backtrack=True, no output is produced and
- the method either returns or raises _backtrack_mask if
- a matching package has been masked by backtracking.
- """
- backtrack_mask = False
- autounmask_broke_use_dep = False
- atom_set = InternalPackageSet(initial_atoms=(atom.without_use,),
- allow_repo=True)
- atom_set_with_use = InternalPackageSet(initial_atoms=(atom,),
- allow_repo=True)
- xinfo = '"%s"' % atom.unevaluated_atom
- if arg:
- xinfo='"%s"' % arg
- if isinstance(myparent, AtomArg):
- xinfo = _unicode_decode('"%s"') % (myparent,)
- # Discard null/ from failed cpv_expand category expansion.
- xinfo = xinfo.replace("null/", "")
- if root != mydepgraph._frozen_config._running_root.root:
- xinfo = "%s for %s" % (xinfo, root)
- masked_packages = []
- missing_use = []
- missing_use_adjustable = set()
- required_use_unsatisfied = []
- masked_pkg_instances = set()
- have_eapi_mask = False
- pkgsettings = mydepgraph._frozen_config.pkgsettings[root]
- root_config = mydepgraph._frozen_config.roots[root]
- portdb = mydepgraph._frozen_config.roots[root].trees["porttree"].dbapi
- vardb = mydepgraph._frozen_config.roots[root].trees["vartree"].dbapi
- bindb = mydepgraph._frozen_config.roots[root].trees["bintree"].dbapi
- dbs = mydepgraph._dynamic_config._filtered_trees[root]["dbs"]
- for db, pkg_type, built, installed, db_keys in dbs:
- if installed:
- continue
- if hasattr(db, "xmatch"):
- cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
- else:
- cpv_list = db.match(atom.without_use)
- if atom.repo is None and hasattr(db, "getRepositories"):
- repo_list = db.getRepositories()
- else:
- repo_list = [atom.repo]
- # descending order
- cpv_list.reverse()
- for cpv in cpv_list:
- for repo in repo_list:
- if not db.cpv_exists(cpv, myrepo=repo):
- continue
-
- metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
- built, installed, db_keys, myrepo=repo, _pkg_use_enabled=mydepgraph._pkg_use_enabled)
- if metadata is not None and \
- portage.eapi_is_supported(metadata["EAPI"]):
- if not repo:
- repo = metadata.get('repository')
- pkg = mydepgraph._pkg(cpv, pkg_type, root_config,
- installed=installed, myrepo=repo)
- # pkg.metadata contains calculated USE for ebuilds,
- # required later for getMissingLicenses.
- metadata = pkg.metadata
- if pkg.invalid:
- # Avoid doing any operations with packages that
- # have invalid metadata. It would be unsafe at
- # least because it could trigger unhandled
- # exceptions in places like check_required_use().
- masked_packages.append(
- (root_config, pkgsettings, cpv, repo, metadata, mreasons))
- continue
- if not atom_set.findAtomForPackage(pkg,
- modified_use=mydepgraph._pkg_use_enabled(pkg)):
- continue
- if pkg in mydepgraph._dynamic_config._runtime_pkg_mask:
- backtrack_reasons = \
- mydepgraph._dynamic_config._runtime_pkg_mask[pkg]
- mreasons.append('backtracking: %s' % \
- ', '.join(sorted(backtrack_reasons)))
- backtrack_mask = True
- if not mreasons and mydepgraph._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
- modified_use=mydepgraph._pkg_use_enabled(pkg)):
- mreasons = ["exclude option"]
- if mreasons:
- masked_pkg_instances.add(pkg)
- if atom.unevaluated_atom.use:
- try:
- if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
- or atom.violated_conditionals(mydepgraph._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
- missing_use.append(pkg)
- if atom_set_with_use.findAtomForPackage(pkg):
- autounmask_broke_use_dep = True
- if not mreasons:
- continue
- except InvalidAtom:
- writemsg("violated_conditionals raised " + \
- "InvalidAtom: '%s' parent: %s" % \
- (atom, myparent), noiselevel=-1)
- raise
- if not mreasons and \
- not pkg.built and \
- pkg.metadata.get("REQUIRED_USE") and \
- eapi_has_required_use(pkg.metadata["EAPI"]):
- if not check_required_use(
- pkg.metadata["REQUIRED_USE"],
- mydepgraph._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag):
- required_use_unsatisfied.append(pkg)
- continue
- root_slot = (pkg.root, pkg.slot_atom)
- if pkg.built and root_slot in mydepgraph._rebuild.rebuild_list:
- mreasons = ["need to rebuild from source"]
- elif pkg.installed and root_slot in mydepgraph._rebuild.reinstall_list:
- mreasons = ["need to rebuild from source"]
- elif pkg.built and not mreasons:
- mreasons = ["use flag configuration mismatch"]
- masked_packages.append(
- (root_config, pkgsettings, cpv, repo, metadata, mreasons))
-
- if check_backtrack:
- if backtrack_mask:
- raise mydepgraph._backtrack_mask()
- else:
- return
-
- if check_autounmask_breakage:
- if autounmask_broke_use_dep:
- raise mydepgraph._autounmask_breakage()
- else:
- return
-
- missing_use_reasons = []
- missing_iuse_reasons = []
- for pkg in missing_use:
- use = mydepgraph._pkg_use_enabled(pkg)
- missing_iuse = []
- #Use the unevaluated atom here, because some flags might have gone
- #lost during evaluation.
- required_flags = atom.unevaluated_atom.use.required
- missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
-
- mreasons = []
- if missing_iuse:
- mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
- missing_iuse_reasons.append((pkg, mreasons))
- else:
- need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
- need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
-
- untouchable_flags = \
- frozenset(chain(pkg.use.mask, pkg.use.force))
- if untouchable_flags.intersection(
- chain(need_enable, need_disable)):
- continue
-
- missing_use_adjustable.add(pkg)
- required_use = pkg.metadata.get("REQUIRED_USE")
- required_use_warning = ""
- if required_use:
- old_use = mydepgraph._pkg_use_enabled(pkg)
- new_use = set(mydepgraph._pkg_use_enabled(pkg))
- for flag in need_enable:
- new_use.add(flag)
- for flag in need_disable:
- new_use.discard(flag)
- if check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
- required_use_warning = ", this change violates use flag constraints " + \
- "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
-
- if need_enable or need_disable:
- changes = []
- changes.extend(colorize("red", "+" + x) \
- for x in need_enable)
- changes.extend(colorize("blue", "-" + x) \
- for x in need_disable)
- mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
- missing_use_reasons.append((pkg, mreasons))
-
- if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
- # Lets see if the violated use deps are conditional.
- # If so, suggest to change them on the parent.
- # If the child package is masked then a change to
- # parent USE is not a valid solution (a normal mask
- # message should be displayed instead).
- if pkg in masked_pkg_instances:
- continue
- mreasons = []
- violated_atom = atom.unevaluated_atom.violated_conditionals(mydepgraph._pkg_use_enabled(pkg), \
- pkg.iuse.is_valid_flag, mydepgraph._pkg_use_enabled(myparent))
- if not (violated_atom.use.enabled or violated_atom.use.disabled):
- #all violated use deps are conditional
- changes = []
- conditional = violated_atom.use.conditional
- involved_flags = set(chain(conditional.equal, conditional.not_equal, \
- conditional.enabled, conditional.disabled))
- untouchable_flags = \
- frozenset(chain(myparent.use.mask, myparent.use.force))
- if untouchable_flags.intersection(involved_flags):
- continue
- required_use = myparent.metadata.get("REQUIRED_USE")
- required_use_warning = ""
- if required_use:
- old_use = mydepgraph._pkg_use_enabled(myparent)
- new_use = set(mydepgraph._pkg_use_enabled(myparent))
- for flag in involved_flags:
- if flag in old_use:
- new_use.discard(flag)
- else:
- new_use.add(flag)
- if check_required_use(required_use, old_use, myparent.iuse.is_valid_flag) and \
- not check_required_use(required_use, new_use, myparent.iuse.is_valid_flag):
- required_use_warning = ", this change violates use flag constraints " + \
- "defined by %s: '%s'" % (myparent.cpv, \
- human_readable_required_use(required_use))
- for flag in involved_flags:
- if flag in mydepgraph._pkg_use_enabled(myparent):
- changes.append(colorize("blue", "-" + flag))
- else:
- changes.append(colorize("red", "+" + flag))
- mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
- if (myparent, mreasons) not in missing_use_reasons:
- missing_use_reasons.append((myparent, mreasons))
-
- unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
- in missing_use_reasons if pkg not in masked_pkg_instances]
- unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
- in missing_iuse_reasons if pkg not in masked_pkg_instances]
- show_missing_use = False
- if unmasked_use_reasons:
- # Only show the latest version.
- show_missing_use = []
- pkg_reason = None
- parent_reason = None
- for pkg, mreasons in unmasked_use_reasons:
- if pkg is myparent:
- if parent_reason is None:
- #This happens if a use change on the parent
- #leads to a satisfied conditional use dep.
- parent_reason = (pkg, mreasons)
- elif pkg_reason is None:
- #Don't rely on the first pkg in unmasked_use_reasons,
- #being the highest version of the dependency.
- pkg_reason = (pkg, mreasons)
- if pkg_reason:
- show_missing_use.append(pkg_reason)
- if parent_reason:
- show_missing_use.append(parent_reason)
- elif unmasked_iuse_reasons:
- masked_with_iuse = False
- for pkg in masked_pkg_instances:
- #Use atom.unevaluated here, because some flags might have gone
- #lost during evaluation.
- if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
- # Package(s) with required IUSE are masked,
- # so display a normal masking message.
- masked_with_iuse = True
- break
- if not masked_with_iuse:
- show_missing_use = unmasked_iuse_reasons
- if required_use_unsatisfied:
- # If there's a higher unmasked version in missing_use_adjustable
- # then we want to show that instead.
- for pkg in missing_use_adjustable:
- if pkg not in masked_pkg_instances and \
- pkg > required_use_unsatisfied[0]:
- required_use_unsatisfied = False
- break
- mask_docs = False
-
- if required_use_unsatisfied:
- # We have an unmasked package that only requires USE adjustment
- # in order to satisfy REQUIRED_USE, and nothing more. We assume
- # that the user wants the latest version, so only the first
- # instance is displayed.
- pkg = required_use_unsatisfied[0]
- output_cpv = pkg.cpv + _repo_separator + pkg.repo
- writemsg_stdout("\n!!! " + \
- colorize("BAD", "The ebuild selected to satisfy ") + \
- colorize("INFORM", xinfo) + \
- colorize("BAD", " has unmet requirements.") + "\n",
- noiselevel=-1)
- use_display = pkg_use_display(pkg, mydepgraph._frozen_config.myopts)
- writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
- noiselevel=-1)
- writemsg_stdout("\n The following REQUIRED_USE flag constraints " + \
- "are unsatisfied:\n", noiselevel=-1)
- reduced_noise = check_required_use(
- pkg.metadata["REQUIRED_USE"],
- mydepgraph._pkg_use_enabled(pkg),
- pkg.iuse.is_valid_flag).tounicode()
- writemsg_stdout(" %s\n" % \
- human_readable_required_use(reduced_noise),
- noiselevel=-1)
- normalized_required_use = \
- " ".join(pkg.metadata["REQUIRED_USE"].split())
- if reduced_noise != normalized_required_use:
- writemsg_stdout("\n The above constraints " + \
- "are a subset of the following complete expression:\n",
- noiselevel=-1)
- writemsg_stdout(" %s\n" % \
- human_readable_required_use(normalized_required_use),
- noiselevel=-1)
- writemsg_stdout("\n", noiselevel=-1)
-
- elif show_missing_use:
- writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
- writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
- for pkg, mreasons in show_missing_use:
- writemsg_stdout("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
-
- elif masked_packages:
- writemsg_stdout("\n!!! " + \
- colorize("BAD", "All ebuilds that could satisfy ") + \
- colorize("INFORM", xinfo) + \
- colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
- writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
- have_eapi_mask = show_masked_packages(masked_packages)
- if have_eapi_mask:
- writemsg_stdout("\n", noiselevel=-1)
- msg = ("The current version of portage supports " + \
- "EAPI '%s'. You must upgrade to a newer version" + \
- " of portage before EAPI masked packages can" + \
- " be installed.") % portage.const.EAPI
- writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
- writemsg_stdout("\n", noiselevel=-1)
- mask_docs = True
- else:
- cp_exists = False
- msg = []
- if not isinstance(myparent, AtomArg):
- # It's redundant to show parent for AtomArg since
- # it's the same as 'xinfo' displayed above.
- dep_chain = self._get_dep_chain(myparent, atom)
- for node, node_type in dep_chain:
- msg.append('(dependency required by "%s" [%s])' % \
- (colorize('INFORM', _unicode_decode("%s") % \
- (node)), node_type))
- if msg:
- writemsg_stdout("\n".join(msg), noiselevel=-1)
- writemsg_stdout("\n", noiselevel=-1)
- if mask_docs:
- show_mask_docs()
- writemsg_stdout("\n", noiselevel=-1)
-
def action_build(self, settings, trees, mtimedb, myopts, myaction, myfiles, spinner, build_dict):
if '--usepkgonly' not in myopts:
@@ -502,6 +155,7 @@ class queruaction(object):
if not success:
for pargs, kwargs in mydepgraph._dynamic_config._unsatisfied_deps_for_display:
mydepgraph._show_unsatisfied_dep(mydepgraph, *pargs, **kwargs)
+ mydepgraph.display_problems()
settings, trees, mtimedb = load_emerge_config()
myparams = create_depgraph_params(myopts, myaction)
try:
@@ -1068,7 +722,7 @@ class queruaction(object):
CM.putConnection(conn)
return
if not build_dict['post_message'] is [] and build_dict['ebuild_id'] is None:
- CM.putConnection(conn)
+ CM.closeAllConnections()
return
if not build_dict['ebuild_id'] is None and build_dict['checksum'] is None:
del_old_queue(conn, build_dict['queue_id'])
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-05-06 10:41 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-05-06 10:41 UTC (permalink / raw
To: gentoo-commits
commit: 6d495fec6d0268d1eb7b793d0c26af6947b0f6e9
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sun May 6 10:41:29 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sun May 6 10:41:29 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=6d495fec
move the jobs to jobs.py
---
gobs/bin/gobs_guest_jobs | 67 ++++++----------------------------------
gobs/bin/gobs_host_jobs | 46 ++-------------------------
gobs/pym/depclean.py | 12 +++++++
gobs/pym/jobs.py | 77 ++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 102 insertions(+), 100 deletions(-)
diff --git a/gobs/bin/gobs_guest_jobs b/gobs/bin/gobs_guest_jobs
index 7c1fdeb..59c9da8 100755
--- a/gobs/bin/gobs_guest_jobs
+++ b/gobs/bin/gobs_guest_jobs
@@ -13,9 +13,8 @@ if CM.getName()=='pgsql':
from gobs.pgsql import *
from gobs.check_setup import check_configure_guest
-from gobs.sync import git_pull, sync_tree
from gobs.build_queru import queruaction
-from gobs.buildquerydb import add_buildquery_main, del_buildquery_main
+from gobs.jobs import jobs_main
import portage
import sys
@@ -28,64 +27,18 @@ def main_loop(config_profile):
repeat = True
logging.info("Job and build deamon started.")
while repeat:
- conn = CM.getConnection()
- job = check_job_list(conn, config_profile)
- if job is None:
- CM.putConnection(conn)
- if check_configure_guest(config_profile) is not True:
- time.sleep(60)
- continue
- else:
- init_queru = queruaction(config_profile)
- p = Process(target=init_queru.procces_qureru)
- p.start()
- p.join()
- continue
+ jobs_main(config_profile)
+ if check_configure_guest(config_profile) is not True:
+ time.sleep(60)
+ continue
else:
- logging.info("Job: %s Type: %s Config: %s", job[1], job[0], config_profile)
- if job[0] == "addbuildquery":
- update_job_list(conn, "Runing", job[1])
- logging.info("Job %s is runing. Config: %s", job[0], config_profile)
- result = add_buildquery_main(config_profile)
- if result is True:
- update_job_list(conn, "Done", job[1])
- logging.info("Job %s is done. Config: %s", job[0], config_profile)
- else:
- update_job_list(conn, "Fail", job[1])
- logging.info("Job %s did fail. Config: %s", job[0], config_profile)
- elif job[0] == "delbuildquery":
- update_job_list(conn, "Runing", job[1])
- logging.info("Job %s is runing. Config: %s", job[0], config_profile)
- result = del_buildquery_main(config_profile)
- if result is True:
- update_job_list(conn, "Done", job[1])
- logging.info("Job %s is done. Config: %s", job[0], config_profile)
- else:
- update_job_list(conn, "Fail", job[1])
- logging.info("Job %s did fail. Config: %s", job[0], config_profile)
- elif job[0] == "gitsync":
- update_job_list(conn, "Runing", job[1])
- logging.info("Job %s is runing. Config: %s", job[0], config_profile)
- result = git_pull()
- if result is True:
- update_job_list(conn, "Done", job[1])
- logging.info("Job %s is done. Config: %s", job[0], config_profile)
- else:
- update_job_list(conn, "Fail", job[1])
- logging.info("Job %s did fail. Config: %s", job[0], config_profile)
- elif job[0] == "emergesync":
- update_job_list(conn, "Runing", job[1])
- logging.info("Job %s is runing. Config: %s", job[0], config_profile)
- result = sync_tree()
- if result is True:
- update_job_list(conn, "Done", job[1])
- logging.info("Job %s is done. Config: %s", job[0], config_profile)
- else:
- update_job_list(conn, "Fail", job[1])
- logging.info("Job %s did fail. Config: %s", job[0], config_profile)
+ init_queru = queruaction(config_profile)
+ p = Process(target=init_queru.procces_qureru)
+ p.start()
+ p.join()
+ continue
repeat = False
time.sleep(60)
- CM.putConnection(conn)
return
def main():
diff --git a/gobs/bin/gobs_host_jobs b/gobs/bin/gobs_host_jobs
index ea39f3b..ba309b5 100755
--- a/gobs/bin/gobs_host_jobs
+++ b/gobs/bin/gobs_host_jobs
@@ -5,8 +5,7 @@
from __future__ import print_function
from gobs.readconf import get_conf_settings
-from gobs.updatedb import update_db_main
-from gobs.sync import git_pull, sync_tree
+from gobs.jobs import jobs_main
reader = get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
@@ -30,47 +29,8 @@ def main():
repeat = True
logging.info("Job deamon started.")
while repeat:
- conn = CM.getConnection()
- job = check_job_list(conn, config_profile)
-
- if job is None:
- time.sleep(60)
- CM.putConnection(conn)
- continue
- else:
- logging.info("Job: %s Type: %s Config: %s", job[1], job[0], config_profile)
- if job[0] == "updatedb":
- update_job_list(conn, "Runing", job[1])
- logging.info("Job %s is runing. Config: %s", job[0], config_profile)
- result = update_db_main()
- if result is True:
- update_job_list(conn, "Done", job[1])
- logging.info("Job %s is done. Config: %s", job[0], config_profile)
- else:
- update_job_list(conn, "Fail", job[1])
- logging.info("Job %s did fail. Config: %s", job[0], config_profile)
- elif job[0] == "gitsync":
- update_job_list(conn, "Runing", job[1])
- logging.info("Job %s is runing. Config: %s", job[0], config_profile)
- result = git_pull()
- if result is True:
- update_job_list(conn, "Done", job[1])
- logging.info("Job %s is done. Config: %s", job[0], config_profile)
- else:
- update_job_list(conn, "Fail", job[1])
- logging.info("Job %s did fail. Config: %s", job[0], config_profile)
- elif job[0] == "emergesync":
- update_job_list(conn, "Runing", job[1])
- logging.info("Job %s is runing. Config: %s", job[0], config_profile)
- result = sync_tree()
- if result is True:
- update_job_list(conn, "Done", job[1])
- logging.info("Job %s is done. Config: %s", job[0], config_profile)
- else:
- update_job_list(conn, "Fail", job[1])
- logging.info("Job %s did fail. Config: %s", job[0], config_profile)
- repeat = False
- CM.putConnection(conn)
+ jobs_main(config_profile)
+ repeat = False
time.sleep(60)
CM.closeAllConnections()
diff --git a/gobs/pym/depclean.py b/gobs/pym/depclean.py
index b6096b6..6173c20 100644
--- a/gobs/pym/depclean.py
+++ b/gobs/pym/depclean.py
@@ -1,5 +1,6 @@
from __future__ import print_function
import errno
+import logging
import portage
from portage._sets.base import InternalPackageSet
from _emerge.main import parse_opts
@@ -55,6 +56,17 @@ def main_depclean():
unmerge(root_config, myopts, "unmerge", cleanlist, mtimedb["ldpath"], ordered=ordered, scheduler=scheduler)
print("Number removed: "+str(len(cleanlist)))
return True
+ else:
+ logging.info("conflicting packages: %s", conflict_package_list)
+ tmpcmdline = []
+ tmpcmdline.append("--depclean")
+ tmpcmdline.append("--exclude")
+ for conflict_package in conflict_package_list:
+ tmpcmdline.append(portage.versions.pkg_cp(conflict_package)
+ myaction, myopts, myfiles = parse_opts(tmpcmdline, silent=False)
+ unmerge(root_config, myopts, "unmerge", cleanlist, mtimedb["ldpath"], ordered=ordered, scheduler=scheduler)
+ print("Number removed: "+str(len(cleanlist)))
+ return True
return True
def calc_depclean(settings, trees, ldpath_mtimes,
diff --git a/gobs/pym/jobs.py b/gobs/pym/jobs.py
new file mode 100644
index 0000000..a49cae2
--- /dev/null
+++ b/gobs/pym/jobs.py
@@ -0,0 +1,77 @@
+from __future__ import print_function
+
+# Get the options from the config file set in gobs.readconf
+from gobs.readconf import get_conf_settings
+reader=get_conf_settings()
+gobs_settings_dict=reader.read_gobs_settings_all()
+
+from gobs.ConnectionManager import connectionManager
+CM=connectionManager(gobs_settings_dict)
+#selectively import the pgsql/mysql querys
+if CM.getName()=='pgsql':
+ from gobs.pgsql import *
+
+from gobs.sync import git_pull, sync_tree
+from gobs.buildquerydb import add_buildquery_main, del_buildquery_main
+from gobs.updatedb import update_db_main
+
+import logging
+
+def jobs_main(config_profile):
+ conn = CM.getConnection()
+ job = check_job_list(conn, config_profile)
+ if job is None:
+ CM.putConnection(conn)
+ return
+ logging.info("Job: %s Type: %s Config: %s", job[1], job[0], config_profile)
+ if job[0] == "addbuildquery":
+ update_job_list(conn, "Runing", job[1])
+ logging.info("Job %s is runing. Config: %s", job[0], config_profile)
+ result = add_buildquery_main(config_profile)
+ if result is True:
+ update_job_list(conn, "Done", job[1])
+ logging.info("Job %s is done. Config: %s", job[0], config_profile)
+ else:
+ update_job_list(conn, "Fail", job[1])
+ logging.info("Job %s did fail. Config: %s", job[0], config_profile)
+ elif job[0] == "delbuildquery":
+ update_job_list(conn, "Runing", job[1])
+ logging.info("Job %s is runing. Config: %s", job[0], config_profile)
+ result = del_buildquery_main(config_profile)
+ if result is True:
+ update_job_list(conn, "Done", job[1])
+ logging.info("Job %s is done. Config: %s", job[0], config_profile)
+ else:
+ update_job_list(conn, "Fail", job[1])
+ logging.info("Job %s did fail. Config: %s", job[0], config_profile)
+ elif job[0] == "gitsync":
+ update_job_list(conn, "Runing", job[1])
+ logging.info("Job %s is runing. Config: %s", job[0], config_profile)
+ result = git_pull()
+ if result is True:
+ update_job_list(conn, "Done", job[1])
+ logging.info("Job %s is done. Config: %s", job[0], config_profile)
+ else:
+ update_job_list(conn, "Fail", job[1])
+ logging.info("Job %s did fail. Config: %s", job[0], config_profile)
+ elif job[0] == "emergesync":
+ update_job_list(conn, "Runing", job[1])
+ logging.info("Job %s is runing. Config: %s", job[0], config_profile)
+ result = sync_tree()
+ if result is True:
+ update_job_list(conn, "Done", job[1])
+ logging.info("Job %s is done. Config: %s", job[0], config_profile)
+ else:
+ update_job_list(conn, "Fail", job[1])
+ logging.info("Job %s did fail. Config: %s", job[0], config_profile)
+ elif job[0] == "updatedb":
+ update_job_list(conn, "Runing", job[1])
+ logging.info("Job %s is runing. Config: %s", job[0], config_profile)
+ result = update_db_main()
+ if result is True:
+ update_job_list(conn, "Done", job[1])
+ logging.info("Job %s is done. Config: %s", job[0], config_profile)
+ else:
+ update_job_list(conn, "Fail", job[1])
+ logging.info("Job %s did fail. Config: %s", job[0], config_profile)
+ return
\ No newline at end of file
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-05-04 22:32 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-05-04 22:32 UTC (permalink / raw
To: gentoo-commits
commit: 87c6c7bcd4add66a3e3aac0d7becc3ce267539fb
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Fri May 4 22:31:37 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Fri May 4 22:31:37 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=87c6c7bc
fix the logfile location
---
gobs/bin/gobs_guest_jobs | 2 +-
gobs/pym/build_log.py | 4 +---
gobs/pym/build_queru.py | 2 +-
3 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/gobs/bin/gobs_guest_jobs b/gobs/bin/gobs_guest_jobs
index 6fd55ae..7c1fdeb 100755
--- a/gobs/bin/gobs_guest_jobs
+++ b/gobs/bin/gobs_guest_jobs
@@ -85,7 +85,7 @@ def main_loop(config_profile):
logging.info("Job %s did fail. Config: %s", job[0], config_profile)
repeat = False
time.sleep(60)
- CM.putConnection(conn)
+ CM.putConnection(conn)
return
def main():
diff --git a/gobs/pym/build_log.py b/gobs/pym/build_log.py
index 0e0f834..56fc029 100644
--- a/gobs/pym/build_log.py
+++ b/gobs/pym/build_log.py
@@ -548,9 +548,7 @@ class gobs_buildlog(object):
if sum_build_log_list != []:
for sum_log_line in sum_build_log_list:
summary_error = summary_error + " " + sum_log_line
- # FIXME: use port_logdir in stead of the \/var\/log\/portage\/ string
- logging.info("logdir: %s", settings.get("PORT_LOGDIR"))
- build_log_dict['logfilename'] = re.sub("\/var\/log\/portage\/", "", settings.get("PORTAGE_LOG_FILE"))
+ build_log_dict['logfilename'] = settings.get("PORTAGE_LOG_FILE").split(self._config_profile)[1]
logging.info("Logfile name: %s", settings.get("PORTAGE_LOG_FILE"))
if build_dict['queue_id'] is None:
build_id = self.add_new_ebuild_buildlog(settings, pkg, build_dict, build_error, summary_error, build_log_dict)
diff --git a/gobs/pym/build_queru.py b/gobs/pym/build_queru.py
index 373bb8e..5ddcdf9 100644
--- a/gobs/pym/build_queru.py
+++ b/gobs/pym/build_queru.py
@@ -104,7 +104,7 @@ class queruaction(object):
for sum_log_line in sum_build_log_list:
summary_error = summary_error + " " + sum_log_line
if settings.get("PORTAGE_LOG_FILE") is not None:
- build_log_dict['logfilename'] = re.sub("\/var\/log\/portage\/", "", settings.get("PORTAGE_LOG_FILE"))
+ build_log_dict['logfilename'] = settings.get("PORTAGE_LOG_FILE").split(self._config_profile)[1]
os.chmod(settings.get("PORTAGE_LOG_FILE"), 0o224)
else:
build_log_dict['logfilename'] = ""
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-05-01 1:12 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-05-01 1:12 UTC (permalink / raw
To: gentoo-commits
commit: 013ae1a327652f270df045b3779f29aaaeb4a2c1
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Tue May 1 01:11:42 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Tue May 1 01:11:42 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=013ae1a3
fix code in check_setup.py
---
gobs/bin/gobs_guest_jobs | 3 ++-
gobs/pym/check_setup.py | 17 +++++++++--------
gobs/pym/updatedb.py | 7 ++++---
3 files changed, 15 insertions(+), 12 deletions(-)
diff --git a/gobs/bin/gobs_guest_jobs b/gobs/bin/gobs_guest_jobs
index 43b8cc1..6fd55ae 100755
--- a/gobs/bin/gobs_guest_jobs
+++ b/gobs/bin/gobs_guest_jobs
@@ -33,6 +33,7 @@ def main_loop(config_profile):
if job is None:
CM.putConnection(conn)
if check_configure_guest(config_profile) is not True:
+ time.sleep(60)
continue
else:
init_queru = queruaction(config_profile)
@@ -83,7 +84,7 @@ def main_loop(config_profile):
update_job_list(conn, "Fail", job[1])
logging.info("Job %s did fail. Config: %s", job[0], config_profile)
repeat = False
- time.sleep(10)
+ time.sleep(60)
CM.putConnection(conn)
return
diff --git a/gobs/pym/check_setup.py b/gobs/pym/check_setup.py
index c0ba2ee..3a5b404 100644
--- a/gobs/pym/check_setup.py
+++ b/gobs/pym/check_setup.py
@@ -2,6 +2,8 @@ from __future__ import print_function
import portage
import os
import errno
+import logging
+
from git import *
from gobs.text import get_file_text
@@ -16,11 +18,10 @@ if CM.getName()=='pgsql':
from gobs.pgsql import *
def check_make_conf():
- # FIXME: mark any config updating true in the db when updating the configs
# Get the config list
conn=CM.getConnection()
config_list_all = get_config_list_all(conn)
- print("Checking configs for changes and errors")
+ logging.info("Checking configs for changes and errors")
configsDict = {}
for config_id in config_list_all:
attDict={}
@@ -40,20 +41,20 @@ def check_make_conf():
except Exception as e:
attDict['config_error'] = e
attDict['active'] = 'False'
- print("Fail", config_id[0])
+ logging.info("%s FAIL!", config_id[0])
else:
attDict['config_error'] = ''
attDict['active'] = 'True'
- print("Pass", config_id[0])
+ logging.info("%s PASS", config_id[0])
# Get the checksum of make.conf
make_conf_checksum_tree = portage.checksum.sha256hash(make_conf_file)[0]
- # Check if we have change the make.conf and update the db with it
+ logging.info("make.conf checksum is %s on %s", make_conf_checksum_tree, config_id[0])
attDict['make_conf_text'] = get_file_text(make_conf_file)
attDict['make_conf_checksum_tree'] = make_conf_checksum_tree
- configsDict[config_id]=attDict
- update__make_conf(conn,configsDict)
+ configsDict[config_id[0]]=attDict
+ update__make_conf(conn, configsDict)
CM.putConnection(conn)
- print("Updated configurtions")
+ logging.info("Checking configs for changes and errors ... Done")
def check_make_conf_guest(config_profile):
conn=CM.getConnection()
diff --git a/gobs/pym/updatedb.py b/gobs/pym/updatedb.py
index 0277396..f718bc4 100755
--- a/gobs/pym/updatedb.py
+++ b/gobs/pym/updatedb.py
@@ -103,9 +103,10 @@ def update_cpv_db(mysettings):
# Run the update package for all package in the list in
# a multiprocessing pool
for package_line in sorted(package_list_tree):
- pool.apply_async(update_cpv_db_pool, (mysettings, package_line,))
- pool.close()
- pool.join()
+ update_cpv_db_pool(mysettings, package_line)
+ # pool.apply_async(update_cpv_db_pool, (mysettings, package_line,))
+ # pool.close()
+ # pool.join()
logging.info("Checking categories, package and ebuilds done")
def update_db_main():
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-04-30 12:08 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-04-30 12:08 UTC (permalink / raw
To: gentoo-commits
commit: bbae8b8f038b93563a7bfc5dec8f2adc6db65edd
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Mon Apr 30 12:05:33 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Mon Apr 30 12:05:33 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=bbae8b8f
adding git pull and emerge sync
---
gobs/bin/gobs_guest_jobs | 22 +++++++++++++++++++++-
gobs/bin/gobs_host_jobs | 22 +++++++++++++++++++++-
gobs/pym/sync.py | 5 +++--
3 files changed, 45 insertions(+), 4 deletions(-)
diff --git a/gobs/bin/gobs_guest_jobs b/gobs/bin/gobs_guest_jobs
index 2c497d6..43b8cc1 100755
--- a/gobs/bin/gobs_guest_jobs
+++ b/gobs/bin/gobs_guest_jobs
@@ -13,7 +13,7 @@ if CM.getName()=='pgsql':
from gobs.pgsql import *
from gobs.check_setup import check_configure_guest
-from gobs.sync import git_pull
+from gobs.sync import git_pull, sync_tree
from gobs.build_queru import queruaction
from gobs.buildquerydb import add_buildquery_main, del_buildquery_main
@@ -62,6 +62,26 @@ def main_loop(config_profile):
else:
update_job_list(conn, "Fail", job[1])
logging.info("Job %s did fail. Config: %s", job[0], config_profile)
+ elif job[0] == "gitsync":
+ update_job_list(conn, "Runing", job[1])
+ logging.info("Job %s is runing. Config: %s", job[0], config_profile)
+ result = git_pull()
+ if result is True:
+ update_job_list(conn, "Done", job[1])
+ logging.info("Job %s is done. Config: %s", job[0], config_profile)
+ else:
+ update_job_list(conn, "Fail", job[1])
+ logging.info("Job %s did fail. Config: %s", job[0], config_profile)
+ elif job[0] == "emergesync":
+ update_job_list(conn, "Runing", job[1])
+ logging.info("Job %s is runing. Config: %s", job[0], config_profile)
+ result = sync_tree()
+ if result is True:
+ update_job_list(conn, "Done", job[1])
+ logging.info("Job %s is done. Config: %s", job[0], config_profile)
+ else:
+ update_job_list(conn, "Fail", job[1])
+ logging.info("Job %s did fail. Config: %s", job[0], config_profile)
repeat = False
time.sleep(10)
CM.putConnection(conn)
diff --git a/gobs/bin/gobs_host_jobs b/gobs/bin/gobs_host_jobs
index 812b942..ea39f3b 100755
--- a/gobs/bin/gobs_host_jobs
+++ b/gobs/bin/gobs_host_jobs
@@ -6,7 +6,7 @@ from __future__ import print_function
from gobs.readconf import get_conf_settings
from gobs.updatedb import update_db_main
-from gobs.buildquerydb import add_buildquery_main, del_buildquery_main
+from gobs.sync import git_pull, sync_tree
reader = get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
@@ -49,6 +49,26 @@ def main():
else:
update_job_list(conn, "Fail", job[1])
logging.info("Job %s did fail. Config: %s", job[0], config_profile)
+ elif job[0] == "gitsync":
+ update_job_list(conn, "Runing", job[1])
+ logging.info("Job %s is runing. Config: %s", job[0], config_profile)
+ result = git_pull()
+ if result is True:
+ update_job_list(conn, "Done", job[1])
+ logging.info("Job %s is done. Config: %s", job[0], config_profile)
+ else:
+ update_job_list(conn, "Fail", job[1])
+ logging.info("Job %s did fail. Config: %s", job[0], config_profile)
+ elif job[0] == "emergesync":
+ update_job_list(conn, "Runing", job[1])
+ logging.info("Job %s is runing. Config: %s", job[0], config_profile)
+ result = sync_tree()
+ if result is True:
+ update_job_list(conn, "Done", job[1])
+ logging.info("Job %s is done. Config: %s", job[0], config_profile)
+ else:
+ update_job_list(conn, "Fail", job[1])
+ logging.info("Job %s did fail. Config: %s", job[0], config_profile)
repeat = False
CM.putConnection(conn)
time.sleep(60)
diff --git a/gobs/pym/sync.py b/gobs/pym/sync.py
index a39ff4d..ee4bd66 100644
--- a/gobs/pym/sync.py
+++ b/gobs/pym/sync.py
@@ -18,12 +18,13 @@ if CM.getName()=='pgsql':
def git_pull():
logging.info("Git pull")
- repo = Repo("/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/")
+ repo = Repo("/var/cache/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/")
repo_remote = repo.remotes.origin
repo_remote.pull()
master = repo.head.reference
- print(master.log())
+ logging.info("Git log: %s", master.log())
logging.info("Git pull ... Done.")
+ return True
def sync_tree():
conn=CM.getConnection()
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-04-29 14:43 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-04-29 14:43 UTC (permalink / raw
To: gentoo-commits
commit: a2502fc4a74dfd0aced82648ed3a7a4593a2a476
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sun Apr 29 14:42:49 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sun Apr 29 14:42:49 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=a2502fc4
adding updatedb to job host deamon
---
gobs/bin/gobs_host_jobs | 28 +++++++++++++++++--------
gobs/bin/{gobs_host_jobs => gobs_host_jobs~} | 28 +++++++++++++++++--------
gobs/pym/pgsql.py | 6 ++--
gobs/{bin/gobs_updatedb => pym/updatedb.py} | 11 ++-------
4 files changed, 44 insertions(+), 29 deletions(-)
diff --git a/gobs/bin/gobs_host_jobs b/gobs/bin/gobs_host_jobs
index e6e5ead..87e856e 100755
--- a/gobs/bin/gobs_host_jobs
+++ b/gobs/bin/gobs_host_jobs
@@ -6,6 +6,8 @@ from __future__ import print_function
from gobs.init_setup_profile import setup_profile_main
from gobs.readconf import get_conf_settings
+from gobs.updatedb import update_db_main
+
reader = get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
@@ -15,31 +17,39 @@ CM=connectionManager(gobs_settings_dict)
#selectively import the pgsql/mysql querys
if CM.getName()=='pgsql':
from gobs.pgsql import *
-# import logging
+import logging
def main():
# Main
config_profile = gobs_settings_dict['gobs_config']
# Logging
- #logging.basicConfig(filename=gobs_settings_dict['gobs_logfile'], \
- # format='%(levelname)s: %(asctime)s %(message)s', level=logging.INFO)
- conn = CM.getConnection()
- CM.putConnection(conn)
+ logging.basicConfig(filename=gobs_settings_dict['gobs_logfile'], \
+ format='%(levelname)s: %(asctime)s %(message)s', level=logging.INFO)
repeat = True
+ logging.info("Job deamon started.")
while repeat:
conn = CM.getConnection()
job = check_job_list(conn, config_profile)
- print("job jobid:" + job)
+
if job is None:
time.sleep(60)
CM.putConnection(conn)
continue
else:
- update_job_list("Runing", job[1])
+ logging.info("Job: %s Type: %s Config: %s", job[1], job[0], config_profile)
+ if job[0] == "updatedb":
+ update_job_list(conn, "Runing", job[1])
+ logging.info("Job %s is runing. Config: %s", job[0], config_profile)
+ result = update_db_main()
+ if result is True
+ update_job_list(conn, "Done", job[1])
+ logging.info("Job %s is done. Config: %s", job[0], config_profile)
+ else:
+ update_job_list(conn, "Fail", job[1])
+ logging.info("Job %s did fail. Config: %s", job[0], config_profile)
repeat = False
CM.putConnection(conn)
time.sleep(60)
- connectionManager.closeAllConnections()
-
+
if __name__ == "__main__":
main()
\ No newline at end of file
diff --git a/gobs/bin/gobs_host_jobs b/gobs/bin/gobs_host_jobs~
similarity index 54%
copy from gobs/bin/gobs_host_jobs
copy to gobs/bin/gobs_host_jobs~
index e6e5ead..87e856e 100755
--- a/gobs/bin/gobs_host_jobs
+++ b/gobs/bin/gobs_host_jobs~
@@ -6,6 +6,8 @@ from __future__ import print_function
from gobs.init_setup_profile import setup_profile_main
from gobs.readconf import get_conf_settings
+from gobs.updatedb import update_db_main
+
reader = get_conf_settings()
gobs_settings_dict=reader.read_gobs_settings_all()
@@ -15,31 +17,39 @@ CM=connectionManager(gobs_settings_dict)
#selectively import the pgsql/mysql querys
if CM.getName()=='pgsql':
from gobs.pgsql import *
-# import logging
+import logging
def main():
# Main
config_profile = gobs_settings_dict['gobs_config']
# Logging
- #logging.basicConfig(filename=gobs_settings_dict['gobs_logfile'], \
- # format='%(levelname)s: %(asctime)s %(message)s', level=logging.INFO)
- conn = CM.getConnection()
- CM.putConnection(conn)
+ logging.basicConfig(filename=gobs_settings_dict['gobs_logfile'], \
+ format='%(levelname)s: %(asctime)s %(message)s', level=logging.INFO)
repeat = True
+ logging.info("Job deamon started.")
while repeat:
conn = CM.getConnection()
job = check_job_list(conn, config_profile)
- print("job jobid:" + job)
+
if job is None:
time.sleep(60)
CM.putConnection(conn)
continue
else:
- update_job_list("Runing", job[1])
+ logging.info("Job: %s Type: %s Config: %s", job[1], job[0], config_profile)
+ if job[0] == "updatedb":
+ update_job_list(conn, "Runing", job[1])
+ logging.info("Job %s is runing. Config: %s", job[0], config_profile)
+ result = update_db_main()
+ if result is True
+ update_job_list(conn, "Done", job[1])
+ logging.info("Job %s is done. Config: %s", job[0], config_profile)
+ else:
+ update_job_list(conn, "Fail", job[1])
+ logging.info("Job %s did fail. Config: %s", job[0], config_profile)
repeat = False
CM.putConnection(conn)
time.sleep(60)
- connectionManager.closeAllConnections()
-
+
if __name__ == "__main__":
main()
\ No newline at end of file
diff --git a/gobs/pym/pgsql.py b/gobs/pym/pgsql.py
index 5ebe500..64f1b46 100644
--- a/gobs/pym/pgsql.py
+++ b/gobs/pym/pgsql.py
@@ -639,7 +639,7 @@ def make_conf_error(connection,config_profile):
def check_job_list(connection, config_profile):
cursor = connection.cursor()
- sqlQ1 = 'SELECT idnr FROM configs WHERE id = %s'
+ sqlQ1 = 'SELECT id_nr FROM configs WHERE id = %s'
sqlQ2 = "SELECT job, jobnr FROM jobs_list WHERE status = 'Waiting' AND config_id = %s"
cursor.execute(sqlQ1, (config_profile,))
config_nr = cursor.fetchone()
@@ -649,8 +649,8 @@ def check_job_list(connection, config_profile):
return None
return job
-def update_job_list(status, jobid):
+def update_job_list(connection, status, jobid):
cursor = connection.cursor()
- sqlQ = 'UPDATE jobs_list SET ststus = %s WHERE jobid = %s'
+ sqlQ = 'UPDATE jobs_list SET status = %s WHERE jobnr = %s'
cursor.execute(sqlQ, (status, jobid,))
connection.commit()
diff --git a/gobs/bin/gobs_updatedb b/gobs/pym/updatedb.py
similarity index 95%
rename from gobs/bin/gobs_updatedb
rename to gobs/pym/updatedb.py
index d7bfbee..738179f 100755
--- a/gobs/bin/gobs_updatedb
+++ b/gobs/pym/updatedb.py
@@ -1,10 +1,9 @@
-#!/usr/bin/python
# Copyright 2006-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
""" This code will update the sql backend with needed info for
the Frontend and the Guest deamon. """
-
+from __future__ import print_function
import sys
import os
import multiprocessing
@@ -110,11 +109,9 @@ def update_cpv_db(mysettings):
pool.join()
logging.info("Checking categories, package and ebuilds done")
-def main():
+def update_db_main():
# Main
# Logging
- logging.basicConfig(filename=gobs_settings_dict['gobs_logfile'], \
- format='%(levelname)s: %(asctime)s %(message)s', level=logging.INFO)
logging.info("Update db started.")
# Sync portage and profile/settings
git_pull
@@ -127,6 +124,4 @@ def main():
update_cpv_db(mysettings)
CM.closeAllConnections()
logging.info("Update db ... Done.")
-
-if __name__ == "__main__":
- main()
\ No newline at end of file
+ return True
\ No newline at end of file
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-04-29 13:16 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-04-29 13:16 UTC (permalink / raw
To: gentoo-commits
commit: ef2323376f424e347d5f983b133bd9e114a44c61
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sun Apr 29 13:15:54 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sun Apr 29 13:15:54 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=ef232337
start of the job host deamon part2
---
gobs/bin/gobs_host_jobs | 6 +++---
gobs/pym/pgsql.py | 6 +++---
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/gobs/bin/gobs_host_jobs b/gobs/bin/gobs_host_jobs
index 856041c..e6e5ead 100755
--- a/gobs/bin/gobs_host_jobs
+++ b/gobs/bin/gobs_host_jobs
@@ -28,14 +28,14 @@ def main():
repeat = True
while repeat:
conn = CM.getConnection()
- job jobid = check_job_list(conn, config_profile)
- print("job jobid:" + job + " " + jobid)
+ job = check_job_list(conn, config_profile)
+ print("job jobid:" + job)
if job is None:
time.sleep(60)
CM.putConnection(conn)
continue
else:
- update_job_list("Runing", jobid)
+ update_job_list("Runing", job[1])
repeat = False
CM.putConnection(conn)
time.sleep(60)
diff --git a/gobs/pym/pgsql.py b/gobs/pym/pgsql.py
index 21c204d..6ac08c8 100644
--- a/gobs/pym/pgsql.py
+++ b/gobs/pym/pgsql.py
@@ -644,10 +644,10 @@ def check_job_list(connection, config_profile):
cursor.execute(sqlQ1, (config_profile,))
config_nr = cursor.fetchone()
cursor.execute(sqlQ2, (config_nr,))
- job jobid = cursor.fetchone()
+ job = cursor.fetchone()
if job is None:
- return None None
- return job jobid
+ return None
+ return job
def update_job_list(status, jobid)
cursor = connection.cursor()
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-04-29 13:13 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-04-29 13:13 UTC (permalink / raw
To: gentoo-commits
commit: 9d85fe87e992047e0972eceb82c0a81e819ed6f7
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sun Apr 29 13:09:55 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sun Apr 29 13:09:55 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=9d85fe87
start of the job host deamon
---
gobs/bin/gobs_host_jobs | 45 +++++++++++++++++++++++++++++++++++++++++++++
gobs/pym/pgsql.py | 18 ++++++++++++++++++
2 files changed, 63 insertions(+), 0 deletions(-)
diff --git a/gobs/bin/gobs_host_jobs b/gobs/bin/gobs_host_jobs
new file mode 100755
index 0000000..856041c
--- /dev/null
+++ b/gobs/bin/gobs_host_jobs
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from gobs.init_setup_profile import setup_profile_main
+from gobs.readconf import get_conf_settings
+reader = get_conf_settings()
+gobs_settings_dict=reader.read_gobs_settings_all()
+
+# make a CM
+from gobs.ConnectionManager import connectionManager
+CM=connectionManager(gobs_settings_dict)
+#selectively import the pgsql/mysql querys
+if CM.getName()=='pgsql':
+ from gobs.pgsql import *
+# import logging
+
+def main():
+ # Main
+ config_profile = gobs_settings_dict['gobs_config']
+ # Logging
+ #logging.basicConfig(filename=gobs_settings_dict['gobs_logfile'], \
+ # format='%(levelname)s: %(asctime)s %(message)s', level=logging.INFO)
+ conn = CM.getConnection()
+ CM.putConnection(conn)
+ repeat = True
+ while repeat:
+ conn = CM.getConnection()
+ job jobid = check_job_list(conn, config_profile)
+ print("job jobid:" + job + " " + jobid)
+ if job is None:
+ time.sleep(60)
+ CM.putConnection(conn)
+ continue
+ else:
+ update_job_list("Runing", jobid)
+ repeat = False
+ CM.putConnection(conn)
+ time.sleep(60)
+ connectionManager.closeAllConnections()
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/gobs/pym/pgsql.py b/gobs/pym/pgsql.py
index db70f6c..21c204d 100644
--- a/gobs/pym/pgsql.py
+++ b/gobs/pym/pgsql.py
@@ -636,3 +636,21 @@ def add_fail_querue_dict(connection, fail_querue_dict):
def make_conf_error(connection,config_profile):
pass
+
+def check_job_list(connection, config_profile):
+ cursor = connection.cursor()
+ sqlQ1 = 'SELECT idnr FROM config WHERE id = %s'
+ sqlQ2 = "SELECT job, jobnr FROM jobs_list WHERE status = 'Waiting' AND config_id = %s"
+ cursor.execute(sqlQ1, (config_profile,))
+ config_nr = cursor.fetchone()
+ cursor.execute(sqlQ2, (config_nr,))
+ job jobid = cursor.fetchone()
+ if job is None:
+ return None None
+ return job jobid
+
+def update_job_list(status, jobid)
+ cursor = connection.cursor()
+ sqlQ = 'UPDATE jobs_list SET ststus = %s WHERE jobid = %s'
+ cursor.execute(sqlQ, (status, jobid,))
+ connection.commit()
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-04-28 16:01 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-04-28 16:01 UTC (permalink / raw
To: gentoo-commits
commit: 7a6764b06e02586172b5bb34c3989d490289ddda
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sat Apr 28 16:00:52 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sat Apr 28 16:00:52 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=7a6764b0
add logging and multiprocessing to gobs_setup_profile
---
gobs/bin/gobs_setup_profile | 8 +++
gobs/pym/init_setup_profile.py | 122 +++++++++++++++++++++++----------------
2 files changed, 80 insertions(+), 50 deletions(-)
diff --git a/gobs/bin/gobs_setup_profile b/gobs/bin/gobs_setup_profile
index 902ffe8..9a1dd04 100755
--- a/gobs/bin/gobs_setup_profile
+++ b/gobs/bin/gobs_setup_profile
@@ -3,9 +3,17 @@
# Distributed under the terms of the GNU General Public License v2
from gobs.init_setup_profile import setup_profile_main
+from gobs.readconf import get_conf_settings
+reader = get_conf_settings()
+gobs_settings_dict=reader.read_gobs_settings_all()
+
+import logging
def main():
# Main
+ # Logging
+ logging.basicConfig(filename=gobs_settings_dict['gobs_logfile'], \
+ format='%(levelname)s: %(asctime)s %(message)s', level=logging.INFO)
setup_profile_main(args=None)
if __name__ == "__main__":
diff --git a/gobs/pym/init_setup_profile.py b/gobs/pym/init_setup_profile.py
index e647e1f..a18865e 100644
--- a/gobs/pym/init_setup_profile.py
+++ b/gobs/pym/init_setup_profile.py
@@ -7,6 +7,7 @@
import sys
import os
+import logging
# Get the options from the config file set in gobs.readconf
from gobs.readconf import get_conf_settings
@@ -19,9 +20,42 @@ CM=connectionManager(gobs_settings_dict)
if CM.getName()=='pgsql':
from gobs.pgsql import *
-from gobs.check_setup import check_make_conf, git_pull
+from gobs.check_setup import check_make_conf
+from gobs.sync import git_pull
from gobs.package import gobs_package
import portage
+import multiprocessing
+def add_cpv_query_pool(init_package, config_id_list, package_line)
+ conn=CM.getConnection()
+ # FIXME: remove the check for gobs when in tree
+ if package_line != "dev-python/gobs":
+ build_dict = {}
+ packageDict = {}
+ ebuild_id_list = []
+ # split the cp to categories and package
+ element = package_line.split('/')
+ categories = element[0]
+ package = element[1]
+ logging.info"C %s/%s", categories, package # C = Checking
+ pkgdir = mysettings['PORTDIR'] + "/" + categories + "/" + package
+ config_cpv_listDict = init_package.config_match_ebuild(categories, package, config_id_list)
+ if config_cpv_listDict != {}:
+ cpv = categories + "/" + package + "-" + config_cpv_listDict[config_id]['ebuild_version']
+ attDict = {}
+ attDict['categories'] = categories
+ attDict['package'] = package
+ attDict['ebuild_version_tree'] = config_cpv_listDict[config_id]['ebuild_version']
+ packageDict[cpv] = attDict
+ build_dict['checksum'] = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + config_cpv_listDict[config_id]['ebuild_version'] + ".ebuild")[0]
+ build_dict['package_id'] = have_package_db(conn, categories, package)[0]
+ build_dict['ebuild_version'] = config_cpv_listDict[config_id]['ebuild_version']
+ ebuild_id = get_ebuild_id_db_checksum(conn, build_dict)
+ if ebuild_id is not None:
+ ebuild_id_list.append(ebuild_id)
+ init_package.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
+ logging.info"C %s/%s ... Done.", categories, package
+ CM.putConnection(conn)
+ return
def setup_profile_main(args=None):
"""
@@ -31,56 +65,44 @@ def setup_profile_main(args=None):
conn=CM.getConnection()
if args is None:
args = sys.argv[1:]
- if args[0] == "-add":
- git_pull()
- check_make_conf()
- print "Check configs done"
- # Get default config from the configs table and default_config=1
- config_id = args[1]
- default_config_root = "/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id + "/"
- # Set config_root (PORTAGE_CONFIGROOT) to default_config_root
- mysettings = portage.config(config_root = default_config_root)
- myportdb = portage.portdbapi(mysettings=mysettings)
- init_package = gobs_package(mysettings, myportdb)
- # get the cp list
- package_list_tree = package_list_tree = myportdb.cp_all()
- print "Setting default config to:", config_id
- config_id_list = []
- config_id_list.append(config_id)
- for package_line in sorted(package_list_tree):
- # FIXME: remove the check for gobs when in tree
- if package_line != "dev-python/gobs":
- build_dict = {}
- packageDict = {}
- ebuild_id_list = []
- # split the cp to categories and package
- element = package_line.split('/')
- categories = element[0]
- package = element[1]
- print "C", categories + "/" + package # C = Checking
- pkgdir = mysettings['PORTDIR'] + "/" + categories + "/" + package
- config_cpv_listDict = init_package.config_match_ebuild(categories, package, config_id_list)
- if config_cpv_listDict != {}:
- cpv = categories + "/" + package + "-" + config_cpv_listDict[config_id]['ebuild_version']
- attDict = {}
- attDict['categories'] = categories
- attDict['package'] = package
- attDict['ebuild_version_tree'] = config_cpv_listDict[config_id]['ebuild_version']
- packageDict[cpv] = attDict
- build_dict['checksum'] = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + config_cpv_listDict[config_id]['ebuild_version'] + ".ebuild")[0]
- build_dict['package_id'] = have_package_db(conn, categories, package)[0]
- build_dict['ebuild_version'] = config_cpv_listDict[config_id]['ebuild_version']
- ebuild_id = get_ebuild_id_db_checksum(conn, build_dict)
- if ebuild_id is not None:
- ebuild_id_list.append(ebuild_id)
- init_package.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
+ if args[0] == "-add":
+ config_id = args[1]
+ logging.info("Adding build querys for %s:", config_id)
+ git_pull()
+ check_make_conf()
+ logging.info("Check configs done")
+ # Get default config from the configs table and default_config=1
+ default_config_root = "/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id + "/"
+ # Set config_root (PORTAGE_CONFIGROOT) to default_config_root
+ mysettings = portage.config(config_root = default_config_root)
+ myportdb = portage.portdbapi(mysettings=mysettings)
+ init_package = gobs_package(mysettings, myportdb)
+ # get the cp list
+ package_list_tree = package_list_tree = myportdb.cp_all()
+ logging.info("Setting default config to:", config_id)
+ config_id_list = []
+ config_id_list.append(config_id)
+ # Use all exept 2 cores when multiprocessing
+ pool_cores= multiprocessing.cpu_count()
+ if pool_cores >= 3:
+ use_pool_cores = pool_cores - 2
+ else:
+ use_pool_cores = 1
+ pool = multiprocessing.Pool(processes=use_pool_cores)
+ for package_line in sorted(package_list_tree):
+ pool.apply_async(add_cpv_query_pool, (init_package, config_id_list, package_line,))
+ pool.close()
+ pool.join()
+ logging.info("Adding build querys for: %s ... Done.", config_id)
- if args[0] == "-del":
- config_id = args[1]
- querue_id_list = get_queue_id_list_config(conn, config_id)
- if querue_id_list is not None:
- for querue_id in querue_id_list:
- del_old_queue(conn, querue_id)
+ if args[0] == "-del":
+ config_id = args[1]
+ logging.info("Removeing build querys for %s:", config_id)
+ querue_id_list = get_queue_id_list_config(conn, config_id)
+ if querue_id_list is not None:
+ for querue_id in querue_id_list:
+ del_old_queue(conn, querue_id)
+ logging.info("Removeing build querys for: %s ... Done.", config_id)
CM.putConnection(conn)
\ No newline at end of file
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-04-27 22:22 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-04-27 22:22 UTC (permalink / raw
To: gentoo-commits
commit: 9f2f1da3f752f3c1e418db69191d8b7a447ee0de
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 27 22:22:11 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Fri Apr 27 22:22:11 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=9f2f1da3
fix a typo in gobs_updatedb
---
gobs/bin/gobs_updatedb | 3 ++-
gobs/pym/sync.py | 4 +++-
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/gobs/bin/gobs_updatedb b/gobs/bin/gobs_updatedb
index c59ac19..d162ff2 100755
--- a/gobs/bin/gobs_updatedb
+++ b/gobs/bin/gobs_updatedb
@@ -53,7 +53,7 @@ def init_portage_settings():
default_config_root = "/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id[0] + "/"
# Set config_root (PORTAGE_CONFIGROOT) to default_config_root
mysettings = portage.config(config_root = default_config_root)
- logging.info("Setting default config to: %", config_id[0])
+ logging.info("Setting default config to: %s", config_id[0])
return mysettings
def update_cpv_db_pool(mysettings, package_line):
@@ -123,6 +123,7 @@ def main():
mysettings = init_portage_settings()
init_arch = gobs_arch()
init_arch.update_arch_db()
+ sys.exit()
# Update the cpv db
update_cpv_db(mysettings)
CM.closeAllConnections()
diff --git a/gobs/pym/sync.py b/gobs/pym/sync.py
index aecae32..2262f57 100644
--- a/gobs/pym/sync.py
+++ b/gobs/pym/sync.py
@@ -9,11 +9,13 @@ from _emerge.actions import load_emerge_config, action_sync
from _emerge.main import parse_opts
def git_pull():
+ logging.info("Git pull")
repo = Repo("/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/")
repo_remote = repo.remotes.origin
repo_remote.pull()
master = repo.head.reference
print(master.log())
+ logging.info("Git pull ... Done."
def sync_tree():
settings, trees, mtimedb = load_emerge_config()
@@ -22,7 +24,7 @@ def sync_tree():
tmpcmdline.append("--sync")
tmpcmdline.append("--quiet")
myaction, myopts, myfiles = parse_opts(tmpcmdline)
- logging.info("Eemerge --sync")
+ logging.info("Emerge --sync")
fail_sync = 0
#fail_sync = action_sync(settings, trees, mtimedb, myopts, myaction)
if fail_sync is True:
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2012-04-27 20:59 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2012-04-27 20:59 UTC (permalink / raw
To: gentoo-commits
commit: cfa1e254447b719a2a3fc59d7ecd9c0e933feefb
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 27 20:58:48 2012 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Fri Apr 27 20:58:48 2012 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=cfa1e254
Add logging sync.py
---
gobs/bin/gobs_updatedb | 15 ++++++++-------
gobs/bin/{gobs_updatedb => gobs_updatedb~} | 12 ++++++------
gobs/pym/sync.py | 14 +++++++++++---
3 files changed, 25 insertions(+), 16 deletions(-)
diff --git a/gobs/bin/gobs_updatedb b/gobs/bin/gobs_updatedb
index 4fba322..babaf38 100755
--- a/gobs/bin/gobs_updatedb
+++ b/gobs/bin/gobs_updatedb
@@ -114,14 +114,15 @@ def main():
# Main
# Logging
logging.basicConfig(filename=gobs_settings_dict['gobs_logfile'], level=logging.INFO)
- # Init settings for the default config
+ # Sync portage and profile/settings
git_pull
- if sync_tree():
- mysettings = init_portage_settings()
- init_arch = gobs_arch()
- init_arch.update_arch_db()
- # Update the cpv db
- update_cpv_db(mysettings)
+ sync_tree()
+ # Init settings for the default config
+ mysettings = init_portage_settings()
+ init_arch = gobs_arch()
+ init_arch.update_arch_db()
+ # Update the cpv db
+ update_cpv_db(mysettings)
CM.closeAllConnections()
if __name__ == "__main__":
diff --git a/gobs/bin/gobs_updatedb b/gobs/bin/gobs_updatedb~
similarity index 96%
copy from gobs/bin/gobs_updatedb
copy to gobs/bin/gobs_updatedb~
index 4fba322..4dce5ce 100755
--- a/gobs/bin/gobs_updatedb
+++ b/gobs/bin/gobs_updatedb~
@@ -116,12 +116,12 @@ def main():
logging.basicConfig(filename=gobs_settings_dict['gobs_logfile'], level=logging.INFO)
# Init settings for the default config
git_pull
- if sync_tree():
- mysettings = init_portage_settings()
- init_arch = gobs_arch()
- init_arch.update_arch_db()
- # Update the cpv db
- update_cpv_db(mysettings)
+ sync_tree()
+ mysettings = init_portage_settings()
+ init_arch = gobs_arch()
+ init_arch.update_arch_db()
+ # Update the cpv db
+ update_cpv_db(mysettings)
CM.closeAllConnections()
if __name__ == "__main__":
diff --git a/gobs/pym/sync.py b/gobs/pym/sync.py
index cfaebfe..22ff5e7 100644
--- a/gobs/pym/sync.py
+++ b/gobs/pym/sync.py
@@ -2,6 +2,8 @@ from __future__ import print_function
import portage
import os
import errno
+import logging
+import sys
from git import *
from _emerge.actions import load_emerge_config, action_sync
from _emerge.main import parse_opts
@@ -20,6 +22,12 @@ def sync_tree():
tmpcmdline.append("--sync")
tmpcmdline.append("--quiet")
myaction, myopts, myfiles = parse_opts(tmpcmdline)
- fail_sync = action_sync(settings, trees, mtimedb, myopts, myaction)
- print("fail_sync", fail_sync)
- return fail_sync
\ No newline at end of file
+ logging.info("Eemerge --sync")
+ fail_sync = 1
+ #fail_sync = action_sync(settings, trees, mtimedb, myopts, myaction)
+ if fail_sync is True:
+ logging.warning("Emerge --sync fail!")
+ sys.exit()
+ else:
+ logging.info("Emerge --sync ... Done."
+ sys.exit()
\ No newline at end of file
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2011-11-01 21:14 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2011-11-01 21:14 UTC (permalink / raw
To: gentoo-commits
commit: df9aee3cadb80863146a5a352bef9885268658d3
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Tue Nov 1 21:13:53 2011 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Tue Nov 1 21:13:53 2011 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=df9aee3c
fix some CM.putConnection(conn)
---
gobs/bin/gobs_portage_hooks | 75 -------------------------------------------
gobs/pym/package.py | 1 +
2 files changed, 1 insertions(+), 75 deletions(-)
diff --git a/gobs/bin/gobs_portage_hooks b/gobs/bin/gobs_portage_hooks
deleted file mode 100755
index bf574dc..0000000
--- a/gobs/bin/gobs_portage_hooks
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/python
-from __future__ import print_function
-import os
-import sys
-# Get the options from the config file set in gobs.readconf
-from gobs.readconf import get_conf_settings
-reader=get_conf_settings()
-gobs_settings_dict=reader.read_gobs_settings_all()
-# make a CM
-from gobs.ConnectionManager import connectionManager
-CM=connectionManager(gobs_settings_dict)
-#selectively import the pgsql/mysql querys
-if CM.getName()=='pgsql':
- from gobs.pgsql import *
-
-from gobs.package import gobs_package
-from gobs.build_log import gobs_buildlog
-from gobs.flags import gobs_use_flags
-from portage.util import writemsg, writemsg_level, writemsg_stdout
-import portage
-
-def get_build_dict_db(mysettings, config_profile, gobs_settings_dict):
- conn=CM.getConnection()
- myportdb = portage.portdbapi(mysettings=mysettings)
- categories = os.environ['CATEGORY']
- package = os.environ['PN']
- ebuild_version = os.environ['PVR']
- cpv = categories + "/" + package + "-" + ebuild_version
- init_package = gobs_package(mysettings, myportdb)
- package_id = have_package_db(conn, categories, package)
- # print("package_id %s" % package_id, file=sys.stdout)
- build_dict = {}
- mybuild_dict = {}
- build_dict['ebuild_version'] = ebuild_version
- build_dict['package_id'] = package_id
- build_dict['cpv'] = cpv
- build_dict['categories'] = categories
- build_dict['package'] = package
- build_dict['config_profile'] = config_profile
- init_useflags = gobs_use_flags(mysettings, myportdb, cpv)
- iuse_flags_list, final_use_list = init_useflags.get_flags_looked()
- #print 'final_use_list', final_use_list
- if final_use_list != []:
- build_dict['build_useflags'] = final_use_list
- else:
- build_dict['build_useflags'] = None
- #print "build_dict['build_useflags']", build_dict['build_useflags']
- pkgdir = os.path.join(mysettings['PORTDIR'], categories + "/" + package)
- ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir+ "/" + package + "-" + ebuild_version + ".ebuild")[0]
- build_dict['checksum'] = ebuild_version_checksum_tree
- ebuild_id = get_ebuild_id_db_checksum(conn, build_dict)
- if ebuild_id is None:
- #print 'have any ebuild', get_ebuild_checksum(conn, package_id, ebuild_version)
- init_package.update_ebuild_db(build_dict)
- ebuild_id = get_ebuild_id_db_checksum(conn, build_dict)
- build_dict['ebuild_id'] = ebuild_id
- queue_id = check_revision(conn, build_dict)
- if queue_id is None:
- build_dict['queue_id'] = None
- else:
- build_dict['queue_id'] = queue_id
- return build_dict
-
-def main():
- # Main
- config_profile = gobs_settings_dict['gobs_config']
- #we provide the main_loop with the ConnectionManager so we can hand out connections from within the loop
- mysettings = portage.settings
- build_dict = get_build_dict_db( mysettings, config_profile, gobs_settings_dict)
- init_buildlog = gobs_buildlog(mysettings, build_dict)
- init_buildlog.add_buildlog_main()
- #connectionManager.closeAllConnections()
-
-if __name__ == "__main__":
- main()
\ No newline at end of file
diff --git a/gobs/pym/package.py b/gobs/pym/package.py
index 4f1f48f..1f592ee 100644
--- a/gobs/pym/package.py
+++ b/gobs/pym/package.py
@@ -182,6 +182,7 @@ class gobs_package(object):
# Get the ebuild list for cp
ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
if ebuild_list_tree == []:
+ CM.putConnection(conn)
return None
config_list = get_config_list(conn)
config_cpv_listDict = self.config_match_ebuild(categories, package, config_list)
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2011-10-29 0:14 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2011-10-29 0:14 UTC (permalink / raw
To: gentoo-commits
commit: b728ae2804796599b7f18056a9e135a4c809fc25
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sat Oct 29 00:12:57 2011 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sat Oct 29 00:12:57 2011 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=b728ae28
Add gobs_setup_profile
---
gobs/bin/gobs_setup_profile | 12 ++++++
gobs/pym/build_log.py | 10 +++--
gobs/pym/build_queru.py | 2 +-
gobs/pym/check_setup.py | 7 ++++
gobs/pym/init_setup_profile.py | 79 ++++++++++++++++++++++++++++++++++++++++
gobs/pym/package.py | 11 +-----
gobs/pym/pgsql.py | 7 ++++
7 files changed, 114 insertions(+), 14 deletions(-)
diff --git a/gobs/bin/gobs_setup_profile b/gobs/bin/gobs_setup_profile
new file mode 100755
index 0000000..902ffe8
--- /dev/null
+++ b/gobs/bin/gobs_setup_profile
@@ -0,0 +1,12 @@
+#!/usr/bin/python
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from gobs.init_setup_profile import setup_profile_main
+
+def main():
+ # Main
+ setup_profile_main(args=None)
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/gobs/pym/build_log.py b/gobs/pym/build_log.py
index 354e8d3..80a186a 100644
--- a/gobs/pym/build_log.py
+++ b/gobs/pym/build_log.py
@@ -562,9 +562,11 @@ class gobs_buildlog(object):
os.chmod(emerge_info_logfilename, 0664)
print("Package: ", pkg.cpv, "logged to db.")
else:
- try:
- os.remove(settings.get("PORTAGE_LOG_FILE"))
- except:
- pass
+ # FIXME Remove the log some way so
+ # mergetask._locate_failure_log(x) works in action_build()
+ #try:
+ # os.remove(settings.get("PORTAGE_LOG_FILE"))
+ #except:
+ # pass
print("Package: ", pkg.cpv, "NOT logged to db.")
CM.putConnection(conn)
diff --git a/gobs/pym/build_queru.py b/gobs/pym/build_queru.py
index 9794bbd..3f0bde8 100644
--- a/gobs/pym/build_queru.py
+++ b/gobs/pym/build_queru.py
@@ -670,7 +670,7 @@ class queruaction(object):
build_fail = self.emerge_main(argscmd, build_dict)
# Run depclean
print('build_fail', build_fail)
- if not "clean" in build_dict['post_message']:
+ if not "noclean" in build_dict['post_message']:
depclean_fail = main_depclean()
try:
os.remove("/etc/portage/package.use/gobs.use")
diff --git a/gobs/pym/check_setup.py b/gobs/pym/check_setup.py
index cc9b55b..b5f612b 100644
--- a/gobs/pym/check_setup.py
+++ b/gobs/pym/check_setup.py
@@ -14,6 +14,13 @@ CM=connectionManager(gobs_settings_dict)
if CM.getName()=='pgsql':
from gobs.pgsql import *
+def git_pull():
+ repo = Repo("/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/")
+ repo_remote = repo.remotes.origin
+ repo_remote.pull()
+ master = repo.head.reference
+ print(master.log())
+
def check_make_conf():
# FIXME: mark any config updating true in the db when updating the configs
# Get the config list
diff --git a/gobs/pym/init_setup_profile.py b/gobs/pym/init_setup_profile.py
new file mode 100644
index 0000000..6f640d9
--- /dev/null
+++ b/gobs/pym/init_setup_profile.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+""" This code will update the sql backend with needed info for
+ the Frontend and the Guest deamon. """
+
+import sys
+import os
+
+# Get the options from the config file set in gobs.readconf
+from gobs.readconf import get_conf_settings
+reader=get_conf_settings()
+gobs_settings_dict=reader.read_gobs_settings_all()
+# make a CM
+from gobs.ConnectionManager import connectionManager
+CM=connectionManager(gobs_settings_dict)
+#selectively import the pgsql/mysql querys
+if CM.getName()=='pgsql':
+ from gobs.pgsql import *
+
+from gobs.check_setup import check_make_conf, git_pull
+from gobs.arch import gobs_arch
+from gobs.package import gobs_package
+from gobs.categories import gobs_categories
+from gobs.old_cpv import gobs_old_cpv
+from gobs.categories import gobs_categories
+import portage
+
+def setup_profile_main(args=None):
+ """
+ @param args: command arguments (default: sys.argv[1:])
+ @type args: list
+ """
+ conn=CM.getConnection()
+ if args is None:
+ args = sys.argv[1:]
+ if args[0] is "-add":
+ git_pull()
+ check_make_conf()
+ print "Check configs done"
+ # Get default config from the configs table and default_config=1
+ config_id = args[1]
+ default_config_root = "/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id + "/"
+ # Set config_root (PORTAGE_CONFIGROOT) to default_config_root
+ mysettings = portage.config(config_root = default_config_root)
+ myportdb = portage.portdbapi(mysettings=mysettings)
+ init_package = gobs_package
+ # get the cp list
+ package_list_tree = package_list_tree = myportdb.cp_all()(mysettings, myportdb)
+ print "Setting default config to:", config_id
+ for package_line in sorted(package_list_tree):
+ build_dict = {}
+ packageDict = {}
+ ebuild_id_list = []
+ # split the cp to categories and package
+ element = package_line.split('/')
+ categories = element[0]
+ package = element[1]
+ print "C", categories + "/" + package # C = Checking
+ pkgdir = mysettings['PORTDIR'] + "/" + categories + "/" + package
+ config_cpv_listDict = gobs_package.config_match_cp(categories, package, config_id)
+ packageDict['ebuild_version_tree'] = config_cpv_listDict['ebuild_version']
+ build_dict['checksum'] = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + config_cpv_listDict['ebuild_version'] + ".ebuild")[0]
+ build_dict['package_id'] = have_package_db(categories, package)
+ build_dict['ebuild_version'] = config_cpv_listDict['ebuild_version']
+ ebuild_id = get_ebuild_id_db_checksum(connection, build_dict)
+ if ebuild_id is not None:
+ ebuild_id_list.append(ebuild_id)
+ gobs_package.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
+ if args[0] is "-del":
+ config_id = args[1]
+ querue_id_list = get_queue_id_list_config(conn, config_id)
+ if querue_id_list is not None:
+ for querue_id in querue_id_list:
+ del_old_queue(conn, queue_id)
+ CM.putConnection(conn)
+
+
\ No newline at end of file
diff --git a/gobs/pym/package.py b/gobs/pym/package.py
index 884ce7c..bbd5998 100644
--- a/gobs/pym/package.py
+++ b/gobs/pym/package.py
@@ -68,20 +68,16 @@ class gobs_package(object):
return config_cpv_listDict
def config_match_cp(self, categories, package, config_id):
- conn=CM.getConnection()
config_cpv_listDict ={}
- # Change config/setup
- mysettings_setup = self.change_config(config_id)
- myportdb_setup = portage.portdbapi(mysettings=mysettings_setup)
# Get latest cpv from portage with the config
- latest_ebuild = myportdb_setup.xmatch('bestmatch-visible', categories + "/" + package)
+ latest_ebuild = self._myportdb_setup.xmatch('bestmatch-visible', categories + "/" + package)
latest_ebuild_version = unicode("")
# Check if could get cpv from portage
if latest_ebuild != "":
# Get the version of cpv
latest_ebuild_version = portage.versions.cpv_getversion(latest_ebuild)
# Get the iuse and use flags for that config/setup
- init_useflags = gobs_use_flags(mysettings_setup, myportdb_setup, latest_ebuild)
+ init_useflags = gobs_use_flags(self._mysettings, self._myportdb, latest_ebuild)
iuse_flags_list, final_use_list = init_useflags.get_flags()
iuse_flags_list2 = []
for iuse_line in iuse_flags_list:
@@ -95,9 +91,6 @@ class gobs_package(object):
attDict['categories'] = categories
config_cpv_listDict[config_id] = attDict
# Clean some cache
- myportdb_setup.close_caches()
- portage.portdbapi.portdbapi_instances.remove(myportdb_setup)
- CM.putConnection(conn)
return config_cpv_listDict
def get_ebuild_metadata(self, ebuild_line):
diff --git a/gobs/pym/pgsql.py b/gobs/pym/pgsql.py
index 9e7d3f5..b241d1b 100644
--- a/gobs/pym/pgsql.py
+++ b/gobs/pym/pgsql.py
@@ -322,6 +322,13 @@ def have_package_buildqueue(connection, ebuild_id, config_id):
entries = cursor.fetchone()
return entries
+def get queue_id_list_config(connection, config_id)
+ cursor = connection.cursor()
+ sqlQ = 'SELECT queue_id FROM buildqueue WHERE config_id = %s'
+ cursor.execute(sqlQ, (config_id,))
+ entries = cursor.fetchoall()
+ return entries
+
def add_new_package_buildqueue(connection, ebuild_id, config_id, iuse_flags_list, use_enable, message):
cursor = connection.cursor()
sqlQ="SELECT insert_buildqueue( %s, %s, %s, %s, %s )"
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2011-09-28 10:53 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2011-09-28 10:53 UTC (permalink / raw
To: gentoo-commits
commit: 3b8ee5e9c27cadccaf4f4571d67bc06a6e401fc7
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 28 10:53:41 2011 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Wed Sep 28 10:53:41 2011 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=3b8ee5e9
add CM.putConnection(conn)
---
gobs/bin/gobs_buildquerys | 1 -
gobs/pym/build_queru.py | 6 ++++++
gobs/pym/check_setup.py | 7 +++++--
3 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/gobs/bin/gobs_buildquerys b/gobs/bin/gobs_buildquerys
index 81c0f26..c2e1923 100755
--- a/gobs/bin/gobs_buildquerys
+++ b/gobs/bin/gobs_buildquerys
@@ -28,7 +28,6 @@ def main_loop(config_profile):
continue # retunr to the start of the function
else:
init_queru.procces_qureru()
- continue # retunr to the start of the function
def main():
# Main
diff --git a/gobs/pym/build_queru.py b/gobs/pym/build_queru.py
index e1de617..88eed9f 100644
--- a/gobs/pym/build_queru.py
+++ b/gobs/pym/build_queru.py
@@ -105,6 +105,7 @@ class queruaction(object):
else:
build_log_dict['logfilename'] = ""
move_queru_buildlog(conn, build_dict['queue_id'], build_error, summary_error, build_log_dict)
+ CM.putConnection(conn)
def action_build(self, settings, trees, mtimedb, myopts, myaction, myfiles, spinner, build_dict):
@@ -692,17 +693,22 @@ class queruaction(object):
settings, trees, mtimedb = load_emerge_config()
portdb = trees[settings["ROOT"]]["porttree"].dbapi
if build_dict is None:
+ CM.putConnection(conn)
return
print("build_dict", build_dict)
if not build_dict['ebuild_id'] is None and build_dict['checksum'] is not None:
buildqueru_cpv_dict = self.make_build_list(build_dict, settings, portdb)
print('buildqueru_cpv_dict', buildqueru_cpv_dict)
if buildqueru_cpv_dict is None:
+ CM.putConnection(conn)
return
fail_build_procces = self.build_procces(buildqueru_cpv_dict, build_dict, settings, portdb)
+ CM.putConnection(conn)
return
if not build_dict['post_message'] is [] and build_dict['ebuild_id'] is None:
+ CM.putConnection(conn)
return
if not build_dict['ebuild_id'] is None and build_dict['checksum'] is None:
del_old_queue(conn, build_dict['queue_id'])
+ CM.putConnection(conn)
return
diff --git a/gobs/pym/check_setup.py b/gobs/pym/check_setup.py
index 58948ec..cc9b55b 100644
--- a/gobs/pym/check_setup.py
+++ b/gobs/pym/check_setup.py
@@ -51,6 +51,7 @@ def check_make_conf():
attDict['make_conf_checksum_tree'] = make_conf_checksum_tree
configsDict[config_id]=attDict
update__make_conf(conn,configsDict)
+ CM.putConnection(conn)
print("Updated configurtions")
def check_make_conf_guest(config_profile):
@@ -59,11 +60,13 @@ def check_make_conf_guest(config_profile):
make_conf_checksum_db = get_profile_checksum(conn,config_profile)
print('make_conf_checksum_db', make_conf_checksum_db)
if make_conf_checksum_db is None:
+ CM.putConnection(conn)
return "1"
make_conf_file = "/etc/portage/make.conf"
make_conf_checksum_tree = portage.checksum.sha256hash(make_conf_file)[0]
print('make_conf_checksum_tree', make_conf_checksum_tree)
if make_conf_checksum_tree != make_conf_checksum_db[0]:
+ CM.putConnection(conn)
return "2"
# Check if we can open the file and close it
# Check if we have some error in the file (portage.util.getconfig)
@@ -76,11 +79,12 @@ def check_make_conf_guest(config_profile):
mysettings.validate()
# With errors we return false
except Exception as e:
+ CM.putConnection(conn)
return "3"
+ CM.putConnection(conn)
return "4"
def check_configure_guest(config_profile):
- conn=CM.getConnection()
pass_make_conf = check_make_conf_guest(config_profile)
print(pass_make_conf)
if pass_make_conf == "1":
@@ -91,7 +95,6 @@ def check_configure_guest(config_profile):
return False
elif pass_make_conf == "3":
# set the config as no working
- make_conf_error(conn, config_profile)
return False
elif pass_make_conf == "4":
# make.conf check OK
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2011-09-28 0:33 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2011-09-28 0:33 UTC (permalink / raw
To: gentoo-commits
commit: 12cec5b4dca202068d165cd72f760534a437e888
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 28 00:32:16 2011 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Wed Sep 28 00:32:16 2011 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=12cec5b4
Fix the definitions of settings and portdb
---
gobs/bin/gobs_buildquerys | 2 +-
gobs/pym/build_queru.py | 24 +++++++++++++-----------
2 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/gobs/bin/gobs_buildquerys b/gobs/bin/gobs_buildquerys
index fc2506d..81c0f26 100755
--- a/gobs/bin/gobs_buildquerys
+++ b/gobs/bin/gobs_buildquerys
@@ -28,7 +28,7 @@ def main_loop(config_profile):
continue # retunr to the start of the function
else:
init_queru.procces_qureru()
- sys.exit()
+ continue # retunr to the start of the function
def main():
# Main
diff --git a/gobs/pym/build_queru.py b/gobs/pym/build_queru.py
index 65743ad..e1de617 100644
--- a/gobs/pym/build_queru.py
+++ b/gobs/pym/build_queru.py
@@ -53,7 +53,7 @@ bad = create_color_func("BAD")
class queruaction(object):
def __init__(self, config_profile):
- self._mysettings = portage.settings
+ self._mysettings = portage.config(config_root = "/")
self._config_profile = config_profile
self._myportdb = portage.portdb
@@ -611,19 +611,19 @@ class queruaction(object):
return retval
- def make_build_list(self, build_dict):
+ def make_build_list(self, build_dict, settings, portdb):
cpv = build_dict['category']+'/'+build_dict['package']+'-'+build_dict['ebuild_version']
- pkgdir = os.path.join(portage.settings['PORTDIR'], build_dict['category'] + "/" + build_dict['package'])
- init_manifest = gobs_manifest(portage.settings, pkgdir)
+ pkgdir = os.path.join(settings['PORTDIR'], build_dict['category'] + "/" + build_dict['package'])
+ init_manifest = gobs_manifest(settings, pkgdir)
try:
ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir+ "/" + build_dict['package'] + "-" + build_dict['ebuild_version'] + ".ebuild")[0]
except:
ebuild_version_checksum_tree = None
if ebuild_version_checksum_tree == build_dict['checksum']:
- init_flags = gobs_use_flags(portage.settings, portage.portdb, cpv)
+ init_flags = gobs_use_flags(settings, portdb, cpv)
build_use_flags_list = init_flags.comper_useflags(build_dict)
print("build_use_flags_list", build_use_flags_list)
- manifest_error = init_manifest.check_file_in_manifest(portage.portdb, cpv, build_dict, build_use_flags_list)
+ manifest_error = init_manifest.check_file_in_manifest(portdb, cpv, build_dict, build_use_flags_list)
if manifest_error is None:
build_dict['check_fail'] = False
build_use_flags_dict = {}
@@ -640,13 +640,13 @@ class queruaction(object):
build_dict['type_fail'] = "Wrong ebuild checksum"
build_dict['check_fail'] = True
if build_dict['check_fail'] is True:
- self.log_fail_queru(build_dict, portage.settings)
+ self.log_fail_queru(build_dict, settings)
return None
return build_cpv_dict
- def build_procces(self, buildqueru_cpv_dict, build_dict):
+ def build_procces(self, buildqueru_cpv_dict, build_dict, settings, portdb):
build_cpv_list = []
- abs_user_config = os.path.join(portage.settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+ abs_user_config = os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
print('abs_user_config', abs_user_config)
for k, v in buildqueru_cpv_dict.iteritems():
build_use_flags_list = []
@@ -689,15 +689,17 @@ class queruaction(object):
conn=CM.getConnection()
build_dict = {}
build_dict = get_packages_to_build(conn, self._config_profile)
+ settings, trees, mtimedb = load_emerge_config()
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
if build_dict is None:
return
print("build_dict", build_dict)
if not build_dict['ebuild_id'] is None and build_dict['checksum'] is not None:
- buildqueru_cpv_dict = self.make_build_list(build_dict)
+ buildqueru_cpv_dict = self.make_build_list(build_dict, settings, portdb)
print('buildqueru_cpv_dict', buildqueru_cpv_dict)
if buildqueru_cpv_dict is None:
return
- fail_build_procces = self.build_procces(buildqueru_cpv_dict, build_dict)
+ fail_build_procces = self.build_procces(buildqueru_cpv_dict, build_dict, settings, portdb)
return
if not build_dict['post_message'] is [] and build_dict['ebuild_id'] is None:
return
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2011-09-26 23:25 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2011-09-26 23:25 UTC (permalink / raw
To: gentoo-commits
commit: 93a89cfd8ee53292632f081909cc0440f5b6052b
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Mon Sep 26 23:23:49 2011 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Mon Sep 26 23:23:49 2011 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=93a89cfd
Add copies of action_build, post_emerge and emerge_main, and fix some code
---
gobs/bin/gobs_buildquerys | 12 +-
gobs/pym/build_queru.py | 701 ++++++++++++++++++++++++++++++++++++++++-----
gobs/pym/check_setup.py | 21 +-
gobs/pym/flags.py | 37 ---
gobs/pym/pgsql.py | 39 ++--
5 files changed, 664 insertions(+), 146 deletions(-)
diff --git a/gobs/bin/gobs_buildquerys b/gobs/bin/gobs_buildquerys
index 7b8121b..fc2506d 100755
--- a/gobs/bin/gobs_buildquerys
+++ b/gobs/bin/gobs_buildquerys
@@ -17,26 +17,24 @@ import portage
import sys
import os
-def main_loop(CM, config_profile):
+def main_loop(config_profile):
repeat = True
#get a connection from the pool
- conn=CM.getConnection()
init_queru = queruaction(config_profile)
- fail_querue_dict = {}
while repeat:
- if check_configure_guest(conn, config_profile) is not True:
+ #FIXME do a git reop check
+ if check_configure_guest( config_profile) is not True:
# time.sleep(60)
continue # retunr to the start of the function
else:
- fail_querue_list = init_queru.procces_qureru(fail_querue_dict)
+ init_queru.procces_qureru()
sys.exit()
- CM.putConnection(conn)
def main():
# Main
config_profile = gobs_settings_dict['gobs_config']
#we provide the main_loop with the ConnectionManager so we can hand out connections from within the loop
- main_loop(CM, config_profile)
+ main_loop(config_profile)
connectionManager.closeAllConnections()
if __name__ == "__main__":
diff --git a/gobs/pym/build_queru.py b/gobs/pym/build_queru.py
index 0e481da..7f5e3c1 100644
--- a/gobs/pym/build_queru.py
+++ b/gobs/pym/build_queru.py
@@ -12,10 +12,43 @@ if CM.getName()=='pgsql':
import portage
import os
+import re
+import sys
+import signal
from gobs.manifest import gobs_manifest
from gobs.depclean import main_depclean
from gobs.flags import gobs_use_flags
-from _emerge.main import emerge_main
+from portage import _encodings
+from portage import _unicode_decode
+from portage.versions import cpv_getkey
+import portage.xpak, errno, re, time
+from _emerge.main import parse_opts, profile_check, apply_priorities, repo_name_duplicate_check, \
+ config_protect_check, check_procfs, ensure_required_sets, expand_set_arguments, \
+ validate_ebuild_environment, chk_updated_info_files, display_preserved_libs
+from _emerge.actions import action_config, action_sync, action_metadata, \
+ action_regen, action_search, action_uninstall, \
+ adjust_configs, chk_updated_cfg_files, display_missing_pkg_set, \
+ display_news_notification, getportageversion, load_emerge_config
+from portage.util import cmp_sort_key, writemsg, \
+ writemsg_level, writemsg_stdout, shlex_split
+from _emerge.sync.old_tree_timestamp import old_tree_timestamp_warn
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.Scheduler import Scheduler
+from _emerge.clear_caches import clear_caches
+from _emerge.unmerge import unmerge
+from _emerge.emergelog import emergelog
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
+from portage._global_updates import _global_updates
+from portage._sets import SETPREFIX
+from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
+from _emerge.is_valid_package_atom import is_valid_package_atom
+from _emerge.stdout_spinner import stdout_spinner
+from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
+ red, yellow, colorize, xtermTitle, xtermTitleReset
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
class queruaction(object):
@@ -24,40 +57,561 @@ class queruaction(object):
self._config_profile = config_profile
self._myportdb = portage.portdb
- def log_fail_queru(self, build_dict, fail_querue_dict):
- fail_times = 0
- if fail_querue_dict == {}:
- attDict = {}
- attDict[build_dict['type_fail']] = 1
- attDict['build_info'] = build_dict
- fail_querue_dict[build_dict['querue_id']] = attDict
- return fail_querue_dict
+ def log_fail_queru(self, build_dict, settings):
+ conn=CM.getConnection()
+ print('build_dict', build_dict)
+ fail_querue_dict = get_fail_querue_dict(conn, build_dict)
+ print('fail_querue_dict', fail_querue_dict)
+ if fail_querue_dict is None:
+ fail_querue_dict = {}
+ fail_querue_dict['querue_id'] = build_dict['queue_id']
+ fail_querue_dict['fail_type'] = build_dict['type_fail']
+ fail_querue_dict['fail_times'] = 1
+ print('fail_querue_dict', fail_querue_dict)
+ add_fail_querue_dict(conn, fail_querue_dict)
else:
- # FIXME:If is 5 remove fail_querue_dict[build_dict['querue_id'] from
- # fail_querue_dict and add log to db.
- if not fail_querue_dict[build_dict['querue_id']] is None:
- if fail_querue_dict[build_dict['querue_id']][build_dict['type_fail']] is None:
- fail_querue_dict[build_dict['querue_id']][build_dict['type_fail']] = 1
- return fail_querue_dict
+ if fail_querue_dict['fail_times'][0] < 6:
+ fail_querue_dict['fail_times'] = fail_querue_dict['fail_times'][0] + 1
+ fail_querue_dict['querue_id'] = build_dict['queue_id']
+ fail_querue_dict['fail_type'] = build_dict['type_fail']
+ update_fail_times(conn, fail_querue_dict)
+ return
+ else:
+ build_log_dict = {}
+ error_log_list = []
+ qa_error_list = []
+ repoman_error_list = []
+ sum_build_log_list = []
+ sum_build_log_list.append("fail")
+ error_log_list.append(build_dict['type_fail'])
+ build_log_dict['repoman_error_list'] = repoman_error_list
+ build_log_dict['qa_error_list'] = qa_error_list
+ build_log_dict['summary_error_list'] = sum_build_log_list
+ if build_dict['type_fail'] == 'merge fail':
+ error_log_list = []
+ for k, v in build_dict['failed_merge'].iteritems():
+ error_log_list.append(v['fail_msg'])
+ build_log_dict['error_log_list'] = error_log_list
+ build_error = ""
+ if error_log_list != []:
+ for log_line in error_log_list:
+ build_error = build_error + log_line
+ summary_error = ""
+ if sum_build_log_list != []:
+ for sum_log_line in sum_build_log_list:
+ summary_error = summary_error + " " + sum_log_line
+ if settings.get("PORTAGE_LOG_FILE") is not None:
+ build_log_dict['logfilename'] = re.sub("\/var\/log\/portage\/", "", settings.get("PORTAGE_LOG_FILE"))
else:
- fail_times = fail_querue_dict[build_dict['querue_id']][build_dict['type_fail']]
- fail_times = fail_times + 1
- if not fail_times is 5:
- fail_querue_dict[build_dict['querue_id']][build_dict['type_fail']] = fail_times
- return fail_querue_dict
- else:
- # FIXME:If is 5 remove fail_querue_dict[build_dict['querue_id']] from
- # fail_querue_dict and add log to db.
- return fail_querue_dict
+ build_log_dict['logfilename'] = ""
+ move_queru_buildlog(conn, build_dict['queue_id'], build_error, summary_error, build_log_dict)
+
+ def action_build(self, settings, trees, mtimedb, myopts, myaction, myfiles, spinner, build_dict):
+
+ if '--usepkgonly' not in myopts:
+ old_tree_timestamp_warn(settings['PORTDIR'], settings)
+
+ # It's best for config updates in /etc/portage to be processed
+ # before we get here, so warn if they're not (bug #267103).
+ chk_updated_cfg_files(settings['EROOT'], ['/etc/portage'])
+
+ resume = False
+
+ ldpath_mtimes = mtimedb["ldpath"]
+ favorites=[]
+ buildpkgonly = "--buildpkgonly" in myopts
+ pretend = "--pretend" in myopts
+ fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
+ ask = "--ask" in myopts
+ enter_invalid = '--ask-enter-invalid' in myopts
+ nodeps = "--nodeps" in myopts
+ oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
+ tree = "--tree" in myopts
+ if nodeps and tree:
+ tree = False
+ del myopts["--tree"]
+ portage.writemsg(colorize("WARN", " * ") + \
+ "--tree is broken with --nodeps. Disabling...\n")
+ debug = "--debug" in myopts
+ verbose = "--verbose" in myopts
+ quiet = "--quiet" in myopts
+
+ myparams = create_depgraph_params(myopts, myaction)
+ try:
+ success, mydepgraph, favorites = backtrack_depgraph(
+ settings, trees, myopts, myparams, myaction, myfiles, spinner)
+ except portage.exception.PackageSetNotFound as e:
+ root_config = trees[settings["ROOT"]]["root_config"]
+ display_missing_pkg_set(root_config, e.value)
+ build_dict['type_fail'] = "depgraph fail"
+ build_dict['check_fail'] = True
+ use_changes = None
+ if mydepgraph._dynamic_config._needed_use_config_changes:
+ use_changes = {}
+ for pkg, needed_use_config_changes in mydepgraph._dynamic_config._needed_use_config_changes.items():
+ new_use, changes = needed_use_config_changes
+ use_changes[pkg.cpv] = changes
+ iteritems_packages = {}
+ for k, v in use_changes.iteritems():
+ k_package = portage.versions.cpv_getkey(k)
+ iteritems_packages[ k_package ] = v
+ print('iteritems_packages', iteritems_packages)
+ build_cpv_dict = iteritems_packages
+ if use_changes is not None:
+ for k, v in build_cpv_dict.iteritems():
+ build_use_flags_list = []
+ for x, y in v.iteritems():
+ if y is True:
+ build_use_flags_list.append(x)
+ if y is False:
+ build_use_flags_list.append("-" + x)
+ print(k, build_use_flags_list)
+ if not build_use_flags_list == []:
+ build_use_flags = ""
+ for flags in build_use_flags_list:
+ build_use_flags = build_use_flags + flags + ' '
+ filetext = k + ' ' + build_use_flags
+ print('filetext', filetext)
+ with open("/etc/portage/package.use/gobs.use", "a") as f:
+ f.write(filetext)
+ f.write('\n')
+
+ settings, trees, mtimedb = load_emerge_config()
+ myparams = create_depgraph_params(myopts, myaction)
+ try:
+ success, mydepgraph, favorites = backtrack_depgraph(
+ settings, trees, myopts, myparams, myaction, myfiles, spinner)
+ except portage.exception.PackageSetNotFound as e:
+ root_config = trees[settings["ROOT"]]["root_config"]
+ display_missing_pkg_set(root_config, e.value)
+ build_dict['type_fail'] = "depgraph fail"
+ build_dict['check_fail'] = True
+ if not success:
+ mydepgraph.display_problems()
+ build_dict['type_fail'] = "depgraph fail"
+ build_dict['check_fail'] = True
+
+ if build_dict['check_fail'] is True:
+ self.log_fail_queru(build_dict, settings)
+ return 1, settings, trees, mtimedb
+
+ if "--buildpkgonly" in myopts:
+ graph_copy = mydepgraph._dynamic_config.digraph.copy()
+ removed_nodes = set()
+ for node in graph_copy:
+ if not isinstance(node, Package) or \
+ node.operation == "nomerge":
+ removed_nodes.add(node)
+ graph_copy.difference_update(removed_nodes)
+ if not graph_copy.hasallzeros(ignore_priority = \
+ DepPrioritySatisfiedRange.ignore_medium):
+ print("\n!!! --buildpkgonly requires all dependencies to be merged.")
+ print("!!! Cannot merge requested packages. Merge deps and try again.\n")
+ return 1, settings, trees, mtimedb
+
+ mydepgraph.saveNomergeFavorites()
+
+ mergetask = Scheduler(settings, trees, mtimedb, myopts,
+ spinner, favorites=favorites,
+ graph_config=mydepgraph.schedulerGraph())
+
+ del mydepgraph
+ clear_caches(trees)
+
+ retval = mergetask.merge()
+ print('retval', retval)
+ if retval:
+ build_dict['type_fail'] = 'merge fail'
+ build_dict['check_fail'] = True
+ attict = {}
+ failed_pkgs_dict = {}
+ for x in mergetask._failed_pkgs_all:
+ attict['fail_msg'] = str(x.pkg)[0] + ' ' + str(x.pkg)[1] + ' ' + re.sub("\/var\/log\/portage\/", "", mergetask._locate_failure_log(x))
+ failed_pkgs_dict[str(x.pkg.cpv)] = attict
+ build_dict['failed_merge'] = failed_pkgs_dict
+ self.log_fail_queru(build_dict, settings)
+ if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
+ if "yes" == settings.get("AUTOCLEAN"):
+ portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
+ unmerge(trees[settings["ROOT"]]["root_config"],
+ myopts, "clean", [],
+ ldpath_mtimes, autoclean=1)
else:
- attDict = {}
- attDict[build_dict['type_fail']] = 1
- attDict['build_info'] = build_dict
- fail_querue_dict[build_dict['querue_id']] = attDict
- return fail_querue_dict
+ portage.writemsg_stdout(colorize("WARN", "WARNING:")
+ + " AUTOCLEAN is disabled. This can cause serious"
+ + " problems due to overlapping packages.\n")
+
+ return retval, settings, trees, mtimedb
+
+ def post_emerge(self, myaction, myopts, myfiles, target_root, trees, mtimedb, retval):
+
+ root_config = trees[target_root]["root_config"]
+ vardbapi = trees[target_root]["vartree"].dbapi
+ settings = vardbapi.settings
+ info_mtimes = mtimedb["info"]
+
+ # Load the most current variables from ${ROOT}/etc/profile.env
+ settings.unlock()
+ settings.reload()
+ settings.regenerate()
+ settings.lock()
+
+ config_protect = shlex_split(settings.get("CONFIG_PROTECT", ""))
+ infodirs = settings.get("INFOPATH","").split(":") + \
+ settings.get("INFODIR","").split(":")
+
+ os.chdir("/")
+
+ if retval == os.EX_OK:
+ exit_msg = " *** exiting successfully."
+ else:
+ exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
+ emergelog("notitles" not in settings.features, exit_msg)
+
+ _flush_elog_mod_echo()
+
+ if not vardbapi._pkgs_changed:
+ display_news_notification(root_config, myopts)
+ # If vdb state has not changed then there's nothing else to do.
+ return
+
+ vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
+ portage.util.ensure_dirs(vdb_path)
+ vdb_lock = None
+ if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
+ vardbapi.lock()
+ vdb_lock = True
+
+ if vdb_lock:
+ try:
+ if "noinfo" not in settings.features:
+ chk_updated_info_files(target_root,
+ infodirs, info_mtimes, retval)
+ mtimedb.commit()
+ finally:
+ if vdb_lock:
+ vardbapi.unlock()
+
+ chk_updated_cfg_files(settings['EROOT'], config_protect)
+
+ display_news_notification(root_config, myopts)
+ if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
+ display_preserved_libs(vardbapi, myopts)
+
+ postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
+ portage.USER_CONFIG_PATH, "bin", "post_emerge")
+ if os.access(postemerge, os.X_OK):
+ hook_retval = portage.process.spawn(
+ [postemerge], env=settings.environ())
+ if hook_retval != os.EX_OK:
+ writemsg_level(
+ " %s spawn failed of %s\n" % (bad("*"), postemerge,),
+ level=logging.ERROR, noiselevel=-1)
+
+ if "--quiet" not in myopts and \
+ myaction is None and "@world" in myfiles:
+ show_depclean_suggestion()
+
+ return
+
+ def emerge_main(self, args, build_dict):
+
+ portage._disable_legacy_globals()
+ portage.dep._internal_warnings = True
+ # Disable color until we're sure that it should be enabled (after
+ # EMERGE_DEFAULT_OPTS has been parsed).
+ portage.output.havecolor = 0
+ # This first pass is just for options that need to be known as early as
+ # possible, such as --config-root. They will be parsed again later,
+ # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
+ # the value of --config-root).
+ myaction, myopts, myfiles = parse_opts(args, silent=True)
+ if "--debug" in myopts:
+ os.environ["PORTAGE_DEBUG"] = "1"
+ if "--config-root" in myopts:
+ os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
+ if "--root" in myopts:
+ os.environ["ROOT"] = myopts["--root"]
+ if "--accept-properties" in myopts:
+ os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"]
+
+ # Portage needs to ensure a sane umask for the files it creates.
+ os.umask(0o22)
+ settings, trees, mtimedb = load_emerge_config()
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ rval = profile_check(trees, myaction)
+ if rval != os.EX_OK:
+ return rval
+
+ tmpcmdline = []
+ if "--ignore-default-opts" not in myopts:
+ tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
+ tmpcmdline.extend(args)
+ myaction, myopts, myfiles = parse_opts(tmpcmdline)
+
+ if myaction not in ('help', 'info', 'version') and \
+ myopts.get('--package-moves') != 'n' and \
+ _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
+ mtimedb.commit()
+ # Reload the whole config from scratch.
+ settings, trees, mtimedb = load_emerge_config(trees=trees)
+ portdb = trees[settings["ROOT"]]["porttree"].dbapi
+
+ xterm_titles = "notitles" not in settings.features
+ if xterm_titles:
+ xtermTitle("emerge")
+
+ adjust_configs(myopts, trees)
+ apply_priorities(settings)
+
+ spinner = stdout_spinner()
+ if "candy" in settings.features:
+ spinner.update = spinner.update_scroll
+
+ if "--quiet" not in myopts:
+ portage.deprecated_profile_check(settings=settings)
+ if portage.const._ENABLE_REPO_NAME_WARN:
+ # Bug #248603 - Disable warnings about missing
+ # repo_name entries for stable branch.
+ repo_name_check(trees)
+ repo_name_duplicate_check(trees)
+ config_protect_check(trees)
+ check_procfs()
+
+ if "getbinpkg" in settings.features:
+ myopts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in myopts:
+ myopts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in myopts:
+ myopts["--usepkgonly"] = True
+
+ if "--getbinpkg" in myopts:
+ myopts["--usepkg"] = True
+
+ if "--usepkgonly" in myopts:
+ myopts["--usepkg"] = True
+
+ if "buildpkg" in settings.features or "--buildpkgonly" in myopts:
+ myopts["--buildpkg"] = True
+
+ if "--buildpkgonly" in myopts:
+ # --buildpkgonly will not merge anything, so
+ # it cancels all binary package options.
+ for opt in ("--getbinpkg", "--getbinpkgonly",
+ "--usepkg", "--usepkgonly"):
+ myopts.pop(opt, None)
+
+ for mytrees in trees.values():
+ mydb = mytrees["porttree"].dbapi
+ # Freeze the portdbapi for performance (memoize all xmatch results).
+ mydb.freeze()
+
+ if myaction in ('search', None) and \
+ "--usepkg" in myopts:
+ # Populate the bintree with current --getbinpkg setting.
+ # This needs to happen before expand_set_arguments(), in case
+ # any sets use the bintree.
+ mytrees["bintree"].populate(
+ getbinpkgs="--getbinpkg" in myopts)
+
+ del mytrees, mydb
+
+ for x in myfiles:
+ ext = os.path.splitext(x)[1]
+ if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
+ print(colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n"))
+ break
+
+ root_config = trees[settings["ROOT"]]["root_config"]
+ if myaction == "list-sets":
+ writemsg_stdout("".join("%s\n" % s for s in sorted(root_config.sets)))
+ return os.EX_OK
+
+ ensure_required_sets(trees)
+
+ # only expand sets for actions taking package arguments
+ oldargs = myfiles[:]
+ if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
+ myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
+ if retval != os.EX_OK:
+ return retval
+
+ # Need to handle empty sets specially, otherwise emerge will react
+ # with the help message for empty argument lists
+ if oldargs and not myfiles:
+ print("emerge: no targets left after set expansion")
+ return 0
+
+ if ("--tree" in myopts) and ("--columns" in myopts):
+ print("emerge: can't specify both of \"--tree\" and \"--columns\".")
+ return 1
+
+ if '--emptytree' in myopts and '--noreplace' in myopts:
+ writemsg_level("emerge: can't specify both of " + \
+ "\"--emptytree\" and \"--noreplace\".\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if ("--quiet" in myopts):
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = -1
+
+ if "--fetch-all-uri" in myopts:
+ myopts["--fetchonly"] = True
+
+ if "--skipfirst" in myopts and "--resume" not in myopts:
+ myopts["--resume"] = True
+
+ # Allow -p to remove --ask
+ if "--pretend" in myopts:
+ myopts.pop("--ask", None)
+
+ # forbid --ask when not in a terminal
+ # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
+ if ("--ask" in myopts) and (not sys.stdin.isatty()):
+ portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
+ noiselevel=-1)
+ return 1
+
+ if settings.get("PORTAGE_DEBUG", "") == "1":
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = 0
+ if "python-trace" in settings.features:
+ import portage.debug as portage_debug
+ portage_debug.set_trace(True)
+
+ if not ("--quiet" in myopts):
+ if '--nospinner' in myopts or \
+ settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ spinner.update = spinner.update_basic
+
+ if "--debug" in myopts:
+ print("myaction", myaction)
+ print("myopts", myopts)
+
+ pretend = "--pretend" in myopts
+ fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
+ buildpkgonly = "--buildpkgonly" in myopts
+
+ # check if root user is the current user for the actions where emerge needs this
+ if portage.secpass < 2:
+ # We've already allowed "--version" and "--help" above.
+ if "--pretend" not in myopts and myaction not in ("search","info"):
+ need_superuser = myaction in ('clean', 'depclean', 'deselect',
+ 'prune', 'unmerge') or not \
+ (fetchonly or \
+ (buildpkgonly and secpass >= 1) or \
+ myaction in ("metadata", "regen", "sync"))
+ if portage.secpass < 1 or \
+ need_superuser:
+ if need_superuser:
+ access_desc = "superuser"
+ else:
+ access_desc = "portage group"
+ # Always show portage_group_warning() when only portage group
+ # access is required but the user is not in the portage group.
+ from portage.data import portage_group_warning
+ if "--ask" in myopts:
+ myopts["--pretend"] = True
+ del myopts["--ask"]
+ print(("%s access is required... " + \
+ "adding --pretend to options\n") % access_desc)
+ if portage.secpass < 1 and not need_superuser:
+ portage_group_warning()
+ else:
+ sys.stderr.write(("emerge: %s access is required\n") \
+ % access_desc)
+ if portage.secpass < 1 and not need_superuser:
+ portage_group_warning()
+ return 1
+
+ disable_emergelog = False
+ if disable_emergelog:
+ """ Disable emergelog for everything except build or unmerge
+ operations. This helps minimize parallel emerge.log entries that can
+ confuse log parsers. We especially want it disabled during
+ parallel-fetch, which uses --resume --fetchonly."""
+ _emerge.emergelog._disable = True
+
+ else:
+ if 'EMERGE_LOG_DIR' in settings:
+ try:
+ # At least the parent needs to exist for the lock file.
+ portage.util.ensure_dirs(settings['EMERGE_LOG_DIR'])
+ except portage.exception.PortageException as e:
+ writemsg_level("!!! Error creating directory for " + \
+ "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
+ (settings['EMERGE_LOG_DIR'], e),
+ noiselevel=-1, level=logging.ERROR)
+ else:
+ global _emerge_log_dir
+ _emerge_log_dir = settings['EMERGE_LOG_DIR']
+
+ if not "--pretend" in myopts:
+ emergelog(xterm_titles, "Started emerge on: "+\
+ _unicode_decode(
+ time.strftime("%b %d, %Y %H:%M:%S", time.localtime()),
+ encoding=_encodings['content'], errors='replace'))
+ myelogstr=""
+ if myopts:
+ myelogstr=" ".join(myopts)
+ if myaction:
+ myelogstr+=" "+myaction
+ if myfiles:
+ myelogstr += " " + " ".join(oldargs)
+ emergelog(xterm_titles, " *** emerge " + myelogstr)
+ del oldargs
+
+ def emergeexitsig(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
+ sys.exit(128 + signum)
+ signal.signal(signal.SIGINT, emergeexitsig)
+ signal.signal(signal.SIGTERM, emergeexitsig)
+
+ def emergeexit():
+ """This gets out final log message in before we quit."""
+ if "--pretend" not in myopts:
+ emergelog(xterm_titles, " *** terminating.")
+ if xterm_titles:
+ xtermTitleReset()
+ portage.atexit_register(emergeexit)
+
+
+ # "update", "system", or just process files
+ validate_ebuild_environment(trees)
+
+ for x in myfiles:
+ if x.startswith(SETPREFIX) or \
+ is_valid_package_atom(x, allow_repo=True):
+ continue
+ if x[:1] == os.sep:
+ continue
+ try:
+ os.lstat(x)
+ continue
+ except OSError:
+ pass
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ if "--pretend" not in myopts:
+ display_news_notification(root_config, myopts)
+ retval, settings, trees, mtimedb = self.action_build(settings, trees, mtimedb,
+ myopts, myaction, myfiles, spinner, build_dict)
+ self.post_emerge(myaction, myopts, myfiles, settings["ROOT"],
+ trees, mtimedb, retval)
+
+ return retval
def make_build_list(self, build_dict):
- conn=CM.getConnection()
cpv = build_dict['category']+'/'+build_dict['package']+'-'+build_dict['ebuild_version']
pkgdir = os.path.join(self._mysettings['PORTDIR'], build_dict['category'] + "/" + build_dict['package'])
init_manifest = gobs_manifest(self._mysettings, pkgdir)
@@ -66,36 +620,34 @@ class queruaction(object):
except:
ebuild_version_checksum_tree = None
if ebuild_version_checksum_tree == build_dict['checksum']:
- if portage.getmaskingstatus(cpv, settings=self._mysettings, portdb=self._myportdb) == []:
- init_flags = gobs_use_flags(self._mysettings, self._myportdb, cpv)
- build_use_flags_list = init_flags.comper_useflags(build_dict)
- print("build_use_flags_list", build_use_flags_list)
- manifest_error = init_manifest.check_file_in_manifest(self._myportdb, cpv, build_dict, build_use_flags_list)
- if manifest_error is None:
- build_dict['check_fail'] = False
- build_cpv_dict = init_flags.get_needed_dep_useflags(build_use_flags_list)
- print(build_cpv_dict, build_use_flags_list, cpv)
- build_use_flags_dict = {}
- if build_use_flags_list is None:
- build_use_flags_dict['None'] = None
- if build_cpv_dict is None:
- build_cpv_dict = {}
- build_cpv_dict[cpv] = build_use_flags_dict
- else:
- build_cpv_dict[cpv] = build_use_flags_dict
- print(build_cpv_dict)
- return build_cpv_dict, build_dict
- else:
- build_dict['1'] = 1
+ init_flags = gobs_use_flags(self._mysettings, self._myportdb, cpv)
+ build_use_flags_list = init_flags.comper_useflags(build_dict)
+ print("build_use_flags_list", build_use_flags_list)
+ manifest_error = init_manifest.check_file_in_manifest(self._myportdb, cpv, build_dict, build_use_flags_list)
+ if manifest_error is None:
+ build_dict['check_fail'] = False
+ build_use_flags_dict = {}
+ if build_use_flags_list is None:
+ build_use_flags_dict['None'] = None
+ build_cpv_dict = {}
+ build_cpv_dict[cpv] = build_use_flags_dict
+ print(build_cpv_dict)
+ return build_cpv_dict
else:
- build_dict['2'] = 2
+ build_dict['type_fail'] = "Manifest error"
+ build_dict['check_fail'] = True
else:
- build_dict['3'] = 3
- build_dict['check_fail'] = True
- return build_cpv_dict, build_dict
+ build_dict['type_fail'] = "Wrong ebuild checksum"
+ build_dict['check_fail'] = True
+ if build_dict['check_fail'] is True:
+ self.log_fail_queru(build_dict, portage.settings)
+ return None
+ return build_cpv_dict
def build_procces(self, buildqueru_cpv_dict, build_dict):
build_cpv_list = []
+ abs_user_config = os.path.join(self._mysettings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+ print('abs_user_config', abs_user_config)
for k, v in buildqueru_cpv_dict.iteritems():
build_use_flags_list = []
for x, y in v.iteritems():
@@ -104,13 +656,16 @@ class queruaction(object):
if y is False:
build_use_flags_list.append("-" + x)
print(k, build_use_flags_list)
- if build_use_flags_list == []:
- build_cpv_list.append("=" + k)
- else:
+ build_cpv_list.append("=" + k)
+ if not build_use_flags_list == []:
build_use_flags = ""
for flags in build_use_flags_list:
- build_use_flags = build_use_flags + flags + ","
- build_cpv_list.append("=" + k + "[" + build_use_flags + "]")
+ build_use_flags = build_use_flags + flags + " "
+ filetext = k + ' ' + build_use_flags
+ print('filetext', filetext)
+ with open("/etc/portage/package.use/gobs.use", "a") as f:
+ f.write(filetext)
+ f.write('\n')
print('build_cpv_list', build_cpv_list)
argscmd = []
if not "nooneshort" in build_dict['post_message']:
@@ -121,37 +676,31 @@ class queruaction(object):
argscmd.append(build_cpv)
print(argscmd)
# Call main_emerge to build the package in build_cpv_list
- try:
- build_fail = emerge_main(args=argscmd)
- except:
- build_fail = False
+ build_fail = self.emerge_main(argscmd, build_dict)
# Run depclean
+ print('build_fail', build_fail)
if not "nodepclean" in build_dict['post_message']:
depclean_fail = main_depclean()
if build_fail is False or depclean_fail is False:
return False
return True
- def procces_qureru(self, fail_querue_dict):
+ def procces_qureru(self):
conn=CM.getConnection()
build_dict = {}
build_dict = get_packages_to_build(conn, self._config_profile)
+ if build_dict is None:
+ return
print("build_dict", build_dict)
- if build_dict is None and fail_querue_dict == {}:
- return fail_querue_dict
- if build_dict is None and fail_querue_dict != {}:
- return fail_querue_dict
if not build_dict['ebuild_id'] is None and build_dict['checksum'] is not None:
- buildqueru_cpv_dict, build_dict = self.make_build_list(build_dict)
+ buildqueru_cpv_dict = self.make_build_list(build_dict)
print('buildqueru_cpv_dict', buildqueru_cpv_dict)
if buildqueru_cpv_dict is None:
- return fail_querue_dict
+ return
fail_build_procces = self.build_procces(buildqueru_cpv_dict, build_dict)
- if build_dict['check_fail'] is True:
- fail_querue_dict = self.log_fail_queru(build_dict, fail_querue_dict)
- return fail_querue_dict
+ return
if not build_dict['post_message'] is [] and build_dict['ebuild_id'] is None:
- return fail_querue_dict
+ return
if not build_dict['ebuild_id'] is None and build_dict['checksum'] is None:
del_old_queue(conn, build_dict['queue_id'])
- return fail_querue_dict
+ return
diff --git a/gobs/pym/check_setup.py b/gobs/pym/check_setup.py
index 7dcd053..6b52f29 100644
--- a/gobs/pym/check_setup.py
+++ b/gobs/pym/check_setup.py
@@ -14,10 +14,10 @@ CM=connectionManager(gobs_settings_dict)
if CM.getName()=='pgsql':
from gobs.pgsql import *
-def check_make_conf(conn):
+def check_make_conf():
# FIXME: mark any config updating true in the db when updating the configs
# Get the config list
- ##selective import the pgsql/mysql queries
+ conn=CM.getConnection()
config_list_all = get_config_list_all(conn)
print("Checking configs for changes and errors")
configsDict = {}
@@ -53,13 +53,17 @@ def check_make_conf(conn):
update__make_conf(conn,configsDict)
print("Updated configurtions")
-def check_make_conf_guest(connection, config_profile):
- make_conf_checksum_db = get_profile_checksum(connection,config_profile)[0]
+def check_make_conf_guest(config_profile):
+ conn=CM.getConnection()
+ print('config_profile', config_profile)
+ make_conf_checksum_db = get_profile_checksum(conn,config_profile)
+ print('make_conf_checksum_db', make_conf_checksum_db)
if make_conf_checksum_db is None:
return "1"
make_conf_file = "/etc/portage/make.conf"
make_conf_checksum_tree = portage.checksum.sha256hash(make_conf_file)[0]
- if make_conf_checksum_tree != make_conf_checksum_db:
+ print('make_conf_checksum_tree', make_conf_checksum_tree)
+ if make_conf_checksum_tree != make_conf_checksum_db[0]:
return "2"
# Check if we can open the file and close it
# Check if we have some error in the file (portage.util.getconfig)
@@ -75,8 +79,9 @@ def check_make_conf_guest(connection, config_profile):
return "3"
return "4"
-def check_configure_guest(connection, config_profile):
- pass_make_conf = check_make_conf_guest(connection, config_profile)
+def check_configure_guest(config_profile):
+ conn=CM.getConnection()
+ pass_make_conf = check_make_conf_guest(config_profile)
print(pass_make_conf)
if pass_make_conf == "1":
# profile not active or updatedb is runing
@@ -86,7 +91,7 @@ def check_configure_guest(connection, config_profile):
return False
elif pass_make_conf == "3":
# set the config as no working
- make_conf_error(connection,config_profile)
+ make_conf_error(conn, config_profile)
return False
elif pass_make_conf == "4":
# make.conf check OK
diff --git a/gobs/pym/flags.py b/gobs/pym/flags.py
index e01fed3..ef63361 100644
--- a/gobs/pym/flags.py
+++ b/gobs/pym/flags.py
@@ -154,43 +154,6 @@ class gobs_use_flags(object):
final_flags = self.filter_flags(final_use, use_expand_hidden, usemasked, useforced)
return iuse_flags, final_flags
- def get_needed_dep_useflags(self, build_use_flags_list):
- cpv = self._cpv
- tmpcmdline = []
- tmpcmdline.append("-p")
- tmpcmdline.append("--autounmask")
- tmpcmdline.append("=" + self._cpv)
- print(tmpcmdline)
- myaction, myopts, myfiles = parse_opts(tmpcmdline, silent=False)
- print(myaction, myopts, myfiles)
- myparams = create_depgraph_params(myopts, myaction)
- print(myparams)
- settings, trees, mtimedb = load_emerge_config()
- try:
- success, mydepgraph, favorites = backtrack_depgraph(
- settings, trees, myopts, myparams, myaction, myfiles, spinner=None)
- print("success mydepgraph favorites", success, mydepgraph, favorites)
- except portage.exception.PackageSetNotFound as e:
- root_config = trees[settings["ROOT"]]["root_config"]
- display_missing_pkg_set(root_config, e.value)
- return 1
- use_changes = None
- mydepgraph._show_merge_list()
- mydepgraph.display_problems()
- if mydepgraph._dynamic_config._needed_use_config_changes:
- use_changes = {}
- for pkg, needed_use_config_changes in mydepgraph._dynamic_config._needed_use_config_changes.items():
- new_use, changes = needed_use_config_changes
- use_changes[pkg.cpv] = changes
- if use_changes is None:
- return None
- iteritems_packages = {}
- for k, v in use_changes.iteritems():
- k_package = portage.versions.cpv_getkey(k)
- iteritems_packages[ k_package ] = v
- print('iteritems_packages', iteritems_packages)
- return iteritems_packages
-
def comper_useflags(self, build_dict):
iuse_flags, use_enable = self.get_flags()
iuse = []
diff --git a/gobs/pym/pgsql.py b/gobs/pym/pgsql.py
index c66f205..9979b1b 100644
--- a/gobs/pym/pgsql.py
+++ b/gobs/pym/pgsql.py
@@ -414,7 +414,7 @@ def cp_all_old_db(connection, old_package_id_list):
def del_old_queue(connection, queue_id):
cursor = connection.cursor()
sqlQ1 = 'DELETE FROM ebuildqueuedwithuses WHERE queue_id = %s'
- sqlQ2 = 'DELETE FROM temp_errors_queue_qa WHERE queue_id = %s'
+ sqlQ2 = 'DELETE FROM querue_retest WHERE querue_id = %s'
sqlQ3 = 'DELETE FROM buildqueue WHERE queue_id = %s'
cursor.execute(sqlQ1, (queue_id,))
cursor.execute(sqlQ2, (queue_id,))
@@ -568,28 +568,31 @@ def add_new_arch_db(connection, arch_list):
cursor.execute(sqlQ, (arch,))
connection.commit()
-def check_fail_times(connection, logDict):
- queue_id = logDict['queue_id']
- qa_error = logDict['qa_error_list'][0]
+def update_fail_times(connection, fail_querue_dict):
cursor = connection.cursor()
- sqlQ = 'SELECT errors FROM temp_errors_queue_qa WHERE queue_id = %s AND what_error = %s'
- cursor.execute(sqlQ, (queue_id, qa_error,))
- return cursor.fetchone()
+ sqlQ1 = 'UPDATE querue_retest SET fail_times = %s WHERE querue_id = %s AND fail_type = %s'
+ sqlQ2 = 'UPDATE buildqueue SET timestamp = NOW() WHERE queue_id = %s'
+ cursor.execute(sqlQ1, (fail_querue_dict['fail_times'], fail_querue_dict['querue_id'], fail_querue_dict['fail_type'],))
+ #cursor.execute(sqlQ2, (fail_querue_dict['querue_id'],))
+ connection.commit()
-def add_fail_times(connection, logDict):
- queue_id = logDict['queue_id']
- qa_error = logDict['qa_error_list'][0]
+def get_fail_querue_dict(connection, build_dict):
cursor = connection.cursor()
- sqlQ = 'INSERT INTO temp_errors_queue_qa (queue_id, what_error) VALUES ( %s, %s)'
- cursor.execute(sqlQ, (queue_id, qa_error,))
- connection.commit()
+ fail_querue_dict = {}
+ sqlQ = 'SELECT fail_times FROM querue_retest WHERE querue_id = %s AND fail_type = %s'
+ cursor.execute(sqlQ, (build_dict['queue_id'], build_dict['type_fail'],))
+ entries = cursor.fetchone()
+ if entries is None:
+ return None
+ fail_querue_dict['fail_times'] = entries
+ return fail_querue_dict
-def update_fail_times(connection, logDict):
- queue_id = logDict['queue_id']
- qa_error = logDict['qa_error_list'][0]
+def add_fail_querue_dict(connection, fail_querue_dict):
cursor = connection.cursor()
- sqlQ1 = 'UPDATE buildqueue SET timestamp = NOW() WHERE queue_id = %s'
- cursor.execute(sqlQ1, (queue_id,))
+ sqlQ1 = 'INSERT INTO querue_retest (querue_id, fail_type, fail_times) VALUES ( %s, %s, %s)'
+ sqlQ2 = 'UPDATE buildqueue SET timestamp = NOW() WHERE queue_id = %s'
+ cursor.execute(sqlQ1, (fail_querue_dict['querue_id'],fail_querue_dict['fail_type'], fail_querue_dict['fail_times']))
+ #cursor.execute(sqlQ2, (fail_querue_dict['querue_id'],))
connection.commit()
def make_conf_error(connection,config_profile):
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2011-08-31 1:46 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2011-08-31 1:46 UTC (permalink / raw
To: gentoo-commits
commit: 7d1ec147e3fa1a86649bc51d6f1bf145de3d7194
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 31 01:45:47 2011 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Wed Aug 31 01:45:47 2011 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=7d1ec147
fix error when foo-ggg/baa is empty
---
gobs/bin/gobs_updatedb | 4 +++-
gobs/pym/package.py | 2 ++
2 files changed, 5 insertions(+), 1 deletions(-)
diff --git a/gobs/bin/gobs_updatedb b/gobs/bin/gobs_updatedb
index 794cb78..34aac67 100755
--- a/gobs/bin/gobs_updatedb
+++ b/gobs/bin/gobs_updatedb
@@ -91,7 +91,9 @@ def update_cpv_db(mysettings):
if package_id is None:
# Add new package with ebuilds
package_id = init_package.add_new_package_db(categories, package)
- package_id_list_tree.append(package_id)
+ # FIXME log some way that package_id was None
+ if not package_id is None:
+ package_id_list_tree.append(package_id)
# Ceck if we have the cp in the package table
elif package_id is not None:
# Update the packages with ebuilds
diff --git a/gobs/pym/package.py b/gobs/pym/package.py
index 0efc8aa..78af8be 100644
--- a/gobs/pym/package.py
+++ b/gobs/pym/package.py
@@ -184,6 +184,8 @@ class gobs_package(object):
categories_dir = self._mysettings['PORTDIR'] + "/" + categories + "/"
# Get the ebuild list for cp
ebuild_list_tree = self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
+ if ebuild_list_tree is []:
+ return None
config_cpv_listDict = self.config_match_ebuild(categories, package)
config_id = get_default_config(conn)
packageDict ={}
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2011-08-30 23:02 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2011-08-30 23:02 UTC (permalink / raw
To: gentoo-commits
commit: ac1c0c0ab281c910245461649715558d8ad277fc
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Tue Aug 30 23:00:05 2011 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Tue Aug 30 23:00:05 2011 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=ac1c0c0a
fix a bug for the configs/setups test
---
gobs/bin/gobs_updatedb | 2 +-
gobs/pym/check_setup.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/gobs/bin/gobs_updatedb b/gobs/bin/gobs_updatedb
index f140e5f..794cb78 100755
--- a/gobs/bin/gobs_updatedb
+++ b/gobs/bin/gobs_updatedb
@@ -48,7 +48,7 @@ def init_portage_settings():
print master.log()
conn=CM.getConnection()
- check_make_conf
+ check_make_conf(conn)
print "Check configs done"
# Get default config from the configs table and default_config=1
config_id = get_default_config(conn) # HostConfigDir = table configs id
diff --git a/gobs/pym/check_setup.py b/gobs/pym/check_setup.py
index e3a9075..c90a10b 100644
--- a/gobs/pym/check_setup.py
+++ b/gobs/pym/check_setup.py
@@ -23,7 +23,7 @@ def check_make_conf(conn):
for config_id in config_list_all:
attDict={}
# Set the config dir
- check_config_dir = "/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + config_id[0] + "/"
+ check_config_dir = "/var/lib/gobs/" + gobs_settings_dict['gobs_gitreponame'] + "/" + config_id[0] + "/"
make_conf_file = check_config_dir + "etc/portage/make.conf"
# Check if we can open the file and close it
# Check if we have some error in the file (portage.util.getconfig)
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2011-07-31 13:43 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2011-07-31 13:43 UTC (permalink / raw
To: gentoo-commits
commit: 082e83c77c4368bb1c63c2fdf7e07167cea6b435
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sun Jul 31 13:43:09 2011 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sun Jul 31 13:43:09 2011 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=082e83c7
fix and clean code for gobs_updatedb
---
gobs/bin/gobs_updatedb | 6 +++---
gobs/pym/arch.py | 25 +++++++++++++------------
gobs/pym/categories.py | 26 +++++++++++++++-----------
gobs/pym/old_cpv.py | 29 ++++++++++++++++++-----------
gobs/pym/package.py | 18 +++++++-----------
5 files changed, 56 insertions(+), 48 deletions(-)
diff --git a/gobs/bin/gobs_updatedb b/gobs/bin/gobs_updatedb
index a9c8e4a..36b26bd 100755
--- a/gobs/bin/gobs_updatedb
+++ b/gobs/bin/gobs_updatedb
@@ -28,7 +28,7 @@ from gobs.old_cpv import gobs_old_cpv
from gobs.categories import gobs_categories
import portage
-def init_portage_settings(CM, gobs_settings_dict):
+def init_portage_settings(gobs_settings_dict):
""" Get the BASE Setup/Config for portage.settings
@type: module
@@ -101,8 +101,8 @@ def update_cpv_db(mysettings, gobs_settings_dict):
def main():
# Main
# Init settings for the default config
- mysettings = init_portage_settings(CM,gobs_settings_dict)
- init_arch = gobs_arch(CM)
+ mysettings = init_portage_settings(gobs_settings_dict)
+ init_arch = gobs_arch()
init_arch.update_arch_db()
# Update the cpv db
update_cpv_db(mysettings, gobs_settings_dict)
diff --git a/gobs/pym/arch.py b/gobs/pym/arch.py
index 7e11041..1a26083 100644
--- a/gobs/pym/arch.py
+++ b/gobs/pym/arch.py
@@ -1,24 +1,25 @@
import portage
+from gobs.readconf import get_conf_settings
+reader=get_conf_settings()
+gobs_settings_dict=reader.read_gobs_settings_all()
+# make a CM
+from gobs.ConnectionManager import connectionManager
+CM=connectionManager(gobs_settings_dict)
+#selectively import the pgsql/mysql querys
+if CM.getName()=='pgsql':
+ from gobs.pgsql import *
class gobs_arch(object):
- def __init__(self, CM):
- #selective import the pgsql/mysql queries
- if CM.getName() is 'pgsql':
- from gobs.pgsql import *
- self._CM = CM
- self._conn = CM.getConnection()
-
- def __del__(self):
- self._CM.putConnection(self._conn)
-
def update_arch_db(self):
+ conn = CM.getConnection()
# FIXME: check for new keyword
# Add arch db (keywords)
- if get_arch_db(self._conn) is None:
+ if get_arch_db(conn) is None:
arch_list = portage.archlist
for arch in arch_list:
if arch[0] not in ["~","-"]:
arch_list.append("-" + arch)
arch_list.append("-*")
- add_new_arch_db(self._conn,arch_list)
\ No newline at end of file
+ add_new_arch_db(conn,arch_list)
+ CM.putConnection(conn)
\ No newline at end of file
diff --git a/gobs/pym/categories.py b/gobs/pym/categories.py
index f9f4367..dae1207 100644
--- a/gobs/pym/categories.py
+++ b/gobs/pym/categories.py
@@ -1,26 +1,30 @@
#from gobs.text import gobs_text
from gobs.text import get_file_text
import portage
+from gobs.readconf import get_conf_settings
+reader=get_conf_settings()
+gobs_settings_dict=reader.read_gobs_settings_all()
+# make a CM
+from gobs.ConnectionManager import connectionManager
+CM=connectionManager(gobs_settings_dict)
+#selectively import the pgsql/mysql querys
+if CM.getName()=='pgsql':
+ from gobs.pgsql import *
class gobs_categories(object):
- def __init__(self, CM, mysettings):
- self._CM = CM
- self._conn=CM.getConnection()
+ def __init__(self, mysettings):
self._mysettings = mysettings
- if CM.getName() is 'pgsql':
- from gobs.pgsql import *
-
- def __del__(self):
- self._CM.putConnection(self._conn)
def update_categories_db(self, categories):
+ conn=CM.getConnection()
# Update categories_meta in the db
categories_dir = self._mysettings['PORTDIR'] + "/" + categories + "/"
categories_metadata_xml_checksum_tree = portage.checksum.sha256hash(categories_dir + "metadata.xml")[0]
categories_metadata_xml_text_tree = get_file_text(categories_dir + "metadata.xml")
- categories_metadata_xml_checksum_db = get_categories_checksum_db(self._conn, categories)
+ categories_metadata_xml_checksum_db = get_categories_checksum_db(conn, categories)
if categories_metadata_xml_checksum_db is None:
- add_new_categories_meta_sql(self._conn,categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree)
+ add_new_categories_meta_sql(conn,categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree)
elif categories_metadata_xml_checksum_db != categories_metadata_xml_checksum_tree:
- update_categories_meta_sql(self._conn,categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree)
+ update_categories_meta_sql(conn,categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree)
+ CM.putConnection(conn)
diff --git a/gobs/pym/old_cpv.py b/gobs/pym/old_cpv.py
index c577033..4923bf7 100644
--- a/gobs/pym/old_cpv.py
+++ b/gobs/pym/old_cpv.py
@@ -1,15 +1,22 @@
from __future__ import print_function
+from gobs.readconf import get_conf_settings
+reader=get_conf_settings()
+gobs_settings_dict=reader.read_gobs_settings_all()
+# make a CM
+from gobs.ConnectionManager import connectionManager
+CM=connectionManager(gobs_settings_dict)
+#selectively import the pgsql/mysql querys
+if CM.getName()=='pgsql':
+ from gobs.pgsql import *
+
class gobs_old_cpv(object):
- def __init__(self, CM, myportdb, mysettings):
- self._CM = CM
+ def __init__(self, myportdb, mysettings):
self._mysettings = mysettings
self._myportdb = myportdb
- if CM.getName() is 'pgsql':
- from gobs.pgsql import *
def mark_old_ebuild_db(self, categories, package, package_id):
- conn=self._CM.getConnection()
+ conn=CM.getConnection()
ebuild_list_tree = sorted(self._myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None))
# Get ebuild list on categories, package in the db
ebuild_list_db = cp_list_db(conn,package_id)
@@ -24,7 +31,7 @@ class gobs_old_cpv(object):
if old_ebuild_list != []:
for old_ebuild in old_ebuild_list:
print("O", categories + "/" + package + "-" + old_ebuild[0])
- self.dbquerys.add_old_ebuild(conn,package_id, old_ebuild_list)
+ add_old_ebuild(conn,package_id, old_ebuild_list)
# Check if we have older no activ ebuilds then 60 days
ebuild_old_list_db = cp_list_old_db(conn,package_id)
# Delete older ebuilds in the db
@@ -32,10 +39,10 @@ class gobs_old_cpv(object):
for del_ebuild_old in ebuild_old_list_db:
print("D", categories + "/" + package + "-" + del_ebuild_old[1])
del_old_ebuild(conn,ebuild_old_list_db)
- self._CM.putConnection(conn)
+ CM.putConnection(conn)
def mark_old_package_db(self, package_id_list_tree):
- conn=self._CM.getConnection()
+ conn=CM.getConnection()
# Get categories/package list from db
package_list_db = cp_all_db(conn)
old_package_id_list = []
@@ -59,10 +66,10 @@ class gobs_old_cpv(object):
element = get_cp_from_package_id(conn,i)
print("D", element)
del_old_package(conn,del_package_id_old_list)
- self._CM.putConnection(conn)
+ CM.putConnection(conn)
def mark_old_categories_db(self):
- conn=self._CM.getConnection()
+ conn=CM.getConnection()
# Get categories list from the tree and db
categories_list_tree = self._mysettings.categories
categories_list_db =get_categories_db(conn)
@@ -79,4 +86,4 @@ class gobs_old_cpv(object):
for real_old_categories in categories_old_list:
del_old_categories(conn,real_old_categoriess)
print("D", real_old_categories)
- self._CM.putConnection(conn)
\ No newline at end of file
+ CM.putConnection(conn)
\ No newline at end of file
diff --git a/gobs/pym/package.py b/gobs/pym/package.py
index f240e4e..8d94936 100644
--- a/gobs/pym/package.py
+++ b/gobs/pym/package.py
@@ -18,7 +18,7 @@ if CM.getName()=='pgsql':
class gobs_package(object):
- def __init__(self, mysettings, CM, myportdb, gobs_settings_dict):
+ def __init__(self, mysettings, myportdb, gobs_settings_dict):
self._mysettings = mysettings
self._gobs_settings_dict = gobs_settings_dict
self._myportdb = myportdb
@@ -41,16 +41,12 @@ class gobs_package(object):
# Change config/setup
mysettings_setup = self.change_config(config_id)
myportdb_setup = portage.portdbapi(mysettings=mysettings_setup)
- # Get cpv from portage with the config
- ebuild_match_list = myportdb_setup.match(categories + "/" + package, use_cache=1)
- latest_ebuild = ""
+ # Get latest cpv from portage with the config
+ latest_ebuild = myportdb_setup.xmatch('bestmatch-visible', categories + "/" + package)
latest_ebuild_version = unicode("")
# Check if could get cpv from portage
- # and get the lastes ebuild on that config/setup
- if ebuild_match_list != []:
- for ebuild_match_line in ebuild_match_list:
- latest_ebuild = ebuild_match_line
- # Get the version of cpv
+ if latest_ebuild != "":
+ # Get the version of cpv
latest_ebuild_version = portage.versions.cpv_getversion(latest_ebuild)
# Get the iuse and use flags for that config/setup
init_useflags = gobs_use_flags(mysettings_setup, myportdb_setup, latest_ebuild)
@@ -268,7 +264,7 @@ class gobs_package(object):
add_new_metadata(conn,metadataDict)
# Get the text in Manifest and update it
get_manifest_text = get_file_text(pkgdir + "/Manifest")
- self._dbquerys.update_manifest_sql(conn,package_id, get_manifest_text, manifest_checksum_tree)
+ update_manifest_sql(conn,package_id, get_manifest_text, manifest_checksum_tree)
# Add any qa and repoman erros to buildlog
qa_error = []
init_manifest = gobs_manifest(self._mysettings, pkgdir)
@@ -280,7 +276,7 @@ class gobs_package(object):
# Add the ebuild to the buildqueru table if needed
self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
# Mark or remove any old ebuilds
- init_old_cpv = gobs_old_cpv(self._CM, self._myportdb, self._mysettings)
+ init_old_cpv = gobs_old_cpv(self._myportdb, self._mysettings)
init_old_cpv.mark_old_ebuild_db(categories, package, package_id)
CM.putConnection(conn)
^ permalink raw reply related [flat|nested] 32+ messages in thread
* [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/
@ 2011-04-23 14:23 Magnus Granberg
0 siblings, 0 replies; 32+ messages in thread
From: Magnus Granberg @ 2011-04-23 14:23 UTC (permalink / raw
To: gentoo-commits
commit: d945420083cced61110ca49fc5941c0cbe0f3606
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sat Apr 23 14:21:46 2011 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sat Apr 23 14:21:46 2011 +0000
URL: http://git.overlays.gentoo.org/gitweb/?p=dev/zorry.git;a=commit;h=d9454200
added the gobs dir (Gentoo Open Build System)
---
gobs/bin/gobs_updatedb | 103 ++++++++++++
gobs/pym/arch.py | 19 +++
gobs/pym/categories.py | 20 +++
gobs/pym/check_setup.py | 49 ++++++
gobs/pym/flags.py | 115 ++++++++++++++
gobs/pym/initsql.py | 41 +++++
gobs/pym/manifest.py | 99 ++++++++++++
gobs/pym/old_cpv.py | 73 +++++++++
gobs/pym/package.py | 268 +++++++++++++++++++++++++++++++
gobs/pym/pgsqlbackend.py | 394 ++++++++++++++++++++++++++++++++++++++++++++++
gobs/pym/readconf.py | 45 ++++++
gobs/pym/repoman.py | 47 ++++++
gobs/pym/text.py | 41 +++++
13 files changed, 1314 insertions(+), 0 deletions(-)
diff --git a/gobs/bin/gobs_updatedb b/gobs/bin/gobs_updatedb
new file mode 100755
index 0000000..a6afa17
--- /dev/null
+++ b/gobs/bin/gobs_updatedb
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+""" This code will update the sql backend with needed info for
+ the Frontend and the Guest deamon. """
+
+import sys
+import os
+from gobs.readconf import get_conf_settings
+from gobs.initsql import init_sql_db
+from gobs.check_setup import check_config
+from gobs.arch import arch
+from gobs.package import gobs_package
+from gobs.categories import gobs_categories
+from gobs.old_cpv import gobs_old_cpv
+from gobs.categories import gobs_categories
+import portage
+
+def init_portage_settings(database, gobs_settings_dict):
+ """ Get the BASE Setup/Config for portage.settings
+ @type: module
+ @module: The SQL Backend
+ @type: dict
+ @parms: config options from the config file (host_setup_root)
+ @rtype: settings
+ @returns new settings
+ """
+ # check config setup
+ init_check = check_config(database, gobs_settings_dict)
+ init_check.check_make_conf()
+ print "Check configs done"
+ # Get default config from the configs table and default_config=1
+ config_id = database.get_default_config() # HostConfigDir = table configs id
+ default_config_root = gobs_settings_dict['host_setup_root'] +"config/" + config_id[0] + "/"
+ # Set config_root (PORTAGE_CONFIGROOT) to default_config_root
+ mysettings = portage.config(config_root = default_config_root)
+ print "Setting default config to:", config_id[0]
+ return mysettings
+
+def update_cpv_db(mysettings, database, gobs_settings_dict):
+ """Code to update the cpv in the database.
+ @type:settings
+ @parms: portage.settings
+ @type: module
+ @module: The SQL Backend
+ @type: dict
+ @parms: config options from the config file
+ """
+ print "Checking categories, package, ebuilds"
+ # Setup portdb, gobs_categories, gobs_old_cpv, package
+ myportdb = portage.portdbapi(mysettings=mysettings)
+ init_categories = gobs_categories(database, mysettings)
+ init_old_cpv = gobs_old_cpv(database, myportdb, mysettings)
+ init_package = gobs_package(mysettings, database, myportdb, gobs_settings_dict)
+ package_id_list_tree = []
+ # Will run some update checks and update package if needed
+ # Get categories/package list from portage
+ package_list_tree = myportdb.cp_all()
+ # Run the update package for all package in the list
+ for package_line in sorted(package_list_tree):
+ # split the cp to categories and package
+ element = package_line.split('/')
+ categories = element[0]
+ package = element[1]
+ print "C", categories + "/" + package # C = Checking
+ # Check if we don't have the cp in the package table
+ package_id = database.have_package_db(categories, package)
+ if package_id is None:
+ # Add new package with ebuilds
+ package_id = init_package.add_new_package_db(categories, package)
+ package_id_list_tree.append(package_id)
+ # Ceck if we have the cp in the package table
+ elif package_id is not None:
+ # Update the packages with ebuilds
+ init_package.update_package_db(categories, package, package_id)
+ package_id_list_tree.append(package_id)
+ # Update the metadata for categories
+ init_categories.update_categories_db(categories)
+ # Remove any old ebuild, package and categories
+ init_old_cpv.mark_old_ebuild_db(categories, package, package_id)
+ init_old_cpv.mark_old_package_db(sorted(package_id_list_tree))
+ init_old_cpv.mark_old_categories_db()
+ print "Checking categories, package and ebuilds done"
+
+def main():
+ # Main
+ # Get the options from the config file set in gobs.readconf
+ init_config = get_conf_settings()
+ gobs_settings_dict = init_config.read_gobs_settings_all()
+ # Init the Database
+ init_sql = init_sql_db()
+ database = init_sql.init_sql_backend()
+ # Init settings for the default config
+ mysettings = init_portage_settings(database, gobs_settings_dict)
+ # Update the arch db if needed
+ init_arch = arch(database)
+ init_arch.update_arch_db()
+ # Update the cpv db
+ update_cpv_db(mysettings, database, gobs_settings_dict)
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/gobs/pym/arch.py b/gobs/pym/arch.py
new file mode 100644
index 0000000..32feb58
--- /dev/null
+++ b/gobs/pym/arch.py
@@ -0,0 +1,19 @@
+import portage
+
+class arch(object):
+
+ def __init__(self, database):
+ self.database = database
+ self.arch_list_tree = portage.archlist
+ self.arch_list_db = database.get_arch_db()
+
+ def update_arch_db(self):
+ # FIXME: check for new keyword
+ # Add arch db (keywords)
+ if self.arch_list_db is None:
+ arch_list = self.arch_list_tree
+ for arch in arch_list:
+ if arch[0] not in ["~","-"]:
+ arch_list.append("-" + arch)
+ arch_list.append("-*")
+ self.database.add_new_arch_db(arch_list)
\ No newline at end of file
diff --git a/gobs/pym/categories.py b/gobs/pym/categories.py
new file mode 100644
index 0000000..6613cb2
--- /dev/null
+++ b/gobs/pym/categories.py
@@ -0,0 +1,20 @@
+from gobs.text import gobs_text
+import portage
+
+class gobs_categories(object):
+
+ def __init__(self, database, mysettings):
+ self.database = database
+ self.mysettings = mysettings
+ self.init_text = gobs_text()
+
+ def update_categories_db(self, categories):
+ # Update categories_meta in the db
+ categories_dir = self.mysettings['PORTDIR'] + "/" + categories + "/"
+ categories_metadata_xml_checksum_tree = portage.checksum.sha256hash(categories_dir + "metadata.xml")[0]
+ categories_metadata_xml_text_tree = self.init_text.get_file_text(categories_dir + "metadata.xml")
+ categories_metadata_xml_checksum_db = self.database.get_categories_checksum_db(categories)
+ if categories_metadata_xml_checksum_db is None:
+ self.database.add_new_categories_meta_sql(categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree)
+ elif categories_metadata_xml_checksum_db != categories_metadata_xml_checksum_tree:
+ self.database.update_categories_meta_sql(categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree)
diff --git a/gobs/pym/check_setup.py b/gobs/pym/check_setup.py
new file mode 100644
index 0000000..5b1a0b4
--- /dev/null
+++ b/gobs/pym/check_setup.py
@@ -0,0 +1,49 @@
+import portage
+import os
+import errno
+from gobs.text import gobs_text
+
+class check_config(object):
+
+ def __init__(self, database, gobs_settings_dict):
+ self.database = database
+ self.gobs_settings_dict = gobs_settings_dict
+ self.init_text = gobs_text()
+
+ def check_make_conf(self):
+ # FIXME: mark any config updating true in the db when updating the configs
+ # Get the config list
+ config_list_all = self.database.get_config_list_all()
+ print "Checking configs for changes and errors"
+ configsDict = {}
+ for config_id in config_list_all:
+ attDict={}
+ # Set the config dir
+ check_config_dir = self.gobs_settings_dict['host_setup_root'] + "config/" + config_id[0] + "/"
+ make_conf_file = check_config_dir + "etc/portage/make.conf"
+ # Check if we can open the file and close it
+ # Check if we have some error in the file (portage.util.getconfig)
+ # Check if we envorment error with the config (settings.validate)
+ try:
+ open_make_conf = open(make_conf_file)
+ open_make_conf.close()
+ portage.util.getconfig(make_conf_file, tolerant=0, allow_sourcing=False, expand=True)
+ mysettings = portage.config(config_root = check_config_dir)
+ mysettings.validate()
+ # With errors we update the db on the config and disable the config
+ except Exception as e:
+ attDict['config_error'] = e
+ attDict['active'] = 'False'
+ print "Fail", config_id[0]
+ else:
+ attDict['config_error'] = ''
+ attDict['active'] = 'True'
+ print "Pass", config_id[0]
+ # Get the checksum of make.conf
+ make_conf_checksum_tree = portage.checksum.sha256hash(make_conf_file)[0]
+ # Check if we have change the make.conf and update the db with it
+ attDict['make_conf_text'] = self.init_text.get_file_text(make_conf_file)
+ attDict['make_conf_checksum_tree'] = make_conf_checksum_tree
+ configsDict[config_id]=attDict
+ self.database.update__make_conf(configsDict)
+ print "Updated configurtions"
diff --git a/gobs/pym/flags.py b/gobs/pym/flags.py
new file mode 100644
index 0000000..d41f97c
--- /dev/null
+++ b/gobs/pym/flags.py
@@ -0,0 +1,115 @@
+class gobs_use_flags(object):
+
+ def __init__(self, mysettings, myportdb, cpv):
+ self.mysettings = mysettings
+ self.myportdb = myportdb
+ self.cpv = cpv
+
+ def get_iuse(self):
+ """Gets the current IUSE flags from the tree
+ To be used when a gentoolkit package object is not needed
+ @type: cpv: string
+ @param cpv: cat/pkg-ver
+ @rtype list
+ @returns [] or the list of IUSE flags
+ """
+ return self.myportdb.aux_get(self.cpv, ["IUSE"])[0].split()
+
+ def reduce_flag(self, flag):
+ """Absolute value function for a USE flag
+ @type flag: string
+ @param flag: the use flag to absolute.
+ @rtype: string
+ @return absolute USE flag
+ """
+ if flag[0] in ["+","-"]:
+ return flag[1:]
+ else:
+ return flag
+
+ def reduce_flags(self, the_list):
+ """Absolute value function for a USE flag list
+ @type the_list: list
+ @param the_list: the use flags to absolute.
+ @rtype: list
+ @return absolute USE flags
+ """
+ r=[]
+ for member in the_list:
+ r.append(self.reduce_flag(member))
+ return r
+
+ def filter_flags(self, use, use_expand_hidden, usemasked, useforced):
+ """Filter function to remove hidden or otherwise not normally
+ visible USE flags from a list.
+ @type use: list
+ @param use: the USE flag list to be filtered.
+ @type use_expand_hidden: list
+ @param use_expand_hidden: list of flags hidden.
+ @type usemasked: list
+ @param usemasked: list of masked USE flags.
+ @type useforced: list
+ @param useforced: the forced USE flags.
+ @rtype: list
+ @return the filtered USE flags.
+ """
+ # clean out some environment flags, since they will most probably
+ # be confusing for the user
+ for f in use_expand_hidden:
+ f=f.lower() + "_"
+ for x in use:
+ if f in x:
+ use.remove(x)
+ # clean out any arch's
+ archlist = self.mysettings["PORTAGE_ARCHLIST"].split()
+ for a in use[:]:
+ if a in archlist:
+ use.remove(a)
+ # dbl check if any from usemasked or useforced are still there
+ masked = usemasked + useforced
+ for a in use[:]:
+ if a in masked:
+ use.remove(a)
+ return use
+
+ def get_all_cpv_use(self):
+ """Uses portage to determine final USE flags and settings for an emerge
+ @type cpv: string
+ @param cpv: eg cat/pkg-ver
+ @rtype: lists
+ @return use, use_expand_hidden, usemask, useforce
+ """
+ use = None
+ self.mysettings.unlock()
+ try:
+ self.mysettings.setcpv(self.cpv, use_cache=None, mydb=self.myportdb)
+ use = self.mysettings['PORTAGE_USE'].split()
+ use_expand_hidden = self.mysettings["USE_EXPAND_HIDDEN"].split()
+ usemask = list(self.mysettings.usemask)
+ useforce = list(self.mysettings.useforce)
+ except KeyError:
+ self.mysettings.reset()
+ self.mysettings.lock()
+ return [], [], [], []
+ # reset cpv filter
+ self.mysettings.reset()
+ self.mysettings.lock()
+ return use, use_expand_hidden, usemask, useforce
+
+ def get_flags(self):
+ """Retrieves all information needed to filter out hidden, masked, etc.
+ USE flags for a given package.
+
+ @type cpv: string
+ @param cpv: eg. cat/pkg-ver
+ @type final_setting: boolean
+ @param final_setting: used to also determine the final
+ enviroment USE flag settings and return them as well.
+ @rtype: list or list, list
+ @return IUSE or IUSE, final_flags
+ """
+ final_use, use_expand_hidden, usemasked, useforced = self.get_all_cpv_use()
+ iuse_flags = self.filter_flags(self.get_iuse(), use_expand_hidden, usemasked, useforced)
+ #flags = filter_flags(use_flags, use_expand_hidden, usemasked, useforced)
+ final_flags = self.filter_flags(final_use, use_expand_hidden, usemasked, useforced)
+ return iuse_flags, final_flags
diff --git a/gobs/pym/initsql.py b/gobs/pym/initsql.py
new file mode 100644
index 0000000..2de0e86
--- /dev/null
+++ b/gobs/pym/initsql.py
@@ -0,0 +1,41 @@
+import sys, os, re
+from gobs.readconf import get_conf_settings
+
class DBStateError(Exception):
 """Signals that the DB is in a state we did not expect."""

 def __init__(self, value):
  # Keep the base-class args empty; the payload lives in self.value.
  super(DBStateError, self).__init__()
  self.value = value

 def __str__(self):
  return repr(self.value)
+
class DatabaseConfig(object):
 """Class-level holder for the SQL backend connection settings.

 All attributes are filled in once, at import time, from the gobs
 configuration file; the class is used as a namespace and is never
 instantiated.
 """
 # No touchy
 sql_settings = {}
 gobs_settings = get_conf_settings()
 gobs_settings_dict = gobs_settings.read_gobs_settings_all()
 # Backend selector read from the config file (e.g. 'pgsql')
 mode = gobs_settings_dict['sql_backend']

 # Settings for the SQL backend. You need to create one user in
 # your SQL database.
 # The user needs:
 # DELETE, INSERT, UPDATE, SELECT
 # Do NOT change these, set the values in the config file /etc/buildhost/buildhost.conf
 sql_settings[mode] = {}
 # settings['sql_pgsql']['charset'] = 'utf8'
 sql_settings[mode]['host'] = gobs_settings_dict['sql_host']
 sql_settings[mode]['database'] = gobs_settings_dict['sql_db']
 sql_settings[mode]['user'] = gobs_settings_dict['sql_user']
 sql_settings[mode]['password'] = gobs_settings_dict['sql_passwd']
+
+class init_sql_db(object):
+
+ def init_sql_backend(self):
+ from gobs.initsql import DatabaseConfig # import DatebaseConfig
+ database = None
+ # Setup the database backent to use mysql
+ if DatabaseConfig.mode == 'pgsql':
+ from gobs.pgsqlbackend import PgSQLPackageDB
+ database = PgSQLPackageDB(DatabaseConfig.sql_settings['pgsql'])
+ print database
+ return database
\ No newline at end of file
diff --git a/gobs/pym/manifest.py b/gobs/pym/manifest.py
new file mode 100644
index 0000000..69c6f8b
--- /dev/null
+++ b/gobs/pym/manifest.py
@@ -0,0 +1,99 @@
+import os
+import warnings
+from portage import os, _encodings, _unicode_decode
+from portage.exception import DigestException, FileNotFound
+from portage.localization import _
+from portage.manifest import Manifest
+
class gobs_manifest(object):
 """Manifest verification helper for gobs."""

 def __init__ (self, mysettings):
  self.mysettings = mysettings  # portage.config used for the check

 # Copy of portage.digestcheck() but without the writemsg() stuff
 def digestcheck(self, pkgdir):
  """
  Verifies checksums. Assumes all files have been downloaded.
  @type pkgdir: string
  @param pkgdir: the package directory holding the Manifest
  @rtype: None or string
  @returns: None on success and an error msg on failure
  """
  # Fixed: the parameter was named 'pkdir' while the body used 'pkgdir'
  # (NameError on first use); also dropped the unused 'justmanifest'.
  myfiles = []
  self.mysettings['PORTAGE_QUIET'] = '1'

  if self.mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
   return None
  manifest_path = os.path.join(pkgdir, "Manifest")
  if not os.path.exists(manifest_path):
   return ("!!! Manifest file not found: '%s'") % manifest_path
  # Fixed: 'mysettings' was missing the self. qualifier
  mf = Manifest(pkgdir, self.mysettings["DISTDIR"])
  manifest_empty = True
  for d in mf.fhashdict.values():
   if d:
    manifest_empty = False
    break
  if manifest_empty:
   return ("!!! Manifest is empty: '%s'") % manifest_path
  try:
   if "PORTAGE_PARALLEL_FETCHONLY" not in self.mysettings:
    mf.checkTypeHashes("EBUILD")
    mf.checkTypeHashes("AUX")
    mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
   for f in myfiles:
    ftype = mf.findFile(f)
    if ftype is None:
     return ("!!! Missing digest for '%s'") % (f,)
    mf.checkFileHashes(ftype, f)
  except FileNotFound as e:
   return ("!!! A file listed in the Manifest could not be found: %s") % str(e)
  except DigestException as e:
   return ("!!! Digest verification failed: %s\nReason: %s\nGot: %s\nExpected: %s") \
    % (e.value[0], e.value[1], e.value[2], e.value[3])
  # Make sure that all of the ebuilds are actually listed in the Manifest.
  for f in os.listdir(pkgdir):
   pf = None
   if f[-7:] == '.ebuild':
    pf = f[:-7]
   if pf is not None and not mf.hasFile("EBUILD", f):
    return ("!!! A file is not listed in the Manifest: '%s'") \
     % os.path.join(pkgdir, f)
  # epatch will just grab all the patches out of a directory, so we have
  # to make sure there aren't any foreign files that it might grab.
  filesdir = os.path.join(pkgdir, "files")
  for parent, dirs, files in os.walk(filesdir):
   # Fixed nesting throughout this walk: the decode-error returns and
   # follow-up checks belong inside/after the except branches (as in
   # portage.digestcheck); the flattened original returned an error
   # for every walked path and file.
   try:
    parent = _unicode_decode(parent,
     encoding=_encodings['fs'], errors='strict')
   except UnicodeDecodeError:
    parent = _unicode_decode(parent, encoding=_encodings['fs'], errors='replace')
    return ("!!! Path contains invalid character(s) for encoding '%s': '%s'") \
     % (_encodings['fs'], parent)
   for d in dirs:
    d_bytes = d
    try:
     d = _unicode_decode(d, encoding=_encodings['fs'], errors='strict')
    except UnicodeDecodeError:
     d = _unicode_decode(d, encoding=_encodings['fs'], errors='replace')
     return ("!!! Path contains invalid character(s) for encoding '%s': '%s'") \
      % (_encodings['fs'], os.path.join(parent, d))
    if d.startswith(".") or d == "CVS":
     dirs.remove(d_bytes)
   for f in files:
    try:
     f = _unicode_decode(f, encoding=_encodings['fs'], errors='strict')
    except UnicodeDecodeError:
     f = _unicode_decode(f, encoding=_encodings['fs'], errors='replace')
     if f.startswith("."):
      continue
     f = os.path.join(parent, f)[len(filesdir) + 1:]
     return ("!!! File name contains invalid character(s) for encoding '%s': '%s'") \
      % (_encodings['fs'], f)
    if f.startswith("."):
     continue
    f = os.path.join(parent, f)[len(filesdir) + 1:]
    file_type = mf.findFile(f)
    if file_type != "AUX" and not f.startswith("digest-"):
     return ("!!! A file is not listed in the Manifest: '%s'") \
      % os.path.join(filesdir, f)
  return None
\ No newline at end of file
diff --git a/gobs/pym/old_cpv.py b/gobs/pym/old_cpv.py
new file mode 100644
index 0000000..4cd8ec0
--- /dev/null
+++ b/gobs/pym/old_cpv.py
@@ -0,0 +1,73 @@
+class gobs_old_cpv(object):
+
+ def __init__(self, database, myportdb, mysettings):
+ self.database = database
+ self.myportdb = myportdb
+ self.mysettings = mysettings
+
+ def mark_old_ebuild_db(self, categories, package, package_id):
+ ebuild_list_tree = sorted(self.myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None))
+ # Get ebuild list on categories, package in the db
+ ebuild_list_db = self.database.cp_list_db(package_id)
+ # Check if don't have the ebuild in the tree
+ # Add it to the no active list
+ old_ebuild_list = []
+ for ebuild_line in ebuild_list_db:
+ ebuild_line_db = categories + "/" + package + "-" + ebuild_line[0]
+ if not ebuild_line_db in ebuild_list_tree:
+ old_ebuild_list.append(ebuild_line)
+ # Set no active on ebuilds in the db that no longer in tree
+ if old_ebuild_list != []:
+ for old_ebuild in old_ebuild_list:
+ print "O", categories + "/" + package + "-" + old_ebuild[0]
+ self.database.add_old_ebuild(package_id, old_ebuild_list)
+ # Check if we have older no activ ebuilds then 60 days
+ ebuild_old_list_db = self.database.cp_list_old_db(package_id)
+ # Delete older ebuilds in the db
+ if ebuild_old_list_db != []:
+ for del_ebuild_old in ebuild_old_list_db:
+ print "D", categories + "/" + package + "-" + del_ebuild_old[1]
+ self.database.del_old_ebuild(ebuild_old_list_db)
+
+ def mark_old_package_db(self, package_id_list_tree):
+ # Get categories/package list from db
+ package_list_db = self.database.cp_all_db()
+ old_package_id_list = []
+ # Check if don't have the categories/package in the tree
+ # Add it to the no active list
+ for package_line in package_list_db:
+ if not package_line in package_id_list_tree:
+ old_package_id_list.append(package_line)
+ # Set no active on categories/package and ebuilds in the db that no longer in tree
+ if old_package_id_list != []:
+ mark_old_list = self.database.add_old_package(old_package_id_list)
+ if mark_old_list != []:
+ for x in mark_old_list:
+ element = self.database.get_cp_from_package_id(x)
+ print "O", element[0]
+ # Check if we have older no activ categories/package then 60 days
+ del_package_id_old_list = self.database.cp_all_old_db(old_package_id_list)
+ # Delete older categories/package and ebuilds in the db
+ if del_package_id_old_list != []:
+ for i in del_package_id_old_list:
+ element = self.database.get_cp_from_package_id(i)
+ print "D", element
+ self.database.del_old_package(del_package_id_old_list)
+
+ def mark_old_categories_db(self):
+ # Get categories list from the tree and db
+ categories_list_tree = self.mysettings.categories
+ categories_list_db = self.database.get_categories_db()
+ categories_old_list = []
+ # Check if don't have the categories in the tree
+ # Add it to the no active list
+ for categories_line in categories_list_db:
+ if not categories_line[0] in categories_list_tree:
+ old_c = selfdatabase.get_old_categories(categories_line[0])
+ if old_c is not None:
+ categories_old_list.append(categories_line)
+ # Delete older categories in the db
+ if categories_old_list != []:
+ for real_old_categories in categories_old_list:
+ self.database.del_old_categories(real_old_categoriess)
+ print "D", real_old_categories
diff --git a/gobs/pym/package.py b/gobs/pym/package.py
new file mode 100644
index 0000000..b75c515
--- /dev/null
+++ b/gobs/pym/package.py
@@ -0,0 +1,268 @@
+import portage
+from gobs.flags import gobs_use_flags
+from gobs.repoman import gobs_repoman
+from gobs.manifest import gobs_manifest
+from gobs.text import gobs_text
+from gobs.old_cpv import gobs_old_cpv
+
+class gobs_package(object):
+
+ def __init__(self, mysettings, database, myportdb, gobs_settings_dict):
+ self.mysettings = mysettings
+ self.database = database
+ self.gobs_settings_dict = gobs_settings_dict
+ self.myportdb = myportdb
+ self.init_text = gobs_text()
+
+ def change_config(self, config_id):
+ # Change config_root config_id = table configs.id
+ my_new_setup = self.gobs_settings_dict['host_setup_root'] +"config/" + config_id + "/"
+ mysettings_setup = portage.config(config_root = my_new_setup)
+ return mysettings_setup
+
+ def config_match_ebuild(self, categories, package):
+ config_cpv_listDict ={}
+ # Get a list from table configs/setups with default_config=Fales and active = True
+ config_list_all = self.database.get_config_list()
+ if config_list_all is ():
+ return config_cpv_listDict
+ for i in config_list_all:
+ config_id = i[0]
+ # Change config/setup
+ mysettings_setup = self.change_config(config_id)
+ myportdb_setup = portage.portdbapi(mysettings=mysettings_setup)
+ # Get cpv from portage with the config
+ ebuild_match_list = myportdb_setup.match(categories + "/" + package, use_cache=1)
+ latest_ebuild = ""
+ latest_ebuild_version = unicode("")
+ # Check if could get cpv from portage
+ # and get the lastes ebuild on that config/setup
+ if ebuild_match_list != []:
+ for ebuild_match_line in ebuild_match_list:
+ latest_ebuild = ebuild_match_line
+ # Get the version of cpv
+ latest_ebuild_version = portage.versions.cpv_getversion(latest_ebuild)
+ # Get the iuse and use flags for that config/setup
+ init_useflags = gobs_use_flags(mysettings_setup, myportdb_setup, latest_ebuild)
+ iuse_flags_list, final_use_list = init_useflags.get_flags()
+ iuse_flags_list2 = []
+ for iuse_line in iuse_flags_list:
+ iuse_flags_list2.append( init_useflags.reduce_flag(iuse_line))
+ # Dic the needed info
+ attDict = {}
+ attDict['ebuild_version'] = latest_ebuild_version
+ attDict['useflags'] = final_use_list
+ attDict['iuse'] = iuse_flags_list2
+ attDict['package'] = package
+ attDict['categories'] = categories
+ config_cpv_listDict[config_id] = attDict
+ # Clean some cache
+ myportdb_setup.close_caches()
+ portage.portdbapi.portdbapi_instances.remove(myportdb_setup)
+ return config_cpv_listDict
+
+ def get_ebuild_metadata(self, ebuild_line):
+ # Get the auxdbkeys infos for the ebuild
+ try:
+ ebuild_auxdb_list = self.myportdb.aux_get(ebuild_line, portage.auxdbkeys)
+ except:
+ ebuild_auxdb_list = []
+ else:
+ for i in range(len(ebuild_auxdb_list)):
+ if ebuild_auxdb_list[i] == '':
+ ebuild_auxdb_list[i] = ''
+ return ebuild_auxdb_list
+
+ def get_packageDict(self, pkgdir, ebuild_line, categories, package, config_id):
+ attDict = {}
+ ebuild_version_tree = portage.versions.cpv_getversion(ebuild_line)
+ ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")[0]
+ ebuild_version_text = self.init_text.get_ebuild_text(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")
+ init_repoman = gobs_repoman(self.mysettings, self.myportdb, self.database)
+ repoman_error = init_repoman.check_repoman(categories, package, ebuild_version_tree, config_id)
+ ebuild_version_metadata_tree = get_ebuild_metadata(ebuild_line)
+ # if there some error to get the metadata we add rubish to the
+ # ebuild_version_metadata_tree and set ebuild_version_checksum_tree to 0
+ # so it can be updated next time we update the db
+ if ebuild_version_metadata_tree == []:
+ ebuild_version_metadata_tree = ['','','','','','','','','','','','','','','','','','','','','','','','','']
+ ebuild_version_checksum_tree = ['0']
+ # add the ebuild to the dict packages
+ attDict['categories'] = categories
+ attDict['package'] = package
+ attDict['ebuild_version_tree'] = ebuild_version_tree
+ attDict['ebuild_version_checksum_tree']= ebuild_version_checksum_tree
+ attDict['ebuild_version_metadata_tree'] = ebuild_version_metadata_tree
+ attDict['ebuild_version_text'] = ebuild_version_text[0]
+ attDict['ebuild_version_revision'] = ebuild_version_text[1]
+ attDict['ebuild_error'] = repoman_error
+ return attDict
+
+ def get_metadataDict(self, packageDict, ebuild_id_list):
+ # Make the metadataDict from packageDict
+ ebuild_i = 0
+ metadataDict ={}
+ for k, v in packageDict.iteritems():
+ attDict = {}
+ metadata_restrictions = []
+ for i in v['ebuild_version_metadata_tree'][4].split():
+ metadata_restrictions.append(i)
+ metadata_keyword = []
+ for i in v['ebuild_version_metadata_tree'][8].split():
+ metadata_keyword.append(i)
+ metadata_iuse = []
+ for i in v['ebuild_version_metadata_tree'][10].split():
+ metadata_iuse.append(i)
+ attDict['restrictions'] = metadata_restrictions
+ attDict['keyword'] = metadata_keyword
+ attDict['iuse'] = metadata_iuse
+ metadataDict[ebuild_id_list[ebuild_i]] = attDict
+ ebuild_i = ebuild_i +1
+ return metadataDict
+
+ def add_new_ebuild_buildquery_db(self, ebuild_id_list, packageDict, config_cpv_listDict):
+ # Get the needed info from packageDict and config_cpv_listDict and put that in buildqueue
+ # Only add it if ebuild_version in packageDict and config_cpv_listDict match
+ if config_cpv_listDict != {}:
+ message = None
+ # Unpack config_cpv_listDict
+ for k, v in config_cpv_listDict.iteritems():
+ config_id = k
+ latest_ebuild_version = v['ebuild_version']
+ iuse_flags_list = list(set(v['iuse']))
+ use_enable= v['useflags']
+ use_disable = list(set(iuse_flags_list).difference(set(use_enable)))
+ # Make a dict with enable and disable use flags for ebuildqueuedwithuses
+ use_flagsDict = {}
+ for x in use_enable:
+ use_flagsDict[x] = True
+ for x in use_disable:
+ use_flagsDict[x] = False
+ # Unpack packageDict
+ i = 0
+ for k, v in packageDict.iteritems():
+ ebuild_id = ebuild_id_list[i]
+ use_flags_list = []
+ use_enable_list = []
+ for u, s in use_flagsDict.iteritems():
+ use_flags_list.append(u)
+ use_enable_list.append(s)
+ # Comper ebuild_version and add the ebuild_version to buildqueue
+ if portage.vercmp(v['ebuild_version_tree'], latest_ebuild_version) == 0:
+ self.database.add_new_package_buildqueue(ebuild_id, config_id, use_flags_list, use_enable_list, message)
+ print "B", config_id, v['categories'] + "/" + v['package'] + "-" + latest_ebuild_version, "USE:", use_enable # B = Build config cpv use-flags
+ i = i +1
+
+ def get_package_metadataDict(self, pkgdir, package):
+ # Make package_metadataDict
+ attDict = {}
+ package_metadataDict = {}
+ changelog_checksum_tree = portage.checksum.sha256hash(pkgdir + "/ChangeLog")
+ changelog_text_tree = self.init_text.get_file_text(pkgdir + "/ChangeLog")
+ metadata_xml_checksum_tree = portage.checksum.sha256hash(pkgdir + "/metadata.xml")
+ metadata_xml_text_tree = self.init_text.get_file_text(pkgdir + "/metadata.xml")
+ attDict['changelog_checksum'] = changelog_checksum_tree[0]
+ attDict['changelog_text'] = changelog_text_tree
+ attDict['metadata_xml_checksum'] = metadata_xml_checksum_tree[0]
+ attDict[' metadata_xml_text'] = metadata_xml_text_tree
+ package_metadataDict[package] = attDict
+ return package_metadataDict
+
+ def add_new_package_db(self, categories, package):
+ # add new categories package ebuild to tables package and ebuilds
+ print "N", categories + "/" + package # N = New Package
+ pkgdir = self.mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR + cp
+ categories_dir = self.mysettings['PORTDIR'] + "/" + categories + "/"
+ # Get the ebuild list for cp
+ ebuild_list_tree = self.portdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
+ config_cpv_listDict = self.config_match_ebuild(categories, package)
+ config_id = self.database.get_default_config()[0]
+ packageDict ={}
+ for ebuild_line in sorted(ebuild_list_tree):
+ # Make the needed packageDict
+ packageDict[ebuild_line] = self.get_packageDict(pkgdir, ebuild_line, categories, package, config_id)
+ # Add the ebuild to db
+ return_id = self.database.add_new_package_sql(packageDict)
+ ebuild_id_list = return_id[0]
+ package_id_list = return_id[1]
+ package_id = package_id_list[0]
+ # Add the manifest file to db
+ manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
+ get_manifest_text = self.init_text.get_file_text(pkgdir + "/Manifest")
+ database.add_new_manifest_sql(package_id, get_manifest_text, manifest_checksum_tree)
+ # Add metadataDict to db
+ metadataDict = self.get_metadataDict(packageDict, ebuild_id_list)
+ database.add_new_metadata(metadataDict)
+ # Add any qa and repoman erro for the ebuild to buildlog
+ qa_error = []
+ init_manifest = gobs_manifest(self.mysettings)
+ manifest_error = init_manifest.digestcheck(pkgdir)
+ if manifest_error is not None:
+ qa_error.append(manifest_error)
+ print "QA:", categories + "/" + package, qa_error
+ self.database.add_qa_repoman(ebuild_id_list, qa_error, packageDict, config_id)
+ # Add the ebuild to the buildqueru table if needed
+ self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
+ # Add some checksum on some files
+ package_metadataDict = self.get_package_metadataDict(pkgdir, package)
+ self.database.add_new_package_metadata(package_id, package_metadataDict)
+ return package_id
+
+ def update_package_db(self, categories, package, package_id):
+ # Update the categories and package with new info
+ pkgdir = self.mysettings['PORTDIR'] + "/" + categories + "/" + package # Get PORTDIR with cp
+ # Get the checksum from the file in portage tree
+ manifest_checksum_tree = portage.checksum.sha256hash(pkgdir + "/Manifest")[0]
+ # Get the checksum from the db in package table
+ manifest_checksum_db = self.database.get_manifest_db(package_id)[0]
+ # if we have the same checksum return else update the package
+ ebuild_list_tree = self.myportdb.cp_list((categories + "/" + package), use_cache=1, mytree=None)
+ if manifest_checksum_tree != manifest_checksum_db:
+ print "U", categories + "/" + package # U = Update
+ # Get package_metadataDict and update the db with it
+ package_metadataDict = self.get_package_metadataDict(pkgdir, package)
+ self.database.update_new_package_metadata(package_id, package_metadataDict)
+ # Get config_cpv_listDict
+ config_cpv_listDict = self.config_match_ebuild(categories, package)
+ config_id = self.database.get_default_config()
+ packageDict ={}
+ for ebuild_line in sorted(ebuild_list_tree):
+ old_ebuild_list = []
+ # split out ebuild version
+ ebuild_version_tree = portage.versions.cpv_getversion(ebuild_line)
+ # Get the checksum of the ebuild in tree and db
+ ebuild_version_checksum_tree = portage.checksum.sha256hash(pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild")[0]
+ ebuild_version_manifest_checksum_db = self.database.get_ebuild_checksum(package_id, ebuild_version_tree)
+ # Check if the checksum have change
+ if ebuild_version_manifest_checksum_db is None or ebuild_version_checksum_tree != ebuild_version_manifest_checksum_db:
+ # Get packageDict for ebuild
+ packageDict[ebuild_line] = self.get_packageDict(pkgdir, ebuild_line, categories, package, config_id)
+ if ebuild_version_manifest_checksum_db is None:
+ print "N", categories + "/" + package + "-" + ebuild_version_tree # N = New ebuild
+ else:
+ print "U", categories + "/" + package + "-" + ebuild_version_tree # U = Updated ebuild
+ # Fix so we can use add_new_package_sql(packageDict) to update the ebuilds
+ old_ebuild_list.append(ebuild_version_tree)
+ self.database.add_old_ebuild(package_id, old_ebuild_list)
+ self.database.update_active_ebuild(package_id, ebuild_version_tree)
+ # Use packageDictand and metadataDict to update the db
+ return_id = self.database.add_new_package_sql(packageDict)
+ ebuild_id_list = return_id[0]
+ metadataDict = self.get_metadataDict(packageDict, ebuild_id_list)
+ self.database.add_new_metadata(metadataDict)
+ # Get the text in Manifest and update it
+ get_manifest_text = self.init_text.get_file_text(pkgdir + "/Manifest")
+ self.database.update_manifest_sql(package_id, get_manifest_text, manifest_checksum_tree)
+ # Add any qa and repoman erros to buildlog
+ qa_error = []
+ init_manifest = gobs_manifest(self.mysettings)
+ manifest_error = init_manifest.digestcheck(pkgdir)
+ if manifest_error is not None:
+ qa_error.append(manifest_error)
+ print "QA:", categories + "/" + package, qa_error
+ self.database.add_qa_repoman(ebuild_id_list, qa_error, packageDict, config_id)
+ # Add the ebuild to the buildqueru table if needed
+ self.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
+ # Mark or remove any old ebuilds
+ init_old_cpv = gobs_old_cpv(self.database, self.myportdb)
+ init_old_cpv.mark_old_ebuild_db(categories, package, package_id, sorted(ebuild_list_tree))
diff --git a/gobs/pym/pgsqlbackend.py b/gobs/pym/pgsqlbackend.py
new file mode 100644
index 0000000..27fdf57
--- /dev/null
+++ b/gobs/pym/pgsqlbackend.py
@@ -0,0 +1,394 @@
+
+class SQLPackageDatabase(object):
+ """we have to store our stuff somewhere
+
+ subclass and redefine init to provide
+ at least self.cursor"""
+
+ # These are set by subclasses
+ db = None
+ cursor = None
+ syntax_placeholder = None
+ syntax_autoincrement = None
+
+ sql = {}
+
+ def get_default_config(self):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT id FROM configs WHERE default_config = True'
+ cursor.execute(sqlQ)
+ return cursor.fetchone()
+
+ def get_config_list(self):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT id FROM configs WHERE default_config = False AND active = True'
+ cursor.execute(sqlQ)
+ return cursor.fetchall()
+
+ def get_config_list_all(self):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT id FROM configs'
+ cursor.execute(sqlQ)
+ return cursor.fetchall()
+
+ def update__make_conf(self, configsDict):
+ cursor = self.conn.cursor()
+ sqlQ = 'UPDATE configs SET make_conf_checksum = %s, make_conf_text = %s, active = %s, config_error = %s WHERE id = %s'
+ for k, v in configsDict.iteritems():
+ params = [v['make_conf_checksum_tree'], v['make_conf_text'], v['active'], v['config_error'], k]
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def have_package_db(self, categories, package):
+ cursor = self.conn.cursor()
+ sqlQ ='SELECT package_id FROM packages WHERE category = %s AND package_name = %s'
+ params = categories, package
+ cursor.execute(sqlQ, params)
+ return cursor.fetchone()
+
+ def get_categories_db(self):
+ cursor = self.conn.cursor()
+ sqlQ =' SELECT category FROM categories'
+ cursor.execute(sqlQ)
+ return cursor.fetchall()
+
+ def get_categories_checksum_db(self, categories):
+ cursor = self.conn.cursor()
+ sqlQ =' SELECT metadata_xml_checksum FROM categories_meta WHERE category = %s'
+ cursor.execute(sqlQ, (categories,))
+ return cursor.fetchone()
+
+ def add_new_categories_meta_sql(self, categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree):
+ cursor = self.conn.cursor()
+ sqlQ = 'INSERT INTO categories_meta (category, metadata_xml_checksum, metadata_xml_text) VALUES ( %s, %s, %s )'
+ params = categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def update_categories_meta_sql(self, categories, categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree):
+ cursor = self.conn.cursor()
+ sqlQ ='UPDATE categories_meta SET metadata_xml_checksum = %s, metadata_xml_text = %s WHERE category = %s'
+ params = (categories_metadata_xml_checksum_tree, categories_metadata_xml_text_tree, categories)
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def add_new_manifest_sql(self, package_id, get_manifest_text, manifest_checksum_tree):
+ cursor = self.conn.cursor()
+ sqlQ = 'INSERT INTO manifest (package_id, manifest, checksum) VALUES ( %s, %s, %s )'
+ params = package_id, get_manifest_text, manifest_checksum_tree
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def add_new_package_metadata(self, package_id, package_metadataDict):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT changelog_checksum FROM packages_meta WHERE package_id = %s'
+ cursor.execute(sqlQ, (package_id,))
+ if cursor.fetchone() is None:
+ sqlQ = 'INSERT INTO packages_meta (package_id, changelog_text, changelog_checksum, metadata_text, metadata_checksum) VALUES ( %s, %s, %s, %s, %s )'
+ for k, v in package_metadataDict.iteritems():
+ params = package_id, v['changelog_text'], v['changelog_checksum'], v[' metadata_xml_text'], v['metadata_xml_checksum']
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def update_new_package_metadata(self, package_id, package_metadataDict):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT changelog_checksum, metadata_checksum FROM packages_meta WHERE package_id = %s'
+ cursor.execute(sqlQ, package_id)
+ entries = cursor.fetchone()
+ changelog_checksum_db = entries[0]
+ metadata_checksum_db = entries[1]
+ for k, v in package_metadataDict.iteritems():
+ if changelog_checksum_db != v['changelog_checksum']:
+ sqlQ = 'UPDATE packages_meta SET changelog_text = %s, changelog_checksum = %s WHERE package_id = %s'
+ params = v['changelog_text'], v['changelog_checksum'], package_id
+ cursor.execute(sqlQ, params)
+ if metadata_checksum_db != v['metadata_xml_checksum']:
+ sqlQ = 'UPDATE packages_meta SET metadata_text = %s, metadata_checksum = %s WHERE package_id = %s'
+ params = v[' metadata_xml_text'], v['metadata_xml_checksum'], package_id
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def get_manifest_db(self, package_id):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT checksum FROM manifest WHERE package_id = %s'
+ cursor.execute(sqlQ, (package_id,))
+ return cursor.fetchone()
+
+ def update_manifest_sql(self, package_id, get_manifest_text, manifest_checksum_tree):
+ cursor = self.conn.cursor()
+ sqlQ = 'UPDATE manifest SET checksum = %s, manifest = %s WHERE package_id = %s'
+ params = (manifest_checksum_tree, get_manifest_text, package_id)
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def add_new_metadata(self, metadataDict):
+ for k, v in metadataDict.iteritems():
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT updaterestrictions( %s, %s )'
+ params = k, v['restrictions']
+ cursor.execute(sqlQ, params)
+ sqlQ = 'SELECT updatekeywords( %s, %s )'
+ params = k, v['keyword']
+ cursor.execute(sqlQ, params)
+ sqlQ = 'SELECT updateiuse( %s, %s )'
+ params = k, v['iuse']
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def add_new_package_sql(self, packageDict):
+ # let's have a new cursor for each method as per best practice
+ cursor = self.conn.cursor()
+ sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
+ ebuild_id_list = []
+ package_id_list = []
+ for k, v in packageDict.iteritems():
+ params = [v['categories'], v['package'], v['ebuild_version_tree'], v['ebuild_version_revision'], v['ebuild_version_checksum_tree'],
+ v['ebuild_version_text'], v['ebuild_version_metadata_tree'][0], v['ebuild_version_metadata_tree'][1],
+ v['ebuild_version_metadata_tree'][12], v['ebuild_version_metadata_tree'][2], v['ebuild_version_metadata_tree'][3],
+ v['ebuild_version_metadata_tree'][5],v['ebuild_version_metadata_tree'][6], v['ebuild_version_metadata_tree'][7],
+ v['ebuild_version_metadata_tree'][9], v['ebuild_version_metadata_tree'][11],
+ v['ebuild_version_metadata_tree'][13],v['ebuild_version_metadata_tree'][14], v['ebuild_version_metadata_tree'][15],
+ v['ebuild_version_metadata_tree'][16]]
+ cursor.execute(sqlQ, params)
+ mid = cursor.fetchone()
+ mid=mid[0]
+ ebuild_id_list.append(mid[1])
+ package_id_list.append(mid[0])
+ self.conn.commit()
+ # add_new_metadata(metadataDict)
+ return ebuild_id_list, package_id_list
+
+ def add_new_ebuild_sql(packageDict, new_ebuild_list):
+ # let's have a new cursor for each method as per best practice
+ cursor = self.conn.cursor()
+ sqlQ="SELECT insert_ebuild( %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'True')"
+ ebuild_id_list = []
+ package_id_list = []
+ for k, v in packageDict.iteritems():
+ for x in new_ebuild_list:
+ if x == v['ebuild_version_tree']:
+ params = [v['categories'], v['package'], v['ebuild_version_tree'], v['ebuild_version_revision'], v['ebuild_version_checksum_tree'],
+ v['ebuild_version_text'], v['ebuild_version_metadata_tree'][0], v['ebuild_version_metadata_tree'][1],
+ v['ebuild_version_metadata_tree'][12], v['ebuild_version_metadata_tree'][2], v['ebuild_version_metadata_tree'][3],
+ v['ebuild_version_metadata_tree'][5],v['ebuild_version_metadata_tree'][6], v['ebuild_version_metadata_tree'][7],
+ v['ebuild_version_metadata_tree'][9], v['ebuild_version_metadata_tree'][11],
+ v['ebuild_version_metadata_tree'][13],v['ebuild_version_metadata_tree'][14], v['ebuild_version_metadata_tree'][15],
+ v['ebuild_version_metadata_tree'][16]]
+ cursor.execute(sqlQ, params)
+ mid = cursor.fetchone()
+ mid=mid[0]
+ ebuild_id_list.append(mid[1])
+ package_id_list.append(mid[0])
+ self.conn.commit()
+ # add_new_metadata(metadataDict)
+ return ebuild_id_list, package_id_list
+
+ def update_active_ebuild(self, package_id, ebuild_version_tree):
+ cursor = self.conn.cursor()
+ sqlQ ="UPDATE ebuilds SET active = 'False', timestamp = now() WHERE package_id = %s AND ebuild_version = %s AND active = 'True'"
+ cursor.execute(sqlQ, (package_id, ebuild_version_tree))
+ self.conn.commit()
+
+ def get_cpv_from_ebuild_id(self, ebuild_id):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT package_id FROM ebuild WHERE id = %s'
+ self.cursor.execute(sql, ebuild_id)
+ entries = self.cursor.fetchone()
+ return entries
+
+ def get_cp_from_package_id(self, package_id):
+ cursor = self.conn.cursor()
+ sqlQ = "SELECT ARRAY_TO_STRING(ARRAY[category, package_name] , '/') AS cp FROM packages WHERE package_id = %s"
+ cursor.execute(sqlQ, (package_id,))
+ return cursor.fetchone()
+
+ def add_new_package_buildqueue(self, ebuild_id, config_id, iuse_flags_list, use_enable, message):
+ cursor = self.conn.cursor()
+ sqlQ="SELECT insert_buildqueue( %s, %s, %s, %s, %s )"
+ if not iuse_flags_list:
+ iuse_flags_list=None
+ use_enable=None
+ params = ebuild_id, unicode(config_id), iuse_flags_list, use_enable, message
+ cursor.execute(sqlQ, params)
+ self.conn.commit()
+
+ def get_ebuild_checksum(self, package_id, ebuild_version_tree):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT ebuild_checksum FROM ebuilds WHERE package_id = %s AND ebuild_version = %s AND active = TRUE'
+ cursor.execute(sqlQ, (package_id, ebuild_version_tree))
+ entries = cursor.fetchone()
+ if entries is None:
+ return None
+ return entries[0]
+
+ def add_old_package(self, old_package_list):
+ cursor = self.conn.cursor()
+ mark_old_list = []
+ sqlQ = "UPDATE ebuilds SET active = 'FALSE', timestamp = NOW() WHERE package_id = %s AND active = 'TRUE' RETURNING package_id"
+ for old_package in old_package_list:
+ cursor.execute(sqlQ, (old_package[0],))
+ entries = cursor.fetchone()
+ if entries is not None:
+ mark_old_list.append(entries[0])
+ self.conn.commit()
+ return mark_old_list
+
+ def get_old_categories(self, categories_line):
+ cursor = self.conn.cursor()
+ sqlQ = "SELECT package_name FROM packages WHERE category = %s"
+ cursor.execute(sqlQ (categories_line))
+ return cursor.fetchone()
+
+ def del_old_categories(self, real_old_categoriess):
+ cursor = self.conn.cursor()
+ sqlQ = 'DELETE FROM categories categories_meta WHERE category = %s'
+ cursor.execute(sqlQ (real_old_categories))
+ self.conn.commit()
+
+ def add_old_ebuild(self, package_id, old_ebuild_list):
+ cursor = self.conn.cursor()
+ sqlQ1 = "UPDATE ebuilds SET active = 'FALSE' WHERE package_id = %s AND ebuild_version = %s"
+ sqlQ2 = "SELECT id FROM ebuilds WHERE package_id = %s AND ebuild_version = %s AND active = 'TRUE'"
+ sqlQ3 = "SELECT queue_id FROM buildqueue WHERE ebuild_id = %s"
+ sqlQ4 = 'DELETE FROM ebuildqueuedwithuses buildqueue WHERE queue_id = %s'
+ for old_ebuild in old_ebuild_list:
+ cursor.execute(sqlQ2, (package_id, old_ebuild[0]))
+ ebuild_id_list = cursor.fetchall()
+ if ebuild_id_list is not None:
+ for ebuild_id in ebuild_id_list:
+ cursor.execute(sqlQ3, (ebuild_id))
+ queue_id_list = cursor.fetchall()
+ if queue_id_list is not None:
+ for queue_id in queue_id_list:
+ cursor.execute(sqlQ4, (queue_id))
+ cursor.execute(sqlQ1, (package_id, old_ebuild[0]))
+ self.conn.commit()
+
+ def cp_all_old_db(self, old_package_id_list):
+ cursor = self.conn.cursor()
+ old_package_list = []
+ for old_package in old_package_id_list:
+ sqlQ = "SELECT package_id FROM ebuilds WHERE package_id = %s AND active = 'FALSE' AND date_part('days', NOW() - timestamp) < 60"
+ cursor.execute(sqlQ, old_package)
+ entries = cursor.fetchone()
+ if entries is None:
+ old_package_list.append(old_package)
+ return old_package_list
+ def cp_all_db(self):
+ cursor = self.conn.cursor()
+ sqlQ = "SELECT package_id FROM packages"
+ cursor.execute(sqlQ)
+ return cursor.fetchall()
+
+ def del_old_ebuild(self, ebuild_old_list_db):
+ cursor = self.conn.cursor()
+ sqlQ2 = 'SELECT build_id FROM buildlog WHERE ebuild_id = %s'
+ sqlQ3 = 'DELETE FROM qa_problems repoman_problems WHERE build_id = %s'
+ sqlQ4 = 'DELETE FROM ebuildhaveskeywords ebuildhavesiuses WHERE ebuild_id = %s'
+ sqlQ5 = 'DELETE FROM ebuildbuildwithuses WHERE build_id = %s'
+ sqlQ6 = 'DELETE FROM ebuildhavesrestrictions buildlog WHERE ebuild_id = %s'
+ for del_ebuild_old in ebuild_old_list_db:
+ cursor.execute(sqlQ2, (del_ebuild_old[0],))
+ build_id_list = cursor.fetchall()
+ for build_id in build_id_list:
+ cursor.execute(sqlQ3, (build_id,))
+ cursor.execute(sqlQ5, (build_id,))
+ cursor.execute(sqlQ4, (del_ebuild_old[0],))
+ cursor.execute(sqlQ6, (del_ebuild_old[0],))
+ self.conn.commit()
+
+ def del_old_package(self, package_id_list):
+ cursor = self.conn.cursor()
+ sqlQ1 = 'SELECT id FROM ebuilds WHERE package_id = %s'
+ sqlQ2 = 'SELECT build_id FROM buildlog WHERE ebuild_id = %s'
+ sqlQ3 = 'DELETE FROM qa_problems, repoman_problems, ebuildbuildwithuses WHERE build_id = %s'
+ sqlQ4 = 'DELETE FROM ebuildhaveskeywords, ebuildhavesiuses, ebuildhavesrestrictions, buildlog WHERE ebuild_id = %s'
+ sqlQ5 = 'DELETE FROM ebuilds, manifest, package_meta, packages WHERE package_id = %s'
+ for package_id in package_id_list:
+ cursor.execute(sqlQ1, package_id)
+ ebuild_id_list = cursor.fetchall()
+ for ebuild_id in ebuild_id_list:
+ cursor.execute(sqlQ2, ebuild_id)
+ build_id_list = cursor.fetchall()
+ for build_id in build_id_list:
+ cursor.execute(sqlQ3, build_id)
+ cursor.execute(sqlQ4, ebuild_id)
+ cursor.execute(sqlQ5, package_id)
+ self.conn.commit()
+
+ def cp_list_db(self, package_id):
+ cursor = self.conn.cursor()
+ sqlQ = "SELECT ebuild_version FROM ebuilds WHERE active = 'TRUE' AND package_id = %s"
+ cursor.execute(sqlQ, (package_id))
+ return cursor.fetchall()
+
+ def cp_list_old_db(self, package_id):
+ cursor = self.conn.cursor()
+ sqlQ ="SELECT id, ebuild_version FROM ebuilds WHERE active = 'FALSE' AND package_id = %s AND date_part('days', NOW() - timestamp) > 60"
+ cursor.execute(sqlQ, package_id)
+ return cursor.fetchall()
+
+ def add_qa_repoman(self, ebuild_id_list, qa_error, packageDict, config_id):
+ ebuild_i = 0
+ cursor = self.conn.cursor()
+ for k, v in packageDict.iteritems():
+ ebuild_id = ebuild_id_list[ebuild_i]
+ sqlQ = 'INSERT INTO buildlog (ebuild_id, config, error_summary, timestamp ) VALUES ( %s, %s, %s, now() ) RETURNING build_id'
+ if v['ebuild_error'] != [] or qa_error != []:
+ if v['ebuild_error'] != [] or qa_error == []:
+ summary = "Repoman"
+ elif v['ebuild_error'] == [] or qa_error != []:
+ summary = "QA"
+ else:
+ summary = "QA:Repoman"
+ params = (ebuild_id, config_id, summary)
+ cursor.execute(sqlQ, params)
+ build_id = cursor.fetchone()
+ if v['ebuild_error'] != []:
+ sqlQ = 'INSERT INTO repoman_problems (problem, build_id ) VALUES ( %s, %s )'
+ for x in v['ebuild_error']:
+ params = (x, build_id)
+ cursor.execute(sqlQ, params)
+ if qa_error != []:
+ sqlQ = 'INSERT INTO qa_problems (problem, build_id ) VALUES ( %s, %s )'
+ for x in qa_error:
+ params = (x, build_id)
+ cursor.execute(sqlQ, params)
+ ebuild_i = ebuild_i +1
+ self.conn.commit()
+
+ def get_arch_db(self):
+ cursor = self.conn.cursor()
+ sqlQ = 'SELECT keyword FROM keywords WHERE keyword = %s'
+ cursor.execute(sqlQ, ('ppc',))
+ return cursor.fetchone()
+
+ def add_new_arch_db(self, arch_list):
+ cursor = self.conn.cursor()
+ sqlQ = 'INSERT INTO keywords (keyword) VALUES ( %s )'
+ for arch in arch_list:
+ cursor.execute(sqlQ, (arch,))
+ self.conn.commit()
+
+ def closeconnection(self):
+ self.conn.close()
+
+class PgSQLPackageDB(SQLPackageDatabase):
+ """override for MySQL backend"""
+
+ syntax_placeholder = "%s"
+
+ def __init__(self, config=None):
+ # Do not complain about correct usage of ** magic
+ # pylint: disable-msg=W0142
+ SQLPackageDatabase.__init__(self)
+
+ if config is None or 'database' not in config:
+ print "No configuration available!"
+ sys.exit(1)
+ try:
+ import psycopg2
+ except ImportError:
+ print "Please install a recent version of dev-python/psycopg for Python"
+ sys.exit(1)
+ self.conn = psycopg2.connect(**config)
diff --git a/gobs/pym/readconf.py b/gobs/pym/readconf.py
new file mode 100644
index 0000000..24e8c0b
--- /dev/null
+++ b/gobs/pym/readconf.py
@@ -0,0 +1,45 @@
+import os
+import sys
+import re
+
+class get_conf_settings(object):
+# open the /etc/buildhost/buildhost.conf file and get the needed
+# settings for gobs
+ def __init__(self):
+ self.configfile = "/etc/buildhost/buildhost.conf"
+
+ def read_gobs_settings_all(self):
+ # It will return a dict with options from the configfile
+ try:
+ open_conffile = open(self.configfile, 'r')
+ except:
+ sys.exit("Fail to open config file:" + self.configfile)
+ textlines = open_conffile.readlines()
+ for line in textlines:
+ element = line.split('=')
+ if element[0] == 'SQLBACKEND': # Database backend
+ get_sql_backend = element[1]
+ if element[0] == 'SQLDB': # Database
+ get_sql_db = element[1]
+ if element[0] == 'SQLHOST': # Host
+ get_sql_host = element[1]
+ if element[0] == 'SQLUSER': # User
+ get_sql_user = element[1]
+ if element[0] == 'SQLPASSWD': # Password
+ get_sql_passwd = element[1]
+ # Buildhost root (dir for host/setup on host)
+ if element[0] == 'BUILDHOSTROOT':
+ get_buildhost_root = element[1]
+ # Buildhost setup (host/setup on guest)
+ if element[0] == 'BUILDCONFIG':
+ get_build_config = element[1]
+ open_conffile.close()
+ gobs_settings_dict = {}
+ gobs_settings_dict['sql_backend'] = get_sql_backend.rstrip('\n')
+ gobs_settings_dict['sql_db'] = get_sql_db.rstrip('\n')
+ gobs_settings_dict['sql_host'] = get_sql_host.rstrip('\n')
+ gobs_settings_dict['sql_user'] = get_sql_user.rstrip('\n')
+ gobs_settings_dict['sql_passwd'] = get_sql_passwd.rstrip('\n')
+ gobs_settings_dict['host_setup_root'] = get_buildhost_root.rstrip('\n')
+ gobs_settings_dict['guest_setup_root'] = get_build_config.rstrip('\n')
+ return gobs_settings_dict
diff --git a/gobs/pym/repoman.py b/gobs/pym/repoman.py
new file mode 100644
index 0000000..81e0aae
--- /dev/null
+++ b/gobs/pym/repoman.py
@@ -0,0 +1,47 @@
+import portage
+from portage import os, _encodings, _unicode_decode
+from portage import _unicode_encode
+from portage.exception import DigestException, FileNotFound, ParseError, PermissionDenied
+from _emerge.Package import Package
+from _emerge.RootConfig import RootConfig
+import run_checks from repoman.checks
+import codecs
+
+class gobs_repoman(object):
+
+ def __init__(self, mysettings, myportdb, database):
+ self.mysettings = mysettings
+ self.myportdb = myportdb
+ self.database = database
+
+ def check_repoman(self, categories, package, ebuild_version_tree, config_id):
+ # We run repoman run_checks on the ebuild
+ pkgdir = self.mysettings['PORTDIR'] + "/" + categories + "/" + package
+ full_path = pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild"
+ cpv = categories + "/" + package + "-" + ebuild_version_tree
+ root = '/'
+ trees = {
+ root : {'porttree' : portage.portagetree(root, settings=self.mysettings)}
+ }
+ root_config = RootConfig(self.mysettings, trees[root], None)
+ allvars = set(x for x in portage.auxdbkeys if not x.startswith("UNUSED_"))
+ allvars.update(Package.metadata_keys)
+ allvars = sorted(allvars)
+ myaux = dict(zip(allvars, self.myportdb.aux_get(cpv, allvars)))
+ pkg = Package(cpv=cpv, metadata=myaux, root_config=root_config)
+ fails = []
+ try:
+ # All ebuilds should have utf_8 encoding.
+ f = codecs.open(_unicode_encode(full_path,
+ encoding = _encodings['fs'], errors = 'strict'),
+ mode = 'r', encoding = _encodings['repo.content'])
+ try:
+ for check_name, e in run_checks(f, pkg):
+ fails.append(check_name + ": " + e)
+ finally:
+ f.close()
+ except UnicodeDecodeError:
+ # A file.UTF8 failure will have already been recorded above.
+ pass
+ # fails will have a list with repoman errors
+ return fails
\ No newline at end of file
diff --git a/gobs/pym/text.py b/gobs/pym/text.py
new file mode 100644
index 0000000..c3d53fc
--- /dev/null
+++ b/gobs/pym/text.py
@@ -0,0 +1,41 @@
+import sys
+import re
+import os
+import errno
+
+class gobs_text(object):
+
+ def get_file_text(self, filename):
+ # Return the filename contents
+ try:
+ textfile = open(filename)
+ except IOError, oe:
+ if oe.errno not in (errno.ENOENT, ):
+ raise
+ return "No file", filename
+ text = ""
+ for line in textfile:
+ text += unicode(line, 'utf-8')
+ textfile.close()
+ return text
+
+ def get_ebuild_text(self, filename):
+ """Return the ebuild contents"""
+ try:
+ ebuildfile = open(filename)
+ except IOError, oe:
+ if oe.errno not in (errno.ENOENT, ):
+ raise
+ return "No Ebuild file there"
+ text = ""
+ dataLines = ebuildfile.readlines()
+ for i in dataLines:
+ text = text + i + " "
+ line2 = dataLines[2]
+ field = line2.split(" ")
+ ebuildfile.close()
+ try:
+ cvs_revision = field[3]
+ except:
+ cvs_revision = ''
+ return text, cvs_revision
^ permalink raw reply related [flat|nested] 32+ messages in thread
end of thread, other threads:[~2012-12-27 23:09 UTC | newest]
Thread overview: 32+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2011-04-24 22:13 [gentoo-commits] dev/zorry:master commit in: gobs/bin/, gobs/pym/ Magnus Granberg
-- strict thread matches above, loose matches on Subject: below --
2012-12-27 23:09 Magnus Granberg
2012-12-23 17:09 Magnus Granberg
2012-12-22 2:59 Magnus Granberg
2012-12-21 1:44 Magnus Granberg
2012-12-19 2:11 Magnus Granberg
2012-12-07 14:07 Magnus Granberg
2012-12-05 23:56 Magnus Granberg
2012-11-29 22:22 Magnus Granberg
2012-07-17 0:07 Magnus Granberg
2012-06-26 22:10 Magnus Granberg
2012-05-13 17:59 Magnus Granberg
2012-05-09 23:12 Magnus Granberg
2012-05-06 10:41 Magnus Granberg
2012-05-04 22:32 Magnus Granberg
2012-05-01 1:12 Magnus Granberg
2012-04-30 12:08 Magnus Granberg
2012-04-29 14:43 Magnus Granberg
2012-04-29 13:16 Magnus Granberg
2012-04-29 13:13 Magnus Granberg
2012-04-28 16:01 Magnus Granberg
2012-04-27 22:22 Magnus Granberg
2012-04-27 20:59 Magnus Granberg
2011-11-01 21:14 Magnus Granberg
2011-10-29 0:14 Magnus Granberg
2011-09-28 10:53 Magnus Granberg
2011-09-28 0:33 Magnus Granberg
2011-09-26 23:25 Magnus Granberg
2011-08-31 1:46 Magnus Granberg
2011-08-30 23:02 Magnus Granberg
2011-07-31 13:43 Magnus Granberg
2011-04-23 14:23 Magnus Granberg
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox