* [gentoo-commits] repo/user/gerislay:master commit in: media-sound/rosegarden/, net-news/rssguard/, media-video/dvdstyler/files/, ...
From: gerion @ 2018-07-13 21:40 UTC
  To: gentoo-commits

commit:     b6fa82c3c387c0f9a12a0671ebff55b75d9133ac
Author:     Gerion Entrup <gerion.entrup <AT> flump <DOT> de>
AuthorDate: Fri Jul 13 21:25:47 2018 +0000
Commit:     gerion <gerion.entrup <AT> flump <DOT> de>
CommitDate: Fri Jul 13 21:25:47 2018 +0000
URL:        https://gitweb.gentoo.org/repo/user/gerislay.git/commit/?id=b6fa82c3

Clean up a bunch of old stuff

 app-arch/hardlink/ChangeLog                        |   13 -
 app-arch/hardlink/Manifest                         |    4 -
 app-arch/hardlink/hardlink-0.1.2.ebuild            |   17 -
 app-arch/hardlink/metadata.xml                     |    7 -
 app-text/calibre/Manifest                          |   10 -
 app-text/calibre/calibre-3.26.1.ebuild             |  293 -
 ...libre-2.83.0-lzxd-bounds-error-bug-540596.patch |   37 -
 .../files/calibre-2.9.0-no_updates_dialog.patch    |   27 -
 .../calibre/files/calibre-disable_plugins.patch    |   17 -
 app-text/calibre/files/calibre-server-3.conf       |   13 -
 app-text/calibre/files/calibre-server-3.init       |   58 -
 app-text/calibre/files/calibre-server.conf         |   13 -
 app-text/calibre/files/calibre-server.init         |   58 -
 app-text/calibre/metadata.xml                      |   13 -
 dev-libs/jansson/Manifest                          |    2 -
 dev-libs/jansson/jansson-2.7-r1.ebuild             |   38 -
 dev-python/PyPDF2/ChangeLog                        |  108 -
 dev-python/PyPDF2/ChangeLog-2015                   |   21 -
 dev-python/PyPDF2/Manifest                         |    5 -
 dev-python/PyPDF2/PyPDF2-1.26.0.ebuild             |   31 -
 dev-python/PyPDF2/metadata.xml                     |   12 -
 dev-python/flask-login/Manifest                    |    4 -
 .../flask-login-0.3.2-fix-tests-python2.patch      |   29 -
 dev-python/flask-login/flask-login-0.4.0.ebuild    |   32 -
 dev-python/flask-login/metadata.xml                |   11 -
 dev-python/imdbpy/ChangeLog                        |   95 -
 dev-python/imdbpy/Manifest                         |    6 -
 .../imdbpy/files/imdbpy-4.6-data_location.patch    |   11 -
 dev-python/imdbpy/files/updateToPython3.patch      | 6966 --------------------
 dev-python/imdbpy/imdbpy-4.9-r2.ebuild             |   44 -
 dev-python/imdbpy/metadata.xml                     |    8 -
 media-gfx/blender/Manifest                         |    5 -
 media-gfx/blender/blender-2.78a-r2.ebuild          |  295 -
 .../blender/files/blender-2.78-eigen-3.3.1.patch   |   25 -
 .../blender/files/blender-fix-install-rules.patch  |   16 -
 media-gfx/blender/metadata.xml                     |  101 -
 media-sound/patchage/Manifest                      |    8 +-
 ...ge-1.0.0-r1.ebuild => patchage-1.0.0-r2.ebuild} |    0
 media-sound/rosegarden/Manifest                    |    3 -
 media-sound/rosegarden/metadata.xml                |   11 -
 media-sound/rosegarden/rosegarden-17.04-r1.ebuild  |   60 -
 media-video/dvdstyler/ChangeLog                    |  276 -
 media-video/dvdstyler/Manifest                     |    7 -
 media-video/dvdstyler/dvdstyler-2.3.ebuild         |   65 -
 .../dvdstyler/files/dvdstyler-1.7.4-autoconf.patch |   77 -
 .../dvdstyler/files/dvdstyler-1.8.1-cast.patch     |   11 -
 .../files/dvdstyler-1.8.1-fix_enum_error.patch     |   11 -
 media-video/dvdstyler/metadata.xml                 |    5 -
 net-news/rssguard/Manifest                         |    3 -
 net-news/rssguard/rssguard-3.4.0.ebuild            |   39 -
 net-news/rssguard/rssguard-9999.ebuild             |   39 -
 51 files changed, 4 insertions(+), 9056 deletions(-)

diff --git a/app-arch/hardlink/ChangeLog b/app-arch/hardlink/ChangeLog
deleted file mode 100644
index 7e6f7cb..0000000
--- a/app-arch/hardlink/ChangeLog
+++ /dev/null
@@ -1,13 +0,0 @@
-# ChangeLog for app-arch/hardlink
-# Copyright 1999-2009 Gentoo Foundation; Distributed under the GPL v2
-# $Header: /var/cvsroot/gentoo-x86/app-arch/hardlink/ChangeLog,v 1.2 2009/10/23 09:18:39 robbat2 Exp $
-
-  23 Oct 2009; Robin H. Johnson <robbat2@gentoo.org> hardlink-0.1.1.ebuild:
-  Build fixup.
-
-*hardlink-0.1.1 (22 Oct 2009)
-
-  22 Oct 2009; Robin H. Johnson <robbat2@gentoo.org> +hardlink-0.1.1.ebuild,
-  +metadata.xml:
-  Initial commit. Ebuild by Robin H. Johnson <robbat2@gentoo.org>.
-

diff --git a/app-arch/hardlink/Manifest b/app-arch/hardlink/Manifest
deleted file mode 100644
index 72c6d18..0000000
--- a/app-arch/hardlink/Manifest
+++ /dev/null
@@ -1,4 +0,0 @@
-DIST hardlink_0.1.2.tar.gz 6840 RMD160 c5d10211cdcff03617e50fadc49d6e5e91454f4c SHA1 55fa3d69d48c059315eaaf2df5621ecd1d0c1ac5 SHA256 76181794b41c4dbdb418a91d18c5d5e627df1ace7799e088685632e20fc60d1c
-EBUILD hardlink-0.1.2.ebuild 531 RMD160 b2475e6ce15a1d47674cf7bb8e576d00c09c0b30 SHA1 af12f406e3149f8144b20dcd7c839827d0a1dc6e SHA256 3e366009d4f58b9c605bf2c01dccbad85430cde785e0b3f31d061f2dc46cd6d9
-MISC ChangeLog 493 RMD160 93d6ffdf6c913cba97153d5bca2d2ede8959a315 SHA1 1193df31873234cb55e867e3e05b4f2441fb56b6 SHA256 22157bf388bcf39a290b01702f5f89501fdbe7a8d3d505c5dd06f5cb8efeb039
-MISC metadata.xml 202 RMD160 ca69c6e7044d6681415f1e86a1c084f890ff08ef SHA1 62ab18bf0e1da311494ca0912e5a79daeceb46ad SHA256 76dbb4a720140d78f0ddfb2b2782c03852169c201c1f507eb17ef4d2a82f212a

diff --git a/app-arch/hardlink/hardlink-0.1.2.ebuild b/app-arch/hardlink/hardlink-0.1.2.ebuild
deleted file mode 100644
index 541b7a2..0000000
--- a/app-arch/hardlink/hardlink-0.1.2.ebuild
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 1999-2009 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo-x86/app-arch/hardlink/hardlink-0.1.1.ebuild,v 1.2 2009/10/23 09:18:39 robbat2 Exp $
-
-DESCRIPTION="replace file copies using hardlinks"
-HOMEPAGE="http://jak-linux.org/projects/hardlink/"
-SRC_URI="${HOMEPAGE}/${P/-/_}.tar.gz"
-LICENSE="MIT"
-SLOT="0"
-KEYWORDS="~x86 ~amd64"
-IUSE=""
-DEPEND=""
-RDEPEND="dev-lang/python"
-
-src_install() {
-	emake DESTDIR="${D}" install || die "Failed emake install"
-}

diff --git a/app-arch/hardlink/metadata.xml b/app-arch/hardlink/metadata.xml
deleted file mode 100644
index c6d8628..0000000
--- a/app-arch/hardlink/metadata.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
-<pkgmetadata>
-<maintainer>
-  <email>robbat2@gentoo.org</email>
-</maintainer>
-</pkgmetadata>

diff --git a/app-text/calibre/Manifest b/app-text/calibre/Manifest
deleted file mode 100644
index b90e137..0000000
--- a/app-text/calibre/Manifest
+++ /dev/null
@@ -1,10 +0,0 @@
-AUX calibre-2.83.0-lzxd-bounds-error-bug-540596.patch 1296 BLAKE2B a600a8777868b7f73c95d62de9097382cb1d3e076ea756b589811f5e33aa86f1724f93177d82247132a85779ff82960f178cc325b497f8b7f1391ff113729c69 SHA512 fba16c95a69b38113d4331faceae4a81f2319ae1631a03662bd3483ad6f1b687d16db28f9df019990a3bc6918c75f4a4492ca7ef772168a575cb06ff0761bddf
-AUX calibre-2.9.0-no_updates_dialog.patch 1661 BLAKE2B c75fc49a621e8dbd16ee1bad748110399cf09a404b4a905d3f723bac1827787e4749f464ba026700b6e5b3cc0ee646a92de00f1f58d10cf12dc9bc91195ee8b6 SHA512 9663b95ed64bdc2bc40692922384d1c6073177eee58a49c412883c4d2ae098e4e0b4ea51b80443108b7c0c3e4f3fda60c00fc3be4d0b62a5d79d982697927235
-AUX calibre-disable_plugins.patch 1042 BLAKE2B 92a56016c2c54f1b156bc91031420594445545b98b701f7cce99cf4bb86847eebad1ccebdc20a0d1b67f9fa88a9250fc4926d7c04cb36405323388b3171cf502 SHA512 c152ddd92728a89db10c75a4d00a968bf119bef68da8b9116c76827e3cdc8f8a7e5c45fbb973f6ca8db1f79e461518351ce2d47e5e059c282f36d2df499d1629
-AUX calibre-server-3.conf 541 BLAKE2B 06593633721e6adf2cf2077dffa83953eea46ccdcdc857ad468a9b74788326e9424f6ab9058be344dbbac8e49d343f5a1e53ddb486f2018b77319a7f8f713cf4 SHA512 12ef793a5b6ffd4413f780a13cad63927a95f15d17d85d4229eb5005ead68e51779abb822f9898ab5ae8c2094affeec1f9e85c34f109499739e6726b7641542a
-AUX calibre-server-3.init 1796 BLAKE2B f87d785d6f6fc10fa648d40944ec8538027854bb872d9a3be6b5b35b0abf3cda1e4e93874a9422914da5eb4287f193459733956e3f1e25e61bec0c214c552c99 SHA512 88110ded1b9c6cf8d3bfc3d5889654f77578f9d299e88daea9023767b930082e00fbddbb92a6e43c46d900248f3684232d93396ec1d0e784c7ec14b44783f98a
-AUX calibre-server.conf 523 BLAKE2B a9fb65a327656f9b2f54eab27dcaf2cdfbcbe5f0d637204ea39f891c515ae899da156098609bc99398354337524843afbf45409bbb1098605f293661bb8381e7 SHA512 4595786d9b6ed7662a1124e8acc32e03e0149f614a07890f9df08d05811a1b8de51cc33cc1bfbf30b072c7ad75dc3b8247e9de53d20ee2b45017cb4160437058
-AUX calibre-server.init 1811 BLAKE2B fe22257128896d4299c5d5edab6ac63cdcf79da483f4afc49b0d1649b17599389dd4b9f88c35a964e289cbe6d961136f1e5760a2244137c404e573510899bd60 SHA512 760aa7a8f51b23b116ba07749855737869ff2313f0528d67e5a46bc362e5c0a04a7e1f46c32dd6e82add4fa5dc41d397db32dbd1a09c912008e0890d1a133eeb
-DIST calibre-3.26.1.tar.xz 39109660 BLAKE2B 0e98c273b8a5dfafea7a7027de3f83ad25ab835edadedf78b7e9bc356bcac8937d915944f2ab6503b414c49b4e792e090e7bd2433a4e86373bf115720ed78b0a SHA512 893e36b101defaca29281b4bd072aafc1c4cb20a9cd3ee06a0b68fbe6b39cab34952799939ac4f54c77148c87861c5ab4ddff84f5ec8c2274ae7fa6424259ff5
-EBUILD calibre-3.26.1.ebuild 9280 BLAKE2B ef7557d2f7416658a93850dd09766fbb2e784c512c8fa7296137df0b0737afc151ba22c285fa2b4de49b726cdf3e58a1956757fccc9295ce1b5e40f5c2b615cd SHA512 035dc539a854fa30d2216c6bb66b69f5878c6f1e34cd05a6c4d50afbe00aa15e6450666961201abbff3317febea405ac28f8e11d4ad7ab392eb4327aff34196e
-MISC metadata.xml 382 BLAKE2B 87fa7e63a6ed7a4ea55247b362288b43f7edd312cc3085bb8c1b947402ae4aa0df01fac4f6646d260653ff8af7fe28d3dabb8a213dbf3e206181b69835b33d5f SHA512 8503e0a4a48d93682c386eb1d6507b4b26585afc9d62b7cd52bc00b457a887bd17422a03669ff404570ff7f5ff6f0bba14ee935979f8b54722870d6620097de5

diff --git a/app-text/calibre/calibre-3.26.1.ebuild b/app-text/calibre/calibre-3.26.1.ebuild
deleted file mode 100644
index c8832bf..0000000
--- a/app-text/calibre/calibre-3.26.1.ebuild
+++ /dev/null
@@ -1,293 +0,0 @@
-# Copyright 1999-2018 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=6
-
-PYTHON_COMPAT=( python2_7 )
-PYTHON_REQ_USE="sqlite,ssl"
-
-inherit eutils bash-completion-r1 gnome2-utils multilib toolchain-funcs python-single-r1 xdg-utils
-
-DESCRIPTION="Ebook management application"
-HOMEPAGE="https://calibre-ebook.com/"
-SRC_URI="https://download.calibre-ebook.com/${PV}/${P}.tar.xz"
-
-LICENSE="
-	GPL-3+
-	GPL-3
-	GPL-2+
-	GPL-2
-	GPL-1+
-	LGPL-3+
-	LGPL-2.1+
-	LGPL-2.1
-	BSD
-	MIT
-	Old-MIT
-	Apache-2.0
-	public-domain
-	|| ( Artistic GPL-1+ )
-	CC-BY-3.0
-	OFL-1.1
-	PSF-2
-	unRAR
-"
-KEYWORDS="~amd64"
-SLOT="0"
-IUSE="ios +udisks"
-
-REQUIRED_USE="${PYTHON_REQUIRED_USE}"
-
-COMMON_DEPEND="${PYTHON_DEPS}
-	>=app-text/podofo-0.8.2:=
-	>=app-text/poppler-0.26.5[qt5]
-	>=dev-libs/chmlib-0.40:=
-	dev-libs/glib:2=
-	>=dev-libs/icu-57.1:=
-	dev-libs/libinput:=
-	>=dev-python/apsw-3.13.0[${PYTHON_USEDEP}]
-	>=dev-python/beautifulsoup-3.0.5:python-2[${PYTHON_USEDEP}]
-	dev-python/chardet[${PYTHON_USEDEP}]
-	>=dev-python/cssselect-0.7.1[${PYTHON_USEDEP}]
-	>=dev-python/cssutils-1.0.1[${PYTHON_USEDEP}]
-	>=dev-python/dbus-python-1.2.4[${PYTHON_USEDEP}]
-	>=dev-libs/dbus-glib-0.106
-	>=sys-apps/dbus-1.10.8
-	dev-python/html5-parser[${PYTHON_USEDEP}]
-	>=dev-python/lxml-3.2.1[${PYTHON_USEDEP}]
-	>=dev-python/mechanize-0.2.5[${PYTHON_USEDEP}]
-	dev-python/msgpack[${PYTHON_USEDEP}]
-	dev-python/netifaces[${PYTHON_USEDEP}]
-	dev-python/pillow[${PYTHON_USEDEP}]
-	dev-python/psutil[${PYTHON_USEDEP}]
-	>=dev-python/pygments-2.1.3[${PYTHON_USEDEP}]
-	>=dev-python/python-dateutil-2.5.3[${PYTHON_USEDEP}]
-	>=dev-python/PyQt5-5.8[gui,svg,webkit,widgets,network,printsupport,${PYTHON_USEDEP}]
-	dev-python/regex[${PYTHON_USEDEP}]
-	dev-qt/qtcore:5=
-	dev-qt/qtdbus:5=
-	dev-qt/qtgui:5=
-	dev-qt/qtwidgets:5=
-	dev-util/desktop-file-utils
-	dev-util/gtk-update-icon-cache
-	media-fonts/liberation-fonts
-	media-libs/fontconfig:=
-	>=media-libs/freetype-2:=
-	>=media-libs/libmtp-1.1.11:=
-	>=media-libs/libwmf-0.2.8
-	>=media-gfx/optipng-0.7.6
-	sys-libs/zlib:=
-	virtual/libusb:1=
-	virtual/python-dnspython[${PYTHON_USEDEP}]
-	x11-libs/libxkbcommon:=
-	x11-libs/libX11:=
-	x11-libs/libXext:=
-	x11-libs/libXrender:=
-	x11-misc/shared-mime-info
-	>=x11-misc/xdg-utils-1.0.2-r2
-	ios? (
-		>=app-pda/usbmuxd-1.0.8
-		>=app-pda/libimobiledevice-1.2.0
-	)
-	udisks? ( virtual/libudev )"
-RDEPEND="${COMMON_DEPEND}
-	udisks? ( || ( sys-fs/udisks:2 sys-fs/udisks:0 ) )"
-DEPEND="${COMMON_DEPEND}
-	>=dev-python/setuptools-23.1.0[${PYTHON_USEDEP}]
-	>=virtual/podofo-build-0.9.4
-	virtual/pkgconfig"
-
-pkg_pretend() {
-	if [[ ${MERGE_TYPE} != binary && $(gcc-major-version) -lt 6 ]]; then
-		eerror "Calibre cannot be built with this version of gcc."
-		eerror "You need at least gcc-6.0"
-		die "Your C compiler is too old for this package."
-	fi
-}
-
-src_prepare() {
-	# no_updates: do not annoy user with "new version is availible all the time
-	# disable_plugins: walking sec-hole, wait for upstream to use GHNS interface
-	eapply \
-		"${FILESDIR}/${PN}-2.9.0-no_updates_dialog.patch" \
-		"${FILESDIR}/${PN}-disable_plugins.patch"
-
-	eapply_user
-
-	# Fix outdated version constant.
-	#sed -e "s#\\(^numeric_version =\\).*#\\1 (${PV//./, })#" \
-	#	-i src/calibre/constants.py || \
-	#	die "sed failed to patch constants.py"
-
-	# Avoid sandbox violation in /usr/share/gnome/apps when linux.py
-	# calls xdg-* (bug #258938).
-	sed -e "s|'xdg-desktop-menu', 'install'|\\0, '--mode', 'user'|" \
-		-e "s|check_call(\\['xdg-desktop-menu', 'forceupdate'\\])|#\\0|" \
-		-e "s|\\(CurrentDir(tdir)\\), \\\\\$|\\1:|" \
-		-e "s|, PreserveMIMEDefaults():|:|" \
-		-e "s|'xdg-icon-resource', 'install'|\\0, '--mode', 'user'|" \
-		-e "s|cmd\[2\]|cmd[4]|" \
-		-e "s|cc(\\['xdg-desktop-menu', 'forceupdate'\\])|#\\0|" \
-		-e "s|'xdg-mime', 'install'|\\0, '--mode', 'user'|" \
-		-i src/calibre/linux.py || die "sed failed to patch linux.py"
-
-	# Disable unnecessary privilege dropping for bug #287067.
-	sed -e "s:if os.geteuid() == 0:if False and os.geteuid() == 0:" \
-		-i setup/install.py || die "sed failed to patch install.py"
-
-	sed -e "/^                self.check_call(\\[QMAKE\\] + qmc + \\[proname\\])$/a\
-\\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ self.check_call(['sed', \
-'-e', 's|^CFLAGS .*|\\\\\\\\0 ${CFLAGS}|', \
-'-e', 's|^CXXFLAGS .*|\\\\\\\\0 ${CXXFLAGS}|', \
-'-e', 's|^LFLAGS .*|\\\\\\\\0 ${LDFLAGS}|', \
-'-i', 'Makefile'])" \
-		-i setup/build.py || die "sed failed to patch build.py"
-
-	# use system beautifulsoup, instead of bundled
-	rm -f "${S}"/src/calibre/ebooks/BeautifulSoup.py \
-		|| die "could not remove bundled beautifulsoup"
-	find "${S}" -type f -name \*.py -exec \
-		sed -e 's/calibre.ebooks.BeautifulSoup/BeautifulSoup/' -i {} + \
-		|| die "could not sed bundled beautifulsoup out of the source tree"
-
-	# avoid failure of xdg tools to recognize vendor prefix
-	sed -e "s|xdg-icon-resource install|xdg-icon-resource install --novendor|" \
-		-e "s|'xdg-mime', 'install'|'xdg-mime', 'install', '--novendor'|" \
-		-e "s|'xdg-desktop-menu', 'install'|'xdg-desktop-menu', 'install', '--novendor'|" \
-		-i "${S}"/src/calibre/linux.py || die 'sed failed'
-
-	# don't create/install uninstaller
-	sed '/self\.create_uninstaller()/d' -i src/calibre/linux.py || die
-}
-
-src_install() {
-	# Bypass kbuildsycoca and update-mime-database in order to
-	# avoid sandbox violations if xdg-mime tries to call them.
-	cat - > "${T}/kbuildsycoca" <<-EOF
-	#!${BASH}
-	echo $0 : $@
-	exit 0
-	EOF
-
-	cp "${T}"/{kbuildsycoca,update-mime-database} || die
-	chmod +x "${T}"/{kbuildsycoca,update-mime-database} || die
-
-	export QMAKE="${EPREFIX}/usr/$(get_libdir)/qt5/bin/qmake"
-
-	# Unset DISPLAY in order to prevent xdg-mime from triggering a sandbox
-	# violation with kbuildsycoca as in bug #287067, comment #13.
-	export -n DISPLAY
-
-	# Bug #352625 - Some LANGUAGE values can trigger the following ValueError:
-	#   File "/usr/lib/python2.6/locale.py", line 486, in getdefaultlocale
-	#    return _parse_localename(localename)
-	#  File "/usr/lib/python2.6/locale.py", line 418, in _parse_localename
-	#    raise ValueError, 'unknown locale: %s' % localename
-	#ValueError: unknown locale: 46
-	export -n LANGUAGE
-
-	# Bug #295672 - Avoid sandbox violation in ~/.config by forcing
-	# variables to point to our fake temporary $HOME.
-	export HOME="${T}/fake_homedir"
-	export XDG_CONFIG_HOME="${HOME}/.config"
-	export XDG_DATA_HOME="${HOME}/.local/share"
-	export CALIBRE_CONFIG_DIRECTORY="${XDG_CONFIG_HOME}/calibre"
-	mkdir -p "${XDG_DATA_HOME}" "${CALIBRE_CONFIG_DIRECTORY}" || die
-
-	tc-export CC CXX
-	# Bug #334243 - respect LDFLAGS when building extensions
-	export OVERRIDE_CFLAGS="$CFLAGS" OVERRIDE_LDFLAGS="$LDFLAGS"
-	local libdir=$(get_libdir)
-	[[ -n $libdir ]] || die "get_libdir returned an empty string"
-
-	# Bug #472690 - Avoid sandbox violation for /dev/dri/card0.
-	local x
-	for x in /dev/dri/card[0-9] ; do
-		[[ -e ${x} ]] && addpredict ${x}
-	done
-
-	#dodir "/usr/$(get_libdir)/python2.7/site-packages" # for init_calibre.py
-	#dodir $(python_get_sitedir)
-	PATH=${T}:${PATH} PYTHONPATH=${S}/src${PYTHONPATH:+:}${PYTHONPATH} \
-	"${PYTHON}" setup.py install \
-		--root="${D}" \
-		--prefix="${EPREFIX}/usr" \
-		--libdir="${EPREFIX}/usr/${libdir}" \
-		--staging-root="${ED}usr" \
-		--staging-libdir="${ED}usr/${libdir}" || die
-
-	# The menu entries end up here due to '--mode user' being added to
-	# xdg-* options in src_prepare.
-	dodir /usr/share/mime/packages
-	chmod -fR a+rX,u+w,g-w,o-w "${HOME}"/.local
-	mv "${HOME}"/.local/share/mime/packages/* "${ED}"usr/share/mime/packages/ ||
-		die "failed to register mime types"
-	dodir /usr/share/icons
-	mv "${HOME}"/.local/share/icons/* "${ED}"usr/share/icons/ ||
-		die "failed to install icon files"
-
-	domenu "${HOME}"/.local/share/applications/*.desktop ||
-		die "failed to install .desktop menu files"
-
-	find "${ED}"usr/share -type d -empty -delete
-
-	cd "${ED}"/usr/share/calibre/fonts/liberation || die
-	local x
-	for x in * ; do
-		[[ -f ${EPREFIX}usr/share/fonts/liberation-fonts/${x} ]] || continue
-		ln -sf "../../../fonts/liberation-fonts/${x}" "${x}" || die
-	done
-
-	einfo "Converting python shebangs"
-	python_fix_shebang "${ED}"
-
-	einfo "Compiling python modules"
-	python_optimize "${ED}"usr/lib/calibre
-
-	newinitd "${FILESDIR}"/calibre-server-3.init calibre-server
-	newconfd "${FILESDIR}"/calibre-server-3.conf calibre-server
-
-	bashcomp_alias calibre \
-		lrfviewer \
-		calibre-debug \
-		ebook-meta \
-		calibre-server \
-		ebook-viewer \
-		ebook-polish \
-		fetch-ebook-metadata \
-		lrf2lrs \
-		ebook-convert \
-		ebook-edit \
-		calibre-smtp \
-		ebook-device
-
-}
-
-pkg_preinst() {
-	gnome2_icon_savelist
-	# Indentify stray directories from upstream's "Binary install"
-	# method (see bug 622728).
-	CALIBRE_LIB_DIR=/usr/$(get_libdir)/calibre
-	CALIBRE_LIB_CONTENT=$(for x in "${ED%/}${CALIBRE_LIB_DIR}"/*; do
-		printf -- "${x##*/} "; done) || die "Failed to list ${ED%/}${CALIBRE_LIB_DIR}"
-}
-
-pkg_postinst() {
-	[[ -n ${CALIBRE_LIB_DIR} ]] || die "CALIBRE_LIB_DIR is unset"
-	local x
-	for x in "${EROOT%/}${CALIBRE_LIB_DIR}"/*; do
-		if [[ " ${CALIBRE_LIB_CONTENT} " != *" ${x##*/} "* ]]; then
-			elog "Purging '${x}'"
-			rm -rf "${x}"
-		fi
-	done
-	xdg_desktop_database_update
-	xdg_mimeinfo_database_update
-	gnome2_icon_cache_update
-}
-
-pkg_postrm() {
-	xdg_desktop_database_update
-	xdg_mimeinfo_database_update
-	gnome2_icon_cache_update
-}

diff --git a/app-text/calibre/files/calibre-2.83.0-lzxd-bounds-error-bug-540596.patch b/app-text/calibre/files/calibre-2.83.0-lzxd-bounds-error-bug-540596.patch
deleted file mode 100644
index 5f7d5a4..0000000
--- a/app-text/calibre/files/calibre-2.83.0-lzxd-bounds-error-bug-540596.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From f335c8719b224d3ca7a967b6e91cebd5b26684fe Mon Sep 17 00:00:00 2001
-From: Zac Medico <zmedico@gentoo.org>
-Date: Sun, 23 Apr 2017 16:13:00 -0700
-Subject: [PATCH] Fix bounds error in lzxd_static_init
-
-https://bugs.gentoo.org/show_bug.cgi?id=540596
-https://github.com/kovidgoyal/calibre/pull/650
-
-This includes the changes from the following upstream commits:
-
-https://github.com/kyz/libmspack/commit/6a42ddd1d472afeaf0f7da91e16b60ab2063fb92
-https://github.com/kyz/libmspack/commit/ce3cc03aa500dd9c0b6b820f9519f6b6b9dede05
----
- src/calibre/utils/lzx/lzxd.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/src/calibre/utils/lzx/lzxd.c b/src/calibre/utils/lzx/lzxd.c
-index e683a9e..c531aaa 100644
---- a/src/calibre/utils/lzx/lzxd.c
-+++ b/src/calibre/utils/lzx/lzxd.c
-@@ -357,11 +357,12 @@ static unsigned char extra_bits[51];
- static void lzxd_static_init(void) {
-   int i, j;
- 
--  for (i = 0, j = 0; i < 51; i += 2) {
-+  for (i = 0, j = 0; i < 50; i += 2) {
-     extra_bits[i]   = j; /* 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7... */
-     extra_bits[i+1] = j;
-     if ((i != 0) && (j < 17)) j++; /* 0,0,1,2,3,4...15,16,17,17,17,17... */
-   }
-+  extra_bits[50] = 17;
- 
-   for (i = 0, j = 0; i < 51; i++) {
-     position_base[i] = j; /* 0,1,2,3,4,6,8,12,16,24,32,... */
--- 
-2.10.2
-

diff --git a/app-text/calibre/files/calibre-2.9.0-no_updates_dialog.patch b/app-text/calibre/files/calibre-2.9.0-no_updates_dialog.patch
deleted file mode 100644
index 4d37c3b..0000000
--- a/app-text/calibre/files/calibre-2.9.0-no_updates_dialog.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-diff -burN calibre-2.9.0.orig/src/calibre/gui2/main.py calibre-2.9.0/src/calibre/gui2/main.py
---- calibre-2.9.0.orig/src/calibre/gui2/main.py	2014-11-09 20:09:54.081231882 +0800
-+++ calibre-2.9.0/src/calibre/gui2/main.py	2014-11-09 20:15:48.193033844 +0800
-@@ -37,8 +37,9 @@
-                       help=_('Start minimized to system tray.'))
-     parser.add_option('-v', '--verbose', default=0, action='count',
-                       help=_('Ignored, do not use. Present only for legacy reasons'))
--    parser.add_option('--no-update-check', default=False, action='store_true',
--            help=_('Do not check for updates'))
-+    parser.add_option('--update-check', dest='no_update_check', default=True,
-+            action='store_false',
-+            help=_('Check for updates'))
-     parser.add_option('--ignore-plugins', default=False, action='store_true',
-             help=_('Ignore custom plugins, useful if you installed a plugin'
-                 ' that is preventing calibre from starting'))
-diff -burN calibre-2.9.0.orig/src/calibre/gui2/update.py calibre-2.9.0/src/calibre/gui2/update.py
---- calibre-2.9.0.orig/src/calibre/gui2/update.py	2014-11-09 20:09:54.082231864 +0800
-+++ calibre-2.9.0/src/calibre/gui2/update.py	2014-11-09 20:17:49.954767115 +0800
-@@ -154,6 +154,8 @@
-             self.update_checker.signal.update_found.connect(self.update_found,
-                     type=Qt.QueuedConnection)
-             self.update_checker.start()
-+        else:
-+            self.update_checker = None
- 
-     def recalc_update_label(self, number_of_plugin_updates):
-         self.update_found(self.last_newest_calibre_version, number_of_plugin_updates)

diff --git a/app-text/calibre/files/calibre-disable_plugins.patch b/app-text/calibre/files/calibre-disable_plugins.patch
deleted file mode 100644
index 9ef1dd0..0000000
--- a/app-text/calibre/files/calibre-disable_plugins.patch
+++ /dev/null
@@ -1,17 +0,0 @@
-Description: Disable plugin dialog. It uses a totally non-authenticated and non-trusted way of installing arbitrary code.
-Author: Martin Pitt <mpitt@debian.org>
-Bug-Debian: http://bugs.debian.org/640026
-
-Index: calibre-0.8.29+dfsg/src/calibre/gui2/actions/preferences.py
-===================================================================
---- calibre-0.8.29+dfsg.orig/src/calibre/gui2/actions/preferences.py	2011-12-16 05:49:14.000000000 +0100
-+++ calibre-0.8.29+dfsg/src/calibre/gui2/actions/preferences.py	2011-12-20 19:29:04.798468930 +0100
-@@ -28,8 +28,6 @@
-             pm.addAction(QIcon(I('config.png')), _('Preferences'), self.do_config)
-         cm('welcome wizard', _('Run welcome wizard'),
-                 icon='wizard.png', triggered=self.gui.run_wizard)
--        cm('plugin updater', _('Get plugins to enhance calibre'),
--                icon='plugins/plugin_updater.png', triggered=self.get_plugins)
-         if not DEBUG:
-             pm.addSeparator()
-             cm('restart', _('Restart in debug mode'), icon='debug.png',

diff --git a/app-text/calibre/files/calibre-server-3.conf b/app-text/calibre/files/calibre-server-3.conf
deleted file mode 100644
index bb456e8..0000000
--- a/app-text/calibre/files/calibre-server-3.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-# /etc/conf.d/calibre-server
-# Change this to the user you want to run calibre-server as.
-# You may specify a group too, after a colon
-# NOTE:  This must be set and not to root!
-CALIBRE_USER=
-
-# Set the path of the library to serve.
-# Defaults to the default location for CALIBRE_USER.
-#CALIBRE_LIBRARY='<user home directory>/Calibre Library'
-
-# Extra options to pass to calibre-server.
-# See the calibre-server man page for more options.
-#CALIBRE_SERVER_OPTS="--userdb /srv/calibre/users.sqlite --enable-auth --worker-count 10 --port 8080"

diff --git a/app-text/calibre/files/calibre-server-3.init b/app-text/calibre/files/calibre-server-3.init
deleted file mode 100644
index 049d3e4..0000000
--- a/app-text/calibre/files/calibre-server-3.init
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/sbin/openrc-run
-# Copyright 1999-2012 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License, v2 or later
-
-depend() {
-	need net
-	need localmount
-	after bootmisc
-}
-
-checkconfig() {
-	if [ "${CALIBRE_USER}" = "" -o "${CALIBRE_USER}" = "root" ] ; then
-		eerror "Please edit /etc/conf.d/calibre-server"
-		eerror "You have to specify a user to run calibre-server as, as we will not run it as root!"
-		eerror "Modify CALIBRE_USER to your needs (you can also add a group, after a colon)"
-		return 1
-	fi
-	if ! getent passwd "${CALIBRE_USER%:*}" >/dev/null ; then
-		eerror "Please edit /etc/conf.d/calibre-server"
-		eerror "Your user has to exist!"
-		return 1
-	fi
-	if [ "${CALIBRE_USER%:*}" != "${CALIBRE_USER}" ] ; then
-		if ! getent group "${CALIBRE_USER#*:}" >/dev/null ; then
-			eerror "Please edit /etc/conf.d/calibre-server"
-			eerror "Your group has to exist too!"
-			return 1
-		fi
-	fi
-	if [ "${CALIBRE_LIBRARY}" = "" ] ; then
-		CALIBRE_USER_HOME=$(getent passwd "${CALIBRE_USER%:*}" | cut -d ':' -f 6)
-		CALIBRE_LIBRARY="${CALIBRE_USER_HOME}/Calibre Library"
-	fi
-	if [ ! -d "${CALIBRE_LIBRARY}" ] ; then
-		eerror "Please edit /etc/conf.d/calibre-server"
-		eerror "The Calibre library, '${CALIBRE_LIBRARY},' does not exist."
-		eerror "Please modify CALIBRE_LIBRARY to point to a valid library."
-		return 1
-	fi
-	return 0
-}
-
-start() {
-	checkconfig || return $?
-	local pidfile=/var/run/calibre-server.pid
-	ebegin "Starting calibre-server"
-	start-stop-daemon --user "${CALIBRE_USER}" \
-		--pidfile "${pidfile}" --make-pidfile --background --exec /usr/bin/calibre-server \
-		-- ${CALIBRE_OPTS} "${CALIBRE_LIBRARY}"
-	eend $?
-}
-
-stop() {
-	ebegin "Stopping calibre-server"
-	start-stop-daemon --stop --user "${CALIBRE_USER}" \
-		--pidfile /var/run/calibre-server.pid
-	eend $?
-}

diff --git a/app-text/calibre/files/calibre-server.conf b/app-text/calibre/files/calibre-server.conf
deleted file mode 100644
index c1bed84..0000000
--- a/app-text/calibre/files/calibre-server.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-# /etc/conf.d/calibre-server
-# Change this to the user you want to run calibre-server as.
-# You may specify a group too, after a colon
-# NOTE:  This must be set and not to root!
-CALIBRE_USER=
-
-# Set the path of the library to serve.
-# Defaults to the default location for CALIBRE_USER.
-#CALIBRE_LIBRARY='<user home directory>/Calibre Library'
-
-# Extra options to pass to calibre-server.
-# See the calibre-server man page for more options.
-#CALIBRE_OPTS="--username calibre --password password --thread-pool 10 --port 8080"

diff --git a/app-text/calibre/files/calibre-server.init b/app-text/calibre/files/calibre-server.init
deleted file mode 100644
index 2f90542..0000000
--- a/app-text/calibre/files/calibre-server.init
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/sbin/openrc-run
-# Copyright 1999-2012 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License, v2 or later
-
-depend() {
-	need net
-	need localmount
-	after bootmisc
-}
-
-checkconfig() {
-	if [ "${CALIBRE_USER}" = "" -o "${CALIBRE_USER}" = "root" ] ; then
-		eerror "Please edit /etc/conf.d/calibre-server"
-		eerror "You have to specify a user to run calibre-server as, as we will not run it as root!"
-		eerror "Modify CALIBRE_USER to your needs (you can also add a group, after a colon)"
-		return 1
-	fi
-	if ! getent passwd "${CALIBRE_USER%:*}" >/dev/null ; then
-		eerror "Please edit /etc/conf.d/calibre-server"
-		eerror "Your user has to exist!"
-		return 1
-	fi
-	if [ "${CALIBRE_USER%:*}" != "${CALIBRE_USER}" ] ; then
-		if ! getent group "${CALIBRE_USER#*:}" >/dev/null ; then
-			eerror "Please edit /etc/conf.d/calibre-server"
-			eerror "Your group has to exist too!"
-			return 1
-		fi
-	fi
-	if [ "${CALIBRE_LIBRARY}" = "" ] ; then
-		CALIBRE_USER_HOME=$(getent passwd "${CALIBRE_USER%:*}" | cut -d ':' -f 6)
-		CALIBRE_LIBRARY="${CALIBRE_USER_HOME}/Calibre Library"
-	fi
-	if [ ! -d "${CALIBRE_LIBRARY}" ] ; then
-		eerror "Please edit /etc/conf.d/calibre-server"
-		eerror "The Calibre library, '${CALIBRE_LIBRARY},' does not exist."
-		eerror "Please modify CALIBRE_LIBRARY to point to a valid library."
-		return 1
-	fi
-	return 0
-}
-
-start() {
-	checkconfig || return $?
-	local pidfile=/var/run/calibre-server.pid
-	ebegin "Starting calibre-server"
-	start-stop-daemon --user "${CALIBRE_USER}" \
-		--pidfile "${pidfile}" --make-pidfile --background --exec /usr/bin/calibre-server \
-		-- --with-library "${CALIBRE_LIBRARY}" ${CALIBRE_OPTS}
-	eend $?
-}
-
-stop() {
-	ebegin "Stopping calibre-server"
-	start-stop-daemon --stop --user "${CALIBRE_USER}" \
-		--pidfile /var/run/calibre-server.pid
-	eend $?
-}

diff --git a/app-text/calibre/metadata.xml b/app-text/calibre/metadata.xml
deleted file mode 100644
index 2196cbe..0000000
--- a/app-text/calibre/metadata.xml
+++ /dev/null
@@ -1,13 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
-<pkgmetadata>
-  <maintainer type="person">
-    <email>axs@gentoo.org</email>
-  </maintainer>
-  <maintainer type="person">
-    <email>zmedico@gentoo.org</email>
-  </maintainer>
-  <upstream>
-    <remote-id type="sourceforge">calibre</remote-id>
-  </upstream>
-</pkgmetadata>

diff --git a/dev-libs/jansson/Manifest b/dev-libs/jansson/Manifest
deleted file mode 100644
index 3d7001f..0000000
--- a/dev-libs/jansson/Manifest
+++ /dev/null
@@ -1,2 +0,0 @@
-DIST jansson-2.7.tar.gz 445179 SHA256 7905e6590fb316c0ff943df3dc6a21cd81a59cff7a6d12514054c359d04d78d7 SHA512 657b715868c2fbf8da1e41befee2691524190e12255214c472652ca3e3793b03e2b384dc3aedb1071b67b0b8cb57fd495103979983fe21a2404f12cd70295a28 WHIRLPOOL 684c0a1cae8cbd86d1a4e19bcfa59f36bf6094853ce5e18bd600fcb27fb1afac250e52c1983f888fcae06437c6e142b2ea3823c911b5a2923f4775494d26690f
-EBUILD jansson-2.7-r1.ebuild 837 SHA256 5d71796b6f00ffe4532fdaae0808b7fb49a8caa74715fcc5256f865b6e4e146c SHA512 3c4d40da0f539d4f3b09df9fa1a33ca5e228226c4e3ba11057e89f58874e13cdb4a2140275e5107ca539909d5f900a29f62113f400f45ee16d84ea94e2923822 WHIRLPOOL 6813b9169c0096bf4ddbdd6d16165622e1fd6266263ba0aed3b4676ce51bc153e98bdad7e20b7fca597c8ab9848aeee38ba4baa2654c10b2c68c441b2e841d07

diff --git a/dev-libs/jansson/jansson-2.7-r1.ebuild b/dev-libs/jansson/jansson-2.7-r1.ebuild
deleted file mode 100644
index 1b6d049..0000000
--- a/dev-libs/jansson/jansson-2.7-r1.ebuild
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 1999-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-EAPI="5"
-AUTOTOOLS_AUTORECONF=1
-
-inherit autotools-multilib
-
-DESCRIPTION="C library for encoding, decoding and manipulating JSON data"
-HOMEPAGE="http://www.digip.org/jansson/"
-SRC_URI="http://www.digip.org/jansson/releases/${P}.tar.gz"
-
-LICENSE="MIT"
-SLOT="0"
-KEYWORDS="~amd64 ~x86"
-IUSE="doc static-libs"
-
-DEPEND="doc? ( >=dev-python/sphinx-1.0.4 )"
-RDEPEND=""
-
-DOCS=(CHANGES README.rst)
-
-multilib_src_prepare() {
-	sed -ie 's/-Werror//' src/Makefile.am || die
-	autotools-utils_src_prepare
-}
-
-multilib_src_compile() {
-	autotools-utils_src_compile
-
-	use doc && autotools-utils_src_compile html
-}
-
-multilib_src_install_all() {
-	use doc && HTML_DOCS=("${AUTOTOOLS_BUILD_DIR}/doc/_build/html/")
-	autotools-utils_src_install
-}

diff --git a/dev-python/PyPDF2/ChangeLog b/dev-python/PyPDF2/ChangeLog
deleted file mode 100644
index 196c6f7..0000000
--- a/dev-python/PyPDF2/ChangeLog
+++ /dev/null
@@ -1,108 +0,0 @@
-# ChangeLog for dev-python/PyPDF2
-# Copyright 1999-2016 Gentoo Foundation; Distributed under the GPL v2
-# (auto-generated from git log)
-
-*PyPDF2-1.24 (09 Aug 2015)
-
-  09 Aug 2015; Robin H. Johnson <robbat2@gentoo.org> +PyPDF2-1.24.ebuild,
-  +metadata.xml:
-  proj/gentoo: Initial commit
-
-  This commit represents a new era for Gentoo:
-  Storing the gentoo-x86 tree in Git, as converted from CVS.
-
-  This commit is the start of the NEW history.
-  Any historical data is intended to be grafted onto this point.
-
-  Creation process:
-  1. Take final CVS checkout snapshot
-  2. Remove ALL ChangeLog* files
-  3. Transform all Manifests to thin
-  4. Remove empty Manifests
-  5. Convert all stale $Header$/$Id$ CVS keywords to non-expanded Git $Id$
-  5.1. Do not touch files with -kb/-ko keyword flags.
-
-  Signed-off-by: Robin H. Johnson <robbat2@gentoo.org>
-  X-Thanks: Alec Warner <antarus@gentoo.org> - did the GSoC 2006 migration
-  tests
-  X-Thanks: Robin H. Johnson <robbat2@gentoo.org> - infra guy, herding this
-  project
-  X-Thanks: Nguyen Thai Ngoc Duy <pclouds@gentoo.org> - Former Gentoo
-  developer, wrote Git features for the migration
-  X-Thanks: Brian Harring <ferringb@gentoo.org> - wrote much python to improve
-  cvs2svn
-  X-Thanks: Rich Freeman <rich0@gentoo.org> - validation scripts
-  X-Thanks: Patrick Lauer <patrick@gentoo.org> - Gentoo dev, running new 2014
-  work in migration
-  X-Thanks: Michał Górny <mgorny@gentoo.org> - scripts, QA, nagging
-  X-Thanks: All of other Gentoo developers - many ideas and lots of paint on
-  the bikeshed
-
-*PyPDF2-1.25.1 (16 Aug 2015)
-
-  16 Aug 2015; Ian Delaney <idella4@gentoo.org> +PyPDF2-1.25.1.ebuild:
-  bump
-
-  Package-Manager: portage-2.2.20
-
-  18 Aug 2015; Justin Lecher <jlec@gentoo.org> metadata.xml:
-  Add missing remote-id entries to metadata.xml
-
-  Signed-off-by: Justin Lecher <jlec@gentoo.org>
-
-  21 Aug 2015; Ian Delaney <idella4@gentoo.org> PyPDF2-1.25.1.ebuild:
-  Unrestrict test phase in response to reply from upstream
-
-  Package-Manager: portage-2.2.20
-
-  21 Aug 2015; Ian Delaney <idella4@gentoo.org> PyPDF2-1.25.1.ebuild:
-  remove witespace
-
-  Package-Manager: portage-2.2.20
-
-  24 Aug 2015; Justin Lecher <jlec@gentoo.org> PyPDF2-1.24.ebuild,
-  PyPDF2-1.25.1.ebuild:
-  Use https by default
-
-  Signed-off-by: Justin Lecher <jlec@gentoo.org>
-
-  24 Aug 2015; Justin Lecher <jlec@gentoo.org> metadata.xml:
-  Use https by default
-
-  Convert all URLs for sites supporting encrypted connections from http to
-  https
-
-  Signed-off-by: Justin Lecher <jlec@gentoo.org>
-
-  24 Aug 2015; Justin Lecher <jlec@gentoo.org> PyPDF2-1.24.ebuild:
-  Use https by default
-
-  Convert all URLs for sites supporting encrypted connections from http to
-  https
-
-  Signed-off-by: Justin Lecher <jlec@gentoo.org>
-
-  24 Aug 2015; Mike Gilbert <floppym@gentoo.org> metadata.xml:
-  Revert DOCTYPE SYSTEM https changes in metadata.xml
-
-  repoman does not yet accept the https version.
-  This partially reverts eaaface92ee81f30a6ac66fe7acbcc42c00dc450.
-
-  Bug: https://bugs.gentoo.org/552720
-
-  15 Sep 2015; Justin Lecher <jlec@gentoo.org> PyPDF2-1.25.1.ebuild:
-  Fix Malformed Id header on line: 3
-
-  Convert Header to Id
-
-  Signed-off-by: Justin Lecher <jlec@gentoo.org>
-
-  24 Jan 2016; Michał Górny <mgorny@gentoo.org> metadata.xml:
-  Replace all herds with appropriate projects (GLEP 67)
-
-  Replace all uses of herd with appropriate project maintainers, or no
-  maintainers in case of herds requested to be disbanded.
-
-  24 Jan 2016; Michał Górny <mgorny@gentoo.org> metadata.xml:
-  Set appropriate maintainer types in metadata.xml (GLEP 67)
-

diff --git a/dev-python/PyPDF2/ChangeLog-2015 b/dev-python/PyPDF2/ChangeLog-2015
deleted file mode 100644
index e644b5c..0000000
--- a/dev-python/PyPDF2/ChangeLog-2015
+++ /dev/null
@@ -1,21 +0,0 @@
-# ChangeLog for dev-python/PyPDF2
-# Copyright 1999-2015 Gentoo Foundation; Distributed under the GPL v2
-# $Header: /var/cvsroot/gentoo-x86/dev-python/PyPDF2/ChangeLog,v 1.4 2015/06/03 21:05:27 jlec Exp $
-
-  03 Jun 2015; Justin Lecher <jlec@gentoo.org> metadata.xml:
-  Add pypi to remote-id in metadata.xml
-
-  08 Mar 2015; Pacho Ramos <pacho@gentoo.org> PyPDF2-1.24.ebuild:
-  x86 stable, bug 540290
-
-  06 Mar 2015; Pacho Ramos <pacho@gentoo.org> PyPDF2-1.24.ebuild:
-  amd64 stable, bug 540290
-
-*PyPDF2-1.24 (03 Jan 2015)
-
-  03 Jan 2015; Ian Delaney <idella4@gentoo.org> +PyPDF2-1.24.ebuild,
-  +metadata.xml:
-  ebuild written by me, Jan 3. Required in the bump of xhtml2pdf-0.0.6 to
-  substitute dev-python/pyPdf, of which PyPDF2 is a fork.  The original name
-  using camel case is retained since it poses no problem once installed.
-  Conversion to lowercase causes far more pain than it cures.

diff --git a/dev-python/PyPDF2/Manifest b/dev-python/PyPDF2/Manifest
deleted file mode 100644
index 167e93f..0000000
--- a/dev-python/PyPDF2/Manifest
+++ /dev/null
@@ -1,5 +0,0 @@
-DIST PyPDF2-1.26.0.tar.gz 77556 SHA256 e28f902f2f0a1603ea95ebe21dff311ef09be3d0f0ef29a3e44a932729564385 SHA512 7b427f1d099dcd687a718fb8d86e6f677ad45257414c6367e4dfacd8dfef7e3dbe3816027359868764981f36a30721f1731b4c23cbe2f35cfab1c64fb290b034 WHIRLPOOL 0f96818874f77bfef9d60e3bb84a648ec5366911fbb365d2e9ce965ef7321df845a85fede714e14dcb73e87d85bdc72af38ce40c2ed3ae096bd9daf07a7204b2
-EBUILD PyPDF2-1.26.0.ebuild 904 SHA256 310e413afc44462f67ae8479c14ab40306fb550a6d3fb67e970dd721b1f203e7 SHA512 fe4f3ce545b7ec607c99f60a7a32bed667a3361dd621f3ba9ab2b521c0ab3f8ee1515f4ecc6ca21514c602958155f2781ddb4c74e289381f88d3615fb159ef3a WHIRLPOOL dce5fd158c14ac5ee6dab2c5c8f287d65804459cde34135b892763de7f5978a1d35a5119223584dffc9fc6b62be18867d884625c753f1375d1c2d059b10bf6ad
-MISC ChangeLog 3589 SHA256 94a11ba16031e82247de9e3aff96001a717f1fbe1776f535149745e1e62baea2 SHA512 836dac6d422b156de2deee6a28846c078158971ec9371d74cbbdcabd2848952429cc3c19b4db830d06609e2028a6c85d5ed6775a2ef963bc3b0eded201d9e3a6 WHIRLPOOL eae12ad954ed3da8d3216165615c18dbc661bf31ee6f7dbe4330887a9223b8af949e733306854da8c259c9d5b7550c4a89dd5dd1971806e6be44cd4302d3b9b2
-MISC ChangeLog-2015 893 SHA256 abf37acddf61c2bce90aaf24f15d9ef77b7661c590d1d6c9b5b38f12c1287ef8 SHA512 adbe760a519c675d59716ea91530a191f325c6a2fb251a545da03142f2a502b2bf9f05f764e9fd11c950636f69e480419c532ed2b3cfffdb9e55291612a5a949 WHIRLPOOL 3aff6f09178eb58194a2e08063b46a1cd4e560f7059b4f12260e62dec6eef4122462b4714fd509b31512444b9c577f1571ec3d8f2c52cb6ec64340c9a958752d
-MISC metadata.xml 375 SHA256 58f6dbefe2ab40ebf233a8ef60f81403bff0d6727c8c01e083d1cf7577353359 SHA512 9da6355a60e212e6e5ee3ac1c5cd1c357b243ab3131b2f6d261a4039c6cbc67c1e375be8685c900c533a0273b017b6d4428c7521539b917a2c380f9435b3cefa WHIRLPOOL a46741f32910e9fb4d63648af45c2937d51d396c5afd917a507e02d3b31899adf9b02a8bdae54063230afad736f551df04e2d48ad8e79849ff4c3bd2ce6f86ee

diff --git a/dev-python/PyPDF2/PyPDF2-1.26.0.ebuild b/dev-python/PyPDF2/PyPDF2-1.26.0.ebuild
deleted file mode 100644
index b2940e0..0000000
--- a/dev-python/PyPDF2/PyPDF2-1.26.0.ebuild
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 1999-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-EAPI="5"
-PYTHON_COMPAT=( python{2_7,3_3,3_4,3_5} )
-
-inherit distutils-r1
-
-DESCRIPTION="Python library to work with pdf files"
-HOMEPAGE="https://pypi.python.org/pypi/${PN}/ https://github.com/mstamy2/PyPDF2"
-SRC_URI="mirror://pypi/${P:0:1}/${PN}/${P}.tar.gz"
-
-LICENSE="BSD-2"
-SLOT="0"
-KEYWORDS="~amd64 ~x86"
-IUSE="examples"
-
-python_test() {
-	# https://github.com/mstamy2/PyPDF2/issues/216
-	einfo ""; einfo "According to the author, this 1 failed test is an"
-	einfo "expected failure meaning the installation of PyPDF2 is working"
-	einfo "He plans to update the causative file to see it pass"; einfo ""
-
-	"${PYTHON}" -m unittest Tests.tests || die "Tests failed under ${EPYTHON}"
-}
-
-python_install_all() {
-	use examples && local EXAMPLES=( Sample_Code/. )
-	distutils-r1_python_install_all
-}

diff --git a/dev-python/PyPDF2/metadata.xml b/dev-python/PyPDF2/metadata.xml
deleted file mode 100644
index ad27f68..0000000
--- a/dev-python/PyPDF2/metadata.xml
+++ /dev/null
@@ -1,12 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
-<pkgmetadata>
-  <maintainer type="project">
-    <email>python@gentoo.org</email>
-    <name>Python</name>
-  </maintainer>
-  <upstream>
-    <remote-id type="pypi">PyPDF2</remote-id>
-    <remote-id type="github">mstamy2/PyPDF2</remote-id>
-  </upstream>
-</pkgmetadata>

diff --git a/dev-python/flask-login/Manifest b/dev-python/flask-login/Manifest
deleted file mode 100644
index 7cc8894..0000000
--- a/dev-python/flask-login/Manifest
+++ /dev/null
@@ -1,4 +0,0 @@
-AUX flask-login-0.3.2-fix-tests-python2.patch 1115 BLAKE2B 138cab48cbf144a3d4d4704ae293db79e9d41b30326bb2ec28c1c00e75ceec42a2b0eb46bf61ac369bd863406b9fa4a3d7bdb81421e1288ef19d03871e7f5d02 SHA512 2796244a27b504feba773d29cf394ed04b8d7812d8989aca9a5dcd58207c3b192545531c23883f9d6828320adfb09f31388e92af72da5d5a2d2e6df05b89282c
-DIST flask-login-0.4.0.tar.gz 40606 BLAKE2B 042c30e1c076e5d9cfaebd3ec5f1893adae4f267460ba5f5c071da473e01f8ff1b42ccf11662b867f446bc77efbed5b0af63f3a6b7060ec166c0efd431e5aa04 SHA512 cd3b611eb6f2caf50a3fcb52ffffce4fc40520cb88bb87af10328ceb3a309b42b03b61ba686b6c4a000c866baa013bb2a3a7585dcfc477d702aa329bea63af3d
-EBUILD flask-login-0.4.0.ebuild 851 BLAKE2B 8bffaa2d0c0c97d8cc781240e8712955ee205210fe3d4a11e0a4e8b42edf429a6e68fa563b8aa79d65067500b386ef93ce705425c817725f6dd2e2a0e5ac767c SHA512 5e3a1b40a083d9c4e13378d6c59b67c99150eb1aa426843e425d24ff71ad766da7cadebb7ad0c9fbbdfe6c193f3fb94d6655f5bf85f7172b40eae8f6ea5054f6
-MISC metadata.xml 314 BLAKE2B 6da22c7d637811e8c20945ef154626520ad1643cd4624c90d0d20e507301004812dda4ac89af12966cea1d69eebb73007a78955da9c700d4a9e4323c09f0cc40 SHA512 6031f60cb6209103f927d4886d2dc62ac519936242f7b72689a308757353e6b7e0fa376fdbcfa7d9e88c0a96916a4b8a783e25c0fc9a64d31bbc133738fd0069

diff --git a/dev-python/flask-login/files/flask-login-0.3.2-fix-tests-python2.patch b/dev-python/flask-login/files/flask-login-0.3.2-fix-tests-python2.patch
deleted file mode 100644
index 33811ab..0000000
--- a/dev-python/flask-login/files/flask-login-0.3.2-fix-tests-python2.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-diff --git a/test_login.py b/test_login.py
-index 3f110e0..0c060f5 100644
---- a/test_login.py
-+++ b/test_login.py
-@@ -39,6 +39,7 @@ from flask.ext.login import (LoginManager, UserMixin, AnonymousUserMixin,
- if str is not bytes:
-     unicode = str
- 
-+werkzeug_version = tuple(int(i) for i in werkzeug_version.split('.'))
- 
- @contextmanager
- def listen_to(signal):
-@@ -1073,14 +1074,14 @@ class LoginTestCase(unittest.TestCase):
-     #
-     # Misc
-     #
--    @unittest.skipIf(werkzeug_version.startswith("0.9"),
-+    @unittest.skipIf(werkzeug_version >= (0, 9),
-                      "wait for upstream implementing RFC 5987")
-     def test_chinese_user_agent(self):
-         with self.app.test_client() as c:
-             result = c.get('/', headers=[('User-Agent', u'中文')])
-             self.assertEqual(u'Welcome!', result.data.decode('utf-8'))
- 
--    @unittest.skipIf(werkzeug_version.startswith("0.9"),
-+    @unittest.skipIf(werkzeug_version >= (0, 9),
-                      "wait for upstream implementing RFC 5987")
-     def test_russian_cp1251_user_agent(self):
-         with self.app.test_client() as c:

diff --git a/dev-python/flask-login/flask-login-0.4.0.ebuild b/dev-python/flask-login/flask-login-0.4.0.ebuild
deleted file mode 100644
index eefb217..0000000
--- a/dev-python/flask-login/flask-login-0.4.0.ebuild
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 1999-2017 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Id$
-
-EAPI=6
-PYTHON_COMPAT=( python2_7 python3_{4,5,6} pypy )
-
-inherit distutils-r1
-
-DESCRIPTION="Login session support for Flask"
-HOMEPAGE="https://pypi.python.org/pypi/Flask-Login"
-SRC_URI="https://github.com/maxcountryman/${PN}/archive/${PV}.tar.gz -> ${P}.tar.gz"
-# pypi tarball is missing tests
-
-LICENSE="BSD"
-SLOT="0"
-KEYWORDS="~amd64"
-IUSE="test"
-
-RDEPEND=">=dev-python/flask-0.10[${PYTHON_USEDEP}]"
-DEPEND="${RDEPEND}
-	dev-python/setuptools[${PYTHON_USEDEP}]
-	test? (
-		dev-python/nose[${PYTHON_USEDEP}]
-		dev-python/mock[${PYTHON_USEDEP}]
-		dev-python/blinker[${PYTHON_USEDEP}]
-		$(python_gen_cond_dep 'dev-python/unittest2[${PYTHON_USEDEP}]' 'python2*' pypy)
-	)"
-
-python_test() {
-	nosetests -v || die "Tests fail with ${EPYTHON}"
-}

diff --git a/dev-python/flask-login/metadata.xml b/dev-python/flask-login/metadata.xml
deleted file mode 100644
index d14744a..0000000
--- a/dev-python/flask-login/metadata.xml
+++ /dev/null
@@ -1,11 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
-<pkgmetadata>
-	<maintainer type="project">
-		<email>python@gentoo.org</email>
-		<name>Python</name>
-	</maintainer>
-	<upstream>
-		<remote-id type="pypi">Flask-Login</remote-id>
-	</upstream>
-</pkgmetadata>

diff --git a/dev-python/imdbpy/ChangeLog b/dev-python/imdbpy/ChangeLog
deleted file mode 100644
index 7654d62..0000000
--- a/dev-python/imdbpy/ChangeLog
+++ /dev/null
@@ -1,95 +0,0 @@
-# ChangeLog for dev-python/imdbpy
-# Copyright 1999-2013 Gentoo Foundation; Distributed under the GPL v2
-# $Header: /var/cvsroot/gentoo-x86/dev-python/imdbpy/ChangeLog,v 1.20 2013/06/23 17:43:11 idella4 Exp $
-
-*imdbpy-4.9-r1 (23 Jun 2013)
-
-  23 Jun 2013; Ian Delaney <idella4@gentoo.org> +imdbpy-4.9-r1.ebuild:
-  revbump; migrate -> distutils-r1
-
-*imdbpy-4.9 (04 Nov 2012)
-
-  04 Nov 2012; Ian Delaney <idella4@gentoo.org> +imdbpy-4.9.ebuild,
-  -imdbpy-4.7-r1.ebuild, -imdbpy-4.7.ebuild:
-  bumped to imdbpy-4.9, dropped imdbpy-4.7
-
-  13 Jul 2012; Mike Gilbert <floppym@gentoo.org> imdbpy-4.7-r1.ebuild,
-  imdbpy-4.8.2.ebuild:
-  Update DISTUTILS_GLOBAL_OPTIONS to match EAPI 4 logic in distutils.eclass as
-  suggested by Arfrever.
-
-*imdbpy-4.8.2 (19 Apr 2012)
-
-  19 Apr 2012; Patrick Lauer <patrick@gentoo.org> +imdbpy-4.8.2.ebuild:
-  Bump
-
-  22 Feb 2012; Patrick Lauer <patrick@gentoo.org> imdbpy-4.7-r1.ebuild,
-  imdbpy-4.7.ebuild:
-  Restricting jython too
-
-  22 Feb 2012; Patrick Lauer <patrick@gentoo.org> imdbpy-4.7-r1.ebuild,
-  imdbpy-4.7.ebuild:
-  Restricting pypy
-
-  14 Dec 2011; Mike Gilbert <floppym@gentoo.org> imdbpy-4.7.ebuild:
-  Fix stable ebuild as well.
-
-*imdbpy-4.7-r1 (14 Dec 2011)
-
-  14 Dec 2011; Mike Gilbert <floppym@gentoo.org> +imdbpy-4.7-r1.ebuild:
-  Bump EAPI to 4. Don't install icons as docs. Thanks to Ian Delaney for doing
-  the research.
-
-  12 Mar 2011; Arfrever Frehtes Taifersar Arahesis <arfrever@gentoo.org>
-  -imdbpy-4.6.ebuild:
-  Delete.
-
-  12 Mar 2011; Thomas Kahle <tomka@gentoo.org> imdbpy-4.7.ebuild:
-  x86 stable per bug 356439
-
-  27 Feb 2011; Markos Chandras <hwoarang@gentoo.org> imdbpy-4.7.ebuild:
-  Stable on amd64 wrt bug #356439
-
-*imdbpy-4.7 (26 Jan 2011)
-
-  26 Jan 2011; Arfrever Frehtes Taifersar Arahesis <arfrever@gentoo.org>
-  +imdbpy-4.7.ebuild:
-  Version bump.
-
-  26 Dec 2010; Arfrever Frehtes Taifersar Arahesis <arfrever@gentoo.org>
-  -files/4.5.1-no-docs.patch, -imdbpy-4.5.1.ebuild:
-  Delete.
-
-  01 Aug 2010; Markos Chandras <hwoarang@gentoo.org> imdbpy-4.6.ebuild:
-  Stable on amd64 wrt bug #323745
-
-  26 Jul 2010; Christian Faulhammer <fauli@gentoo.org> imdbpy-4.6.ebuild:
-  stable x86, bug 323745
-
-*imdbpy-4.6 (21 Jun 2010)
-
-  21 Jun 2010; Arfrever Frehtes Taifersar Arahesis <arfrever@gentoo.org>
-  +imdbpy-4.6.ebuild, +files/imdbpy-4.6-data_location.patch:
-  Version bump.
-
-  07 Jun 2010; Dirkjan Ochtman <djc@gentoo.org> +files/4.5.1-no-docs.patch,
-  imdbpy-4.5.1.ebuild:
-  Prevent installation of files in /usr/share/doc directly (bug 315983).
-
-*imdbpy-4.5.1 (01 Mar 2010)
-
-  01 Mar 2010; Arfrever Frehtes Taifersar Arahesis <arfrever@gentoo.org>
-  +imdbpy-4.5.1.ebuild:
-  Version bump.
-
-*imdbpy-4.4 (09 Jan 2010)
-
-  09 Jan 2010; Arfrever Frehtes Taifersar Arahesis <arfrever@gentoo.org>
-  +imdbpy-4.4.ebuild:
-  Version bump.
-
-*imdbpy-4.3 (21 Nov 2009)
-
-  21 Nov 2009; Doug Goldstein <cardoe@gentoo.org> +imdbpy-4.3.ebuild,
-  +metadata.xml:
-  add initial version to the tree. bug #109719

diff --git a/dev-python/imdbpy/Manifest b/dev-python/imdbpy/Manifest
deleted file mode 100644
index f0589a5..0000000
--- a/dev-python/imdbpy/Manifest
+++ /dev/null
@@ -1,6 +0,0 @@
-AUX imdbpy-4.6-data_location.patch 386 SHA256 22c59416cc2481aab25ab924b6ad2467ec9556a3206c0dce6d2af2899acedb56 SHA512 b6846126cf3eab62f454b1d8073d068a356ee792895939c9a4ee67eacd46616d0173bc85e52f6930f3ee8d96b95913afcc07b5b384a205559bce105e37038567 WHIRLPOOL 06fdd41f098ba290df40860f65582e5c4f59de724951bf634d5d4cb8fc9dfff4b90dbbc68c886bafcc82b4c08de0c22d2b55df66c3778c2fc517d86b61ce42bf
-AUX updateToPython3.patch 293426 SHA256 8c7642ca62560b4349d8d4d8777839224d408b5b74d50aa596ac9d469ec24c8e SHA512 ce3ae3df47ede0a801188fd5a93781c4aedaffa2ad49282f30be36a770ca0957893e091edb4e5c0a5d333b0bc2afb73bb4e04b620586c19ae852d6c917c743aa WHIRLPOOL 157babc75d49a7df41d3cbba3d3e7a914508173da532c46eef6d80a6646124c10c56888d6dbee1f0b0b85edbf9a86a70e7088cdbed1930d0b86edcb5079843cd
-DIST IMDbPY-4.9.tar.gz 307726 SHA256 01e90dce4fe19aeca99c9d3eb14052769467b177e3fb8acc240f9c51c5f477d2 SHA512 1f46ea504e7ca61b0e8ba85bd2bfd88ef2ddc87f085d70336773d840401b03d6a8b807544ee665f4211ca093dc23dbc69619b5c04cab0b5024e73105179be8f4 WHIRLPOOL bb289000fe267124dfa1ee331a0c48722c5799b241d281e39483e73a8650804596d37922d516ad92ca2ca4ef140ff59d80bda99fbd2cbbd67e9bbe8222bbabb6
-EBUILD imdbpy-4.9-r2.ebuild 1141 SHA256 88d63fb339b9e9be9b3ace7d37ba820b8ed708aec7f16cddd147e924b1a9f57d SHA512 d42f5b1828d739dd540d64a7ac57d8d984f34a5d4ad4ee7f3c66c017f9ffe44c7f92cd395c367f2b267577cdd64fa3f2a8bf036f4e1e8362ccfea2d0ce5e68cb WHIRLPOOL 5f9f5efad3fd3353c17b38d95a6af275dcd14d2114cdd8afe6278e37d31920c364937e0004074d9d0a5ad3550e0070cc339b47a8aedd2c78a242f798c4c4dfba
-MISC ChangeLog 2950 SHA256 3cc11d617a998858c242566711ab65d4440c68f07c3b8b7bf5350abd4be2b3e1 SHA512 a189566f54db7bb63e0a9d9922aa02851e96b34626b67d79b4bb5b65302fd51ecf40d06bdfaa199b053b31c75bb5e592cf80a73b548d57e8a7615441cc152647 WHIRLPOOL 0e41fc5f0edad9c3c2ba6b29487660e0d4a0bcff31eefc0700f7ec8b1a4021bab9aaea3a8ba2840308aa04622a5512715287e9423476ff21bd6350ce292b4182
-MISC metadata.xml 229 SHA256 25e4989a4acfca1775e6e9f549f95896b987ba930f025cf9f5a665442bd75561 SHA512 b98105060ceaf1f4cd90bb491eaf0306b0bb1e0d654ecfc55985a7bb522d2763e0671997cbcfcfe405a2c4ec5ee599e376a61beacf7550378d6231d1746a72fd WHIRLPOOL 3432d5668885e5f77ac871e83100b442b87746bb825f3be7a15c63b0917949391c39ac18300983740c26f7a8813bfca1bc2493aaba3bbc2f3e2fa3acd7aa1ed9

diff --git a/dev-python/imdbpy/files/imdbpy-4.6-data_location.patch b/dev-python/imdbpy/files/imdbpy-4.6-data_location.patch
deleted file mode 100644
index 90dc467..0000000
--- a/dev-python/imdbpy/files/imdbpy-4.6-data_location.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- setup.py
-+++ setup.py
-@@ -70,7 +70,7 @@
- 
- # XXX: I'm not sure that 'etc' is a good idea.  Making it an absolute
- #      path seems a recipe for a disaster (with bdist_egg, at least).
--data_files = [('doc', setuptools.findall('docs')), ('etc', ['docs/imdbpy.cfg'])]
-+data_files = [('/etc', ['docs/imdbpy.cfg'])]
- 
- 
- # Defining these 'features', it's possible to run commands like:

diff --git a/dev-python/imdbpy/files/updateToPython3.patch b/dev-python/imdbpy/files/updateToPython3.patch
deleted file mode 100644
index 1ff0385..0000000
--- a/dev-python/imdbpy/files/updateToPython3.patch
+++ /dev/null
@@ -1,6966 +0,0 @@
-diff --git a/bin/get_character.py b/bin/get_character.py
-index ab33f6e..9724cf4 100755
---- a/bin/get_character.py
-+++ b/bin/get_character.py
-@@ -15,13 +15,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "characterID"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "characterID"' % sys.argv[0])
-     sys.exit(2)
- 
- characterID = sys.argv[1]
-@@ -34,14 +34,14 @@ try:
-     # Get a character object with the data about the character identified by
-     # the given characterID.
-     character = i.get_character(characterID)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- 
- if not character:
--    print 'It seems that there\'s no character with characterID "%s"' % characterID
-+    print('It seems that there\'s no character with characterID "%s"' % characterID)
-     sys.exit(4)
- 
- # XXX: this is the easier way to print the main info about a character;
-@@ -51,6 +51,6 @@ if not character:
- # to access the data stored in a character object, so look below; the
- # commented lines show some ways to retrieve information from a
- # character object.
--print character.summary().encode(out_encoding, 'replace')
-+print(character.summary().encode(out_encoding, 'replace'))
- 
- 
-diff --git a/bin/get_company.py b/bin/get_company.py
-index 5792c1f..82c54cf 100755
---- a/bin/get_company.py
-+++ b/bin/get_company.py
-@@ -15,13 +15,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "companyID"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "companyID"' % sys.argv[0])
-     sys.exit(2)
- 
- companyID = sys.argv[1]
-@@ -34,14 +34,14 @@ try:
-     # Get a company object with the data about the company identified by
-     # the given companyID.
-     company = i.get_company(companyID)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- 
- if not company:
--    print 'It seems that there\'s no company with companyID "%s"' % companyID
-+    print('It seems that there\'s no company with companyID "%s"' % companyID)
-     sys.exit(4)
- 
- # XXX: this is the easier way to print the main info about a company;
-@@ -51,6 +51,6 @@ if not company:
- # to access the data stored in a company object, so look below; the
- # commented lines show some ways to retrieve information from a
- # company object.
--print company.summary().encode(out_encoding, 'replace')
-+print(company.summary().encode(out_encoding, 'replace'))
- 
- 
-diff --git a/bin/get_first_character.py b/bin/get_first_character.py
-index f434ee0..7773077 100755
---- a/bin/get_first_character.py
-+++ b/bin/get_first_character.py
-@@ -13,13 +13,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "character name"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "character name"' % sys.argv[0])
-     sys.exit(2)
- 
- name = sys.argv[1]
-@@ -30,21 +30,21 @@ i = imdb.IMDb()
- in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
- out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
- 
--name = unicode(name, in_encoding, 'replace')
-+# name from sys.argv is already a str in Python 3; no decoding needed
- try:
-     # Do the search, and get the results (a list of character objects).
-     results = i.search_character(name)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- if not results:
--    print 'No matches for "%s", sorry.' % name.encode(out_encoding, 'replace')
-+    print('No matches for "%s", sorry.' % name.encode(out_encoding, 'replace'))
-     sys.exit(0)
- 
- # Print only the first result.
--print '    Best match for "%s"' % name.encode(out_encoding, 'replace')
-+print('    Best match for "%s"' % name.encode(out_encoding, 'replace'))
- 
- # This is a character instance.
- character = results[0]
-@@ -53,7 +53,7 @@ character = results[0]
- # name; retrieve main information:
- i.update(character)
- 
--print character.summary().encode(out_encoding, 'replace')
-+print(character.summary().encode(out_encoding, 'replace'))
- 
- 
- 
-diff --git a/bin/get_first_company.py b/bin/get_first_company.py
-index c3b3a06..bc6dc8a 100755
---- a/bin/get_first_company.py
-+++ b/bin/get_first_company.py
-@@ -13,13 +13,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "company name"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "company name"' % sys.argv[0])
-     sys.exit(2)
- 
- name = sys.argv[1]
-@@ -30,21 +30,21 @@ i = imdb.IMDb()
- in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
- out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
- 
--name = unicode(name, in_encoding, 'replace')
-+# name from sys.argv is already a str in Python 3; no decoding needed
- try:
-     # Do the search, and get the results (a list of company objects).
-     results = i.search_company(name)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- if not results:
--    print 'No matches for "%s", sorry.' % name.encode(out_encoding, 'replace')
-+    print('No matches for "%s", sorry.' % name.encode(out_encoding, 'replace'))
-     sys.exit(0)
- 
- # Print only the first result.
--print '    Best match for "%s"' % name.encode(out_encoding, 'replace')
-+print('    Best match for "%s"' % name.encode(out_encoding, 'replace'))
- 
- # This is a company instance.
- company = results[0]
-@@ -53,7 +53,7 @@ company = results[0]
- # name; retrieve main information:
- i.update(company)
- 
--print company.summary().encode(out_encoding, 'replace')
-+print(company.summary().encode(out_encoding, 'replace'))
- 
- 
- 
-diff --git a/bin/get_first_movie.py b/bin/get_first_movie.py
-index b757a45..fd1dcd4 100755
---- a/bin/get_first_movie.py
-+++ b/bin/get_first_movie.py
-@@ -13,13 +13,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "movie title"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "movie title"' % sys.argv[0])
-     sys.exit(2)
- 
- title = sys.argv[1]
-@@ -30,21 +30,21 @@ i = imdb.IMDb()
- in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
- out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
- 
--title = unicode(title, in_encoding, 'replace')
-+# title from sys.argv is already a str in Python 3; no decoding needed
- try:
-     # Do the search, and get the results (a list of Movie objects).
-     results = i.search_movie(title)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- if not results:
--    print 'No matches for "%s", sorry.' % title.encode(out_encoding, 'replace')
-+    print('No matches for "%s", sorry.' % title.encode(out_encoding, 'replace'))
-     sys.exit(0)
- 
- # Print only the first result.
--print '    Best match for "%s"' % title.encode(out_encoding, 'replace')
-+print('    Best match for "%s"' % title.encode(out_encoding, 'replace'))
- 
- # This is a Movie instance.
- movie = results[0]
-@@ -53,7 +53,7 @@ movie = results[0]
- # title and the year; retrieve main information:
- i.update(movie)
- 
--print movie.summary().encode(out_encoding, 'replace')
-+print(movie.summary().encode(out_encoding, 'replace'))
- 
- 
- 
-diff --git a/bin/get_first_person.py b/bin/get_first_person.py
-index 4499485..2100425 100755
---- a/bin/get_first_person.py
-+++ b/bin/get_first_person.py
-@@ -13,13 +13,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "person name"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "person name"' % sys.argv[0])
-     sys.exit(2)
- 
- name = sys.argv[1]
-@@ -30,21 +30,21 @@ i = imdb.IMDb()
- in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
- out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
- 
--name = unicode(name, in_encoding, 'replace')
-+# name from sys.argv is already a str in Python 3; no decoding needed
- try:
-     # Do the search, and get the results (a list of Person objects).
-     results = i.search_person(name)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- if not results:
--    print 'No matches for "%s", sorry.' % name.encode(out_encoding, 'replace')
-+    print('No matches for "%s", sorry.' % name.encode(out_encoding, 'replace'))
-     sys.exit(0)
- 
- # Print only the first result.
--print '    Best match for "%s"' % name.encode(out_encoding, 'replace')
-+print('    Best match for "%s"' % name.encode(out_encoding, 'replace'))
- 
- # This is a Person instance.
- person = results[0]
-@@ -53,7 +53,7 @@ person = results[0]
- # name; retrieve main information:
- i.update(person)
- 
--print person.summary().encode(out_encoding, 'replace')
-+print(person.summary().encode(out_encoding, 'replace'))
- 
- 
- 
-diff --git a/bin/get_keyword.py b/bin/get_keyword.py
-index becbf03..6a0b2b2 100755
---- a/bin/get_keyword.py
-+++ b/bin/get_keyword.py
-@@ -13,13 +13,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "keyword"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "keyword"' % sys.argv[0])
-     sys.exit(2)
- 
- name = sys.argv[1]
-@@ -30,24 +30,24 @@ i = imdb.IMDb()
- in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
- out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
- 
--name = unicode(name, in_encoding, 'replace')
-+# name from sys.argv is already a str in Python 3; no decoding needed
- try:
-     # Do the search, and get the results (a list of movies).
-     results = i.get_keyword(name, results=20)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- # Print the results.
--print '    %s result%s for "%s":' % (len(results),
-+print('    %s result%s for "%s":' % (len(results),
-                                     ('', 's')[len(results) != 1],
--                                    name.encode(out_encoding, 'replace'))
--print ' : movie title'
-+                                    name.encode(out_encoding, 'replace')))
-+print(' : movie title')
- 
- # Print the long imdb title for every movie.
- for idx, movie in enumerate(results):
--    outp = u'%d: %s' % (idx+1, movie['long imdb title'])
--    print outp.encode(out_encoding, 'replace')
-+    outp = '%d: %s' % (idx+1, movie['long imdb title'])
-+    print(outp.encode(out_encoding, 'replace'))
- 
- 
-diff --git a/bin/get_movie.py b/bin/get_movie.py
-index 54e9a7f..195c303 100755
---- a/bin/get_movie.py
-+++ b/bin/get_movie.py
-@@ -15,13 +15,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "movieID"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "movieID"' % sys.argv[0])
-     sys.exit(2)
- 
- movieID = sys.argv[1]
-@@ -34,14 +34,14 @@ try:
-     # Get a Movie object with the data about the movie identified by
-     # the given movieID.
-     movie = i.get_movie(movieID)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- 
- if not movie:
--    print 'It seems that there\'s no movie with movieID "%s"' % movieID
-+    print('It seems that there\'s no movie with movieID "%s"' % movieID)
-     sys.exit(4)
- 
- # XXX: this is the easier way to print the main info about a movie;
-@@ -51,7 +51,7 @@ if not movie:
- # to access the data stored in a Movie object, so look below; the
- # commented lines show some ways to retrieve information from a
- # Movie object.
--print movie.summary().encode(out_encoding, 'replace')
-+print(movie.summary().encode(out_encoding, 'replace'))
- 
- # Show some info about the movie.
- # This is only a short example; you can get a longer summary using
-diff --git a/bin/get_person.py b/bin/get_person.py
-index 16f50e6..93dd202 100755
---- a/bin/get_person.py
-+++ b/bin/get_person.py
-@@ -15,13 +15,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "personID"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "personID"' % sys.argv[0])
-     sys.exit(2)
- 
- personID = sys.argv[1]
-@@ -34,14 +34,14 @@ try:
-     # Get a Person object with the data about the person identified by
-     # the given personID.
-     person = i.get_person(personID)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- 
- if not person:
--    print 'It seems that there\'s no person with personID "%s"' % personID
-+    print('It seems that there\'s no person with personID "%s"' % personID)
-     sys.exit(4)
- 
- # XXX: this is the easier way to print the main info about a person;
-@@ -51,7 +51,7 @@ if not person:
- # to access the data stored in a Person object, so look below; the
- # commented lines show some ways to retrieve information from a
- # Person object.
--print person.summary().encode(out_encoding, 'replace')
-+print(person.summary().encode(out_encoding, 'replace'))
- 
- # Show some info about the person.
- # This is only a short example; you can get a longer summary using
-diff --git a/bin/get_top_bottom_movies.py b/bin/get_top_bottom_movies.py
-index 594ee71..b815ffd 100755
---- a/bin/get_top_bottom_movies.py
-+++ b/bin/get_top_bottom_movies.py
-@@ -13,12 +13,12 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 1:
--    print 'No arguments are required.'
-+    print('No arguments are required.')
-     sys.exit(2)
- 
- i = imdb.IMDb()
-@@ -29,11 +29,11 @@ bottom100 = i.get_bottom100_movies()
- out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
- 
- for label, ml in [('top 10', top250[:10]), ('bottom 10', bottom100[:10])]:
--    print ''
--    print '%s movies' % label
--    print 'rating\tvotes\ttitle'
-+    print('')
-+    print('%s movies' % label)
-+    print('rating\tvotes\ttitle')
-     for movie in ml:
--        outl = u'%s\t%s\t%s' % (movie.get('rating'), movie.get('votes'),
-+        outl = '%s\t%s\t%s' % (movie.get('rating'), movie.get('votes'),
-                                     movie['long imdb title'])
--        print outl.encode(out_encoding, 'replace')
-+        print(outl.encode(out_encoding, 'replace'))
- 
-diff --git a/bin/imdbpy2sql.py b/bin/imdbpy2sql.py
-index 9a527e3..f35cb7b 100755
---- a/bin/imdbpy2sql.py
-+++ b/bin/imdbpy2sql.py
-@@ -29,9 +29,9 @@ import getopt
- import time
- import re
- import warnings
--import anydbm
-+import dbm
- from itertools import islice, chain
--try: import cPickle as pickle
-+try: import pickle
- except ImportError: import pickle
- try: from hashlib import md5
- except ImportError: from md5 import md5
-@@ -172,9 +172,9 @@ try:
-                                                 'csv-only-load',
-                                                 'csv=', 'csv-ext=',
-                                                 'imdbids=', 'help'])
--except getopt.error, e:
--    print 'Troubles with arguments.'
--    print HELP
-+except getopt.error as e:
-+    print('Troubles with arguments.')
-+    print(HELP)
-     sys.exit(2)
- 
- for opt in optlist:
-@@ -190,11 +190,11 @@ for opt in optlist:
-         IMDBIDS_METHOD = opt[1]
-     elif opt[0] in ('-e', '--execute'):
-         if opt[1].find(':') == -1:
--            print 'WARNING: wrong command syntax: "%s"' % opt[1]
-+            print('WARNING: wrong command syntax: "%s"' % opt[1])
-             continue
-         when, cmd = opt[1].split(':', 1)
-         if when not in ALLOWED_TIMES:
--            print 'WARNING: unknown time: "%s"' % when
-+            print('WARNING: unknown time: "%s"' % when)
-             continue
-         if when == 'BEFORE_EVERY_TODB':
-             for nw in ('BEFORE_MOVIES_TODB', 'BEFORE_PERSONS_TODB',
-@@ -217,27 +217,27 @@ for opt in optlist:
-     elif opt[0] == '--csv-only-load':
-         CSV_ONLY_LOAD = True
-     elif opt[0] in ('-h', '--help'):
--        print HELP
-+        print(HELP)
-         sys.exit(0)
- 
- if IMDB_PTDF_DIR is None:
--    print 'You must supply the directory with the plain text data files'
--    print HELP
-+    print('You must supply the directory with the plain text data files')
-+    print(HELP)
-     sys.exit(2)
- 
- if URI is None:
--    print 'You must supply the URI for the database connection'
--    print HELP
-+    print('You must supply the URI for the database connection')
-+    print(HELP)
-     sys.exit(2)
- 
- if IMDBIDS_METHOD not in (None, 'dbm', 'table'):
--    print 'the method to (re)store imdbIDs must be one of "dbm" or "table"'
--    print HELP
-+    print('the method to (re)store imdbIDs must be one of "dbm" or "table"')
-+    print(HELP)
-     sys.exit(2)
- 
- if (CSV_ONLY_WRITE or CSV_ONLY_LOAD) and not CSV_DIR:
--    print 'You must specify the CSV directory with the -c argument'
--    print HELP
-+    print('You must specify the CSV directory with the -c argument')
-+    print(HELP)
-     sys.exit(3)
- 
- 
-@@ -247,44 +247,44 @@ URIlower = URI.lower()
- if URIlower.startswith('mysql'):
-     if '--mysql-force-myisam' in sys.argv[1:] and \
-             '--mysql-innodb' in sys.argv[1:]:
--        print '\nWARNING: there is no sense in mixing the --mysql-innodb and\n'\
--                '--mysql-force-myisam command line options!\n'
-+        print('\nWARNING: there is no sense in mixing the --mysql-innodb and\n'\
-+                '--mysql-force-myisam command line options!\n')
-     elif '--mysql-innodb' in sys.argv[1:]:
--        print "\nNOTICE: you've specified the --mysql-innodb command line\n"\
-+        print("\nNOTICE: you've specified the --mysql-innodb command line\n"\
-                 "option; you should do this ONLY IF your system uses InnoDB\n"\
-                 "tables or you really want to use InnoDB; if you're running\n"\
-                 "a MyISAM-based database, please omit any option; if you\n"\
-                 "want to force MyISAM usage on a InnoDB-based database,\n"\
--                "try the --mysql-force-myisam command line option, instead.\n"
-+                "try the --mysql-force-myisam command line option, instead.\n")
-     elif '--mysql-force-myisam' in sys.argv[1:]:
--        print "\nNOTICE: you've specified the --mysql-force-myisam command\n"\
-+        print("\nNOTICE: you've specified the --mysql-force-myisam command\n"\
-                 "line option; you should do this ONLY IF your system uses\n"\
--                "InnoDB tables and you want to use MyISAM tables, instead.\n"
-+                "InnoDB tables and you want to use MyISAM tables, instead.\n")
-     else:
--        print "\nNOTICE: IF you're using InnoDB tables, data insertion can\n"\
-+        print("\nNOTICE: IF you're using InnoDB tables, data insertion can\n"\
-                 "be very slow; you can switch to MyISAM tables - forcing it\n"\
-                 "with the --mysql-force-myisam option - OR use the\n"\
-                 "--mysql-innodb command line option, but DON'T USE these if\n"\
-                 "you're already working on MyISAM tables, because it will\n"\
--                "force MySQL to use InnoDB, and performances will be poor.\n"
-+                "force MySQL to use InnoDB, and performances will be poor.\n")
- elif URIlower.startswith('mssql') and \
-         '--ms-sqlserver' not in sys.argv[1:]:
--    print "\nWARNING: you're using MS SQLServer without the --ms-sqlserver\n"\
--            "command line option: if something goes wrong, try using it.\n"
-+    print("\nWARNING: you're using MS SQLServer without the --ms-sqlserver\n"\
-+            "command line option: if something goes wrong, try using it.\n")
- elif URIlower.startswith('sqlite') and \
-         '--sqlite-transactions' not in sys.argv[1:]:
--    print "\nWARNING: you're using SQLite without the --sqlite-transactions\n"\
-+    print("\nWARNING: you're using SQLite without the --sqlite-transactions\n"\
-             "command line option: you'll have very poor performances!  Try\n"\
--            "using it.\n"
-+            "using it.\n")
- if ('--mysql-force-myisam' in sys.argv[1:] and
-         not URIlower.startswith('mysql')) or ('--mysql-innodb' in
-         sys.argv[1:] and not URIlower.startswith('mysql')) or ('--ms-sqlserver'
-         in sys.argv[1:] and not URIlower.startswith('mssql')) or \
-         ('--sqlite-transactions' in sys.argv[1:] and
-         not URIlower.startswith('sqlite')):
--    print "\nWARNING: you've specified command line options that don't\n"\
-+    print("\nWARNING: you've specified command line options that don't\n"\
-             "belong to the database server you're using: proceed at your\n"\
--            "own risk!\n"
-+            "own risk!\n")
- 
- 
- if CSV_DIR:
-@@ -296,7 +296,7 @@ if CSV_DIR:
-         CSV_LOAD_SQL = CSV_DB2
-         CSV_NULL = ''
-     else:
--        print "\nERROR: importing CSV files is not supported for this database"
-+        print("\nERROR: importing CSV files is not supported for this database")
-         sys.exit(3)
- 
- 
-@@ -325,7 +325,7 @@ for idx, mod in enumerate(USE_ORM):
-             warnings.warn('falling back to "%s".' % mod)
-         USED_ORM = mod
-         break
--    except ImportError, e:
-+    except ImportError as e:
-         if idx+1 >= nrMods:
-             raise IMDbError('unable to use any ORM in %s: %s' % (
-                                             str(USE_ORM), str(e)))
-@@ -384,7 +384,7 @@ class CSVCursor(object):
-             if val is None:
-                 r[idx] = null
-                 continue
--            if (not quoteInteger) and isinstance(val, (int, long)):
-+            if (not quoteInteger) and isinstance(val, int):
-                 r[idx] = str(val)
-                 continue
-             if lobFD and idx == 3:
-@@ -468,7 +468,7 @@ class CSVCursor(object):
- 
-     def fileNames(self):
-         """Return the list of file names."""
--        return [fd.name for fd in self._fdPool.values()]
-+        return [fd.name for fd in self._fdPool.values()]
- 
-     def buildFakeFileNames(self):
-         """Populate the self._fdPool dictionary with fake objects
-@@ -491,9 +491,9 @@ class CSVCursor(object):
- 
-     def closeAll(self):
-         """Close all open file descriptors."""
--        for fd in self._fdPool.values():
-+        for fd in list(self._fdPool.values()):
-             fd.close()
--        for fd in self._lobFDPool.values():
-+        for fd in list(self._lobFDPool.values()):
-             fd.close()
- 
- 
-@@ -508,7 +508,7 @@ def loadCSVFiles():
-         CSV_REPL['file'] = cfName
-         CSV_REPL['table'] = tName
-         sqlStr = CSV_LOAD_SQL % CSV_REPL
--        print ' * LOADING CSV FILE %s...' % cfName
-+        print(' * LOADING CSV FILE %s...' % cfName)
-         sys.stdout.flush()
-         executeCustomQueries('BEFORE_CSV_TODB')
-         try:
-@@ -516,11 +516,11 @@ def loadCSVFiles():
-             try:
-                 res = CURS.fetchall()
-                 if res:
--                    print 'LOADING OUTPUT:', res
-+                    print('LOADING OUTPUT:', res)
-             except:
-                 pass
--        except Exception, e:
--            print 'ERROR: unable to import CSV file %s: %s' % (cfName, str(e))
-+        except Exception as e:
-+            print('ERROR: unable to import CSV file %s: %s' % (cfName, str(e)))
-             continue
-         connectObject.commit()
-         executeCustomQueries('AFTER_CSV_TODB')
-@@ -536,13 +536,13 @@ if CSV_DIR:
- # Extract exceptions to trap.
- try:
-     OperationalError = conn.module.OperationalError
--except AttributeError, e:
-+except AttributeError as e:
-     warnings.warn('Unable to import OperationalError; report this as a bug, ' \
-             'since it will mask important exceptions: %s' % e)
-     OperationalError = Exception
- try:
-     IntegrityError = conn.module.IntegrityError
--except AttributeError, e:
-+except AttributeError as e:
-     warnings.warn('Unable to import IntegrityError')
-     IntegrityError = Exception
- 
-@@ -601,7 +601,7 @@ def _makeConvNamed(cols):
-     def _converter(params):
-         for paramIndex, paramSet in enumerate(params):
-             d = {}
--            for i in xrange(nrCols):
-+            for i in range(nrCols):
-                 d[cols[i]] = paramSet[i]
-             params[paramIndex] = d
-         return params
-@@ -653,7 +653,7 @@ def _(s, truncateAt=None):
-     """Nicely print a string to sys.stdout, optionally
-     truncating it a the given char."""
-     if not isinstance(s, UnicodeType):
--        s = unicode(s, 'utf_8')
-+        s = str(s, 'utf_8')
-     if truncateAt is not None:
-         s = s[:truncateAt]
-     s = s.encode(sys.stdout.encoding or 'utf_8', 'replace')
-@@ -689,9 +689,9 @@ def t(s, sinceBegin=False):
-     else:
-         ct = BEGIN_TIME
-         cts = BEGIN_TIMES
--    print '# TIME', s, \
-+    print('# TIME', s, \
-             ': %dmin, %dsec (wall) %dmin, %dsec (user) %dmin, %dsec (system)' \
--            % _minSec(nt-ct, ntimes[0]-cts[0], ntimes[1]-cts[1])
-+            % _minSec(nt-ct, ntimes[0]-cts[0], ntimes[1]-cts[1]))
-     if not sinceBegin:
-         CTIME = nt
-         CTIMES = ntimes
-@@ -801,13 +801,13 @@ class SourceFile(GzipFile):
-         filename = os.path.join(IMDB_PTDF_DIR, filename)
-         try:
-             GzipFile.__init__(self, filename, mode, *args, **kwds)
--        except IOError, e:
-+        except IOError as e:
-             if not pwarning: raise
--            print 'WARNING WARNING WARNING'
--            print 'WARNING unable to read the "%s" file.' % filename
--            print 'WARNING The file will be skipped, and the contained'
--            print 'WARNING information will NOT be stored in the database.'
--            print 'WARNING Complete error: ', e
-+            print('WARNING WARNING WARNING')
-+            print('WARNING unable to read the "%s" file.' % filename)
-+            print('WARNING The file will be skipped, and the contained')
-+            print('WARNING information will NOT be stored in the database.')
-+            print('WARNING Complete error: ', e)
-             # re-raise the exception.
-             raise
-         self.start = start
-@@ -827,12 +827,12 @@ class SourceFile(GzipFile):
- 
-     def readline_NOcheckEnd(self, size=-1):
-         line = GzipFile.readline(self, size)
--        return unicode(line, 'latin_1').encode('utf_8')
-+        return str(line, 'latin_1').encode('utf_8')
- 
-     def readline_checkEnd(self, size=-1):
-         line = GzipFile.readline(self, size)
-         if self.stop is not None and line[:self.stoplen] == self.stop: return ''
--        return unicode(line, 'latin_1').encode('utf_8')
-+        return str(line, 'latin_1').encode('utf_8')
- 
-     def getByHashSections(self):
-         return getSectionHash(self)
-@@ -902,7 +902,7 @@ class _BaseCache(dict):
-         self._table_name = ''
-         self._id_for_custom_q = ''
-         if d is not None:
--            for k, v in d.iteritems(): self[k] = v
-+            for k, v in d.items(): self[k] = v
- 
-     def __setitem__(self, key, counter):
-         """Every time a key is set, its value is the counter;
-@@ -921,8 +921,8 @@ class _BaseCache(dict):
-         if self._flushing: return
-         self._flushing = 1
-         if _recursionLevel >= MAX_RECURSION:
--            print 'WARNING recursion level exceded trying to flush data'
--            print 'WARNING this batch of data is lost (%s).' % self.className
-+            print('WARNING recursion level exceeded trying to flush data')
-+            print('WARNING this batch of data is lost (%s).' % self.className)
-             self._tmpDict.clear()
-             return
-         if self._tmpDict:
-@@ -937,7 +937,7 @@ class _BaseCache(dict):
-                                     _keys=keys, _timeit=False)
-                 _after_has_run = True
-                 self._tmpDict.clear()
--            except OperationalError, e:
-+            except OperationalError as e:
-                 # XXX: I'm not sure this is the right thing (and way)
-                 #      to proceed.
-                 if not _after_has_run:
-@@ -952,28 +952,28 @@ class _BaseCache(dict):
-                 firstHalf = {}
-                 poptmpd = self._tmpDict.popitem
-                 originalLength = len(self._tmpDict)
--                for x in xrange(1 + originalLength/2):
-+                for x in range(1 + originalLength // 2):
-                     k, v = poptmpd()
-                     firstHalf[k] = v
--                print ' * TOO MANY DATA (%s items in %s), recursion: %s' % \
-+                print(' * TOO MANY DATA (%s items in %s), recursion: %s' % \
-                                                         (originalLength,
-                                                         self.className,
-+                                                        _recursionLevel))
-+                print('   * SPLITTING (run 1 of 2), recursion: %s' % \
-                                                         _recursionLevel)
--                print '   * SPLITTING (run 1 of 2), recursion: %s' % \
--                                                        _recursionLevel
-                 self.flush(quiet=quiet, _recursionLevel=_recursionLevel)
-                 self._tmpDict = firstHalf
--                print '   * SPLITTING (run 2 of 2), recursion: %s' % \
--                                                        _recursionLevel
-+                print('   * SPLITTING (run 2 of 2), recursion: %s' % \
-+                                                        _recursionLevel)
-                 self.flush(quiet=quiet, _recursionLevel=_recursionLevel)
-                 self._tmpDict.clear()
--            except Exception, e:
-+            except Exception as e:
-                 if isinstance(e, KeyboardInterrupt):
-                     raise
--                print 'WARNING: unknown exception caught committing the data'
--                print 'WARNING: to the database; report this as a bug, since'
--                print 'WARNING: many data (%d items) were lost: %s' % \
--                        (len(self._tmpDict), e)
-+                print('WARNING: unknown exception caught committing the data')
-+                print('WARNING: to the database; report this as a bug, since')
-+                print('WARNING: many data (%d items) were lost: %s' % \
-+                        (len(self._tmpDict), e))
-         self._flushing = 0
-         # Flush also deferred data.
-         if self._deferredData:
-@@ -992,7 +992,7 @@ class _BaseCache(dict):
- 
-     def add(self, key, miscData=None):
-         """Insert a new key and return its value."""
--        c = self.counter.next()
-+        c = next(self.counter)
-         # miscData=[('a_dict', 'value')] will set self.a_dict's c key
-         # to 'value'.
-         if miscData is not None:
-@@ -1033,7 +1033,7 @@ class MoviesCache(_BaseCache):
-                                     'md5sum'))
- 
-     def populate(self):
--        print ' * POPULATING %s...' % self.className
-+        print(' * POPULATING %s...' % self.className)
-         titleTbl = tableName(Title)
-         movieidCol = colName(Title, 'id')
-         titleCol = colName(Title, 'title')
-@@ -1072,7 +1072,7 @@ class MoviesCache(_BaseCache):
- 
-     def _toDB(self, quiet=0):
-         if not quiet:
--            print ' * FLUSHING %s...' % self.className
-+            print(' * FLUSHING %s...' % self.className)
-             sys.stdout.flush()
-         l = []
-         lapp = l.append
-@@ -1082,8 +1082,8 @@ class MoviesCache(_BaseCache):
-                 t = analyze_title(k, _emptyString='')
-             except IMDbParserError:
-                 if k and k.strip():
--                    print 'WARNING %s._toDB() invalid title:' % self.className,
--                    print _(k)
-+                    print('WARNING %s._toDB() invalid title:' % self.className, end=' ')
-+                    print(_(k))
-                 continue
-             tget = t.get
-             episodeOf = None
-@@ -1141,7 +1141,7 @@ class PersonsCache(_BaseCache):
-                                 'namePcodeNf', 'surnamePcode', 'md5sum'])
- 
-     def populate(self):
--        print ' * POPULATING PersonsCache...'
-+        print(' * POPULATING PersonsCache...')
-         nameTbl = tableName(Name)
-         personidCol = colName(Name, 'id')
-         nameCol = colName(Name, 'name')
-@@ -1160,7 +1160,7 @@ class PersonsCache(_BaseCache):
- 
-     def _toDB(self, quiet=0):
-         if not quiet:
--            print ' * FLUSHING PersonsCache...'
-+            print(' * FLUSHING PersonsCache...')
-             sys.stdout.flush()
-         l = []
-         lapp = l.append
-@@ -1170,7 +1170,7 @@ class PersonsCache(_BaseCache):
-                 t = analyze_name(k)
-             except IMDbParserError:
-                 if k and k.strip():
--                    print 'WARNING PersonsCache._toDB() invalid name:', _(k)
-+                    print('WARNING PersonsCache._toDB() invalid name:', _(k))
-                 continue
-             tget = t.get
-             name = tget('name')
-@@ -1199,7 +1199,7 @@ class CharactersCache(_BaseCache):
-                                 'surnamePcode', 'md5sum'])
- 
-     def populate(self):
--        print ' * POPULATING CharactersCache...'
-+        print(' * POPULATING CharactersCache...')
-         nameTbl = tableName(CharName)
-         personidCol = colName(CharName, 'id')
-         nameCol = colName(CharName, 'name')
-@@ -1218,7 +1218,7 @@ class CharactersCache(_BaseCache):
- 
-     def _toDB(self, quiet=0):
-         if not quiet:
--            print ' * FLUSHING CharactersCache...'
-+            print(' * FLUSHING CharactersCache...')
-             sys.stdout.flush()
-         l = []
-         lapp = l.append
-@@ -1228,7 +1228,7 @@ class CharactersCache(_BaseCache):
-                 t = analyze_name(k)
-             except IMDbParserError:
-                 if k and k.strip():
--                    print 'WARNING CharactersCache._toDB() invalid name:', _(k)
-+                    print('WARNING CharactersCache._toDB() invalid name:', _(k))
-                 continue
-             tget = t.get
-             name = tget('name')
-@@ -1256,7 +1256,7 @@ class CompaniesCache(_BaseCache):
-                                 'namePcodeSf', 'md5sum'])
- 
-     def populate(self):
--        print ' * POPULATING CharactersCache...'
-+        print(' * POPULATING CompaniesCache...')
-         nameTbl = tableName(CompanyName)
-         companyidCol = colName(CompanyName, 'id')
-         nameCol = colName(CompanyName, 'name')
-@@ -1275,7 +1275,7 @@ class CompaniesCache(_BaseCache):
- 
-     def _toDB(self, quiet=0):
-         if not quiet:
--            print ' * FLUSHING CompaniesCache...'
-+            print(' * FLUSHING CompaniesCache...')
-             sys.stdout.flush()
-         l = []
-         lapp = l.append
-@@ -1285,7 +1285,7 @@ class CompaniesCache(_BaseCache):
-                 t = analyze_company_name(k)
-             except IMDbParserError:
-                 if k and k.strip():
--                    print 'WARNING CompaniesCache._toDB() invalid name:', _(k)
-+                    print('WARNING CompaniesCache._toDB() invalid name:', _(k))
-                 continue
-             tget = t.get
-             name = tget('name')
-@@ -1316,7 +1316,7 @@ class KeywordsCache(_BaseCache):
-                                 'phoneticCode'])
- 
-     def populate(self):
--        print ' * POPULATING KeywordsCache...'
-+        print(' * POPULATING KeywordsCache...')
-         nameTbl = tableName(CompanyName)
-         keywordidCol = colName(Keyword, 'id')
-         keyCol = colName(Keyword, 'name')
-@@ -1331,7 +1331,7 @@ class KeywordsCache(_BaseCache):
- 
-     def _toDB(self, quiet=0):
-         if not quiet:
--            print ' * FLUSHING KeywordsCache...'
-+            print(' * FLUSHING KeywordsCache...')
-             sys.stdout.flush()
-         l = []
-         lapp = l.append
-@@ -1365,7 +1365,7 @@ class SQLData(dict):
-         self._recursionLevel = 1
-         self._table = table
-         self._table_name = tableName(table)
--        for k, v in d.items(): self[k] = v
-+        for k, v in list(d.items()): self[k] = v
- 
-     def __setitem__(self, key, value):
-         """The value is discarded, the counter is used as the 'real' key
-@@ -1388,8 +1388,8 @@ class SQLData(dict):
-         CACHE_PID.flush(quiet=1)
-         if _resetRecursion: self._recursionLevel = 1
-         if self._recursionLevel >= MAX_RECURSION:
--            print 'WARNING recursion level exceded trying to flush data'
--            print 'WARNING this batch of data is lost.'
-+            print('WARNING recursion level exceeded trying to flush data')
-+            print('WARNING this batch of data is lost.')
-             self.clear()
-             self.counter = self.counterInit
-             return
-@@ -1404,12 +1404,12 @@ class SQLData(dict):
-             _after_has_run = True
-             self.clear()
-             self.counter = self.counterInit
--        except OperationalError, e:
-+        except OperationalError as e:
-             if not _after_has_run:
-                 executeCustomQueries('AFTER_SQLDATA_TODB', _keys=keys,
-                                     _timeit=False)
--            print ' * TOO MANY DATA (%s items), SPLITTING (run #%d)...' % \
--                    (len(self), self._recursionLevel)
-+            print(' * TOO MANY DATA (%s items), SPLITTING (run #%d)...' % \
-+                    (len(self), self._recursionLevel))
-             self._recursionLevel += 1
-             newdata = self.__class__(table=self._table,
-                                     sqlString=self.sqlString,
-@@ -1417,8 +1417,8 @@ class SQLData(dict):
-             newdata._recursionLevel = self._recursionLevel
-             newflushEvery = self.flushEvery / 2
-             if newflushEvery < 1:
--                print 'WARNING recursion level exceded trying to flush data'
--                print 'WARNING this batch of data is lost.'
-+                print('WARNING recursion level exceeded trying to flush data')
-+                print('WARNING this batch of data is lost.')
-                 self.clear()
-                 self.counter = self.counterInit
-                 return
-@@ -1426,7 +1426,7 @@ class SQLData(dict):
-             newdata.flushEvery = newflushEvery
-             popitem = self.popitem
-             dsi = dict.__setitem__
--            for x in xrange(len(self)/2):
-+            for x in range(len(self) // 2):
-                 k, v = popitem()
-                 dsi(newdata, k, v)
-             newdata.flush(_resetRecursion=0)
-@@ -1434,21 +1434,21 @@ class SQLData(dict):
-             self.flush(_resetRecursion=0)
-             self.clear()
-             self.counter = self.counterInit
--        except Exception, e:
-+        except Exception as e:
-             if isinstance(e, KeyboardInterrupt):
-                 raise
--            print 'WARNING: unknown exception caught committing the data'
--            print 'WARNING: to the database; report this as a bug, since'
--            print 'WARNING: many data (%d items) were lost: %s' % \
--                    (len(self), e)
-+            print('WARNING: unknown exception caught committing the data')
-+            print('WARNING: to the database; report this as a bug, since')
-+            print('WARNING: many data (%d items) were lost: %s' % \
-+                    (len(self), e))
-         connectObject.commit()
- 
-     def _toDB(self):
--        print ' * FLUSHING SQLData...'
-+        print(' * FLUSHING SQLData...')
-         if not CSV_DIR:
--            CURS.executemany(self.sqlString, self.converter(self.values()))
-+            CURS.executemany(self.sqlString, self.converter(list(self.values())))
-         else:
--            CSV_CURS.executemany(self.sqlString, self.values())
-+            CSV_CURS.executemany(self.sqlString, list(self.values()))
- 
- 
- # Miscellaneous functions.
-@@ -1465,7 +1465,7 @@ def unpack(line, headers, sep='\t'):
-                     'rating': '8.4', 'title': 'Incredibles, The (2004)'}
-     """
-     r = {}
--    ls1 = filter(None, line.split(sep))
-+    ls1 = [_f for _f in line.split(sep) if _f]
-     for index, item in enumerate(ls1):
-         try: name = headers[index]
-         except IndexError: name = 'item%s' % index
-@@ -1523,8 +1523,8 @@ def readMovieList():
-         if mid is None:
-             continue
-         if count % 10000 == 0:
--            print 'SCANNING movies:', _(title),
--            print '(movieID: %s)' % mid
-+            print('SCANNING movies:', _(title), end=' ')
-+            print('(movieID: %s)' % mid)
-         count += 1
-     CACHE_MID.flush()
-     CACHE_MID.movieYear.clear()
-@@ -1543,7 +1543,7 @@ def doCast(fp, roleid, rolename):
-     for line in fp:
-         if line and line[0] != '\t':
-             if line[0] == '\n': continue
--            sl = filter(None, line.split('\t'))
-+            sl = [_f for _f in line.split('\t') if _f]
-             if len(sl) != 2: continue
-             name, line = sl
-             miscData = None
-@@ -1580,33 +1580,33 @@ def doCast(fp, roleid, rolename):
-             elif item[0] == '<':
-                 textor = item[1:-1]
-                 try:
--                    order = long(textor)
-+                    order = int(textor)
-                 except ValueError:
-                     os = textor.split(',')
-                     if len(os) == 3:
-                         try:
--                            order = ((long(os[2])-1) * 1000) + \
--                                    ((long(os[1])-1) * 100) + (long(os[0])-1)
-+                            order = ((int(os[2])-1) * 1000) + \
-+                                    ((int(os[1])-1) * 100) + (int(os[0])-1)
-                         except ValueError:
-                             pass
-         movieid = CACHE_MID.addUnique(title)
-         if movieid is None:
-             continue
-         if role is not None:
--            roles = filter(None, [x.strip() for x in role.split('/')])
-+            roles = [_f for _f in [x.strip() for x in role.split('/')] if _f]
-             for role in roles:
-                 cid = CACHE_CID.addUnique(role)
-                 sqldata.add((pid, movieid, cid, note, order))
-         else:
-             sqldata.add((pid, movieid, None, note, order))
-         if count % 10000 == 0:
--            print 'SCANNING %s:' % rolename,
--            print _(name)
-+            print('SCANNING %s:' % rolename, end=' ')
-+            print(_(name))
-         count += 1
-     sqldata.flush()
-     CACHE_PID.flush()
-     CACHE_PID.personGender.clear()
--    print 'CLOSING %s...' % rolename
-+    print('CLOSING %s...' % rolename)
- 
- 
- def castLists():
-@@ -1620,7 +1620,7 @@ def castLists():
-         if fname == 'actress': fname = 'actresses.list.gz'
-         elif fname == 'miscellaneous-crew': fname = 'miscellaneous.list.gz'
-         else: fname = fname + 's.list.gz'
--        print 'DOING', fname
-+        print('DOING', fname)
-         try:
-             f = SourceFile(fname, start=CAST_START, stop=CAST_STOP)
-         except IOError:
-@@ -1658,7 +1658,7 @@ def doAkaNames():
-             try:
-                 name_dict = analyze_name(line)
-             except IMDbParserError:
--                if line: print 'WARNING doAkaNames wrong name:', _(line)
-+                if line: print('WARNING doAkaNames wrong name:', _(line))
-                 continue
-             name = name_dict.get('name')
-             namePcodeCf, namePcodeNf, surnamePcode = name_soundexes(name)
-@@ -1666,7 +1666,7 @@ def doAkaNames():
-                         namePcodeCf, namePcodeNf, surnamePcode,
-                         md5(line).hexdigest()))
-             if count % 10000 == 0:
--                print 'SCANNING akanames:', _(line)
-+                print('SCANNING akanames:', _(line))
-             count += 1
-     sqldata.flush()
-     fp.close()
-@@ -1761,8 +1761,8 @@ def doAkaTitles():
-                         tonD = analyze_title(line, _emptyString='')
-                     except IMDbParserError:
-                         if line:
--                            print 'WARNING doAkaTitles(obsol O) invalid title:',
--                            print _(line)
-+                            print('WARNING doAkaTitles(obsol O) invalid title:', end=' ')
-+                            print(_(line))
-                         continue
-                     tonD['title'] = normalizeTitle(tonD['title'])
-                     line = build_title(tonD, ptdf=1, _emptyString='')
-@@ -1779,8 +1779,8 @@ def doAkaTitles():
-                         titleDict = analyze_title(line, _emptyString='')
-                     except IMDbParserError:
-                         if line:
--                            print 'WARNING doAkaTitles (O) invalid title:',
--                            print _(line)
-+                            print('WARNING doAkaTitles (O) invalid title:', end=' ')
-+                            print(_(line))
-                         continue
-                     if 'episode of' in titleDict:
-                         if obsolete:
-@@ -1820,14 +1820,14 @@ def doAkaTitles():
-                         akatD = analyze_title(akat, _emptyString='')
-                     except IMDbParserError:
-                         if line:
--                            print 'WARNING doAkaTitles(obsol) invalid title:',
--                            print _(akat)
-+                            print('WARNING doAkaTitles(obsol) invalid title:', end=' ')
-+                            print(_(akat))
-                         continue
-                     akatD['title'] = normalizeTitle(akatD['title'])
-                     akat = build_title(akatD, ptdf=1, _emptyString='')
-                 if count % 10000 == 0:
--                    print 'SCANNING %s:' % fname[:-8].replace('-', ' '),
--                    print _(akat)
-+                    print('SCANNING %s:' % fname[:-8].replace('-', ' '), end=' ')
-+                    print(_(akat))
-                 if isEpisode and seriesID is not None:
-                     # Handle series for which only single episodes have
-                     # aliases.
-@@ -1835,8 +1835,8 @@ def doAkaTitles():
-                         akaDict = analyze_title(akat, _emptyString='')
-                     except IMDbParserError:
-                         if line:
--                            print 'WARNING doAkaTitles (epis) invalid title:',
--                            print _(akat)
-+                            print('WARNING doAkaTitles (epis) invalid title:', end=' ')
-+                            print(_(akat))
-                         continue
-                     if 'episode of' in akaDict:
-                         if obsolete:
-@@ -1873,10 +1873,10 @@ def doMovieLinks():
-             if mid is None:
-                 continue
-             if count % 10000 == 0:
--                print 'SCANNING movielinks:', _(title)
-+                print('SCANNING movielinks:', _(title))
-         else:
-             line = line.strip()
--            link_txt = unicode(line, 'utf_8').encode('ascii', 'replace')
-+            link_txt = str(line, 'utf_8').encode('ascii', 'replace')
-             theid = None
-             for k, lenkp1, v in MOVIELINK_IDS:
-                 if link_txt and link_txt[0] == '(' \
-@@ -1907,19 +1907,19 @@ def minusHashFiles(fp, funct, defaultid, descr):
-         title = title.strip()
-         d = funct(text.split('\n'))
-         if not d:
--            print 'WARNING skipping empty information about title:',
--            print _(title)
-+            print('WARNING skipping empty information about title:', end=' ')
-+            print(_(title))
-             continue
-         if not title:
--            print 'WARNING skipping information associated to empty title:',
--            print _(d[0], truncateAt=40)
-+            print('WARNING skipping information associated to empty title:', end=' ')
-+            print(_(d[0], truncateAt=40))
-             continue
-         mid = CACHE_MID.addUnique(title)
-         if mid is None:
-             continue
-         if count % 5000 == 0:
--            print 'SCANNING %s:' % descr,
--            print _(title)
-+            print('SCANNING %s:' % descr, end=' ')
-+            print(_(title))
-         for data in d:
-             sqldata.add((mid, defaultid, data, None))
-         count += 1
-@@ -1963,7 +1963,7 @@ def getTaglines():
-             tag = tag.strip()
-             if not tag: continue
-             if count % 10000 == 0:
--                print 'SCANNING taglines:', _(title)
-+                print('SCANNING taglines:', _(title))
-             sqldata.add((mid, INFO_TYPES['taglines'], tag, None))
-         count += 1
-     sqldata.flush()
-@@ -2001,12 +2001,12 @@ _bus = {'BT': 'budget',
-         'CP': 'copyright holder'
- }
- _usd = '$'
--_gbp = unichr(0x00a3).encode('utf_8')
--_eur = unichr(0x20ac).encode('utf_8')
-+_gbp = chr(0x00a3).encode('utf_8')
-+_eur = chr(0x20ac).encode('utf_8')
- def getBusiness(lines):
-     """Movie's business information."""
-     bd = _parseColonList(lines, _bus)
--    for k in bd.keys():
-+    for k in list(bd.keys()):
-         nv = []
-         for v in bd[k]:
-             v = v.replace('USD ',_usd).replace('GBP ',_gbp).replace('EUR',_eur)
-@@ -2066,13 +2066,13 @@ _ldk = {'OT': 'original title',
-         'LT': 'laserdisc title'
- }
- # Handle laserdisc keys.
--for key, value in _ldk.items():
-+for key, value in list(_ldk.items()):
-     _ldk[key] = 'LD %s' % value
- 
- def getLaserDisc(lines):
-     """Laserdisc information."""
-     d = _parseColonList(lines, _ldk)
--    for k, v in d.iteritems():
-+    for k, v in d.items():
-         d[k] = ' '.join(v)
-     return d
- 
-@@ -2096,7 +2096,7 @@ _mpaa = {'RE': 'mpaa'}
- def getMPAA(lines):
-     """Movie's mpaa information."""
-     d = _parseColonList(lines, _mpaa)
--    for k, v in d.iteritems():
-+    for k, v in d.items():
-         d[k] = ' '.join(v)
-     return d
- 
-@@ -2152,16 +2152,16 @@ def nmmvFiles(fp, funct, fname):
-                 continue
-         else: mopid = CACHE_PID.addUnique(ton)
-         if count % 6000 == 0:
--            print 'SCANNING %s:' % fname[:-8].replace('-', ' '),
--            print _(ton)
-+            print('SCANNING %s:' % fname[:-8].replace('-', ' '), end=' ')
-+            print(_(ton))
-         d = funct(text.split('\n'))
--        for k, v in d.iteritems():
-+        for k, v in d.items():
-             if k != 'notable tv guest appearances':
-                 theid = INFO_TYPES.get(k)
-                 if theid is None:
--                    print 'WARNING key "%s" of ToN' % k,
--                    print _(ton),
--                    print 'not in INFO_TYPES'
-+                    print('WARNING key "%s" of ToN' % k, end=' ')
-+                    print(_(ton), end=' ')
-+                    print('not in INFO_TYPES')
-                     continue
-             if type(v) is _ltype:
-                 for i in v:
-@@ -2176,12 +2176,12 @@ def nmmvFiles(fp, funct, fname):
-                             continue
-                         crole = i.currentRole
-                         if isinstance(crole, list):
--                            crole = ' / '.join([x.get('long imdb name', u'')
-+                            crole = ' / '.join([x.get('long imdb name', '')
-                                                 for x in crole])
-                         if not crole:
-                             crole = None
-                         else:
--                            crole = unicode(crole).encode('utf_8')
-+                            crole = str(crole).encode('utf_8')
-                         guestdata.add((mopid, movieid, crole,
-                                         i.notes or None))
-                         continue
-@@ -2315,7 +2315,7 @@ def _parseBiography(biol):
-                 rn = build_name(analyze_name(n, canonical=1), canonical=1)
-                 res['birth name'] = rn
-             except IMDbParserError:
--                if line: print 'WARNING _parseBiography wrong name:', _(n)
-+                if line: print('WARNING _parseBiography wrong name:', _(n))
-                 continue
-         elif x6 == 'AT: * ':
-             res.setdefault('article', []).append(x[6:].strip())
-@@ -2406,8 +2406,8 @@ def doMovieCompaniesInfo():
-             if 'note' in data:
-                 note = data['note']
-             if count % 10000 == 0:
--                print 'SCANNING %s:' % dataf[0][:-8].replace('-', ' '),
--                print _(data['title'])
-+                print('SCANNING %s:' % dataf[0][:-8].replace('-', ' '), end=' ')
-+                print(_(data['title']))
-             sqldata.add((mid, cid, infoid, note))
-             count += 1
-         sqldata.flush()
-@@ -2461,8 +2461,8 @@ def doMiscMovieInfo():
-             if 'note' in data:
-                 note = data['note']
-             if count % 10000 == 0:
--                print 'SCANNING %s:' % dataf[0][:-8].replace('-', ' '),
--                print _(data['title'])
-+                print('SCANNING %s:' % dataf[0][:-8].replace('-', ' '), end=' ')
-+                print(_(data['title']))
-             info = data['info']
-             if typeindex == 'keywords':
-                 keywordID = CACHE_KWRDID.addUnique(info)
-@@ -2494,7 +2494,7 @@ def getRating():
-         if mid is None:
-             continue
-         if count % 10000 == 0:
--            print 'SCANNING rating:', _(title)
-+            print('SCANNING rating:', _(title))
-         sqldata.add((mid, INFO_TYPES['votes distribution'],
-                     data.get('votes distribution'), None))
-         sqldata.add((mid, INFO_TYPES['votes'], data.get('votes'), None))
-@@ -2516,7 +2516,7 @@ def getTopBottomRating():
-                         RawValue('infoTypeID', INFO_TYPES[what]),
-                         'info', 'note'])
-         count = 1
--        print 'SCANNING %s...' % what
-+        print('SCANNING %s...' % what)
-         for line in fp:
-             data = unpack(line, ('votes distribution', 'votes', 'rank',
-                                 'title'), sep='  ')
-@@ -2576,8 +2576,8 @@ def completeCast():
-             if mid is None:
-                 continue
-             if count % 10000 == 0:
--                print 'SCANNING %s:' % fname[:-8].replace('-', ' '),
--                print _(title)
-+                print('SCANNING %s:' % fname[:-8].replace('-', ' '), end=' ')
-+                print(_(title))
-             sqldata.add((mid, CCKind[ll[1].lower().strip()]))
-             count += 1
-         fp.close()
-@@ -2633,7 +2633,7 @@ def readConstants():
- def _imdbIDsFileName(fname):
-     """Return a file name, adding the optional
-     CSV_DIR directory."""
--    return os.path.join(*(filter(None, [CSV_DIR, fname])))
-+    return os.path.join(*([_f for _f in [CSV_DIR, fname] if _f]))
- 
- 
- def _countRows(tableName):
-@@ -2641,8 +2641,8 @@ def _countRows(tableName):
-     try:
-         CURS.execute('SELECT COUNT(*) FROM %s' % tableName)
-         return (CURS.fetchone() or [0])[0]
--    except Exception, e:
--        print 'WARNING: unable to count rows of table %s: %s' % (tableName, e)
-+    except Exception as e:
-+        print('WARNING: unable to count rows of table %s: %s' % (tableName, e))
-         return 0
- 
- 
-@@ -2658,7 +2658,7 @@ def storeNotNULLimdbIDs(cls):
-     md5sum_col = colName(cls, 'md5sum')
-     imdbID_col = colName(cls, 'imdbID')
- 
--    print 'SAVING imdbID values for %s...' % cname,
-+    print('SAVING imdbID values for %s...' % cname, end=' ')
-     sys.stdout.flush()
-     if _get_imdbids_method() == 'table':
-         try:
-@@ -2668,8 +2668,8 @@ def storeNotNULLimdbIDs(cls):
-                 pass
-             try:
-                 CURS.execute('SELECT * FROM %s LIMIT 1' % table_name)
--            except Exception, e:
--                print 'missing "%s" table (ok if this is the first run)' % table_name
-+            except Exception as e:
-+                print('missing "%s" table (ok if this is the first run)' % table_name)
-                 return
-             query = 'CREATE TEMPORARY TABLE %s_extract AS SELECT %s, %s FROM %s WHERE %s IS NOT NULL' % \
-                     (table_name, md5sum_col, imdbID_col,
-@@ -2678,14 +2678,14 @@ def storeNotNULLimdbIDs(cls):
-             CURS.execute('CREATE INDEX %s_md5sum_idx ON %s_extract (%s)' % (table_name, table_name, md5sum_col))
-             CURS.execute('CREATE INDEX %s_imdbid_idx ON %s_extract (%s)' % (table_name, table_name, imdbID_col))
-             rows = _countRows('%s_extract' % table_name)
--            print 'DONE! (%d entries using a temporary table)' % rows
-+            print('DONE! (%d entries using a temporary table)' % rows)
-             return
--        except Exception, e:
--            print 'WARNING: unable to store imdbIDs in a temporary table (falling back to dbm): %s' % e
-+        except Exception as e:
-+            print('WARNING: unable to store imdbIDs in a temporary table (falling back to dbm): %s' % e)
-     try:
--        db = anydbm.open(_imdbIDsFileName('%s_imdbIDs.db' % cname), 'c')
--    except Exception, e:
--        print 'WARNING: unable to store imdbIDs: %s' % str(e)
-+        db = dbm.open(_imdbIDsFileName('%s_imdbIDs.db' % cname), 'c')
-+    except Exception as e:
-+        print('WARNING: unable to store imdbIDs: %s' % str(e))
-         return
-     try:
-         CURS.execute('SELECT %s, %s FROM %s WHERE %s IS NOT NULL' %
-@@ -2694,10 +2694,10 @@ def storeNotNULLimdbIDs(cls):
-         while res:
-             db.update(dict((str(x[0]), str(x[1])) for x in res))
-             res = CURS.fetchmany(10000)
--    except Exception, e:
--        print 'SKIPPING: unable to retrieve data: %s' % e
-+    except Exception as e:
-+        print('SKIPPING: unable to retrieve data: %s' % e)
-         return
--    print 'DONE! (%d entries)' % len(db)
-+    print('DONE! (%d entries)' % len(db))
-     db.close()
-     return
- 
-@@ -2707,7 +2707,7 @@ def iterbatch(iterable, size):
-     sourceiter = iter(iterable)
-     while True:
-         batchiter = islice(sourceiter, size)
--        yield chain([batchiter.next()], batchiter)
-+        yield chain([next(batchiter)], batchiter)
- 
- 
- def restoreImdbIDs(cls):
-@@ -2720,7 +2720,7 @@ def restoreImdbIDs(cls):
-         cname = 'companies'
-     else:
-         cname = 'characters'
--    print 'RESTORING imdbIDs values for %s...' % cname,
-+    print('RESTORING imdbIDs values for %s...' % cname, end=' ')
-     sys.stdout.flush()
-     table_name = tableName(cls)
-     md5sum_col = colName(cls, 'md5sum')
-@@ -2730,7 +2730,7 @@ def restoreImdbIDs(cls):
-         try:
-             try:
-                 CURS.execute('SELECT * FROM %s_extract LIMIT 1' % table_name)
--            except Exception, e:
-+            except Exception as e:
-                 raise Exception('missing "%s_extract" table (ok if this is the first run)' % table_name)
- 
-             if DB_NAME == 'mysql':
-@@ -2748,20 +2748,20 @@ def restoreImdbIDs(cls):
-                 CURS.execute('SELECT COUNT(*) FROM %s WHERE %s IS NOT NULL' %
-                         (table_name, imdbID_col))
-                 affected_rows = (CURS.fetchone() or [0])[0]
--            except Exception, e:
-+            except Exception as e:
-                 pass
-             rows = _countRows('%s_extract' % table_name)
--            print 'DONE! (restored %s entries out of %d)' % (affected_rows, rows)
-+            print('DONE! (restored %s entries out of %d)' % (affected_rows, rows))
-             t('restore %s' % cname)
-             try: CURS.execute('DROP TABLE %s_extract' % table_name)
-             except: pass
-             return
--        except Exception, e:
--            print 'WARNING: unable to restore imdbIDs using the temporary table (falling back to dbm): %s' % e
-+        except Exception as e:
-+            print('WARNING: unable to restore imdbIDs using the temporary table (falling back to dbm): %s' % e)
-     try:
--        db = anydbm.open(_imdbIDsFileName('%s_imdbIDs.db' % cname), 'r')
--    except Exception, e:
--        print 'WARNING: unable to restore imdbIDs (ok if this is the first run)'
-+        db = dbm.open(_imdbIDsFileName('%s_imdbIDs.db' % cname), 'r')
-+    except Exception as e:
-+        print('WARNING: unable to restore imdbIDs (ok if this is the first run)')
-         return
-     count = 0
-     sql = "UPDATE " + table_name + " SET " + imdbID_col + \
-@@ -2776,9 +2776,9 @@ def restoreImdbIDs(cls):
-         if success:
-             return len(items)
-         return 0
--    for batch in iterbatch(db.iteritems(), 10000):
-+    for batch in iterbatch(iter(db.items()), 10000):
-         count += _restore(sql, batch)
--    print 'DONE! (restored %d entries out of %d)' % (count, len(db))
-+    print('DONE! (restored %d entries out of %d)' % (count, len(db)))
-     t('restore %s' % cname)
-     db.close()
-     return
-@@ -2804,8 +2804,8 @@ def runSafely(funct, fmsg, default, *args, **kwds):
-     value of the function is returned (or 'default')."""
-     try:
-         return funct(*args, **kwds)
--    except Exception, e:
--        print 'WARNING: %s: %s' % (fmsg, e)
-+    except Exception as e:
-+        print('WARNING: %s: %s' % (fmsg, e))
-     return default
- 
- 
-@@ -2815,14 +2815,14 @@ def _executeQuery(query):
-         s_query = query[:60] + '...'
-     else:
-         s_query = query
--    print 'EXECUTING "%s"...' % (s_query),
-+    print('EXECUTING "%s"...' % (s_query), end=' ')
-     sys.stdout.flush()
-     try:
-         CURS.execute(query)
--        print 'DONE!'
-+        print('DONE!')
-         return True
--    except Exception, e:
--        print 'FAILED (%s)!' % e
-+    except Exception as e:
-+        print('FAILED (%s)!' % e)
-         return False
- 
- 
-@@ -2830,7 +2830,7 @@ def executeCustomQueries(when, _keys=None, _timeit=True):
-     """Run custom queries as specified on the command line."""
-     if _keys is None: _keys = {}
-     for query in CUSTOM_QUERIES.get(when, []):
--        print 'EXECUTING "%s:%s"...' % (when, query)
-+        print('EXECUTING "%s:%s"...' % (when, query))
-         sys.stdout.flush()
-         if query.startswith('FOR_EVERY_TABLE:'):
-             query = query[16:]
-@@ -2843,14 +2843,14 @@ def executeCustomQueries(when, _keys=None, _timeit=True):
-                     _executeQuery(query % keys)
-                     if _timeit:
-                         t('%s command' % when)
--                except Exception, e:
--                    print 'FAILED (%s)!' % e
-+                except Exception as e:
-+                    print('FAILED (%s)!' % e)
-                     continue
-         else:
-             try:
-                 _executeQuery(query % _keys)
--            except Exception, e:
--                print 'FAILED (%s)!' % e
-+            except Exception as e:
-+                print('FAILED (%s)!' % e)
-                 continue
-             if _timeit:
-                 t('%s command' % when)
-@@ -2859,26 +2859,26 @@ def executeCustomQueries(when, _keys=None, _timeit=True):
- def buildIndexesAndFK():
-     """Build indexes and Foreign Keys."""
-     executeCustomQueries('BEFORE_INDEXES')
--    print 'building database indexes (this may take a while)'
-+    print('building database indexes (this may take a while)')
-     sys.stdout.flush()
-     # Build database indexes.
-     idx_errors = createIndexes(DB_TABLES)
-     for idx_error in idx_errors:
--        print 'ERROR caught exception creating an index: %s' % idx_error
-+        print('ERROR caught exception creating an index: %s' % idx_error)
-     t('createIndexes()')
--    print 'adding foreign keys (this may take a while)'
-+    print('adding foreign keys (this may take a while)')
-     sys.stdout.flush()
-     # Add FK.
-     fk_errors = createForeignKeys(DB_TABLES)
-     for fk_error in fk_errors:
--        print 'ERROR caught exception creating a foreign key: %s' % fk_error
-+        print('ERROR caught exception creating a foreign key: %s' % fk_error)
-     t('createForeignKeys()')
- 
- 
- def restoreCSV():
-     """Only restore data from a set of CSV files."""
-     CSV_CURS.buildFakeFileNames()
--    print 'loading CSV files into the database'
-+    print('loading CSV files into the database')
-     executeCustomQueries('BEFORE_CSV_LOAD')
-     loadCSVFiles()
-     t('loadCSVFiles()')
-@@ -2892,7 +2892,7 @@ def restoreCSV():
- 
- # begin the iterations...
- def run():
--    print 'RUNNING imdbpy2sql.py using the %s ORM' % USED_ORM
-+    print('RUNNING imdbpy2sql.py using the %s ORM' % USED_ORM)
- 
-     executeCustomQueries('BEGIN')
- 
-@@ -2907,17 +2907,17 @@ def run():
-             None, CompanyName)
- 
-     # Truncate the current database.
--    print 'DROPPING current database...',
-+    print('DROPPING current database...', end=' ')
-     sys.stdout.flush()
-     dropTables(DB_TABLES)
--    print 'DONE!'
-+    print('DONE!')
- 
-     executeCustomQueries('BEFORE_CREATE')
-     # Rebuild the database structure.
--    print 'CREATING new tables...',
-+    print('CREATING new tables...', end=' ')
-     sys.stdout.flush()
-     createTables(DB_TABLES)
--    print 'DONE!'
-+    print('DONE!')
-     t('dropping and recreating the database')
-     executeCustomQueries('AFTER_CREATE')
- 
-@@ -3002,7 +3002,7 @@ def run():
-         return
- 
-     if CSV_DIR:
--        print 'loading CSV files into the database'
-+        print('loading CSV files into the database')
-         executeCustomQueries('BEFORE_CSV_LOAD')
-         loadCSVFiles()
-         t('loadCSVFiles()')
-@@ -3024,9 +3024,9 @@ def _kdb_handler(signum, frame):
-     """Die gracefully."""
-     global _HEARD
-     if _HEARD:
--        print "EHI!  DON'T PUSH ME!  I'VE HEARD YOU THE FIRST TIME! :-)"
-+        print("EHI!  DON'T PUSH ME!  I'VE HEARD YOU THE FIRST TIME! :-)")
-         return
--    print 'INTERRUPT REQUEST RECEIVED FROM USER.  FLUSHING CACHES...'
-+    print('INTERRUPT REQUEST RECEIVED FROM USER.  FLUSHING CACHES...')
-     _HEARD = 1
-     # XXX: trap _every_ error?
-     try: CACHE_MID.flush()
-@@ -3037,24 +3037,24 @@ def _kdb_handler(signum, frame):
-     except IntegrityError: pass
-     try: CACHE_COMPID.flush()
-     except IntegrityError: pass
--    print 'DONE! (in %d minutes, %d seconds)' % \
--            divmod(int(time.time())-BEGIN_TIME, 60)
-+    print('DONE! (in %d minutes, %d seconds)' % \
-+            divmod(int(time.time())-BEGIN_TIME, 60))
-     sys.exit()
- 
- 
- if __name__ == '__main__':
-     try:
--        print 'IMPORTING psyco...',
-+        print('IMPORTING psyco...', end=' ')
-         sys.stdout.flush()
-         #import DONOTIMPORTPSYCO
-         import psyco
-         #psyco.log()
-         psyco.profile()
--        print 'DONE!'
--        print ''
-+        print('DONE!')
-+        print('')
-     except ImportError:
--        print 'FAILED (not a big deal, everything is alright...)'
--        print ''
-+        print('FAILED (not a big deal, everything is alright...)')
-+        print('')
-     import signal
-     signal.signal(signal.SIGINT, _kdb_handler)
-     if CSV_ONLY_LOAD:
-diff --git a/bin/search_character.py b/bin/search_character.py
-index 44b7fa8..922d24e 100755
---- a/bin/search_character.py
-+++ b/bin/search_character.py
-@@ -13,13 +13,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "character name"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "character name"' % sys.argv[0])
-     sys.exit(2)
- 
- name = sys.argv[1]
-@@ -30,25 +30,25 @@ i = imdb.IMDb()
- in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
- out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
- 
--name = unicode(name, in_encoding, 'replace')
-+name = str(name, in_encoding, 'replace')
- try:
-     # Do the search, and get the results (a list of character objects).
-     results = i.search_character(name)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- # Print the results.
--print '    %s result%s for "%s":' % (len(results),
-+print('    %s result%s for "%s":' % (len(results),
-                                     ('', 's')[len(results) != 1],
--                                    name.encode(out_encoding, 'replace'))
--print 'characterID\t: imdbID : name'
-+                                    name.encode(out_encoding, 'replace')))
-+print('characterID\t: imdbID : name')
- 
- # Print the long imdb name for every character.
- for character in results:
--    outp = u'%s\t\t: %s : %s' % (character.characterID, i.get_imdbID(character),
-+    outp = '%s\t\t: %s : %s' % (character.characterID, i.get_imdbID(character),
-                                 character['long imdb name'])
--    print outp.encode(out_encoding, 'replace')
-+    print(outp.encode(out_encoding, 'replace'))
- 
- 
-diff --git a/bin/search_company.py b/bin/search_company.py
-index 6e3df0c..3d1f593 100755
---- a/bin/search_company.py
-+++ b/bin/search_company.py
-@@ -13,13 +13,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "company name"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "company name"' % sys.argv[0])
-     sys.exit(2)
- 
- name = sys.argv[1]
-@@ -30,25 +30,25 @@ i = imdb.IMDb()
- in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
- out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
- 
--name = unicode(name, in_encoding, 'replace')
-+name = str(name, in_encoding, 'replace')
- try:
-     # Do the search, and get the results (a list of company objects).
-     results = i.search_company(name)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- # Print the results.
--print '    %s result%s for "%s":' % (len(results),
-+print('    %s result%s for "%s":' % (len(results),
-                                     ('', 's')[len(results) != 1],
--                                    name.encode(out_encoding, 'replace'))
--print 'companyID\t: imdbID : name'
-+                                    name.encode(out_encoding, 'replace')))
-+print('companyID\t: imdbID : name')
- 
- # Print the long imdb name for every company.
- for company in results:
--    outp = u'%s\t\t: %s : %s' % (company.companyID, i.get_imdbID(company),
-+    outp = '%s\t\t: %s : %s' % (company.companyID, i.get_imdbID(company),
-                                 company['long imdb name'])
--    print outp.encode(out_encoding, 'replace')
-+    print(outp.encode(out_encoding, 'replace'))
- 
- 
-diff --git a/bin/search_keyword.py b/bin/search_keyword.py
-index 21e0da2..5381ebc 100755
---- a/bin/search_keyword.py
-+++ b/bin/search_keyword.py
-@@ -13,13 +13,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "keyword name"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "keyword name"' % sys.argv[0])
-     sys.exit(2)
- 
- name = sys.argv[1]
-@@ -30,24 +30,24 @@ i = imdb.IMDb()
- in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
- out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
- 
--name = unicode(name, in_encoding, 'replace')
-+name = str(name, in_encoding, 'replace')
- try:
-     # Do the search, and get the results (a list of keyword strings).
-     results = i.search_keyword(name, results=20)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- # Print the results.
--print '    %s result%s for "%s":' % (len(results),
-+print('    %s result%s for "%s":' % (len(results),
-                                     ('', 's')[len(results) != 1],
--                                    name.encode(out_encoding, 'replace'))
--print ' : keyword'
-+                                    name.encode(out_encoding, 'replace')))
-+print(' : keyword')
- 
- # Print every keyword.
- for idx, keyword in enumerate(results):
--    outp = u'%d: %s' % (idx+1, keyword)
--    print outp.encode(out_encoding, 'replace')
-+    outp = '%d: %s' % (idx+1, keyword)
-+    print(outp.encode(out_encoding, 'replace'))
- 
- 
-diff --git a/bin/search_movie.py b/bin/search_movie.py
-index b358d03..6b30863 100755
---- a/bin/search_movie.py
-+++ b/bin/search_movie.py
-@@ -13,13 +13,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "movie title"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "movie title"' % sys.argv[0])
-     sys.exit(2)
- 
- title = sys.argv[1]
-@@ -30,25 +30,25 @@ i = imdb.IMDb()
- in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
- out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
- 
--title = unicode(title, in_encoding, 'replace')
-+title = str(title, in_encoding, 'replace')
- try:
-     # Do the search, and get the results (a list of Movie objects).
-     results = i.search_movie(title)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- # Print the results.
--print '    %s result%s for "%s":' % (len(results),
-+print('    %s result%s for "%s":' % (len(results),
-                                     ('', 's')[len(results) != 1],
--                                    title.encode(out_encoding, 'replace'))
--print 'movieID\t: imdbID : title'
-+                                    title.encode(out_encoding, 'replace')))
-+print('movieID\t: imdbID : title')
- 
- # Print the long imdb title for every movie.
- for movie in results:
--    outp = u'%s\t: %s : %s' % (movie.movieID, i.get_imdbID(movie),
-+    outp = '%s\t: %s : %s' % (movie.movieID, i.get_imdbID(movie),
-                                 movie['long imdb title'])
--    print outp.encode(out_encoding, 'replace')
-+    print(outp.encode(out_encoding, 'replace'))
- 
- 
-diff --git a/bin/search_person.py b/bin/search_person.py
-index 8a7dbd4..47112b9 100755
---- a/bin/search_person.py
-+++ b/bin/search_person.py
-@@ -13,13 +13,13 @@ import sys
- try:
-     import imdb
- except ImportError:
--    print 'You bad boy!  You need to install the IMDbPY package!'
-+    print('You bad boy!  You need to install the IMDbPY package!')
-     sys.exit(1)
- 
- 
- if len(sys.argv) != 2:
--    print 'Only one argument is required:'
--    print '  %s "person name"' % sys.argv[0]
-+    print('Only one argument is required:')
-+    print('  %s "person name"' % sys.argv[0])
-     sys.exit(2)
- 
- name = sys.argv[1]
-@@ -30,25 +30,25 @@ i = imdb.IMDb()
- in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
- out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
- 
--name = unicode(name, in_encoding, 'replace')
-+name = str(name, in_encoding, 'replace')
- try:
-     # Do the search, and get the results (a list of Person objects).
-     results = i.search_person(name)
--except imdb.IMDbError, e:
--    print "Probably you're not connected to Internet.  Complete error report:"
--    print e
-+except imdb.IMDbError as e:
-+    print("Probably you're not connected to Internet.  Complete error report:")
-+    print(e)
-     sys.exit(3)
- 
- # Print the results.
--print '    %s result%s for "%s":' % (len(results),
-+print('    %s result%s for "%s":' % (len(results),
-                                     ('', 's')[len(results) != 1],
--                                    name.encode(out_encoding, 'replace'))
--print 'personID\t: imdbID : name'
-+                                    name.encode(out_encoding, 'replace')))
-+print('personID\t: imdbID : name')
- 
- # Print the long imdb name for every person.
- for person in results:
--    outp = u'%s\t: %s : %s' % (person.personID, i.get_imdbID(person),
-+    outp = '%s\t: %s : %s' % (person.personID, i.get_imdbID(person),
-                                 person['long imdb name'])
--    print outp.encode(out_encoding, 'replace')
-+    print(outp.encode(out_encoding, 'replace'))
- 
- 
-diff --git a/ez_setup.py b/ez_setup.py
-index 1ff1d3e..5663de5 100644
---- a/ez_setup.py
-+++ b/ez_setup.py
-@@ -70,10 +70,10 @@ def _validate_md5(egg_name, data):
-     if egg_name in md5_data:
-         digest = md5(data).hexdigest()
-         if digest != md5_data[egg_name]:
--            print >>sys.stderr, (
-+            print((
-                 "md5 validation of %s failed!  (Possible download problem?)"
-                 % egg_name
--            )
-+            ), file=sys.stderr)
-             sys.exit(2)
-     return data
- 
-@@ -103,14 +103,14 @@ def use_setuptools(
-         return do_download()       
-     try:
-         pkg_resources.require("setuptools>="+version); return
--    except pkg_resources.VersionConflict, e:
-+    except pkg_resources.VersionConflict as e:
-         if was_imported:
--            print >>sys.stderr, (
-+            print((
-             "The required version of setuptools (>=%s) is not available, and\n"
-             "can't be installed while this script is running. Please install\n"
-             " a more recent version first, using 'easy_install -U setuptools'."
-             "\n\n(Currently using %r)"
--            ) % (version, e.args[0])
-+            ) % (version, e.args[0]), file=sys.stderr)
-             sys.exit(2)
-         else:
-             del pkg_resources, sys.modules['pkg_resources']    # reload ok
-@@ -129,7 +129,7 @@ def download_setuptools(
-     with a '/'). `to_dir` is the directory where the egg will be downloaded.
-     `delay` is the number of seconds to pause before an actual download attempt.
-     """
--    import urllib2, shutil
-+    import urllib.request, urllib.error, urllib.parse, shutil
-     egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
-     url = download_base + egg_name
-     saveto = os.path.join(to_dir, egg_name)
-@@ -155,7 +155,7 @@ and place it in this directory before rerunning this script.)
-                     version, download_base, delay, url
-                 ); from time import sleep; sleep(delay)
-             log.warn("Downloading %s", url)
--            src = urllib2.urlopen(url)
-+            src = urllib.request.urlopen(url)
-             # Read/write all in one block, so we don't create a corrupt file
-             # if the download is interrupted.
-             data = _validate_md5(egg_name, src.read())
-@@ -216,10 +216,10 @@ def main(argv, version=DEFAULT_VERSION):
-                 os.unlink(egg)
-     else:
-         if setuptools.__version__ == '0.0.1':
--            print >>sys.stderr, (
-+            print((
-             "You have an obsolete version of setuptools installed.  Please\n"
-             "remove it from your system entirely before rerunning this script."
--            )
-+            ), file=sys.stderr)
-             sys.exit(2)
- 
-     req = "setuptools>="+version
-@@ -238,8 +238,8 @@ def main(argv, version=DEFAULT_VERSION):
-             from setuptools.command.easy_install import main
-             main(argv)
-         else:
--            print "Setuptools version",version,"or greater has been installed."
--            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
-+            print("Setuptools version",version,"or greater has been installed.")
-+            print('(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)')
- 
- def update_md5(filenames):
-     """Update our built-in md5 registry"""
-@@ -252,7 +252,7 @@ def update_md5(filenames):
-         md5_data[base] = md5(f.read()).hexdigest()
-         f.close()
- 
--    data = ["    %r: %r,\n" % it for it in md5_data.items()]
-+    data = ["    %r: %r,\n" % it for it in list(md5_data.items())]
-     data.sort()
-     repl = "".join(data)
- 
-@@ -262,7 +262,7 @@ def update_md5(filenames):
- 
-     match = re.search("\nmd5_data = {\n([^}]+)}", src)
-     if not match:
--        print >>sys.stderr, "Internal error!"
-+        print("Internal error!", file=sys.stderr)
-         sys.exit(2)
- 
-     src = src[:match.start(1)] + repl + src[match.end(1):]
-diff --git a/imdb/Character.py b/imdb/Character.py
-index 5a5239a..8822932 100644
---- a/imdb/Character.py
-+++ b/imdb/Character.py
-@@ -73,15 +73,15 @@ class Character(_Container):
-         *modFunct* -- function called returning text fields.
-         """
-         name = kwds.get('name')
--        if name and not self.data.has_key('name'):
-+        if name and 'name' not in self.data:
-             self.set_name(name)
-         self.characterID = kwds.get('characterID', None)
--        self.myName = kwds.get('myName', u'')
-+        self.myName = kwds.get('myName', '')
- 
-     def _reset(self):
-         """Reset the Character object."""
-         self.characterID = None
--        self.myName = u''
-+        self.myName = ''
- 
-     def set_name(self, name):
-         """Set the name of the character."""
-@@ -96,19 +96,19 @@ class Character(_Container):
-     def _additional_keys(self):
-         """Valid keys to append to the data.keys() list."""
-         addkeys = []
--        if self.data.has_key('name'):
-+        if 'name' in self.data:
-             addkeys += ['long imdb name']
--        if self.data.has_key('headshot'):
-+        if 'headshot' in self.data:
-             addkeys += ['full-size headshot']
-         return addkeys
- 
-     def _getitem(self, key):
-         """Handle special keys."""
-         ## XXX: can a character have an imdbIndex?
--        if self.data.has_key('name'):
-+        if 'name' in self.data:
-             if key == 'long imdb name':
-                 return build_name(self.data)
--        if key == 'full-size headshot' and self.data.has_key('headshot'):
-+        if key == 'full-size headshot' and 'headshot' in self.data:
-             return self._re_fullsizeURL.sub('', self.data.get('headshot', ''))
-         return None
- 
-@@ -116,7 +116,7 @@ class Character(_Container):
-         """Return the characterID."""
-         return self.characterID
- 
--    def __nonzero__(self):
-+    def __bool__(self):
-         """The Character is "false" if the self.data does not contain a name."""
-         # XXX: check the name and the characterID?
-         if self.data.get('name'): return 1
-@@ -125,8 +125,8 @@ class Character(_Container):
-     def __contains__(self, item):
-         """Return true if this Character was portrayed in the given Movie
-         or it was impersonated by the given Person."""
--        from Movie import Movie
--        from Person import Person
-+        from .Movie import Movie
-+        from .Person import Person
-         if isinstance(item, Person):
-             for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
-                 if item.isSame(m.currentRole):
-@@ -142,8 +142,8 @@ class Character(_Container):
-         and/or characterID."""
-         if not isinstance(other, self.__class__):
-             return 0
--        if self.data.has_key('name') and \
--                other.data.has_key('name') and \
-+        if 'name' in self.data and \
-+                'name' in other.data and \
-                 build_name(self.data, canonical=0) == \
-                 build_name(other.data, canonical=0):
-             return 1
-@@ -156,7 +156,7 @@ class Character(_Container):
- 
-     def __deepcopy__(self, memo):
-         """Return a deep copy of a Character instance."""
--        c = Character(name=u'', characterID=self.characterID,
-+        c = Character(name='', characterID=self.characterID,
-                     myName=self.myName, myID=self.myID,
-                     data=deepcopy(self.data, memo),
-                     notes=self.notes, accessSystem=self.accessSystem,
-@@ -172,30 +172,30 @@ class Character(_Container):
-         r = '<Character id:%s[%s] name:_%s_>' % (self.characterID,
-                                         self.accessSystem,
-                                         self.get('name'))
--        if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
-+        if isinstance(r, str): r = r.encode('utf_8', 'replace')
-         return r
- 
-     def __str__(self):
-         """Simply print the short name."""
--        return self.get('name', u'').encode('utf_8', 'replace')
-+        return self.get('name', '').encode('utf_8', 'replace')
- 
-     def __unicode__(self):
-         """Simply print the short title."""
--        return self.get('name', u'')
-+        return self.get('name', '')
- 
-     def summary(self):
-         """Return a string with a pretty-printed summary for the character."""
--        if not self: return u''
--        s = u'Character\n=====\nName: %s\n' % \
--                                self.get('name', u'')
-+        if not self: return ''
-+        s = 'Character\n=====\nName: %s\n' % \
-+                                self.get('name', '')
-         bio = self.get('biography')
-         if bio:
--            s += u'Biography: %s\n' % bio[0]
-+            s += 'Biography: %s\n' % bio[0]
-         filmo = self.get('filmography')
-         if filmo:
--            a_list = [x.get('long imdb canonical title', u'')
-+            a_list = [x.get('long imdb canonical title', '')
-                         for x in filmo[:5]]
--            s += u'Last movies with this character: %s.\n' % u'; '.join(a_list)
-+            s += 'Last movies with this character: %s.\n' % '; '.join(a_list)
-         return s
- 
- 
-diff --git a/imdb/Company.py b/imdb/Company.py
-index 5e05c84..26dc998 100644
---- a/imdb/Company.py
-+++ b/imdb/Company.py
-@@ -71,15 +71,15 @@ class Company(_Container):
-         *modFunct* -- function called returning text fields.
-         """
-         name = kwds.get('name')
--        if name and not self.data.has_key('name'):
-+        if name and 'name' not in self.data:
-             self.set_name(name)
-         self.companyID = kwds.get('companyID', None)
--        self.myName = kwds.get('myName', u'')
-+        self.myName = kwds.get('myName', '')
- 
-     def _reset(self):
-         """Reset the company object."""
-         self.companyID = None
--        self.myName = u''
-+        self.myName = ''
- 
-     def set_name(self, name):
-         """Set the name of the company."""
-@@ -87,7 +87,7 @@ class Company(_Container):
-         # Company diverges a bit from other classes, being able
-         # to directly handle its "notes".  AND THAT'S PROBABLY A BAD IDEA!
-         oname = name = name.strip()
--        notes = u''
-+        notes = ''
-         if name.endswith(')'):
-             fparidx = name.find('(')
-             if fparidx != -1:
-@@ -102,14 +102,14 @@ class Company(_Container):
- 
-     def _additional_keys(self):
-         """Valid keys to append to the data.keys() list."""
--        if self.data.has_key('name'):
-+        if 'name' in self.data:
-             return ['long imdb name']
-         return []
- 
-     def _getitem(self, key):
-         """Handle special keys."""
-         ## XXX: can a company have an imdbIndex?
--        if self.data.has_key('name'):
-+        if 'name' in self.data:
-             if key == 'long imdb name':
-                 return build_company_name(self.data)
-         return None
-@@ -118,7 +118,7 @@ class Company(_Container):
-         """Return the companyID."""
-         return self.companyID
- 
--    def __nonzero__(self):
-+    def __bool__(self):
-         """The company is "false" if the self.data does not contain a name."""
-         # XXX: check the name and the companyID?
-         if self.data.get('name'): return 1
-@@ -126,7 +126,7 @@ class Company(_Container):
- 
-     def __contains__(self, item):
-         """Return true if this company and the given Movie are related."""
--        from Movie import Movie
-+        from .Movie import Movie
-         if isinstance(item, Movie):
-             for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
-                 if item.isSame(m):
-@@ -138,8 +138,8 @@ class Company(_Container):
-         and/or companyID."""
-         if not isinstance(other, self.__class__):
-             return 0
--        if self.data.has_key('name') and \
--                other.data.has_key('name') and \
-+        if 'name' in self.data and \
-+                'name' in other.data and \
-                 build_company_name(self.data) == \
-                 build_company_name(other.data):
-             return 1
-@@ -152,7 +152,7 @@ class Company(_Container):
- 
-     def __deepcopy__(self, memo):
-         """Return a deep copy of a company instance."""
--        c = Company(name=u'', companyID=self.companyID,
-+        c = Company(name='', companyID=self.companyID,
-                     myName=self.myName, myID=self.myID,
-                     data=deepcopy(self.data, memo),
-                     notes=self.notes, accessSystem=self.accessSystem,
-@@ -168,28 +168,28 @@ class Company(_Container):
-         r = '<Company id:%s[%s] name:_%s_>' % (self.companyID,
-                                         self.accessSystem,
-                                         self.get('long imdb name'))
--        if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
-+        if isinstance(r, str): r = r.encode('utf_8', 'replace')
-         return r
- 
-     def __str__(self):
-         """Simply print the short name."""
--        return self.get('name', u'').encode('utf_8', 'replace')
-+        return self.get('name', '').encode('utf_8', 'replace')
- 
-     def __unicode__(self):
-         """Simply print the short title."""
--        return self.get('name', u'')
-+        return self.get('name', '')
- 
-     def summary(self):
-         """Return a string with a pretty-printed summary for the company."""
--        if not self: return u''
--        s = u'Company\n=======\nName: %s\n' % \
--                                self.get('name', u'')
-+        if not self: return ''
-+        s = 'Company\n=======\nName: %s\n' % \
-+                                self.get('name', '')
-         for k in ('distributor', 'production company', 'miscellaneous company',
-                 'special effects company'):
-             d = self.get(k, [])[:5]
-             if not d: continue
--            s += u'Last movies from this company (%s): %s.\n' % \
--                    (k, u'; '.join([x.get('long imdb title', u'') for x in d]))
-+            s += 'Last movies from this company (%s): %s.\n' % \
-+                    (k, '; '.join([x.get('long imdb title', '') for x in d]))
-         return s
- 
- 
-diff --git a/imdb/Movie.py b/imdb/Movie.py
-index 5cdcde6..353d6af 100644
---- a/imdb/Movie.py
-+++ b/imdb/Movie.py
-@@ -163,15 +163,15 @@ class Movie(_Container):
-         *modFunct* -- function called returning text fields.
-         """
-         title = kwds.get('title')
--        if title and not self.data.has_key('title'):
-+        if title and 'title' not in self.data:
-             self.set_title(title)
-         self.movieID = kwds.get('movieID', None)
--        self.myTitle = kwds.get('myTitle', u'')
-+        self.myTitle = kwds.get('myTitle', '')
- 
-     def _reset(self):
-         """Reset the Movie object."""
-         self.movieID = None
--        self.myTitle = u''
-+        self.myTitle = ''
- 
-     def set_title(self, title):
-         """Set the title of the movie."""
-@@ -182,18 +182,18 @@ class Movie(_Container):
-     def _additional_keys(self):
-         """Valid keys to append to the data.keys() list."""
-         addkeys = []
--        if self.data.has_key('title'):
-+        if 'title' in self.data:
-             addkeys += ['canonical title', 'long imdb title',
-                         'long imdb canonical title',
-                         'smart canonical title',
-                         'smart long imdb canonical title']
--        if self.data.has_key('episode of'):
-+        if 'episode of' in self.data:
-             addkeys += ['long imdb episode title', 'series title',
-                         'canonical series title', 'episode title',
-                         'canonical episode title',
-                         'smart canonical series title',
-                         'smart canonical episode title']
--        if self.data.has_key('cover url'):
-+        if 'cover url' in self.data:
-             addkeys += ['full-size cover url']
-         return addkeys
- 
-@@ -215,14 +215,14 @@ class Movie(_Container):
-         used) and the language can be forced with the 'lang' argument,
-         otherwise it's auto-detected."""
-         if title is None:
--            title = self.data.get('title', u'')
-+            title = self.data.get('title', '')
-         if lang is None:
-             lang = self.guessLanguage()
-         return canonicalTitle(title, lang=lang)
- 
-     def _getitem(self, key):
-         """Handle special keys."""
--        if self.data.has_key('episode of'):
-+        if 'episode of' in self.data:
-             if key == 'long imdb episode title':
-                 return build_title(self.data)
-             elif key == 'series title':
-@@ -234,12 +234,12 @@ class Movie(_Container):
-                 ser_title = self.data['episode of']['title']
-                 return self.smartCanonicalTitle(ser_title)
-             elif key == 'episode title':
--                return self.data.get('title', u'')
-+                return self.data.get('title', '')
-             elif key == 'canonical episode title':
--                return canonicalTitle(self.data.get('title', u''))
-+                return canonicalTitle(self.data.get('title', ''))
-             elif key == 'smart canonical episode title':
--                return self.smartCanonicalTitle(self.data.get('title', u''))
--        if self.data.has_key('title'):
-+                return self.smartCanonicalTitle(self.data.get('title', ''))
-+        if 'title' in self.data:
-             if key == 'title':
-                 return self.data['title']
-             elif key == 'long imdb title':
-@@ -253,7 +253,7 @@ class Movie(_Container):
-             elif key == 'smart long imdb canonical title':
-                 return build_title(self.data, canonical=1,
-                                     lang=self.guessLanguage())
--        if key == 'full-size cover url' and self.data.has_key('cover url'):
-+        if key == 'full-size cover url' and 'cover url' in self.data:
-             return self._re_fullsizeURL.sub('', self.data.get('cover url', ''))
-         return None
- 
-@@ -261,10 +261,10 @@ class Movie(_Container):
-         """Return the movieID."""
-         return self.movieID
- 
--    def __nonzero__(self):
-+    def __bool__(self):
-         """The Movie is "false" if the self.data does not contain a title."""
-         # XXX: check the title and the movieID?
--        if self.data.has_key('title'): return 1
-+        if 'title' in self.data: return 1
-         return 0
- 
-     def isSameTitle(self, other):
-@@ -273,8 +273,8 @@ class Movie(_Container):
-         """
-         # XXX: obsolete?
-         if not isinstance(other, self.__class__): return 0
--        if self.data.has_key('title') and \
--                other.data.has_key('title') and \
-+        if 'title' in self.data and \
-+                'title' in other.data and \
-                 build_title(self.data, canonical=0) == \
-                 build_title(other.data, canonical=0):
-             return 1
-@@ -287,9 +287,9 @@ class Movie(_Container):
-     def __contains__(self, item):
-         """Return true if the given Person object is listed in this Movie,
-         or if the the given Character is represented in this Movie."""
--        from Person import Person
--        from Character import Character
--        from Company import Company
-+        from .Person import Person
-+        from .Character import Character
-+        from .Company import Company
-         if isinstance(item, Person):
-             for p in flatten(self.data, yieldDictKeys=1, scalar=Person,
-                             toDescend=(list, dict, tuple, Movie)):
-@@ -309,7 +309,7 @@ class Movie(_Container):
- 
-     def __deepcopy__(self, memo):
-         """Return a deep copy of a Movie instance."""
--        m = Movie(title=u'', movieID=self.movieID, myTitle=self.myTitle,
-+        m = Movie(title='', movieID=self.movieID, myTitle=self.myTitle,
-                     myID=self.myID, data=deepcopy(self.data, memo),
-                     currentRole=deepcopy(self.currentRole, memo),
-                     roleIsPerson=self._roleIsPerson,
-@@ -324,64 +324,64 @@ class Movie(_Container):
-     def __repr__(self):
-         """String representation of a Movie object."""
-         # XXX: add also currentRole and notes, if present?
--        if self.has_key('long imdb episode title'):
-+        if 'long imdb episode title' in self:
-             title = self.get('long imdb episode title')
-         else:
-             title = self.get('long imdb title')
-         r = '<Movie id:%s[%s] title:_%s_>' % (self.movieID, self.accessSystem,
-                                                 title)
--        if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
-+        if isinstance(r, str): r = r.encode('utf_8', 'replace')
-         return r
- 
-     def __str__(self):
-         """Simply print the short title."""
--        return self.get('title', u'').encode('utf_8', 'replace')
-+        return self.get('title', '').encode('utf_8', 'replace')
- 
-     def __unicode__(self):
-         """Simply print the short title."""
--        return self.get('title', u'')
-+        return self.get('title', '')
- 
-     def summary(self):
-         """Return a string with a pretty-printed summary for the movie."""
--        if not self: return u''
--        def _nameAndRole(personList, joiner=u', '):
-+        if not self: return ''
-+        def _nameAndRole(personList, joiner=', '):
-             """Build a pretty string with name and role."""
-             nl = []
-             for person in personList:
--                n = person.get('name', u'')
--                if person.currentRole: n += u' (%s)' % person.currentRole
-+                n = person.get('name', '')
-+                if person.currentRole: n += ' (%s)' % person.currentRole
-                 nl.append(n)
-             return joiner.join(nl)
--        s = u'Movie\n=====\nTitle: %s\n' % \
--                    self.get('long imdb canonical title', u'')
-+        s = 'Movie\n=====\nTitle: %s\n' % \
-+                    self.get('long imdb canonical title', '')
-         genres = self.get('genres')
--        if genres: s += u'Genres: %s.\n' % u', '.join(genres)
-+        if genres: s += 'Genres: %s.\n' % ', '.join(genres)
-         director = self.get('director')
-         if director:
--            s += u'Director: %s.\n' % _nameAndRole(director)
-+            s += 'Director: %s.\n' % _nameAndRole(director)
-         writer = self.get('writer')
-         if writer:
--            s += u'Writer: %s.\n' % _nameAndRole(writer)
-+            s += 'Writer: %s.\n' % _nameAndRole(writer)
-         cast = self.get('cast')
-         if cast:
-             cast = cast[:5]
--            s += u'Cast: %s.\n' % _nameAndRole(cast)
-+            s += 'Cast: %s.\n' % _nameAndRole(cast)
-         runtime = self.get('runtimes')
-         if runtime:
--            s += u'Runtime: %s.\n' % u', '.join(runtime)
-+            s += 'Runtime: %s.\n' % ', '.join(runtime)
-         countries = self.get('countries')
-         if countries:
--            s += u'Country: %s.\n' % u', '.join(countries)
-+            s += 'Country: %s.\n' % ', '.join(countries)
-         lang = self.get('languages')
-         if lang:
--            s += u'Language: %s.\n' % u', '.join(lang)
-+            s += 'Language: %s.\n' % ', '.join(lang)
-         rating = self.get('rating')
-         if rating:
--            s += u'Rating: %s' % rating
-+            s += 'Rating: %s' % rating
-             nr_votes = self.get('votes')
-             if nr_votes:
--                s += u' (%s votes)' % nr_votes
--            s += u'.\n'
-+                s += ' (%s votes)' % nr_votes
-+            s += '.\n'
-         plot = self.get('plot')
-         if not plot:
-             plot = self.get('plot summary')
-@@ -392,7 +392,7 @@ class Movie(_Container):
-             i = plot.find('::')
-             if i != -1:
-                 plot = plot[:i]
--            s += u'Plot: %s' % plot
-+            s += 'Plot: %s' % plot
-         return s
- 
- 
-diff --git a/imdb/Person.py b/imdb/Person.py
-index 6e3e462..e1c7de0 100644
---- a/imdb/Person.py
-+++ b/imdb/Person.py
-@@ -118,16 +118,16 @@ class Person(_Container):
-         *billingPos* -- position of this person in the credits list.
-         """
-         name = kwds.get('name')
--        if name and not self.data.has_key('name'):
-+        if name and 'name' not in self.data:
-             self.set_name(name)
-         self.personID = kwds.get('personID', None)
--        self.myName = kwds.get('myName', u'')
-+        self.myName = kwds.get('myName', '')
-         self.billingPos = kwds.get('billingPos', None)
- 
-     def _reset(self):
-         """Reset the Person object."""
-         self.personID = None
--        self.myName = u''
-+        self.myName = ''
-         self.billingPos = None
- 
-     def _clear(self):
-@@ -143,16 +143,16 @@ class Person(_Container):
-     def _additional_keys(self):
-         """Valid keys to append to the data.keys() list."""
-         addkeys = []
--        if self.data.has_key('name'):
-+        if 'name' in self.data:
-             addkeys += ['canonical name', 'long imdb name',
-                         'long imdb canonical name']
--        if self.data.has_key('headshot'):
-+        if 'headshot' in self.data:
-             addkeys += ['full-size headshot']
-         return addkeys
- 
-     def _getitem(self, key):
-         """Handle special keys."""
--        if self.data.has_key('name'):
-+        if 'name' in self.data:
-             if key == 'name':
-                 return normalizeName(self.data['name'])
-             elif key == 'canonical name':
-@@ -161,7 +161,7 @@ class Person(_Container):
-                 return build_name(self.data, canonical=0)
-             elif key == 'long imdb canonical name':
-                 return build_name(self.data)
--        if key == 'full-size headshot' and self.data.has_key('headshot'):
-+        if key == 'full-size headshot' and 'headshot' in self.data:
-             return self._re_fullsizeURL.sub('', self.data.get('headshot', ''))
-         return None
- 
-@@ -169,17 +169,17 @@ class Person(_Container):
-         """Return the personID."""
-         return self.personID
- 
--    def __nonzero__(self):
-+    def __bool__(self):
-         """The Person is "false" if the self.data does not contain a name."""
-         # XXX: check the name and the personID?
--        if self.data.has_key('name'): return 1
-+        if 'name' in self.data: return 1
-         return 0
- 
-     def __contains__(self, item):
-         """Return true if this Person has worked in the given Movie,
-         or if the fiven Character was played by this Person."""
--        from Movie import Movie
--        from Character import Character
-+        from .Movie import Movie
-+        from .Character import Character
-         if isinstance(item, Movie):
-             for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
-                 if item.isSame(m):
-@@ -196,8 +196,8 @@ class Person(_Container):
-         """
-         if not isinstance(other, self.__class__):
-             return 0
--        if self.data.has_key('name') and \
--                other.data.has_key('name') and \
-+        if 'name' in self.data and \
-+                'name' in other.data and \
-                 build_name(self.data, canonical=1) == \
-                 build_name(other.data, canonical=1):
-             return 1
-@@ -209,7 +209,7 @@ class Person(_Container):
- 
-     def __deepcopy__(self, memo):
-         """Return a deep copy of a Person instance."""
--        p = Person(name=u'', personID=self.personID, myName=self.myName,
-+        p = Person(name='', personID=self.personID, myName=self.myName,
-                     myID=self.myID, data=deepcopy(self.data, memo),
-                     currentRole=deepcopy(self.currentRole, memo),
-                     roleIsPerson=self._roleIsPerson,
-@@ -227,49 +227,49 @@ class Person(_Container):
-         # XXX: add also currentRole and notes, if present?
-         r = '<Person id:%s[%s] name:_%s_>' % (self.personID, self.accessSystem,
-                                         self.get('long imdb canonical name'))
--        if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
-+        if isinstance(r, str): r = r.encode('utf_8', 'replace')
-         return r
- 
-     def __str__(self):
-         """Simply print the short name."""
--        return self.get('name', u'').encode('utf_8', 'replace')
-+        return self.get('name', '').encode('utf_8', 'replace')
- 
-     def __unicode__(self):
-         """Simply print the short title."""
--        return self.get('name', u'')
-+        return self.get('name', '')
- 
-     def summary(self):
-         """Return a string with a pretty-printed summary for the person."""
--        if not self: return u''
--        s = u'Person\n=====\nName: %s\n' % \
--                                self.get('long imdb canonical name', u'')
-+        if not self: return ''
-+        s = 'Person\n=====\nName: %s\n' % \
-+                                self.get('long imdb canonical name', '')
-         bdate = self.get('birth date')
-         if bdate:
--            s += u'Birth date: %s' % bdate
-+            s += 'Birth date: %s' % bdate
-             bnotes = self.get('birth notes')
-             if bnotes:
--                s += u' (%s)' % bnotes
--            s += u'.\n'
-+                s += ' (%s)' % bnotes
-+            s += '.\n'
-         ddate = self.get('death date')
-         if ddate:
--            s += u'Death date: %s' % ddate
-+            s += 'Death date: %s' % ddate
-             dnotes = self.get('death notes')
-             if dnotes:
--                s += u' (%s)' % dnotes
--            s += u'.\n'
-+                s += ' (%s)' % dnotes
-+            s += '.\n'
-         bio = self.get('mini biography')
-         if bio:
--            s += u'Biography: %s\n' % bio[0]
-+            s += 'Biography: %s\n' % bio[0]
-         director = self.get('director')
-         if director:
--            d_list = [x.get('long imdb canonical title', u'')
-+            d_list = [x.get('long imdb canonical title', '')
-                         for x in director[:3]]
--            s += u'Last movies directed: %s.\n' % u'; '.join(d_list)
-+            s += 'Last movies directed: %s.\n' % '; '.join(d_list)
-         act = self.get('actor') or self.get('actress')
-         if act:
--            a_list = [x.get('long imdb canonical title', u'')
-+            a_list = [x.get('long imdb canonical title', '')
-                         for x in act[:5]]
--            s += u'Last movies acted: %s.\n' % u'; '.join(a_list)
-+            s += 'Last movies acted: %s.\n' % '; '.join(a_list)
-         return s
- 
- 
-diff --git a/imdb/__init__.py b/imdb/__init__.py
-index f93482d..3921dbf 100644
---- a/imdb/__init__.py
-+++ b/imdb/__init__.py
-@@ -28,9 +28,9 @@ __all__ = ['IMDb', 'IMDbError', 'Movie', 'Person', 'Character', 'Company',
- __version__ = VERSION = '4.9'
- 
- # Import compatibility module (importing it is enough).
--import _compat
-+from . import _compat
- 
--import sys, os, ConfigParser, logging
-+import sys, os, configparser, logging
- from types import MethodType
- 
- from imdb import Movie, Person, Character, Company
-@@ -75,14 +75,14 @@ imdbURL_find = imdbURL_base + 'find?%s'
- # Name of the configuration file.
- confFileName = 'imdbpy.cfg'
- 
--class ConfigParserWithCase(ConfigParser.ConfigParser):
-+class ConfigParserWithCase(configparser.ConfigParser):
-     """A case-sensitive parser for configuration files."""
-     def __init__(self, defaults=None, confFile=None, *args, **kwds):
-         """Initialize the parser.
- 
-         *defaults* -- defaults values.
-         *confFile* -- the file (or list of files) to parse."""
--        ConfigParser.ConfigParser.__init__(self, defaults=defaults)
-+        configparser.ConfigParser.__init__(self, defaults=defaults)
-         if confFile is None:
-             dotFileName = '.' + confFileName
-             # Current and home directory.
-@@ -102,8 +102,8 @@ class ConfigParserWithCase(ConfigParser.ConfigParser):
-         for fname in confFile:
-             try:
-                 self.read(fname)
--            except (ConfigParser.MissingSectionHeaderError,
--                    ConfigParser.ParsingError), e:
-+            except (configparser.MissingSectionHeaderError,
-+                    configparser.ParsingError) as e:
-                 _aux_logger.warn('Troubles reading config file: %s' % e)
-             # Stop at the first valid file.
-             if self.has_section('imdbpy'):
-@@ -115,7 +115,7 @@ class ConfigParserWithCase(ConfigParser.ConfigParser):
- 
-     def _manageValue(self, value):
-         """Custom substitutions for values."""
--        if not isinstance(value, (str, unicode)):
-+        if not isinstance(value, str):
-             return value
-         vlower = value.lower()
-         if vlower in self._boolean_states:
-@@ -126,7 +126,7 @@ class ConfigParserWithCase(ConfigParser.ConfigParser):
- 
-     def get(self, section, option, *args, **kwds):
-         """Return the value of an option from a given section."""
--        value = ConfigParser.ConfigParser.get(self, section, option,
-+        value = configparser.ConfigParser.get(self, section, option,
-                                             *args, **kwds)
-         return self._manageValue(value)
- 
-@@ -135,7 +135,7 @@ class ConfigParserWithCase(ConfigParser.ConfigParser):
-         given section."""
-         if section != 'DEFAULT' and not self.has_section(section):
-             return []
--        keys = ConfigParser.ConfigParser.options(self, section)
-+        keys = configparser.ConfigParser.options(self, section)
-         return [(k, self.get(section, k, *args, **kwds)) for k in keys]
- 
-     def getDict(self, section):
-@@ -159,7 +159,7 @@ def IMDb(accessSystem=None, *arguments, **keywords):
-                 accessSystem = 'http'
-             kwds.update(keywords)
-             keywords = kwds
--        except Exception, e:
-+        except Exception as e:
-             logging.getLogger('imdbpy').warn('Unable to read configuration' \
-                                             ' file; complete error: %s' % e)
-             # It just LOOKS LIKE a bad habit: we tried to read config
-@@ -176,24 +176,24 @@ def IMDb(accessSystem=None, *arguments, **keywords):
-         try:
-             import logging.config
-             logging.config.fileConfig(os.path.expanduser(logCfg))
--        except Exception, e:
-+        except Exception as e:
-             logging.getLogger('imdbpy').warn('unable to read logger ' \
-                                             'config: %s' % e)
-     if accessSystem in ('httpThin', 'webThin', 'htmlThin'):
-         logging.warn('httpThin was removed since IMDbPY 4.8')
-         accessSystem = 'http'
-     if accessSystem in ('http', 'web', 'html'):
--        from parser.http import IMDbHTTPAccessSystem
-+        from .parser.http import IMDbHTTPAccessSystem
-         return IMDbHTTPAccessSystem(*arguments, **keywords)
-     elif accessSystem in ('mobile',):
--        from parser.mobile import IMDbMobileAccessSystem
-+        from .parser.mobile import IMDbMobileAccessSystem
-         return IMDbMobileAccessSystem(*arguments, **keywords)
-     elif accessSystem in ('local', 'files'):
-         # The local access system was removed since IMDbPY 4.2.
-         raise IMDbError('the local access system was removed since IMDbPY 4.2')
-     elif accessSystem in ('sql', 'db', 'database'):
-         try:
--            from parser.sql import IMDbSqlAccessSystem
-+            from .parser.sql import IMDbSqlAccessSystem
-         except ImportError:
-             raise IMDbError('the sql access system is not installed')
-         return IMDbSqlAccessSystem(*arguments, **keywords)
-@@ -207,17 +207,17 @@ def available_access_systems():
-     asList = []
-     # XXX: trying to import modules is a good thing?
-     try:
--        from parser.http import IMDbHTTPAccessSystem
-+        from .parser.http import IMDbHTTPAccessSystem
-         asList.append('http')
-     except ImportError:
-         pass
-     try:
--        from parser.mobile import IMDbMobileAccessSystem
-+        from .parser.mobile import IMDbMobileAccessSystem
-         asList.append('mobile')
-     except ImportError:
-         pass
-     try:
--        from parser.sql import IMDbSqlAccessSystem
-+        from .parser.sql import IMDbSqlAccessSystem
-         asList.append('sql')
-     except ImportError:
-         pass
-@@ -429,8 +429,8 @@ class IMDbBase:
-             results = 20
-         # XXX: I suppose it will be much safer if the user provides
-         #      an unicode string... this is just a guess.
--        if not isinstance(title, unicode):
--            title = unicode(title, encoding, 'replace')
-+        if not isinstance(title, str):
-+            title = str(title, encoding, 'replace')
-         if not _episodes:
-             res = self._search_movie(title, results)
-         else:
-@@ -489,8 +489,8 @@ class IMDbBase:
-             results = int(results)
-         except (ValueError, OverflowError):
-             results = 20
--        if not isinstance(name, unicode):
--            name = unicode(name, encoding, 'replace')
-+        if not isinstance(name, str):
-+            name = str(name, encoding, 'replace')
-         res = self._search_person(name, results)
-         return [Person.Person(personID=self._get_real_personID(pi),
-                 data=pd, modFunct=self._defModFunct,
-@@ -534,8 +534,8 @@ class IMDbBase:
-             results = int(results)
-         except (ValueError, OverflowError):
-             results = 20
--        if not isinstance(name, unicode):
--            name = unicode(name, encoding, 'replace')
-+        if not isinstance(name, str):
-+            name = str(name, encoding, 'replace')
-         res = self._search_character(name, results)
-         return [Character.Character(characterID=self._get_real_characterID(pi),
-                 data=pd, modFunct=self._defModFunct,
-@@ -579,8 +579,8 @@ class IMDbBase:
-             results = int(results)
-         except (ValueError, OverflowError):
-             results = 20
--        if not isinstance(name, unicode):
--            name = unicode(name, encoding, 'replace')
-+        if not isinstance(name, str):
-+            name = str(name, encoding, 'replace')
-         res = self._search_company(name, results)
-         return [Company.Company(companyID=self._get_real_companyID(pi),
-                 data=pd, modFunct=self._defModFunct,
-@@ -600,8 +600,8 @@ class IMDbBase:
-             results = int(results)
-         except (ValueError, OverflowError):
-             results = 100
--        if not isinstance(keyword, unicode):
--            keyword = unicode(keyword, encoding, 'replace')
-+        if not isinstance(keyword, str):
-+            keyword = str(keyword, encoding, 'replace')
-         return self._search_keyword(keyword, results)
- 
-     def _get_keyword(self, keyword, results):
-@@ -620,8 +620,8 @@ class IMDbBase:
-             results = 100
-         # XXX: I suppose it will be much safer if the user provides
-         #      an unicode string... this is just a guess.
--        if not isinstance(keyword, unicode):
--            keyword = unicode(keyword, encoding, 'replace')
-+        if not isinstance(keyword, str):
-+            keyword = str(keyword, encoding, 'replace')
-         res = self._get_keyword(keyword, results)
-         return [Movie.Movie(movieID=self._get_real_movieID(mi),
-                 data=md, modFunct=self._defModFunct,
-@@ -653,12 +653,12 @@ class IMDbBase:
-         """Return a Movie object."""
-         # XXX: not really useful...
-         if 'title' in keywords:
--            if not isinstance(keywords['title'], unicode):
--                keywords['title'] = unicode(keywords['title'],
-+            if not isinstance(keywords['title'], str):
-+                keywords['title'] = str(keywords['title'],
-                                             encoding, 'replace')
-         elif len(arguments) > 1:
--            if not isinstance(arguments[1], unicode):
--                arguments[1] = unicode(arguments[1], encoding, 'replace')
-+            if not isinstance(arguments[1], str):
-+                arguments[1] = str(arguments[1], encoding, 'replace')
-         return Movie.Movie(accessSystem=self.accessSystem,
-                             *arguments, **keywords)
- 
-@@ -666,12 +666,12 @@ class IMDbBase:
-         """Return a Person object."""
-         # XXX: not really useful...
-         if 'name' in keywords:
--            if not isinstance(keywords['name'], unicode):
--                keywords['name'] = unicode(keywords['name'],
-+            if not isinstance(keywords['name'], str):
-+                keywords['name'] = str(keywords['name'],
-                                             encoding, 'replace')
-         elif len(arguments) > 1:
--            if not isinstance(arguments[1], unicode):
--                arguments[1] = unicode(arguments[1], encoding, 'replace')
-+            if not isinstance(arguments[1], str):
-+                arguments[1] = str(arguments[1], encoding, 'replace')
-         return Person.Person(accessSystem=self.accessSystem,
-                                 *arguments, **keywords)
- 
-@@ -679,12 +679,12 @@ class IMDbBase:
-         """Return a Character object."""
-         # XXX: not really useful...
-         if 'name' in keywords:
--            if not isinstance(keywords['name'], unicode):
--                keywords['name'] = unicode(keywords['name'],
-+            if not isinstance(keywords['name'], str):
-+                keywords['name'] = str(keywords['name'],
-                                             encoding, 'replace')
-         elif len(arguments) > 1:
--            if not isinstance(arguments[1], unicode):
--                arguments[1] = unicode(arguments[1], encoding, 'replace')
-+            if not isinstance(arguments[1], str):
-+                arguments[1] = str(arguments[1], encoding, 'replace')
-         return Character.Character(accessSystem=self.accessSystem,
-                                     *arguments, **keywords)
- 
-@@ -692,12 +692,12 @@ class IMDbBase:
-         """Return a Company object."""
-         # XXX: not really useful...
-         if 'name' in keywords:
--            if not isinstance(keywords['name'], unicode):
--                keywords['name'] = unicode(keywords['name'],
-+            if not isinstance(keywords['name'], str):
-+                keywords['name'] = str(keywords['name'],
-                                             encoding, 'replace')
-         elif len(arguments) > 1:
--            if not isinstance(arguments[1], unicode):
--                arguments[1] = unicode(arguments[1], encoding, 'replace')
-+            if not isinstance(arguments[1], str):
-+                arguments[1] = str(arguments[1], encoding, 'replace')
-         return Company.Company(accessSystem=self.accessSystem,
-                                     *arguments, **keywords)
- 
-@@ -769,7 +769,7 @@ class IMDbBase:
-                 method = lambda *x: {}
-             try:
-                 ret = method(mopID)
--            except Exception, e:
-+            except Exception as e:
-                 self._imdb_logger.critical('caught an exception retrieving ' \
-                                     'or parsing "%s" info set for mopID ' \
-                                     '"%s" (accessSystem: %s)',
-@@ -782,7 +782,7 @@ class IMDbBase:
-             if 'data' in ret:
-                 res.update(ret['data'])
-                 if isinstance(ret['data'], dict):
--                    keys = ret['data'].keys()
-+                    keys = list(ret['data'].keys())
-             if 'info sets' in ret:
-                 for ri in ret['info sets']:
-                     mop.add_to_current_info(ri, keys, mainInfoset=i)
-diff --git a/imdb/helpers.py b/imdb/helpers.py
-index f220614..e9be10c 100644
---- a/imdb/helpers.py
-+++ b/imdb/helpers.py
-@@ -60,7 +60,7 @@ def makeCgiPrintEncoding(encoding):
-         """Encode the given string using the %s encoding, and replace
-         chars outside the given charset with XML char references.""" % encoding
-         s = escape(s, quote=1)
--        if isinstance(s, unicode):
-+        if isinstance(s, str):
-             s = s.encode(encoding, 'xmlcharrefreplace')
-         return s
-     return cgiPrint
-@@ -85,7 +85,7 @@ def makeTextNotes(replaceTxtNotes):
-     of the makeObject2Txt function."""
-     def _replacer(s):
-         outS = replaceTxtNotes
--        if not isinstance(s, (unicode, str)):
-+        if not isinstance(s, str):
-             return s
-         ssplit = s.split('::', 1)
-         text = ssplit[0]
-@@ -98,12 +98,12 @@ def makeTextNotes(replaceTxtNotes):
-             keysDict['notes'] = True
-             outS = outS.replace('%(notes)s', ssplit[1])
-         else:
--            outS = outS.replace('%(notes)s', u'')
-+            outS = outS.replace('%(notes)s', '')
-         def _excludeFalseConditionals(matchobj):
-             # Return an empty string if the conditional is false/empty.
-             if matchobj.group(1) in keysDict:
-                 return matchobj.group(2)
--            return u''
-+            return ''
-         while re_conditional.search(outS):
-             outS = re_conditional.sub(_excludeFalseConditionals, outS)
-         return outS
-@@ -139,17 +139,17 @@ def makeObject2Txt(movieTxt=None, personTxt=None, characterTxt=None,
-         if _limitRecursion is None:
-             _limitRecursion = 0
-         elif _limitRecursion > 5:
--            return u''
-+            return ''
-         _limitRecursion += 1
-         if isinstance(obj, (list, tuple)):
-             return joiner.join([object2txt(o, _limitRecursion=_limitRecursion)
-                                 for o in obj])
-         elif isinstance(obj, dict):
-             # XXX: not exactly nice, neither useful, I fear.
--            return joiner.join([u'%s::%s' %
-+            return joiner.join(['%s::%s' %
-                             (object2txt(k, _limitRecursion=_limitRecursion),
-                             object2txt(v, _limitRecursion=_limitRecursion))
--                            for k, v in obj.items()])
-+                            for k, v in list(obj.items())])
-         objData = {}
-         if isinstance(obj, Movie):
-             objData['movieID'] = obj.movieID
-@@ -172,25 +172,25 @@ def makeObject2Txt(movieTxt=None, personTxt=None, characterTxt=None,
-             if proceed:
-                 return matchobj.group(2)
-             else:
--                return u''
-+                return ''
-             return matchobj.group(2)
-         while re_conditional.search(outs):
-             outs = re_conditional.sub(_excludeFalseConditionals, outs)
-         for key in re_subst.findall(outs):
-             value = obj.get(key) or getattr(obj, key, None)
--            if not isinstance(value, (unicode, str)):
-+            if not isinstance(value, str):
-                 if not _recurse:
-                     if value:
--                        value =  unicode(value)
-+                        value =  str(value)
-                 if value:
-                     value = object2txt(value, _limitRecursion=_limitRecursion)
-             elif value:
--                value = applyToValues(unicode(value))
-+                value = applyToValues(str(value))
-             if not value:
--                value = u''
--            elif not isinstance(value, (unicode, str)):
--                value = unicode(value)
--            outs = outs.replace(u'%(' + key + u')s', value)
-+                value = ''
-+            elif not isinstance(value, str):
-+                value = str(value)
-+            outs = outs.replace('%(' + key + ')s', value)
-         return outs
-     return object2txt
- 
-@@ -213,7 +213,7 @@ def makeModCGILinks(movieTxt, personTxt, characterTxt=None,
-             if item:
-                 movieID = item.movieID
-                 to_replace = movieTxt % {'movieID': movieID,
--                                        'title': unicode(_cgiPrint(to_replace),
-+                                        'title': str(_cgiPrint(to_replace),
-                                                         encoding,
-                                                         'xmlcharrefreplace')}
-             return to_replace
-@@ -223,7 +223,7 @@ def makeModCGILinks(movieTxt, personTxt, characterTxt=None,
-             if item:
-                 personID = item.personID
-                 to_replace = personTxt % {'personID': personID,
--                                        'name': unicode(_cgiPrint(to_replace),
-+                                        'name': str(_cgiPrint(to_replace),
-                                                         encoding,
-                                                         'xmlcharrefreplace')}
-             return to_replace
-@@ -237,7 +237,7 @@ def makeModCGILinks(movieTxt, personTxt, characterTxt=None,
-                 if characterID is None:
-                     return to_replace
-                 to_replace = characterTxt % {'characterID': characterID,
--                                        'name': unicode(_cgiPrint(to_replace),
-+                                        'name': str(_cgiPrint(to_replace),
-                                                         encoding,
-                                                         'xmlcharrefreplace')}
-             return to_replace
-@@ -265,7 +265,7 @@ modHtmlLinksASCII = makeModCGILinks(movieTxt=_movieTxt, personTxt=_personTxt,
- 
- 
- everyentcharrefs = entcharrefs.copy()
--for k, v in {'lt':u'<','gt':u'>','amp':u'&','quot':u'"','apos':u'\''}.items():
-+for k, v in list({'lt':'<','gt':'>','amp':'&','quot':'"','apos':'\''}.items()):
-     everyentcharrefs[k] = v
-     everyentcharrefs['#%s' % ord(v)] = v
- everyentcharrefsget = everyentcharrefs.get
-@@ -279,7 +279,7 @@ def _replAllXMLRef(match):
-     value = everyentcharrefsget(ref)
-     if value is None:
-         if ref[0] == '#':
--            return unichr(int(ref[1:]))
-+            return chr(int(ref[1:]))
-         else:
-             return ref
-     return value
-@@ -292,7 +292,7 @@ def subXMLHTMLSGMLRefs(s):
- 
- def sortedSeasons(m):
-     """Return a sorted list of seasons of the given series."""
--    seasons = m.get('episodes', {}).keys()
-+    seasons = list(m.get('episodes', {}).keys())
-     seasons.sort()
-     return seasons
- 
-@@ -308,7 +308,7 @@ def sortedEpisodes(m, season=None):
-         if not isinstance(season, (tuple, list)):
-             seasons = [season]
-     for s in seasons:
--        eps_indx = m.get('episodes', {}).get(s, {}).keys()
-+        eps_indx = list(m.get('episodes', {}).get(s, {}).keys())
-         eps_indx.sort()
-         for e in eps_indx:
-             episodes.append(m['episodes'][s][e])
-@@ -382,7 +382,7 @@ _MAP_TOP_OBJ = {
- }
- 
- # Tags to be converted to lists.
--_TAGS_TO_LIST = dict([(x[0], None) for x in TAGS_TO_MODIFY.values()])
-+_TAGS_TO_LIST = dict([(x[0], None) for x in list(TAGS_TO_MODIFY.values())])
- _TAGS_TO_LIST.update(_MAP_TOP_OBJ)
- 
- def tagToKey(tag):
-@@ -423,12 +423,12 @@ def parseTags(tag, _topLevel=True, _as=None, _infoset2keys=None,
-         _key2infoset = {}
-     name = tagToKey(tag)
-     firstChild = tag.find(recursive=False)
--    tagStr = (tag.string or u'').strip()
-+    tagStr = (tag.string or '').strip()
-     if not tagStr and name == 'item':
-         # Handles 'item' tags containing text and a 'notes' sub-tag.
-         tagContent = tag.contents[0]
-         if isinstance(tagContent, BeautifulSoup.NavigableString):
--            tagStr = (unicode(tagContent) or u'').strip()
-+            tagStr = (str(tagContent) or '').strip()
-     tagType = tag.get('type')
-     infoset = tag.get('infoset')
-     if infoset:
-@@ -504,9 +504,9 @@ def parseTags(tag, _topLevel=True, _as=None, _infoset2keys=None,
-         _adder = lambda key, value: item.data.update({key: value})
-     elif tagStr:
-         if tag.notes:
--            notes = (tag.notes.string or u'').strip()
-+            notes = (tag.notes.string or '').strip()
-             if notes:
--                tagStr += u'::%s' % notes
-+                tagStr += '::%s' % notes
-         else:
-             tagStr = _valueWithType(tag, tagStr)
-         return tagStr
-@@ -534,7 +534,7 @@ def parseTags(tag, _topLevel=True, _as=None, _infoset2keys=None,
-         # Add information about 'info sets', but only to the top-level object.
-         item.infoset2keys = _infoset2keys
-         item.key2infoset = _key2infoset
--        item.current_info = _infoset2keys.keys()
-+        item.current_info = list(_infoset2keys.keys())
-     return item
- 
- 
-@@ -594,7 +594,7 @@ def sortAKAsBySimilarity(movie, title, _titlesOnly=True, _preferredLang=None):
-     # estimate string distance between current title and given title
-     m_title = movie['title'].lower()
-     l_title = title.lower()
--    if isinstance(l_title, unicode):
-+    if isinstance(l_title, str):
-         l_title = l_title.encode('utf8')
-     scores = []
-     score = difflib.SequenceMatcher(None, m_title.encode('utf8'), l_title).ratio()
-@@ -603,7 +603,7 @@ def sortAKAsBySimilarity(movie, title, _titlesOnly=True, _preferredLang=None):
-     for language, aka in akasLanguages(movie):
-         # estimate string distance between current title and given title
-         m_title = aka.lower()
--        if isinstance(m_title, unicode):
-+        if isinstance(m_title, str):
-             m_title = m_title.encode('utf8')
-         score = difflib.SequenceMatcher(None, m_title, l_title).ratio()
-         # if current language is the same as the given one, increase score
-@@ -626,7 +626,7 @@ def getAKAsInLanguage(movie, lang, _searchedTitle=None):
-             akas.append(aka)
-     if _searchedTitle:
-         scores = []
--        if isinstance(_searchedTitle, unicode):
-+        if isinstance(_searchedTitle, str):
-             _searchedTitle = _searchedTitle.encode('utf8')
-         for aka in akas:
-             m_aka = aka
-diff --git a/imdb/linguistics.py b/imdb/linguistics.py
-index da8829f..0679de9 100644
---- a/imdb/linguistics.py
-+++ b/imdb/linguistics.py
-@@ -154,7 +154,7 @@ for lang in LANG_COUNTRIES:
- 
- def toUnicode(articles):
-     """Convert a list of articles utf-8 encoded to unicode strings."""
--    return tuple([art.decode('utf_8') for art in articles])
-+    return tuple([articles])
- 
- 
- def toDicts(articles):
-diff --git a/imdb/locale/generatepot.py b/imdb/locale/generatepot.py
-index 282f7d4..a914d32 100755
---- a/imdb/locale/generatepot.py
-+++ b/imdb/locale/generatepot.py
-@@ -50,7 +50,7 @@ msgstr ""
- """
- 
- if len(sys.argv) != 2:
--    print "Usage: %s dtd_file" % sys.argv[0]
-+    print("Usage: %s dtd_file" % sys.argv[0])
-     sys.exit()
- 
- dtdfilename = sys.argv[1]
-@@ -59,20 +59,20 @@ elements = re_element.findall(dtd)
- uniq = set(elements)
- elements = list(uniq)
- 
--print POT_HEADER_TEMPLATE % {
-+print(POT_HEADER_TEMPLATE % {
-     'now': dt.strftime(dt.now(), "%Y-%m-%d %H:%M+0000")
--}
-+})
- for element in sorted(elements):
-     if element in DEFAULT_MESSAGES:
--        print '# Default: %s' % DEFAULT_MESSAGES[element]
-+        print('# Default: %s' % DEFAULT_MESSAGES[element])
-     else:
--        print '# Default: %s' % element.replace('-', ' ').capitalize()
--    print 'msgid "%s"' % element
--    print 'msgstr ""'
-+        print('# Default: %s' % element.replace('-', ' ').capitalize())
-+    print('msgid "%s"' % element)
-+    print('msgstr ""')
-     # use this part instead of the line above to generate the po file for English
-     #if element in DEFAULT_MESSAGES:
-     #    print 'msgstr "%s"' % DEFAULT_MESSAGES[element]
-     #else:
-     #    print 'msgstr "%s"' % element.replace('-', ' ').capitalize()
--    print
-+    print()
- 
-diff --git a/imdb/locale/msgfmt.py b/imdb/locale/msgfmt.py
-index 9e0ab74..7eaf251 100644
---- a/imdb/locale/msgfmt.py
-+++ b/imdb/locale/msgfmt.py
-@@ -106,7 +106,7 @@ class MsgFmt(object):
- 
-     def generate_mo(self):
-         """Return the generated output."""
--        keys = self.messages.keys()
-+        keys = list(self.messages.keys())
-         # the keys are sorted in the .mo file
-         keys.sort()
-         offsets = []
-@@ -134,7 +134,7 @@ class MsgFmt(object):
-             voffsets += [l2, o2 + valuestart]
-         offsets = koffsets + voffsets
-         output.append(struct.pack("Iiiiiii",
--                             0x950412deL,       # Magic
-+                             0x950412de,       # Magic
-                              0,                 # Version
-                              len(keys),         # # of entries
-                              7*4,               # start of key index
-@@ -151,32 +151,32 @@ def make(filename, outfile):
-     infile, outfile = mf.make_filenames(filename, outfile)
-     try:
-         lines = file(infile).readlines()
--    except IOError, msg:
--        print >> sys.stderr, msg
-+    except IOError as msg:
-+        print(msg, file=sys.stderr)
-         sys.exit(1)
-     try:
-         mf.read_po(lines)
-         output = mf.generate_mo()
--    except SyntaxErrorException, msg:
--        print >> sys.stderr, msg
-+    except SyntaxErrorException as msg:
-+        print(msg, file=sys.stderr)
- 
-     try:
-         open(outfile, "wb").write(output)
--    except IOError, msg:
--        print >> sys.stderr, msg
-+    except IOError as msg:
-+        print(msg, file=sys.stderr)
- 
- 
- def usage(code, msg=''):
--    print >> sys.stderr, __doc__
-+    print(__doc__, file=sys.stderr)
-     if msg:
--        print >> sys.stderr, msg
-+        print(msg, file=sys.stderr)
-     sys.exit(code)
- 
- 
- def main():
-     try:
-         opts, args = getopt.getopt(sys.argv[1:], 'hVo:', ['help', 'version', 'output-file='])
--    except getopt.error, msg:
-+    except getopt.error as msg:
-         usage(1, msg)
- 
-     outfile = None
-@@ -185,14 +185,14 @@ def main():
-         if opt in ('-h', '--help'):
-             usage(0)
-         elif opt in ('-V', '--version'):
--            print >> sys.stderr, "msgfmt.py", __version__
-+            print("msgfmt.py", __version__, file=sys.stderr)
-             sys.exit(0)
-         elif opt in ('-o', '--output-file'):
-             outfile = arg
-     # do it
-     if not args:
--        print >> sys.stderr, 'No input file given'
--        print >> sys.stderr, "Try `msgfmt --help' for more information."
-+        print('No input file given', file=sys.stderr)
-+        print("Try `msgfmt --help' for more information.", file=sys.stderr)
-         return
- 
-     for filename in args:
-diff --git a/imdb/locale/rebuildmo.py b/imdb/locale/rebuildmo.py
-index b72a74c..3f6302b 100755
---- a/imdb/locale/rebuildmo.py
-+++ b/imdb/locale/rebuildmo.py
-@@ -22,7 +22,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- """
- 
- import glob
--import msgfmt
-+from . import msgfmt
- import os
- 
- #LOCALE_DIR = os.path.dirname(__file__)
-@@ -45,5 +45,5 @@ def rebuildmo():
- 
- if __name__ == '__main__':
-     languages = rebuildmo()
--    print 'Created locale for: %s.' % ' '.join(languages)
-+    print('Created locale for: %s.' % ' '.join(languages))
- 
-diff --git a/imdb/parser/http/__init__.py b/imdb/parser/http/__init__.py
-index e49e7bb..91d96da 100644
---- a/imdb/parser/http/__init__.py
-+++ b/imdb/parser/http/__init__.py
-@@ -28,7 +28,8 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- import sys
- import socket
- import logging
--from urllib import FancyURLopener, quote_plus
-+from urllib.request import FancyURLopener
-+from urllib.parse import quote_plus
- from codecs import lookup
- 
- from imdb import IMDbBase, imdbURL_movie_main, imdbURL_person_main, \
-@@ -38,16 +39,16 @@ from imdb import IMDbBase, imdbURL_movie_main, imdbURL_person_main, \
- from imdb.utils import analyze_title
- from imdb._exceptions import IMDbDataAccessError, IMDbParserError
- 
--import searchMovieParser
--import searchPersonParser
--import searchCharacterParser
--import searchCompanyParser
--import searchKeywordParser
--import movieParser
--import personParser
--import characterParser
--import companyParser
--import topBottomParser
-+from . import searchMovieParser
-+from . import searchPersonParser
-+from . import searchCharacterParser
-+from . import searchCompanyParser
-+from . import searchKeywordParser
-+from . import movieParser
-+from . import personParser
-+from . import characterParser
-+from . import companyParser
-+from . import topBottomParser
- 
- # Logger for miscellaneous functions.
- _aux_logger = logging.getLogger('imdbpy.parser.http.aux')
-@@ -132,7 +133,7 @@ class IMDbURLopener(FancyURLopener):
-     _logger = logging.getLogger('imdbpy.parser.http.urlopener')
- 
-     def __init__(self, *args, **kwargs):
--        self._last_url = u''
-+        self._last_url = ''
-         FancyURLopener.__init__(self, *args, **kwargs)
-         # Headers to add to every request.
-         # XXX: IMDb's web server doesn't like urllib-based programs,
-@@ -153,7 +154,7 @@ class IMDbURLopener(FancyURLopener):
-     def set_proxy(self, proxy):
-         """Set the proxy."""
-         if not proxy:
--            if self.proxies.has_key('http'):
-+            if 'http' in self.proxies:
-                 del self.proxies['http']
-         else:
-             if not proxy.lower().startswith('http://'):
-@@ -169,14 +170,14 @@ class IMDbURLopener(FancyURLopener):
-     def get_header(self, header):
-         """Return the first value of a header, or None
-         if not present."""
--        for index in xrange(len(self.addheaders)):
-+        for index in range(len(self.addheaders)):
-             if self.addheaders[index][0] == header:
-                 return self.addheaders[index][1]
-         return None
- 
-     def del_header(self, header):
-         """Remove a default header."""
--        for index in xrange(len(self.addheaders)):
-+        for index in range(len(self.addheaders)):
-             if self.addheaders[index][0] == header:
-                 del self.addheaders[index]
-                 break
-@@ -215,7 +216,7 @@ class IMDbURLopener(FancyURLopener):
-             if size != -1:
-                 self.del_header('Range')
-             self.close()
--        except IOError, e:
-+        except IOError as e:
-             if size != -1:
-                 # Ensure that the Range header is removed.
-                 self.del_header('Range')
-@@ -231,7 +232,7 @@ class IMDbURLopener(FancyURLopener):
-             self._logger.warn('Unable to detect the encoding of the retrieved '
-                         'page [%s]; falling back to default latin1.', encode)
-         ##print unicode(content, encode, 'replace').encode('utf8')
--        return unicode(content, encode, 'replace')
-+        return str(content, encode, 'replace')
- 
-     def http_error_default(self, url, fp, errcode, errmsg, headers):
-         if errcode == 404:
-@@ -333,21 +334,21 @@ class IMDbHTTPAccessSystem(IMDbBase):
-         """Normalize the given movieID."""
-         try:
-             return '%07d' % int(movieID)
--        except ValueError, e:
-+        except ValueError as e:
-             raise IMDbParserError('invalid movieID "%s": %s' % (movieID, e))
- 
-     def _normalize_personID(self, personID):
-         """Normalize the given personID."""
-         try:
-             return '%07d' % int(personID)
--        except ValueError, e:
-+        except ValueError as e:
-             raise IMDbParserError('invalid personID "%s": %s' % (personID, e))
- 
-     def _normalize_characterID(self, characterID):
-         """Normalize the given characterID."""
-         try:
-             return '%07d' % int(characterID)
--        except ValueError, e:
-+        except ValueError as e:
-             raise IMDbParserError('invalid characterID "%s": %s' % \
-                     (characterID, e))
- 
-@@ -355,7 +356,7 @@ class IMDbHTTPAccessSystem(IMDbBase):
-         """Normalize the given companyID."""
-         try:
-             return '%07d' % int(companyID)
--        except ValueError, e:
-+        except ValueError as e:
-             raise IMDbParserError('invalid companyID "%s": %s' % \
-                     (companyID, e))
- 
-@@ -453,13 +454,13 @@ class IMDbHTTPAccessSystem(IMDbBase):
-         'char' (for characters) or 'co' (for companies).
-         ton is the title or the name to search.
-         results is the maximum number of results to be retrieved."""
--        if isinstance(ton, unicode):
-+        if isinstance(ton, str):
-             try:
-                 ton = ton.encode('iso8859-1')
--            except Exception, e:
-+            except Exception as e:
-                 try:
-                     ton = ton.encode('utf-8')
--                except Exception, e:
-+                except Exception as e:
-                     pass
-         ##params = 'q=%s&%s=on&mx=%s' % (quote_plus(ton), kind, str(results))
-         params = 'q=%s;s=%s;mx=%s' % (quote_plus(ton), kind, str(results))
-@@ -666,7 +667,7 @@ class IMDbHTTPAccessSystem(IMDbBase):
-         cont = self._retrieve(self.urls['movie_main'] % movieID + 'epdate', _noCookies=True)
-         data_d = self.mProxy.eprating_parser.parse(cont)
-         # set movie['episode of'].movieID for every episode.
--        if data_d.get('data', {}).has_key('episodes rating'):
-+        if 'episodes rating' in data_d.get('data', {}):
-             for item in data_d['data']['episodes rating']:
-                 episode = item['episode']
-                 episode['episode of'].movieID = movieID
-diff --git a/imdb/parser/http/bsouplxml/_bsoup.py b/imdb/parser/http/bsouplxml/_bsoup.py
-index afab5da..0aee945 100644
---- a/imdb/parser/http/bsouplxml/_bsoup.py
-+++ b/imdb/parser/http/bsouplxml/_bsoup.py
-@@ -81,7 +81,7 @@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
- 
- """
--from __future__ import generators
-+
- 
- __author__ = "Leonard Richardson (leonardr@segfault.org)"
- __version__ = "3.0.7a"
-@@ -90,12 +90,13 @@ __license__ = "New-style BSD"
- 
- from sgmllib import SGMLParser, SGMLParseError
- import codecs
--import markupbase
-+import _markupbase
- import types
- import re
- import sgmllib
-+import collections
- try:
--  from htmlentitydefs import name2codepoint
-+  from html.entities import name2codepoint
- except ImportError:
-   name2codepoint = {}
- try:
-@@ -105,7 +106,7 @@ except NameError:
- 
- #These hacks make Beautiful Soup able to parse XML with namespaces
- sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
--markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
-+_markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
- 
- DEFAULT_OUTPUT_ENCODING = "utf-8"
- 
-@@ -153,7 +154,7 @@ class PageElement:
-         #this element (and any children) hadn't been parsed. Connect
-         #the two.
-         lastChild = self._lastRecursiveChild()
--        nextElement = lastChild.next
-+        nextElement = lastChild.__next__
- 
-         if self.previous:
-             self.previous.next = nextElement
-@@ -178,8 +179,8 @@ class PageElement:
-         return lastChild
- 
-     def insert(self, position, newChild):
--        if (isinstance(newChild, basestring)
--            or isinstance(newChild, unicode)) \
-+        if (isinstance(newChild, str)
-+            or isinstance(newChild, str)) \
-             and not isinstance(newChild, NavigableString):
-             newChild = NavigableString(newChild)
- 
-@@ -233,7 +234,7 @@ class PageElement:
-                 newChild.nextSibling.previousSibling = newChild
-             newChildsLastElement.next = nextChild
- 
--        if newChildsLastElement.next:
-+        if newChildsLastElement.__next__:
-             newChildsLastElement.next.previous = newChildsLastElement
-         self.contents.insert(position, newChild)
- 
-@@ -334,7 +335,7 @@ class PageElement:
-         g = generator()
-         while True:
-             try:
--                i = g.next()
-+                i = next(g)
-             except StopIteration:
-                 break
-             if i:
-@@ -350,7 +351,7 @@ class PageElement:
-     def nextGenerator(self):
-         i = self
-         while i:
--            i = i.next
-+            i = i.__next__
-             yield i
- 
-     def nextSiblingGenerator(self):
-@@ -385,22 +386,22 @@ class PageElement:
-     def toEncoding(self, s, encoding=None):
-         """Encodes an object to a string in some encoding, or to Unicode.
-         ."""
--        if isinstance(s, unicode):
-+        if isinstance(s, str):
-             if encoding:
-                 s = s.encode(encoding)
-         elif isinstance(s, str):
-             if encoding:
-                 s = s.encode(encoding)
-             else:
--                s = unicode(s)
-+                s = str(s)
-         else:
-             if encoding:
-                 s  = self.toEncoding(str(s), encoding)
-             else:
--                s = unicode(s)
-+                s = str(s)
-         return s
- 
--class NavigableString(unicode, PageElement):
-+class NavigableString(str, PageElement):
- 
-     def __new__(cls, value):
-         """Create a new NavigableString.
-@@ -410,9 +411,9 @@ class NavigableString(unicode, PageElement):
-         passed in to the superclass's __new__ or the superclass won't know
-         how to handle non-ASCII characters.
-         """
--        if isinstance(value, unicode):
--            return unicode.__new__(cls, value)
--        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
-+        if isinstance(value, str):
-+            return str.__new__(cls, value)
-+        return str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
- 
-     def __getnewargs__(self):
-         return (NavigableString.__str__(self),)
-@@ -424,7 +425,7 @@ class NavigableString(unicode, PageElement):
-         if attr == 'string':
-             return self
-         else:
--            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
-+            raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, attr))
- 
-     def __unicode__(self):
-         return str(self).decode(DEFAULT_OUTPUT_ENCODING)
-@@ -462,7 +463,7 @@ class Tag(PageElement):
-     def _invert(h):
-         "Cheap function to invert a hash."
-         i = {}
--        for k,v in h.items():
-+        for k,v in list(h.items()):
-             i[v] = k
-         return i
- 
-@@ -481,23 +482,23 @@ class Tag(PageElement):
-         escaped."""
-         x = match.group(1)
-         if self.convertHTMLEntities and x in name2codepoint:
--            return unichr(name2codepoint[x])
-+            return chr(name2codepoint[x])
-         elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
-             if self.convertXMLEntities:
-                 return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
-             else:
--                return u'&%s;' % x
-+                return '&%s;' % x
-         elif len(x) > 0 and x[0] == '#':
-             # Handle numeric entities
-             if len(x) > 1 and x[1] == 'x':
--                return unichr(int(x[2:], 16))
-+                return chr(int(x[2:], 16))
-             else:
--                return unichr(int(x[1:]))
-+                return chr(int(x[1:]))
- 
-         elif self.escapeUnrecognizedEntities:
--            return u'&amp;%s;' % x
-+            return '&amp;%s;' % x
-         else:
--            return u'&%s;' % x
-+            return '&%s;' % x
- 
-     def __init__(self, parser, name, attrs=None, parent=None,
-                  previous=None):
-@@ -520,11 +521,11 @@ class Tag(PageElement):
-         self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
- 
-         # Convert any HTML, XML, or numeric entities in the attribute values.
--        convert = lambda(k, val): (k,
-+        convert = lambda k_val: (k_val[0],
-                                    re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
-                                           self._convertEntities,
--                                          val))
--        self.attrs = map(convert, self.attrs)
-+                                          k_val[1]))
-+        self.attrs = list(map(convert, self.attrs))
- 
-     def get(self, key, default=None):
-         """Returns the value of the 'key' attribute for the tag, or
-@@ -533,7 +534,7 @@ class Tag(PageElement):
-         return self._getAttrMap().get(key, default)
- 
-     def has_key(self, key):
--        return self._getAttrMap().has_key(key)
-+        return key in self._getAttrMap()
- 
-     def __getitem__(self, key):
-         """tag[key] returns the value of the 'key' attribute for the tag,
-@@ -551,7 +552,7 @@ class Tag(PageElement):
-     def __contains__(self, x):
-         return x in self.contents
- 
--    def __nonzero__(self):
-+    def __bool__(self):
-         "A tag is non-None even if it has no contents."
-         return True
- 
-@@ -577,14 +578,14 @@ class Tag(PageElement):
-                 #We don't break because bad HTML can define the same
-                 #attribute multiple times.
-             self._getAttrMap()
--            if self.attrMap.has_key(key):
-+            if key in self.attrMap:
-                 del self.attrMap[key]
- 
-     def __call__(self, *args, **kwargs):
-         """Calling a tag like a function is the same as calling its
-         findAll() method. Eg. tag('a') returns a list of all the A tags
-         found within this tag."""
--        return apply(self.findAll, args, kwargs)
-+        return self.findAll(*args, **kwargs)
- 
-     def __getattr__(self, tag):
-         #print "Getattr %s.%s" % (self.__class__, tag)
-@@ -592,7 +593,7 @@ class Tag(PageElement):
-             return self.find(tag[:-3])
-         elif tag.find('__') != 0:
-             return self.find(tag)
--        raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
-+        raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__, tag))
- 
-     def __eq__(self, other):
-         """Returns true iff this tag has the same name, the same attributes,
-@@ -850,7 +851,7 @@ class SoupStrainer:
-         if isinstance(markupName, Tag):
-             markup = markupName
-             markupAttrs = markup
--        callFunctionWithTagData = callable(self.name) \
-+        callFunctionWithTagData = isinstance(self.name, collections.Callable) \
-                                 and not isinstance(markupName, Tag)
- 
-         if (not self.name) \
-@@ -862,7 +863,7 @@ class SoupStrainer:
-             else:
-                 match = True
-                 markupAttrMap = None
--                for attr, matchAgainst in self.attrs.items():
-+                for attr, matchAgainst in list(self.attrs.items()):
-                     if not markupAttrMap:
-                          if hasattr(markupAttrs, 'get'):
-                             markupAttrMap = markupAttrs
-@@ -903,16 +904,16 @@ class SoupStrainer:
-             if self._matches(markup, self.text):
-                 found = markup
-         else:
--            raise Exception, "I don't know how to match against a %s" \
--                  % markup.__class__
-+            raise Exception("I don't know how to match against a %s" \
-+                  % markup.__class__)
-         return found
- 
-     def _matches(self, markup, matchAgainst):
-         #print "Matching %s against %s" % (markup, matchAgainst)
-         result = False
--        if matchAgainst == True and type(matchAgainst) == types.BooleanType:
-+        if matchAgainst == True and type(matchAgainst) == bool:
-             result = markup != None
--        elif callable(matchAgainst):
-+        elif isinstance(matchAgainst, collections.Callable):
-             result = matchAgainst(markup)
-         else:
-             #Custom match methods take the tag as an argument, but all
-@@ -920,7 +921,7 @@ class SoupStrainer:
-             if isinstance(markup, Tag):
-                 markup = markup.name
-             if markup and not isString(markup):
--                markup = unicode(markup)
-+                markup = str(markup)
-             #Now we know that chunk is either a string, or None.
-             if hasattr(matchAgainst, 'match'):
-                 # It's a regexp object.
-@@ -928,10 +929,10 @@ class SoupStrainer:
-             elif isList(matchAgainst):
-                 result = markup in matchAgainst
-             elif hasattr(matchAgainst, 'items'):
--                result = markup.has_key(matchAgainst)
-+                result = matchAgainst in markup
-             elif matchAgainst and isString(markup):
--                if isinstance(markup, unicode):
--                    matchAgainst = unicode(matchAgainst)
-+                if isinstance(markup, str):
-+                    matchAgainst = str(matchAgainst)
-                 else:
-                     matchAgainst = str(matchAgainst)
- 
-@@ -952,13 +953,13 @@ def isList(l):
-     """Convenience method that works with all 2.x versions of Python
-     to determine whether or not something is listlike."""
-     return hasattr(l, '__iter__') \
--           or (type(l) in (types.ListType, types.TupleType))
-+           or (type(l) in (list, tuple))
- 
- def isString(s):
-     """Convenience method that works with all 2.x versions of Python
-     to determine whether or not something is stringlike."""
-     try:
--        return isinstance(s, unicode) or isinstance(s, basestring)
-+        return isinstance(s, str) or isinstance(s, str)
-     except NameError:
-         return isinstance(s, str)
- 
-@@ -970,7 +971,7 @@ def buildTagMap(default, *args):
-     for portion in args:
-         if hasattr(portion, 'items'):
-             #It's a map. Merge it.
--            for k,v in portion.items():
-+            for k,v in list(portion.items()):
-                 built[k] = v
-         elif isList(portion):
-             #It's a list. Map each item to the default.
-@@ -1013,7 +1014,7 @@ class BeautifulStoneSoup(Tag, SGMLParser):
-                        lambda x: '<!' + x.group(1) + '>')
-                       ]
- 
--    ROOT_TAG_NAME = u'[document]'
-+    ROOT_TAG_NAME = '[document]'
- 
-     HTML_ENTITIES = "html"
-     XML_ENTITIES = "xml"
-@@ -1109,14 +1110,14 @@ class BeautifulStoneSoup(Tag, SGMLParser):
-     def _feed(self, inDocumentEncoding=None, isHTML=False):
-         # Convert the document to Unicode.
-         markup = self.markup
--        if isinstance(markup, unicode):
-+        if isinstance(markup, str):
-             if not hasattr(self, 'originalEncoding'):
-                 self.originalEncoding = None
-         else:
-             dammit = UnicodeDammit\
-                      (markup, [self.fromEncoding, inDocumentEncoding],
-                       smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
--            markup = dammit.unicode
-+            markup = dammit.str
-             self.originalEncoding = dammit.originalEncoding
-             self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
-         if markup:
-@@ -1155,8 +1156,8 @@ class BeautifulStoneSoup(Tag, SGMLParser):
-     def isSelfClosingTag(self, name):
-         """Returns true iff the given string is the name of a
-         self-closing tag according to this parser."""
--        return self.SELF_CLOSING_TAGS.has_key(name) \
--               or self.instanceSelfClosingTags.has_key(name)
-+        return name in self.SELF_CLOSING_TAGS \
-+               or name in self.instanceSelfClosingTags
- 
-     def reset(self):
-         Tag.__init__(self, self, self.ROOT_TAG_NAME)
-@@ -1191,7 +1192,7 @@ class BeautifulStoneSoup(Tag, SGMLParser):
- 
-     def endData(self, containerClass=NavigableString):
-         if self.currentData:
--            currentData = u''.join(self.currentData)
-+            currentData = ''.join(self.currentData)
-             if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
-                 not set([tag.name for tag in self.tagStack]).intersection(
-                     self.PRESERVE_WHITESPACE_TAGS)):
-@@ -1254,7 +1255,7 @@ class BeautifulStoneSoup(Tag, SGMLParser):
- 
-         nestingResetTriggers = self.NESTABLE_TAGS.get(name)
-         isNestable = nestingResetTriggers != None
--        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
-+        isResetNesting = name in self.RESET_NESTING_TAGS
-         popTo = None
-         inclusive = True
-         for i in range(len(self.tagStack)-1, 0, -1):
-@@ -1267,7 +1268,7 @@ class BeautifulStoneSoup(Tag, SGMLParser):
-             if (nestingResetTriggers != None
-                 and p.name in nestingResetTriggers) \
-                 or (nestingResetTriggers == None and isResetNesting
--                    and self.RESET_NESTING_TAGS.has_key(p.name)):
-+                    and p.name in self.RESET_NESTING_TAGS):
- 
-                 #If we encounter one of the nesting reset triggers
-                 #peculiar to this tag, or we encounter another tag
-@@ -1285,7 +1286,7 @@ class BeautifulStoneSoup(Tag, SGMLParser):
-         if self.quoteStack:
-             #This is not a real tag.
-             #print "<%s> is not real!" % name
--            attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
-+            attrs = ''.join([' %s="%s"' % (x_y[0], x_y[1]) for x_y in attrs])
-             self.handle_data('<%s%s>' % (name, attrs))
-             return
-         self.endData()
-@@ -1338,7 +1339,7 @@ class BeautifulStoneSoup(Tag, SGMLParser):
-         object, possibly one with a %SOUP-ENCODING% slot into which an
-         encoding will be plugged later."""
-         if text[:3] == "xml":
--            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
-+            text = "xml version='1.0' encoding='%SOUP-ENCODING%'"
-         self._toStringSubclass(text, ProcessingInstruction)
- 
-     def handle_comment(self, text):
-@@ -1348,7 +1349,7 @@ class BeautifulStoneSoup(Tag, SGMLParser):
-     def handle_charref(self, ref):
-         "Handle character references as data."
-         if self.convertEntities:
--            data = unichr(int(ref))
-+            data = chr(int(ref))
-         else:
-             data = '&#%s;' % ref
-         self.handle_data(data)
-@@ -1360,7 +1361,7 @@ class BeautifulStoneSoup(Tag, SGMLParser):
-         data = None
-         if self.convertHTMLEntities:
-             try:
--                data = unichr(name2codepoint[ref])
-+                data = chr(name2codepoint[ref])
-             except KeyError:
-                 pass
- 
-@@ -1469,7 +1470,7 @@ class BeautifulSoup(BeautifulStoneSoup):
-     BeautifulStoneSoup before writing your own subclass."""
- 
-     def __init__(self, *args, **kwargs):
--        if not kwargs.has_key('smartQuotesTo'):
-+        if 'smartQuotesTo' not in kwargs:
-             kwargs['smartQuotesTo'] = self.HTML_ENTITIES
-         kwargs['isHTML'] = True
-         BeautifulStoneSoup.__init__(self, *args, **kwargs)
-@@ -1652,7 +1653,7 @@ class BeautifulSOAP(BeautifulStoneSoup):
-             parent._getAttrMap()
-             if (isinstance(tag, Tag) and len(tag.contents) == 1 and
-                 isinstance(tag.contents[0], NavigableString) and
--                not parent.attrMap.has_key(tag.name)):
-+                tag.name not in parent.attrMap):
-                 parent[tag.name] = tag.contents[0]
-         BeautifulStoneSoup.popTag(self)
- 
-@@ -1726,9 +1727,9 @@ class UnicodeDammit:
-                      self._detectEncoding(markup, isHTML)
-         self.smartQuotesTo = smartQuotesTo
-         self.triedEncodings = []
--        if markup == '' or isinstance(markup, unicode):
-+        if markup == '' or isinstance(markup, str):
-             self.originalEncoding = None
--            self.unicode = unicode(markup)
-+            self.str = str(markup)
-             return
- 
-         u = None
-@@ -1741,7 +1742,7 @@ class UnicodeDammit:
-                 if u: break
- 
-         # If no luck and we have auto-detection library, try that:
--        if not u and chardet and not isinstance(self.markup, unicode):
-+        if not u and chardet and not isinstance(self.markup, str):
-             u = self._convertFrom(chardet.detect(self.markup)['encoding'])
- 
-         # As a last resort, try utf-8 and windows-1252:
-@@ -1750,14 +1751,14 @@ class UnicodeDammit:
-                 u = self._convertFrom(proposed_encoding)
-                 if u: break
- 
--        self.unicode = u
-+        self.str = u
-         if not u: self.originalEncoding = None
- 
-     def _subMSChar(self, orig):
-         """Changes a MS smart quote character to an XML or HTML
-         entity."""
-         sub = self.MS_CHARS.get(orig)
--        if type(sub) == types.TupleType:
-+        if type(sub) == tuple:
-             if self.smartQuotesTo == 'xml':
-                 sub = '&#x%s;' % sub[1]
-             else:
-@@ -1777,7 +1778,7 @@ class UnicodeDammit:
-                                                       "iso-8859-1",
-                                                       "iso-8859-2"):
-             markup = re.compile("([\x80-\x9f])").sub \
--                     (lambda(x): self._subMSChar(x.group(1)),
-+                     (lambda x: self._subMSChar(x.group(1)),
-                       markup)
- 
-         try:
-@@ -1785,7 +1786,7 @@ class UnicodeDammit:
-             u = self._toUnicode(markup, proposed)
-             self.markup = u
-             self.originalEncoding = proposed
--        except Exception, e:
-+        except Exception as e:
-             # print "That didn't work!"
-             # print e
-             return None
-@@ -1814,7 +1815,7 @@ class UnicodeDammit:
-         elif data[:4] == '\xff\xfe\x00\x00':
-             encoding = 'utf-32le'
-             data = data[4:]
--        newdata = unicode(data, encoding)
-+        newdata = str(data, encoding)
-         return newdata
- 
-     def _detectEncoding(self, xml_data, isHTML=False):
-@@ -1827,41 +1828,41 @@ class UnicodeDammit:
-             elif xml_data[:4] == '\x00\x3c\x00\x3f':
-                 # UTF-16BE
-                 sniffed_xml_encoding = 'utf-16be'
--                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
-+                xml_data = str(xml_data, 'utf-16be').encode('utf-8')
-             elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
-                      and (xml_data[2:4] != '\x00\x00'):
-                 # UTF-16BE with BOM
-                 sniffed_xml_encoding = 'utf-16be'
--                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
-+                xml_data = str(xml_data[2:], 'utf-16be').encode('utf-8')
-             elif xml_data[:4] == '\x3c\x00\x3f\x00':
-                 # UTF-16LE
-                 sniffed_xml_encoding = 'utf-16le'
--                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
-+                xml_data = str(xml_data, 'utf-16le').encode('utf-8')
-             elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
-                      (xml_data[2:4] != '\x00\x00'):
-                 # UTF-16LE with BOM
-                 sniffed_xml_encoding = 'utf-16le'
--                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
-+                xml_data = str(xml_data[2:], 'utf-16le').encode('utf-8')
-             elif xml_data[:4] == '\x00\x00\x00\x3c':
-                 # UTF-32BE
-                 sniffed_xml_encoding = 'utf-32be'
--                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
-+                xml_data = str(xml_data, 'utf-32be').encode('utf-8')
-             elif xml_data[:4] == '\x3c\x00\x00\x00':
-                 # UTF-32LE
-                 sniffed_xml_encoding = 'utf-32le'
--                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
-+                xml_data = str(xml_data, 'utf-32le').encode('utf-8')
-             elif xml_data[:4] == '\x00\x00\xfe\xff':
-                 # UTF-32BE with BOM
-                 sniffed_xml_encoding = 'utf-32be'
--                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
-+                xml_data = str(xml_data[4:], 'utf-32be').encode('utf-8')
-             elif xml_data[:4] == '\xff\xfe\x00\x00':
-                 # UTF-32LE with BOM
-                 sniffed_xml_encoding = 'utf-32le'
--                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
-+                xml_data = str(xml_data[4:], 'utf-32le').encode('utf-8')
-             elif xml_data[:3] == '\xef\xbb\xbf':
-                 # UTF-8 with BOM
-                 sniffed_xml_encoding = 'utf-8'
--                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
-+                xml_data = str(xml_data[3:], 'utf-8').encode('utf-8')
-             else:
-                 sniffed_xml_encoding = 'ascii'
-                 pass
-@@ -1924,7 +1925,7 @@ class UnicodeDammit:
-                     250,251,252,253,254,255)
-             import string
-             c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
--            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
-+            ''.join(map(chr, list(range(256)))), ''.join(map(chr, emap)))
-         return s.translate(c.EBCDIC_TO_ASCII_MAP)
- 
-     MS_CHARS = { '\x80' : ('euro', '20AC'),
-@@ -1967,4 +1968,4 @@ class UnicodeDammit:
- if __name__ == '__main__':
-     import sys
-     soup = BeautifulSoup(sys.stdin)
--    print soup.prettify()
-+    print(soup.prettify())
-diff --git a/imdb/parser/http/bsouplxml/bsoupxpath.py b/imdb/parser/http/bsouplxml/bsoupxpath.py
-index c5c489d..45d1254 100644
---- a/imdb/parser/http/bsouplxml/bsoupxpath.py
-+++ b/imdb/parser/http/bsouplxml/bsoupxpath.py
-@@ -26,7 +26,7 @@ __docformat__ = 'restructuredtext'
- 
- import re
- import string
--import _bsoup as BeautifulSoup
-+from . import _bsoup as BeautifulSoup
- 
- 
- # XPath related enumerations and constants
-@@ -226,7 +226,7 @@ class PathStep:
-                     attribute_value = True
-                 else:
-                     attribute_value = None
--                if not self.soup_args.has_key('attrs'):
-+                if 'attrs' not in self.soup_args:
-                     self.soup_args['attrs'] = {}
-                 self.soup_args['attrs'][attribute_name] = attribute_value
-                 return None
-@@ -284,7 +284,7 @@ class PathStep:
- 
-             if found:
-                 for checker in self.checkers:
--                    found = filter(checker, found)
-+                    found = list(filter(checker, found))
-                 result.extend(found)
- 
-         return result
-@@ -311,14 +311,14 @@ class PredicateFilter:
-                 self.__filter = self.__starts_with
-             else:
-                 self.__filter = self.__contains
--            args = map(string.strip, arguments.split(','))
-+            args = list(map(string.strip, arguments.split(',')))
-             if args[0][0] == '@':
-                 self.arguments = (True, args[0][1:], args[1][1:-1])
-             else:
-                 self.arguments = (False, args[0], args[1][1:-1])
-         elif name == 'string-length':
-             self.__filter = self.__string_length
--            args = map(string.strip, arguments.split(','))
-+            args = list(map(string.strip, arguments.split(',')))
-             if args[0][0] == '@':
-                 self.arguments = (True, args[0][1:])
-             else:
-@@ -356,7 +356,7 @@ class PredicateFilter:
-         if self.arguments[0]:
-             # this is an attribute
-             attribute_name = self.arguments[1]
--            if node.has_key(attribute_name):
-+            if attribute_name in node:
-                 first = node[attribute_name]
-                 return first.startswith(self.arguments[2])
-         elif self.arguments[1] == 'text()':
-@@ -369,7 +369,7 @@ class PredicateFilter:
-         if self.arguments[0]:
-             # this is an attribute
-             attribute_name = self.arguments[1]
--            if node.has_key(attribute_name):
-+            if attribute_name in node:
-                 first = node[attribute_name]
-                 return self.arguments[2] in first
-         elif self.arguments[1] == 'text()':
-@@ -382,7 +382,7 @@ class PredicateFilter:
-         if self.arguments[0]:
-             # this is an attribute
-             attribute_name = self.arguments[1]
--            if node.has_key(attribute_name):
-+            if attribute_name in node:
-                 value = node[attribute_name]
-             else:
-                 value = None
-@@ -399,11 +399,11 @@ _steps = {}
- def get_path(path):
-     """Utility for eliminating repeated parsings of the same paths and steps.
-     """
--    if not _paths.has_key(path):
-+    if path not in _paths:
-         p = Path(path, parse=False)
-         steps = tokenize_path(path)
-         for step in steps:
--            if not _steps.has_key(step):
-+            if step not in _steps:
-                 _steps[step] = PathStep(step)
-             p.steps.append(_steps[step])
-         _paths[path] = p
-diff --git a/imdb/parser/http/bsouplxml/etree.py b/imdb/parser/http/bsouplxml/etree.py
-index 28465f5..9e4a203 100644
---- a/imdb/parser/http/bsouplxml/etree.py
-+++ b/imdb/parser/http/bsouplxml/etree.py
-@@ -21,10 +21,10 @@ along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- """
- 
--import _bsoup as BeautifulSoup
--from _bsoup import Tag as Element
-+from . import _bsoup as BeautifulSoup
-+from ._bsoup import Tag as Element
- 
--import bsoupxpath
-+from . import bsoupxpath
- 
- # Not directly used by IMDbPY, but do not remove: it's used by IMDbPYKit,
- # for example.
-@@ -38,7 +38,7 @@ def fromstring(xml_string):
- 
- def tostring(element, encoding=None, pretty_print=False):
-     """Return a string or unicode representation of an element."""
--    if encoding is unicode:
-+    if encoding is str:
-         encoding = None
-     # For BeautifulSoup 3.1
-     #encArgs = {'prettyPrint': pretty_print}
-diff --git a/imdb/parser/http/bsouplxml/html.py b/imdb/parser/http/bsouplxml/html.py
-index bbf13bd..eb0c928 100644
---- a/imdb/parser/http/bsouplxml/html.py
-+++ b/imdb/parser/http/bsouplxml/html.py
-@@ -21,7 +21,7 @@ along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- """
- 
--import _bsoup as BeautifulSoup
-+from . import _bsoup as BeautifulSoup
- 
- 
- def fromstring(html_string):
-diff --git a/imdb/parser/http/characterParser.py b/imdb/parser/http/characterParser.py
-index ff5ea09..7fa17d5 100644
---- a/imdb/parser/http/characterParser.py
-+++ b/imdb/parser/http/characterParser.py
-@@ -27,9 +27,9 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- """
- 
- import re
--from utils import Attribute, Extractor, DOMParserBase, build_movie, \
-+from .utils import Attribute, Extractor, DOMParserBase, build_movie, \
-                     analyze_imdbid
--from personParser import DOMHTMLMaindetailsParser
-+from .personParser import DOMHTMLMaindetailsParser
- 
- from imdb.Movie import Movie
- 
-@@ -55,9 +55,9 @@ class DOMHTMLCharacterMaindetailsParser(DOMHTMLMaindetailsParser):
-                           'roleID': "./a/@href"
-                           },
-                       postprocess=lambda x:
--                          build_movie(x.get('title') or u'',
--                              movieID=analyze_imdbid(x.get('link') or u''),
--                              roleID=_personIDs.findall(x.get('roleID') or u''),
-+                          build_movie(x.get('title') or '',
-+                              movieID=analyze_imdbid(x.get('link') or ''),
-+                              roleID=_personIDs.findall(x.get('roleID') or ''),
-                               status=x.get('status') or None,
-                               _parsingCharacter=True))]
- 
-@@ -125,7 +125,7 @@ class DOMHTMLCharacterBioParser(DOMParserBase):
-                                 'info': "./preceding-sibling::h4[1]//text()",
-                                 'text': ".//text()"
-                             },
--                            postprocess=lambda x: u'%s: %s' % (
-+                            postprocess=lambda x: '%s: %s' % (
-                                 x.get('info').strip(),
-                                 x.get('text').replace('\n',
-                                     ' ').replace('||', '\n\n').strip()))),
-@@ -190,7 +190,7 @@ class DOMHTMLCharacterQuotesParser(DOMParserBase):
-         return {'quotes': newData}
- 
- 
--from personParser import DOMHTMLSeriesParser
-+from .personParser import DOMHTMLSeriesParser
- 
- _OBJECTS = {
-     'character_main_parser': ((DOMHTMLCharacterMaindetailsParser,),
-diff --git a/imdb/parser/http/companyParser.py b/imdb/parser/http/companyParser.py
-index 8433791..b0a65ab 100644
---- a/imdb/parser/http/companyParser.py
-+++ b/imdb/parser/http/companyParser.py
-@@ -25,7 +25,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- """
- 
- import re
--from utils import build_movie, Attribute, Extractor, DOMParserBase, \
-+from .utils import build_movie, Attribute, Extractor, DOMParserBase, \
-                     analyze_imdbid
- 
- from imdb.utils import analyze_company_name
-@@ -64,9 +64,9 @@ class DOMCompanyParser(DOMParserBase):
-                                 'year': "./text()[1]"
-                                 },
-                             postprocess=lambda x:
--                                build_movie(u'%s %s' % \
-+                                build_movie('%s %s' % \
-                                 (x.get('title'), x.get('year').strip()),
--                                movieID=analyze_imdbid(x.get('link') or u''),
-+                                movieID=analyze_imdbid(x.get('link') or ''),
-                                 _parsingCompany=True))),
-             ]
- 
-@@ -75,7 +75,7 @@ class DOMCompanyParser(DOMParserBase):
-         ]
- 
-     def postprocess_data(self, data):
--        for key in data.keys():
-+        for key in list(data.keys()):
-             new_key = key.replace('company', 'companies')
-             new_key = new_key.replace('other', 'miscellaneous')
-             new_key = new_key.replace('distributor', 'distributors')
-diff --git a/imdb/parser/http/movieParser.py b/imdb/parser/http/movieParser.py
-index d05b9e0..fe64371 100644
---- a/imdb/parser/http/movieParser.py
-+++ b/imdb/parser/http/movieParser.py
-@@ -28,14 +28,14 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- """
- 
- import re
--import urllib
-+import urllib.request, urllib.parse, urllib.error
- 
- from imdb import imdbURL_base
- from imdb.Person import Person
- from imdb.Movie import Movie
- from imdb.Company import Company
- from imdb.utils import analyze_title, split_company_name_notes, _Container
--from utils import build_person, DOMParserBase, Attribute, Extractor, \
-+from .utils import build_person, DOMParserBase, Attribute, Extractor, \
-                     analyze_imdbid
- 
- 
-@@ -105,12 +105,12 @@ def _manageRoles(mo):
-             continue
-         roleID = analyze_imdbid(role)
-         if roleID is None:
--            roleID = u'/'
-+            roleID = '/'
-         else:
--            roleID += u'/'
--        newRoles.append(u'<div class="_imdbpyrole" roleid="%s">%s</div>' % \
-+            roleID += '/'
-+        newRoles.append('<div class="_imdbpyrole" roleid="%s">%s</div>' % \
-                 (roleID, role.strip()))
--    return firstHalf + u' / '.join(newRoles) + mo.group(3)
-+    return firstHalf + ' / '.join(newRoles) + mo.group(3)
- 
- 
- _reRolesMovie = re.compile(r'(<td class="char">)(.*?)(</td>)',
-@@ -133,7 +133,7 @@ def makeSplitter(lstrip=None, sep='|', comments=True,
-         if lstrip is not None:
-             x = x.lstrip(lstrip).lstrip()
-         lx = x.split(sep)
--        lx[:] = filter(None, [j.strip() for j in lx])
-+        lx[:] = [_f for _f in [j.strip() for j in lx] if _f]
-         if comments:
-             lx[:] = [j.replace(origNotesSep, newNotesSep, 1) for j in lx]
-         if strip:
-@@ -182,7 +182,7 @@ class DOMHTMLMovieParser(DOMParserBase):
-                             path={'person': ".//text()",
-                                     'link': "./td[1]/a[@href]/@href"},
-                             postprocess=lambda x: \
--                                    build_person(x.get('person') or u'',
-+                                    build_person(x.get('person') or '',
-                                         personID=analyze_imdbid(x.get('link')))
-                                 )),
- 
-@@ -195,9 +195,9 @@ class DOMHTMLMovieParser(DOMParserBase):
-                                 'roleID': \
-                                     "td[4]/div[@class='_imdbpyrole']/@roleid"},
-                             postprocess=lambda x: \
--                                    build_person(x.get('person') or u'',
-+                                    build_person(x.get('person') or '',
-                                     personID=analyze_imdbid(x.get('link')),
--                                    roleID=(x.get('roleID') or u'').split('/'))
-+                                    roleID=(x.get('roleID') or '').split('/'))
-                                 )),
- 
-                 Extractor(label='genres',
-@@ -290,7 +290,7 @@ class DOMHTMLMovieParser(DOMParserBase):
-                                     path={'name': "./text()",
-                                         'link': "./@href"},
-                                     postprocess=lambda x: \
--                                        build_person(x.get('name') or u'',
-+                                        build_person(x.get('name') or '',
-                                         personID=analyze_imdbid(x.get('link')))
-                                     )),
- 
-@@ -300,7 +300,7 @@ class DOMHTMLMovieParser(DOMParserBase):
-                                     path={'name': "./text()",
-                                         'link': "./@href"},
-                                     postprocess=lambda x: \
--                                        build_person(x.get('name') or u'',
-+                                        build_person(x.get('name') or '',
-                                         personID=analyze_imdbid(x.get('link')))
-                                     )),
- 
-@@ -310,7 +310,7 @@ class DOMHTMLMovieParser(DOMParserBase):
-                                     path={'name': "./text()",
-                                         'link': "@href"},
-                                     postprocess=lambda x: \
--                                        build_person(x.get('name') or u'',
-+                                        build_person(x.get('name') or '',
-                                         personID=analyze_imdbid(x.get('link')))
-                                     )),
- 
-@@ -377,9 +377,9 @@ class DOMHTMLMovieParser(DOMParserBase):
-                                         'comp-link': "./a/@href",
-                                         'notes': "./text()"},
-                                 postprocess=lambda x: \
--                                        Company(name=x.get('name') or u'',
-+                                        Company(name=x.get('name') or '',
-                                 companyID=analyze_imdbid(x.get('comp-link')),
--                                notes=(x.get('notes') or u'').strip())
-+                                notes=(x.get('notes') or '').strip())
-                             )),
- 
-                 Extractor(label='rating',
-@@ -431,7 +431,7 @@ class DOMHTMLMovieParser(DOMParserBase):
-     re_airdate = re.compile(r'(.*)\s*\(season (\d+), episode (\d+)\)', re.I)
-     def postprocess_data(self, data):
-         # Convert section names.
--        for sect in data.keys():
-+        for sect in list(data.keys()):
-             if sect in _SECT_CONV:
-                 data[_SECT_CONV[sect]] = data[sect]
-                 del data[sect]
-@@ -441,7 +441,7 @@ class DOMHTMLMovieParser(DOMParserBase):
-             value = data[key]
-             if isinstance(value, list) and value:
-                 if isinstance(value[0], Person):
--                    data[key] = filter(lambda x: x.personID is not None, value)
-+                    data[key] = [x for x in value if x.personID is not None]
-                 if isinstance(value[0], _Container):
-                     for obj in data[key]:
-                         obj.accessSystem = self._as
-@@ -463,7 +463,7 @@ class DOMHTMLMovieParser(DOMParserBase):
-             if nakas:
-                 data['akas'] = nakas
-         if 'runtimes' in data:
--            data['runtimes'] = [x.replace(' min', u'')
-+            data['runtimes'] = [x.replace(' min', '')
-                                 for x in data['runtimes']]
-         if 'original air date' in data:
-             oid = self.re_space.sub(' ', data['original air date']).strip()
-@@ -534,9 +534,9 @@ def _process_plotsummary(x):
-     if xauthor:
-         xauthor = xauthor.replace('{', '<').replace('}', '>').replace('(',
-                                     '<').replace(')', '>').strip()
--    xplot = x.get('plot', u'').strip()
-+    xplot = x.get('plot', '').strip()
-     if xauthor:
--        xplot += u'::%s' % xauthor
-+        xplot += '::%s' % xauthor
-     return xplot
- 
- class DOMHTMLPlotParser(DOMParserBase):
-@@ -670,18 +670,18 @@ class DOMHTMLAwardsParser(DOMParserBase):
-         if len(data) == 0:
-             return {}
-         nd = []
--        for key in data.keys():
-+        for key in list(data.keys()):
-             dom = self.get_dom(key)
-             assigner = self.xpath(dom, "//a/text()")[0]
-             for entry in data[key]:
--                if not entry.has_key('name'):
-+                if 'name' not in entry:
-                     if not entry:
-                         continue
-                     # this is an award, not a recipient
-                     entry['assigner'] = assigner.strip()
-                     # find the recipients
-                     matches = [p for p in data[key]
--                               if p.has_key('name') and (entry['anchor'] ==
-+                               if 'name' in p and (entry['anchor'] ==
-                                    p['anchor'])]
-                     if self.subject == 'title':
-                         recipients = [Person(name=recipient['name'],
-@@ -860,7 +860,7 @@ class DOMHTMLGoofsParser(DOMParserBase):
- 
-     extractors = [Extractor(label='goofs', path="//ul[@class='trivia']/li",
-                     attrs=Attribute(key='goofs', multi=True, path=".//text()",
--                        postprocess=lambda x: (x or u'').strip()))]
-+                        postprocess=lambda x: (x or '').strip()))]
- 
- 
- class DOMHTMLQuotesParser(DOMParserBase):
-@@ -947,7 +947,7 @@ class DOMHTMLReleaseinfoParser(DOMParserBase):
-             date = date.strip()
-             if not (country and date): continue
-             notes = i['notes']
--            info = u'%s::%s' % (country, date)
-+            info = '%s::%s' % (country, date)
-             if notes:
-                 info += notes
-             rl.append(info)
-@@ -1023,7 +1023,7 @@ class DOMHTMLRatingsParser(DOMParserBase):
-         votes = data.get('votes', [])
-         if votes:
-             nd['number of votes'] = {}
--            for i in xrange(1, 11):
-+            for i in range(1, 11):
-                 _ordinal = int(votes[i]['ordinal'])
-                 _strvts = votes[i]['votes'] or '0'
-                 nd['number of votes'][_ordinal] = \
-@@ -1046,7 +1046,7 @@ class DOMHTMLRatingsParser(DOMParserBase):
-         dem_voters = data.get('demographic voters')
-         if dem_voters:
-             nd['demographic'] = {}
--            for i in xrange(1, len(dem_voters)):
-+            for i in range(1, len(dem_voters)):
-                 if (dem_voters[i]['votes'] is not None) \
-                    and (dem_voters[i]['votes'].strip()):
-                     nd['demographic'][dem_voters[i]['voters'].strip().lower()] \
-@@ -1111,10 +1111,10 @@ class DOMHTMLEpisodesRatings(DOMParserBase):
-             except:
-                 pass
-             ept = ept.strip()
--            ept = u'%s {%s' % (title, ept)
-+            ept = '%s {%s' % (title, ept)
-             nr = i['nr']
-             if nr:
--                ept += u' (#%s)' % nr.strip()
-+                ept += ' (#%s)' % nr.strip()
-             ept += '}'
-             if movieID is not None:
-                 movieID = str(movieID)
-@@ -1160,7 +1160,7 @@ class DOMHTMLOfficialsitesParser(DOMParserBase):
-                     'info': "./text()"
-                 },
-                 postprocess=lambda x: (x.get('info').strip(),
--                            urllib.unquote(_normalize_href(x.get('link'))))))
-+                            urllib.parse.unquote(_normalize_href(x.get('link'))))))
-         ]
- 
- 
-@@ -1195,13 +1195,13 @@ class DOMHTMLConnectionParser(DOMParserBase):
-         ]
- 
-     def postprocess_data(self, data):
--        for key in data.keys():
-+        for key in list(data.keys()):
-             nl = []
-             for v in data[key]:
-                 title = v['title']
-                 ts = title.split('::', 1)
-                 title = ts[0].strip()
--                notes = u''
-+                notes = ''
-                 if len(ts) == 2:
-                     notes = ts[1].strip()
-                 m = Movie(title=title,
-@@ -1229,9 +1229,9 @@ class DOMHTMLLocationsParser(DOMParserBase):
-                                 path={'place': ".//text()",
-                                         'note': "./following-sibling::dd[1]" \
-                                                 "//text()"},
--                                postprocess=lambda x: (u'%s::%s' % (
-+                                postprocess=lambda x: ('%s::%s' % (
-                                     x['place'].strip(),
--                                    (x['note'] or u'').strip())).strip(':')))]
-+                                    (x['note'] or '').strip())).strip(':')))]
- 
- 
- class DOMHTMLTechParser(DOMParserBase):
-@@ -1272,7 +1272,7 @@ class DOMHTMLTechParser(DOMParserBase):
- 
-     def postprocess_data(self, data):
-         for key in data:
--            data[key] = filter(None, data[key])
-+            data[key] = [_f for _f in data[key] if _f]
-         if self.kind in ('literature', 'business', 'contacts') and data:
-             if 'screenplay/teleplay' in data:
-                 data['screenplay-teleplay'] = data['screenplay/teleplay']
-@@ -1284,7 +1284,7 @@ class DOMHTMLTechParser(DOMParserBase):
-                     data['biography-print'] = data['biography (print)']
-                     del data['biography (print)']
-             # Tech info.
--            for key in data.keys():
-+            for key in list(data.keys()):
-                 if key.startswith('film negative format'):
-                     data['film negative format'] = data[key]
-                     del data[key]
-@@ -1313,7 +1313,7 @@ class DOMHTMLRecParser(DOMParserBase):
-                             path={'title': ".//text()",
-                                     'movieID': ".//a/@href"}))]
-     def postprocess_data(self, data):
--        for key in data.keys():
-+        for key in list(data.keys()):
-             n_key = key
-             n_keyl = n_key.lower()
-             if n_keyl == 'suggested by the database':
-@@ -1361,7 +1361,7 @@ class DOMHTMLNewsParser(DOMParserBase):
-                     'date': x.get('fromdate').split('|')[0].strip(),
-                     'from': x.get('fromdate').split('|')[1].replace('From ',
-                             '').strip(),
--                    'body': (x.get('body') or u'').strip(),
-+                    'body': (x.get('body') or '').strip(),
-                     'link': _normalize_href(x.get('link')),
-                     'full article link': _normalize_href(x.get('fulllink'))
-                 }))
-@@ -1374,10 +1374,10 @@ class DOMHTMLNewsParser(DOMParserBase):
-         ]
- 
-     def postprocess_data(self, data):
--        if not data.has_key('news'):
-+        if 'news' not in data:
-             return {}
-         for news in data['news']:
--            if news.has_key('full article link'):
-+            if 'full article link' in news:
-                 if news['full article link'] is None:
-                     del news['full article link']
-         return data
-@@ -1467,11 +1467,11 @@ class DOMHTMLSeasonEpisodesParser(DOMParserBase):
-         series = Movie(title=series_title, movieID=str(series_id),
-                         accessSystem=self._as, modFunct=self._modFunct)
-         if series.get('kind') == 'movie':
--            series['kind'] = u'tv series'
-+            series['kind'] = 'tv series'
-         try: selected_season = int(selected_season)
-         except: pass
-         nd = {selected_season: {}}
--        for episode_nr, episode in data.iteritems():
-+        for episode_nr, episode in data.items():
-             if not (episode and episode[0] and
-                     episode_nr.startswith('episode ')):
-                 continue
-@@ -1488,7 +1488,7 @@ class DOMHTMLSeasonEpisodesParser(DOMParserBase):
-                 continue
-             ep_obj = Movie(movieID=episode_id, title=episode_title,
-                         accessSystem=self._as, modFunct=self._modFunct)
--            ep_obj['kind'] = u'episode'
-+            ep_obj['kind'] = 'episode'
-             ep_obj['episode of'] = series
-             ep_obj['season'] = selected_season
-             ep_obj['episode'] = episode_nr
-@@ -1512,14 +1512,14 @@ def _build_episode(x):
-     episode_id = analyze_imdbid(x.get('link'))
-     episode_title = x.get('title')
-     e = Movie(movieID=episode_id, title=episode_title)
--    e['kind'] = u'episode'
-+    e['kind'] = 'episode'
-     oad = x.get('oad')
-     if oad:
-         e['original air date'] = oad.strip()
-     year = x.get('year')
-     if year is not None:
-         year = year[5:]
--        if year == 'unknown': year = u'????'
-+        if year == 'unknown': year = '????'
-         if year and year.isdigit():
-             year = int(year)
-         e['year'] = year
-@@ -1597,9 +1597,9 @@ class DOMHTMLEpisodesParser(DOMParserBase):
-                             'roleID': \
-                                 "../td[4]/div[@class='_imdbpyrole']/@roleid"},
-                         postprocess=lambda x: \
--                                build_person(x.get('person') or u'',
-+                                build_person(x.get('person') or '',
-                                 personID=analyze_imdbid(x.get('link')),
--                                roleID=(x.get('roleID') or u'').split('/'),
-+                                roleID=(x.get('roleID') or '').split('/'),
-                                 accessSystem=self._as,
-                                 modFunct=self._modFunct)))
-                 ]
-@@ -1628,7 +1628,7 @@ class DOMHTMLEpisodesParser(DOMParserBase):
-         series = Movie(title=stitle, movieID=str(seriesID),
-                         accessSystem=self._as, modFunct=self._modFunct)
-         nd = {}
--        for key in data.keys():
-+        for key in list(data.keys()):
-             if key.startswith('filter-season-') or key.startswith('season-'):
-                 season_key = key.replace('filter-season-', '').replace('season-', '')
-                 try: season_key = int(season_key)
-@@ -1644,9 +1644,9 @@ class DOMHTMLEpisodesParser(DOMParserBase):
-                         ep_counter += 1
-                     cast_key = 'Season %s, Episode %s:' % (season_key,
-                                                             episode_key)
--                    if data.has_key(cast_key):
-+                    if cast_key in data:
-                         cast = data[cast_key]
--                        for i in xrange(len(cast)):
-+                        for i in range(len(cast)):
-                             cast[i].billingPos = i + 1
-                         episode['cast'] = cast
-                     episode['episode of'] = series
-@@ -1694,7 +1694,7 @@ class DOMHTMLFaqsParser(DOMParserBase):
-                     'question': "./h3/a/span/text()",
-                     'answer': "../following-sibling::div[1]//text()"
-                 },
--                postprocess=lambda x: u'%s::%s' % (x.get('question').strip(),
-+                postprocess=lambda x: '%s::%s' % (x.get('question').strip(),
-                                     '\n\n'.join(x.get('answer').replace(
-                                         '\n\n', '\n').strip().split('||')))))
-         ]
-@@ -1724,7 +1724,7 @@ class DOMHTMLAiringParser(DOMParserBase):
-             path="//title",
-             attrs=Attribute(key='series title', path="./text()",
-                             postprocess=lambda x: \
--                                    x.replace(' - TV schedule', u''))),
-+                                    x.replace(' - TV schedule', ''))),
-         Extractor(label='series id',
-             path="//h1/a[@href]",
-             attrs=Attribute(key='series id', path="./@href")),
-@@ -1757,7 +1757,7 @@ class DOMHTMLAiringParser(DOMParserBase):
-             return {}
-         seriesTitle = data['series title']
-         seriesID = analyze_imdbid(data['series id'])
--        if data.has_key('airing'):
-+        if 'airing' in data:
-             for airing in data['airing']:
-                 title = airing.get('title', '').strip()
-                 if not title:
-@@ -1780,7 +1780,7 @@ class DOMHTMLAiringParser(DOMParserBase):
-         if 'series id' in data:
-             del data['series id']
-         if 'airing' in data:
--            data['airing'] = filter(None, data['airing'])
-+            data['airing'] = [_f for _f in data['airing'] if _f]
-         if 'airing' not in data or not data['airing']:
-             return {}
-         return data
-diff --git a/imdb/parser/http/personParser.py b/imdb/parser/http/personParser.py
-index af96721..e93895a 100644
---- a/imdb/parser/http/personParser.py
-+++ b/imdb/parser/http/personParser.py
-@@ -30,11 +30,11 @@ import re
- from imdb.Movie import Movie
- from imdb.utils import analyze_name, canonicalName, normalizeName, \
-                         analyze_title, date_and_notes
--from utils import build_movie, DOMParserBase, Attribute, Extractor, \
-+from .utils import build_movie, DOMParserBase, Attribute, Extractor, \
-                         analyze_imdbid
- 
- 
--from movieParser import _manageRoles
-+from .movieParser import _manageRoles
- _reRoles = re.compile(r'(<li>.*? \.\.\.\. )(.*?)(</li>|<br>)',
-                         re.I | re.M | re.S)
- 
-@@ -84,13 +84,13 @@ class DOMHTMLMaindetailsParser(DOMParserBase):
-                           'roleID': "./a[starts-with(@href, '/character/')]/@href"
-                           },
-                       postprocess=lambda x:
--                          build_movie(x.get('title') or u'',
-+                          build_movie(x.get('title') or '',
-                               year=x.get('year'),
--                              movieID=analyze_imdbid(x.get('link') or u''),
--                              rolesNoChar=(x.get('rolesNoChar') or u'').strip(),
--                              chrRoles=(x.get('chrRoles') or u'').strip(),
-+                              movieID=analyze_imdbid(x.get('link') or ''),
-+                              rolesNoChar=(x.get('rolesNoChar') or '').strip(),
-+                              chrRoles=(x.get('chrRoles') or '').strip(),
-                               additionalNotes=x.get('notes'),
--                              roleID=(x.get('roleID') or u''),
-+                              roleID=(x.get('roleID') or ''),
-                               status=x.get('status') or None))]
- 
-     extractors = [
-@@ -137,9 +137,9 @@ class DOMHTMLMaindetailsParser(DOMParserBase):
-                                 'title': './a/text()'
-                                 },
-                                 postprocess=lambda x:
--                                    build_movie(x.get('title') or u'',
--                                        movieID=analyze_imdbid(x.get('link') or u''),
--                                        roleID=(x.get('roleID') or u'').split('/'),
-+                                    build_movie(x.get('title') or '',
-+                                        movieID=analyze_imdbid(x.get('link') or ''),
-+                                        roleID=(x.get('roleID') or '').split('/'),
-                                         status=x.get('status') or None)))
-             ]
- 
-@@ -154,19 +154,19 @@ class DOMHTMLMaindetailsParser(DOMParserBase):
-                 del data[what]
-         # XXX: the code below is for backwards compatibility
-         # probably could be removed
--        for key in data.keys():
-+        for key in list(data.keys()):
-             if key.startswith('actor '):
--                if not data.has_key('actor'):
-+                if 'actor' not in data:
-                     data['actor'] = []
-                 data['actor'].extend(data[key])
-                 del data[key]
-             if key.startswith('actress '):
--                if not data.has_key('actress'):
-+                if 'actress' not in data:
-                     data['actress'] = []
-                 data['actress'].extend(data[key])
-                 del data[key]
-             if key.startswith('self '):
--                if not data.has_key('self'):
-+                if 'self' not in data:
-                     data['self'] = []
-                 data['self'].extend(data[key])
-                 del data[key]
-@@ -213,7 +213,7 @@ class DOMHTMLBioParser(DOMParserBase):
-                     Attribute(key='death notes',
-                         path="./text()",
-                         # TODO: check if this slicing is always correct
--                        postprocess=lambda x: u''.join(x).strip()[2:])]
-+                        postprocess=lambda x: ''.join(x).strip()[2:])]
-     extractors = [
-             Extractor(label='headshot',
-                         path="//a[@name='headshot']",
-@@ -253,7 +253,7 @@ class DOMHTMLBioParser(DOMParserBase):
-                                 },
-                             postprocess=lambda x: "%s::%s" % \
-                                 (x.get('bio').strip(),
--                                (x.get('by') or u'').strip() or u'Anonymous'))),
-+                                (x.get('by') or '').strip() or 'Anonymous'))),
-             Extractor(label='spouse',
-                         path="//div[h5='Spouse']/table/tr",
-                         attrs=Attribute(key='spouse',
-@@ -264,7 +264,7 @@ class DOMHTMLBioParser(DOMParserBase):
-                                 },
-                             postprocess=lambda x: ("%s::%s" % \
-                                 (x.get('name').strip(),
--                                (x.get('info') or u'').strip())).strip(':'))),
-+                                (x.get('info') or '').strip())).strip(':'))),
-             Extractor(label='trade mark',
-                         path="//div[h5='Trade Mark']/p",
-                         attrs=Attribute(key='trade mark',
-@@ -351,7 +351,7 @@ class DOMHTMLOtherWorksParser(DOMParserBase):
- def _build_episode(link, title, minfo, role, roleA, roleAID):
-     """Build an Movie object for a given episode of a series."""
-     episode_id = analyze_imdbid(link)
--    notes = u''
-+    notes = ''
-     minidx = minfo.find(' -')
-     # Sometimes, for some unknown reason, the role is left in minfo.
-     if minidx != -1:
-@@ -365,7 +365,7 @@ def _build_episode(link, title, minfo, role, roleA, roleAID):
-         if slfRole and role is None and roleA is None:
-             role = slfRole
-     eps_data = analyze_title(title)
--    eps_data['kind'] = u'episode'
-+    eps_data['kind'] = 'episode'
-     # FIXME: it's wrong for multiple characters (very rare on tv series?).
-     if role is None:
-         role = roleA # At worse, it's None.
-@@ -419,7 +419,7 @@ class DOMHTMLSeriesParser(DOMParserBase):
-                                 },
-                             postprocess=lambda x: _build_episode(x.get('link'),
-                                 x.get('title'),
--                                (x.get('info') or u'').strip(),
-+                                (x.get('info') or '').strip(),
-                                 x.get('role'),
-                                 x.get('roleA'),
-                                 x.get('roleAID'))))
-@@ -429,7 +429,7 @@ class DOMHTMLSeriesParser(DOMParserBase):
-         if len(data) == 0:
-             return {}
-         nd = {}
--        for key in data.keys():
-+        for key in list(data.keys()):
-             dom = self.get_dom(key)
-             link = self.xpath(dom, "//a/@href")[0]
-             title = self.xpath(dom, "//a/text()")[0][1:-1]
-@@ -483,10 +483,10 @@ class DOMHTMLPersonGenresParser(DOMParserBase):
-         return {self.kind: data}
- 
- 
--from movieParser import DOMHTMLTechParser
--from movieParser import DOMHTMLOfficialsitesParser
--from movieParser import DOMHTMLAwardsParser
--from movieParser import DOMHTMLNewsParser
-+from .movieParser import DOMHTMLTechParser
-+from .movieParser import DOMHTMLOfficialsitesParser
-+from .movieParser import DOMHTMLAwardsParser
-+from .movieParser import DOMHTMLNewsParser
- 
- 
- _OBJECTS = {
-diff --git a/imdb/parser/http/searchCharacterParser.py b/imdb/parser/http/searchCharacterParser.py
-index c81ca7e..4a540b5 100644
---- a/imdb/parser/http/searchCharacterParser.py
-+++ b/imdb/parser/http/searchCharacterParser.py
-@@ -26,9 +26,9 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- """
- 
- from imdb.utils import analyze_name, build_name
--from utils import Extractor, Attribute, analyze_imdbid
-+from .utils import Extractor, Attribute, analyze_imdbid
- 
--from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
-+from .searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
- 
- 
- class DOMBasicCharacterParser(DOMBasicMovieParser):
-@@ -37,7 +37,7 @@ class DOMBasicCharacterParser(DOMBasicMovieParser):
-     It's used by the DOMHTMLSearchCharacterParser class to return a result
-     for a direct match (when a search on IMDb results in a single
-     character, the web server sends directly the movie page."""
--    _titleFunct = lambda self, x: analyze_name(x or u'', canonical=False)
-+    _titleFunct = lambda self, x: analyze_name(x or '', canonical=False)
- 
- 
- class DOMHTMLSearchCharacterParser(DOMHTMLSearchMovieParser):
-@@ -53,7 +53,7 @@ class DOMHTMLSearchCharacterParser(DOMHTMLSearchMovieParser):
-                             'name': "./a[1]/text()"
-                             },
-                         postprocess=lambda x: (
--                            analyze_imdbid(x.get('link') or u''),
-+                            analyze_imdbid(x.get('link') or ''),
-                             {'name': x.get('name')}
-                         ))]
-     extractors = [Extractor(label='search',
-diff --git a/imdb/parser/http/searchCompanyParser.py b/imdb/parser/http/searchCompanyParser.py
-index ab666fb..84ca0ed 100644
---- a/imdb/parser/http/searchCompanyParser.py
-+++ b/imdb/parser/http/searchCompanyParser.py
-@@ -26,9 +26,9 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- """
- 
- from imdb.utils import analyze_company_name, build_company_name
--from utils import Extractor, Attribute, analyze_imdbid
-+from .utils import Extractor, Attribute, analyze_imdbid
- 
--from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
-+from .searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
- 
- class DOMBasicCompanyParser(DOMBasicMovieParser):
-     """Simply get the name of a company and the imdbID.
-@@ -37,7 +37,7 @@ class DOMBasicCompanyParser(DOMBasicMovieParser):
-     for a direct match (when a search on IMDb results in a single
-     company, the web server sends directly the company page.
-     """
--    _titleFunct = lambda self, x: analyze_company_name(x or u'')
-+    _titleFunct = lambda self, x: analyze_company_name(x or '')
- 
- 
- class DOMHTMLSearchCompanyParser(DOMHTMLSearchMovieParser):
-@@ -56,7 +56,7 @@ class DOMHTMLSearchCompanyParser(DOMHTMLSearchMovieParser):
-                         postprocess=lambda x: (
-                             analyze_imdbid(x.get('link')),
-                             analyze_company_name(x.get('name')+(x.get('notes')
--                                                or u''), stripNotes=True)
-+                                                or ''), stripNotes=True)
-                         ))]
-     extractors = [Extractor(label='search',
-                             path="//td[3]/a[starts-with(@href, " \
-diff --git a/imdb/parser/http/searchKeywordParser.py b/imdb/parser/http/searchKeywordParser.py
-index ed72906..973560d 100644
---- a/imdb/parser/http/searchKeywordParser.py
-+++ b/imdb/parser/http/searchKeywordParser.py
-@@ -24,10 +24,10 @@ along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- """
- 
--from utils import Extractor, Attribute, analyze_imdbid
-+from .utils import Extractor, Attribute, analyze_imdbid
- from imdb.utils import analyze_title, analyze_company_name
- 
--from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
-+from .searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
- 
- class DOMBasicKeywordParser(DOMBasicMovieParser):
-     """Simply get the name of a keyword.
-@@ -38,7 +38,7 @@ class DOMBasicKeywordParser(DOMBasicMovieParser):
-     """
-     # XXX: it's still to be tested!
-     # I'm not even sure there can be a direct hit, searching for keywords.
--    _titleFunct = lambda self, x: analyze_company_name(x or u'')
-+    _titleFunct = lambda self, x: analyze_company_name(x or '')
- 
- 
- class DOMHTMLSearchKeywordParser(DOMHTMLSearchMovieParser):
-@@ -91,10 +91,10 @@ class DOMHTMLSearchMovieKeywordParser(DOMHTMLSearchMovieParser):
-                             'outline': "./span[@class='outline']//text()"
-                             },
-                         postprocess=lambda x: (
--                            analyze_imdbid(x.get('link') or u''),
--                            custom_analyze_title4kwd(x.get('info') or u'',
--                                                    x.get('ynote') or u'',
--                                                    x.get('outline') or u'')
-+                            analyze_imdbid(x.get('link') or ''),
-+                            custom_analyze_title4kwd(x.get('info') or '',
-+                                                    x.get('ynote') or '',
-+                                                    x.get('outline') or '')
-                         ))]
- 
-     extractors = [Extractor(label='search',
-diff --git a/imdb/parser/http/searchMovieParser.py b/imdb/parser/http/searchMovieParser.py
-index 44c78d0..8e61f56 100644
---- a/imdb/parser/http/searchMovieParser.py
-+++ b/imdb/parser/http/searchMovieParser.py
-@@ -28,7 +28,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- 
- import re
- from imdb.utils import analyze_title, build_title
--from utils import DOMParserBase, Attribute, Extractor, analyze_imdbid
-+from .utils import DOMParserBase, Attribute, Extractor, analyze_imdbid
- 
- 
- class DOMBasicMovieParser(DOMParserBase):
-@@ -40,7 +40,7 @@ class DOMBasicMovieParser(DOMParserBase):
-     # Stay generic enough to be used also for other DOMBasic*Parser classes.
-     _titleAttrPath = ".//text()"
-     _linkPath = "//link[@rel='canonical']"
--    _titleFunct = lambda self, x: analyze_title(x or u'')
-+    _titleFunct = lambda self, x: analyze_title(x or '')
- 
-     def _init(self):
-         self.preprocessors += [('<span class="tv-extra">TV mini-series</span>',
-@@ -54,7 +54,7 @@ class DOMBasicMovieParser(DOMParserBase):
-                                 path=self._linkPath,
-                                 attrs=Attribute(key='link', path="./@href",
-                                 postprocess=lambda x: \
--                                        analyze_imdbid((x or u'').replace(
-+                                        analyze_imdbid((x or '').replace(
-                                             'http://pro.imdb.com', ''))
-                                     ))]
- 
-@@ -105,18 +105,18 @@ class DOMHTMLSearchMovieParser(DOMParserBase):
-                             'akas': ".//p[@class='find-aka']//text()"
-                             },
-                         postprocess=lambda x: (
--                            analyze_imdbid(x.get('link') or u''),
--                            custom_analyze_title(x.get('info') or u''),
-+                            analyze_imdbid(x.get('link') or ''),
-+                            custom_analyze_title(x.get('info') or ''),
-                             x.get('akas')
-                         ))]
-     extractors = [Extractor(label='search',
-                         path="//td[3]/a[starts-with(@href, '/title/tt')]/..",
-                         attrs=_attrs)]
-     def _init(self):
--        self.url = u''
-+        self.url = ''
- 
-     def _reset(self):
--        self.url = u''
-+        self.url = ''
- 
-     def preprocess_string(self, html_string):
-         if self._notDirectHitTitle in html_string[:1024].lower():
-@@ -131,22 +131,22 @@ class DOMHTMLSearchMovieParser(DOMParserBase):
-         # Direct hit!
-         dbme = self._BaseParser(useModule=self._useModule)
-         res = dbme.parse(html_string, url=self.url)
--        if not res: return u''
-+        if not res: return ''
-         res = res['data']
--        if not (res and res[0]): return u''
-+        if not (res and res[0]): return ''
-         link = '%s%s' % (self._linkPrefix, res[0][0])
-         #    # Tries to cope with companies for which links to pro.imdb.com
-         #    # are missing.
-         #    link = self.url.replace(imdbURL_base[:-1], '')
-         title = self._titleBuilder(res[0][1])
--        if not (link and title): return u''
-+        if not (link and title): return ''
-         link = link.replace('http://pro.imdb.com', '')
-         new_html = '<td></td><td></td><td><a href="%s">%s</a></td>' % (link,
-                                                                     title)
-         return new_html
- 
-     def postprocess_data(self, data):
--        if not data.has_key('data'):
-+        if 'data' not in data:
-             data['data'] = []
-         results = getattr(self, 'results', None)
-         if results is not None:
-@@ -161,7 +161,7 @@ class DOMHTMLSearchMovieParser(DOMParserBase):
-                 if not datum[0] and datum[1]:
-                     continue
-                 if datum[2] is not None:
--                    akas = filter(None, datum[2].split('::'))
-+                    akas = [_f for _f in datum[2].split('::') if _f]
-                     if self._linkPrefix == '/title/tt':
-                         akas = [a.replace('" - ', '::').rstrip() for a in akas]
-                         akas = [a.replace('aka "', '', 1).replace('aka  "',
-diff --git a/imdb/parser/http/searchPersonParser.py b/imdb/parser/http/searchPersonParser.py
-index 1756efc..e0b33f0 100644
---- a/imdb/parser/http/searchPersonParser.py
-+++ b/imdb/parser/http/searchPersonParser.py
-@@ -27,15 +27,15 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- 
- import re
- from imdb.utils import analyze_name, build_name
--from utils import Extractor, Attribute, analyze_imdbid
-+from .utils import Extractor, Attribute, analyze_imdbid
- 
--from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
-+from .searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
- 
- 
- def _cleanName(n):
-     """Clean the name in a title tag."""
-     if not n:
--        return u''
-+        return ''
-     n = n.replace('Filmography by type for', '') # FIXME: temporary.
-     return n
- 
-@@ -68,9 +68,9 @@ class DOMHTMLSearchPersonParser(DOMHTMLSearchMovieParser):
-                             'akas': ".//div[@class='_imdbpyAKA']/text()"
-                             },
-                         postprocess=lambda x: (
--                            analyze_imdbid(x.get('link') or u''),
--                            analyze_name((x.get('name') or u'') + \
--                                        (x.get('index') or u''),
-+                            analyze_imdbid(x.get('link') or ''),
-+                            analyze_name((x.get('name') or '') + \
-+                                        (x.get('index') or ''),
-                                          canonical=1), x.get('akas')
-                         ))]
-     extractors = [Extractor(label='search',
-diff --git a/imdb/parser/http/topBottomParser.py b/imdb/parser/http/topBottomParser.py
-index f0f2950..3e2950b 100644
---- a/imdb/parser/http/topBottomParser.py
-+++ b/imdb/parser/http/topBottomParser.py
-@@ -25,7 +25,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- """
- 
- from imdb.utils import analyze_title
--from utils import DOMParserBase, Attribute, Extractor, analyze_imdbid
-+from .utils import DOMParserBase, Attribute, Extractor, analyze_imdbid
- 
- 
- class DOMHTMLTop250Parser(DOMParserBase):
-diff --git a/imdb/parser/http/utils.py b/imdb/parser/http/utils.py
-index f8dbc05..4b52f2a 100644
---- a/imdb/parser/http/utils.py
-+++ b/imdb/parser/http/utils.py
-@@ -32,6 +32,7 @@ from imdb.utils import flatten, _Container
- from imdb.Movie import Movie
- from imdb.Person import Person
- from imdb.Character import Character
-+import collections
- 
- 
- # Year, imdbIndex and kind.
-@@ -55,77 +56,77 @@ def _putRefs(d, re_titles, re_names, re_characters, lastKey=None):
-     """Iterate over the strings inside list items or dictionary values,
-     substitutes movie titles and person names with the (qv) references."""
-     if isinstance(d, list):
--        for i in xrange(len(d)):
--            if isinstance(d[i], (unicode, str)):
-+        for i in range(len(d)):
-+            if isinstance(d[i], str):
-                 if lastKey in _modify_keys:
-                     if re_names:
--                        d[i] = re_names.sub(ur"'\1' (qv)", d[i])
-+                        d[i] = re_names.sub(r"'\1' (qv)", d[i])
-                     if re_titles:
--                        d[i] = re_titles.sub(ur'_\1_ (qv)', d[i])
-+                        d[i] = re_titles.sub(r'_\1_ (qv)', d[i])
-                     if re_characters:
--                        d[i] = re_characters.sub(ur'#\1# (qv)', d[i])
-+                        d[i] = re_characters.sub(r'#\1# (qv)', d[i])
-             elif isinstance(d[i], (list, dict)):
-                 _putRefs(d[i], re_titles, re_names, re_characters,
-                         lastKey=lastKey)
-     elif isinstance(d, dict):
--        for k, v in d.items():
-+        for k, v in list(d.items()):
-             lastKey = k
--            if isinstance(v, (unicode, str)):
-+            if isinstance(v, str):
-                 if lastKey in _modify_keys:
-                     if re_names:
--                        d[k] = re_names.sub(ur"'\1' (qv)", v)
-+                        d[k] = re_names.sub(r"'\1' (qv)", v)
-                     if re_titles:
--                        d[k] = re_titles.sub(ur'_\1_ (qv)', v)
-+                        d[k] = re_titles.sub(r'_\1_ (qv)', v)
-                     if re_characters:
--                        d[k] = re_characters.sub(ur'#\1# (qv)', v)
-+                        d[k] = re_characters.sub(r'#\1# (qv)', v)
-             elif isinstance(v, (list, dict)):
-                 _putRefs(d[k], re_titles, re_names, re_characters,
-                         lastKey=lastKey)
- 
- 
- # Handle HTML/XML/SGML entities.
--from htmlentitydefs import entitydefs
-+from html.entities import entitydefs
- entitydefs = entitydefs.copy()
- entitydefsget = entitydefs.get
- entitydefs['nbsp'] = ' '
- 
- sgmlentity = {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\'', 'ndash': '-'}
- sgmlentityget = sgmlentity.get
--_sgmlentkeys = sgmlentity.keys()
-+_sgmlentkeys = list(sgmlentity.keys())
- 
- entcharrefs = {}
- entcharrefsget = entcharrefs.get
--for _k, _v in entitydefs.items():
-+for _k, _v in list(entitydefs.items()):
-     if _k in _sgmlentkeys: continue
-     if _v[0:2] == '&#':
-         dec_code = _v[1:-1]
--        _v = unichr(int(_v[2:-1]))
-+        _v = chr(int(_v[2:-1]))
-         entcharrefs[dec_code] = _v
-     else:
-         dec_code = '#' + str(ord(_v))
--        _v = unicode(_v, 'latin_1', 'replace')
-+        _v = str(_v, 'latin_1', 'replace')
-         entcharrefs[dec_code] = _v
-     entcharrefs[_k] = _v
- del _sgmlentkeys, _k, _v
--entcharrefs['#160'] = u' '
--entcharrefs['#xA0'] = u' '
--entcharrefs['#xa0'] = u' '
--entcharrefs['#XA0'] = u' '
--entcharrefs['#x22'] = u'"'
--entcharrefs['#X22'] = u'"'
-+entcharrefs['#160'] = ' '
-+entcharrefs['#xA0'] = ' '
-+entcharrefs['#xa0'] = ' '
-+entcharrefs['#XA0'] = ' '
-+entcharrefs['#x22'] = '"'
-+entcharrefs['#X22'] = '"'
- # convert &x26; to &amp;, to make BeautifulSoup happy; beware that this
- # leaves lone '&' in the html broken, but I assume this is better than
- # the contrary...
--entcharrefs['#38'] = u'&amp;'
--entcharrefs['#x26'] = u'&amp;'
--entcharrefs['#x26'] = u'&amp;'
-+entcharrefs['#38'] = '&amp;'
-+entcharrefs['#x26'] = '&amp;'
-+entcharrefs['#x26'] = '&amp;'
- 
- re_entcharrefs = re.compile('&(%s|\#160|\#\d{1,5}|\#x[0-9a-f]{1,4});' %
-                             '|'.join(map(re.escape, entcharrefs)), re.I)
- re_entcharrefssub = re_entcharrefs.sub
- 
--sgmlentity.update(dict([('#34', u'"'), ('#38', u'&'),
--                        ('#60', u'<'), ('#62', u'>'), ('#39', u"'")]))
-+sgmlentity.update(dict([('#34', '"'), ('#38', '&'),
-+                        ('#60', '<'), ('#62', '>'), ('#39', "'")]))
- re_sgmlref = re.compile('&(%s);' % '|'.join(map(re.escape, sgmlentity)))
- re_sgmlrefsub = re_sgmlref.sub
- 
-@@ -148,9 +149,9 @@ def _replXMLRef(match):
-                 #if ref[2:] == '26':
-                 #    # Don't convert &x26; to &amp;, to make BeautifulSoup happy.
-                 #    return '&amp;'
--                return unichr(int(ref[2:], 16))
-+                return chr(int(ref[2:], 16))
-             else:
--                return unichr(int(ref[1:]))
-+                return chr(int(ref[1:]))
-         else:
-             return ref
-     return value
-@@ -179,8 +180,8 @@ def build_person(txt, personID=None, billingPos=None,
-     found in the IMDb's web site."""
-     #if personID is None
-     #    _b_p_logger.debug('empty name or personID for "%s"', txt)
--    notes = u''
--    role = u''
-+    notes = ''
-+    role = ''
-     # Search the (optional) separator between name and role/notes.
-     if txt.find('....') != -1:
-         sep = '....'
-@@ -219,7 +220,7 @@ def build_person(txt, personID=None, billingPos=None,
-             # We're managing something that doesn't have a 'role', so
-             # everything are notes.
-             notes = role_comment
--    if role == '....': role = u''
-+    if role == '....': role = ''
-     roleNotes = []
-     # Manages multiple roleIDs.
-     if isinstance(roleID, list):
-@@ -245,7 +246,7 @@ def build_person(txt, personID=None, billingPos=None,
-         if lr == 1:
-             role = role[0]
-             roleID = roleID[0]
--            notes = roleNotes[0] or u''
-+            notes = roleNotes[0] or ''
-     elif roleID is not None:
-         roleID = str(roleID)
-     if personID is not None:
-@@ -287,8 +288,8 @@ def build_movie(txt, movieID=None, roleID=None, status=None,
-     title = re_spaces.sub(' ', txt).strip()
-     # Split the role/notes from the movie title.
-     tsplit = title.split(_defSep, 1)
--    role = u''
--    notes = u''
-+    role = ''
-+    notes = ''
-     roleNotes = []
-     if len(tsplit) == 2:
-         title = tsplit[0].rstrip()
-@@ -346,16 +347,16 @@ def build_movie(txt, movieID=None, roleID=None, status=None,
-                 if notes: notes = '%s %s' % (title[fpIdx:], notes)
-                 else: notes = title[fpIdx:]
-                 title = title[:fpIdx].rstrip()
--        title = u'%s (%s)' % (title, year)
-+        title = '%s (%s)' % (title, year)
-     if _parsingCharacter and roleID and not role:
-         roleID = None
-     if not roleID:
-         roleID = None
-     elif len(roleID) == 1:
-         roleID = roleID[0]
--    if not role and chrRoles and isinstance(roleID, (str, unicode)):
-+    if not role and chrRoles and isinstance(roleID, str):
-         roleID = _re_chrIDs.findall(roleID)
--        role = ' / '.join(filter(None, chrRoles.split('@@')))
-+        role = ' / '.join([_f for _f in chrRoles.split('@@') if _f])
-     # Manages multiple roleIDs.
-     if isinstance(roleID, list):
-         tmprole = role.split('/')
-@@ -387,7 +388,7 @@ def build_movie(txt, movieID=None, roleID=None, status=None,
-     if (not title) or (movieID is None):
-         _b_m_logger.error('empty title or movieID for "%s"', txt)
-     if rolesNoChar:
--        rolesNoChar = filter(None, [x.strip() for x in rolesNoChar.split('/')])
-+        rolesNoChar = [_f for _f in [x.strip() for x in rolesNoChar.split('/')] if _f]
-         if not role:
-             role = []
-         elif not isinstance(role, list):
-@@ -397,7 +398,7 @@ def build_movie(txt, movieID=None, roleID=None, status=None,
-     if additionalNotes:
-         additionalNotes = re_spaces.sub(' ', additionalNotes).strip()
-         if notes:
--            notes += u' '
-+            notes += ' '
-         notes += additionalNotes
-     if role and isinstance(role, list) and notes.endswith(role[-1].replace('\n', ' ')):
-         role = role[:-1]
-@@ -450,8 +451,8 @@ class DOMParserBase(object):
-                     self._is_xml_unicode = False
-                     self.usingModule = 'lxml'
-                 elif mod == 'beautifulsoup':
--                    from bsouplxml.html import fromstring
--                    from bsouplxml.etree import tostring
-+                    from .bsouplxml.html import fromstring
-+                    from .bsouplxml.etree import tostring
-                     self._is_xml_unicode = True
-                     self.usingModule = 'beautifulsoup'
-                 else:
-@@ -462,7 +463,7 @@ class DOMParserBase(object):
-                 if _gotError:
-                     warnings.warn('falling back to "%s"' % mod)
-                 break
--            except ImportError, e:
-+            except ImportError as e:
-                 if idx+1 >= nrMods:
-                     # Raise the exception, if we don't have any more
-                     # options to try.
-@@ -507,8 +508,8 @@ class DOMParserBase(object):
-         else:
-             self.getRefs = self._defGetRefs
-         # Useful only for the testsuite.
--        if not isinstance(html_string, unicode):
--            html_string = unicode(html_string, 'latin_1', 'replace')
-+        if not isinstance(html_string, str):
-+            html_string = str(html_string, 'latin_1', 'replace')
-         html_string = subXMLRefs(html_string)
-         # Temporary fix: self.parse_dom must work even for empty strings.
-         html_string = self.preprocess_string(html_string)
-@@ -526,13 +527,13 @@ class DOMParserBase(object):
-             #print self.tostring(dom).encode('utf8')
-             try:
-                 dom = self.preprocess_dom(dom)
--            except Exception, e:
-+            except Exception as e:
-                 self._logger.error('%s: caught exception preprocessing DOM',
-                                     self._cname, exc_info=True)
-             if self.getRefs:
-                 try:
-                     self.gather_refs(dom)
--                except Exception, e:
-+                except Exception as e:
-                     self._logger.warn('%s: unable to gather refs: %s',
-                                     self._cname, exc_info=True)
-             data = self.parse_dom(dom)
-@@ -540,7 +541,7 @@ class DOMParserBase(object):
-             data = {}
-         try:
-             data = self.postprocess_data(data)
--        except Exception, e:
-+        except Exception as e:
-             self._logger.error('%s: caught exception postprocessing data',
-                                 self._cname, exc_info=True)
-         if self._containsObjects:
-@@ -549,7 +550,7 @@ class DOMParserBase(object):
-         return data
- 
-     def _build_empty_dom(self):
--        from bsouplxml import _bsoup
-+        from .bsouplxml import _bsoup
-         return _bsoup.BeautifulSoup('')
- 
-     def get_dom(self, html_string):
-@@ -560,7 +561,7 @@ class DOMParserBase(object):
-                 dom = self._build_empty_dom()
-                 self._logger.error('%s: using a fake empty DOM', self._cname)
-             return dom
--        except Exception, e:
-+        except Exception as e:
-             self._logger.error('%s: caught exception parsing DOM',
-                                 self._cname, exc_info=True)
-             return self._build_empty_dom()
-@@ -574,25 +575,25 @@ class DOMParserBase(object):
-             result = []
-             for item in xpath_result:
-                 if isinstance(item, str):
--                    item = unicode(item)
-+                    item = str(item)
-                 result.append(item)
-             return result
--        except Exception, e:
-+        except Exception as e:
-             self._logger.error('%s: caught exception extracting XPath "%s"',
-                                 self._cname, path, exc_info=True)
-             return []
- 
-     def tostring(self, element):
-         """Convert the element to a string."""
--        if isinstance(element, (unicode, str)):
--            return unicode(element)
-+        if isinstance(element, str):
-+            return str(element)
-         else:
-             try:
--                return self._tostring(element, encoding=unicode)
--            except Exception, e:
-+                return self._tostring(element, encoding=str)
-+            except Exception as e:
-                 self._logger.error('%s: unable to convert to string',
-                                     self._cname, exc_info=True)
--                return u''
-+                return ''
- 
-     def clone(self, element):
-         """Clone an element."""
-@@ -603,22 +604,22 @@ class DOMParserBase(object):
-         if not html_string:
-             return html_string
-         # Remove silly &nbsp;&raquo; and &ndash; chars.
--        html_string = html_string.replace(u' \xbb', u'')
--        html_string = html_string.replace(u'&ndash;', u'-')
-+        html_string = html_string.replace(' \xbb', '')
-+        html_string = html_string.replace('&ndash;', '-')
-         try:
-             preprocessors = self.preprocessors
-         except AttributeError:
-             return html_string
-         for src, sub in preprocessors:
-             # re._pattern_type is present only since Python 2.5.
--            if callable(getattr(src, 'sub', None)):
-+            if isinstance(getattr(src, 'sub', None), collections.Callable):
-                 html_string = src.sub(sub, html_string)
-             elif isinstance(src, str):
-                 html_string = html_string.replace(src, sub)
--            elif callable(src):
-+            elif isinstance(src, collections.Callable):
-                 try:
-                     html_string = src(html_string)
--                except Exception, e:
-+                except Exception as e:
-                     _msg = '%s: caught exception preprocessing html'
-                     self._logger.error(_msg, self._cname, exc_info=True)
-                     continue
-@@ -663,10 +664,10 @@ class DOMParserBase(object):
-                     group_key = self.tostring(group_key)
-                     normalizer = extractor.group_key_normalize
-                     if normalizer is not None:
--                        if callable(normalizer):
-+                        if isinstance(normalizer, collections.Callable):
-                             try:
-                                 group_key = normalizer(group_key)
--                            except Exception, e:
-+                            except Exception as e:
-                                 _m = '%s: unable to apply group_key normalizer'
-                                 self._logger.error(_m, self._cname,
-                                                     exc_info=True)
-@@ -677,7 +678,7 @@ class DOMParserBase(object):
-                 for attr in extractor.attrs:
-                     if isinstance(attr.path, dict):
-                         data = {}
--                        for field in attr.path.keys():
-+                        for field in list(attr.path.keys()):
-                             path = attr.path[field]
-                             value = self.xpath(element, path)
-                             if not value:
-@@ -694,10 +695,10 @@ class DOMParserBase(object):
-                     if not data:
-                         continue
-                     attr_postprocess = attr.postprocess
--                    if callable(attr_postprocess):
-+                    if isinstance(attr_postprocess, collections.Callable):
-                         try:
-                             data = attr_postprocess(data)
--                        except Exception, e:
-+                        except Exception as e:
-                             _m = '%s: unable to apply attr postprocess'
-                             self._logger.error(_m, self._cname, exc_info=True)
-                     key = attr.key
-@@ -737,17 +738,17 @@ class DOMParserBase(object):
-     def add_refs(self, data):
-         """Modify data according to the expected output."""
-         if self.getRefs:
--            titl_re = ur'(%s)' % '|'.join([re.escape(x) for x
--                                            in self._titlesRefs.keys()])
--            if titl_re != ur'()': re_titles = re.compile(titl_re, re.U)
-+            titl_re = r'(%s)' % '|'.join([re.escape(x) for x
-+                                            in list(self._titlesRefs.keys())])
-+            if titl_re != r'()': re_titles = re.compile(titl_re, re.U)
-             else: re_titles = None
--            nam_re = ur'(%s)' % '|'.join([re.escape(x) for x
--                                            in self._namesRefs.keys()])
--            if nam_re != ur'()': re_names = re.compile(nam_re, re.U)
-+            nam_re = r'(%s)' % '|'.join([re.escape(x) for x
-+                                            in list(self._namesRefs.keys())])
-+            if nam_re != r'()': re_names = re.compile(nam_re, re.U)
-             else: re_names = None
--            chr_re = ur'(%s)' % '|'.join([re.escape(x) for x
--                                            in self._charactersRefs.keys()])
--            if chr_re != ur'()': re_characters = re.compile(chr_re, re.U)
-+            chr_re = r'(%s)' % '|'.join([re.escape(x) for x
-+                                            in list(self._charactersRefs.keys())])
-+            if chr_re != r'()': re_characters = re.compile(chr_re, re.U)
-             else: re_characters = None
-             _putRefs(data, re_titles, re_names, re_characters)
-         return {'data': data, 'titlesRefs': self._titlesRefs,
-@@ -827,8 +828,8 @@ class GatherRefs(DOMParserBase):
-                             'link': './@href',
-                             'info': './following::text()[1]'
-                             },
--        postprocess=lambda x: _parse_ref(x.get('text') or u'', x.get('link') or '',
--                                         (x.get('info') or u'').strip()))]
-+        postprocess=lambda x: _parse_ref(x.get('text') or '', x.get('link') or '',
-+                                         (x.get('info') or '').strip()))]
-     extractors = [
-         Extractor(label='names refs',
-             path="//a[starts-with(@href, '/name/nm')][string-length(@href)=16]",
-diff --git a/imdb/parser/mobile/__init__.py b/imdb/parser/mobile/__init__.py
-index c391386..f9b8539 100644
---- a/imdb/parser/mobile/__init__.py
-+++ b/imdb/parser/mobile/__init__.py
-@@ -25,7 +25,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- 
- import re
- import logging
--from urllib import unquote
-+from urllib.parse import unquote
- 
- from imdb.Movie import Movie
- from imdb.utils import analyze_title, analyze_name, canonicalName, \
-@@ -259,7 +259,7 @@ class IMDbMobileAccessSystem(IMDbHTTPAccessSystem):
-             m = Movie(movieID=str(mid[0]), data=s_data,
-                         accessSystem=self.accessSystem,
-                         modFunct=self._defModFunct)
--            d['kind'] = kind = u'episode'
-+            d['kind'] = kind = 'episode'
-             d['episode of'] = m
-         if kind in ('tv series', 'tv mini series'):
-             years = _findBetween(cont, '<h1>', '</h1>', maxRes=1)
-@@ -471,7 +471,7 @@ class IMDbMobileAccessSystem(IMDbHTTPAccessSystem):
-         cont = self._mretrieve(self.urls['movie_main'] % movieID + 'plotsummary')
-         plot = _findBetween(cont, '<p class="plotpar">', '</p>')
-         plot[:] = [_unHtml(x) for x in plot]
--        for i in xrange(len(plot)):
-+        for i in range(len(plot)):
-             p = plot[i]
-             wbyidx = p.rfind(' Written by ')
-             if wbyidx != -1:
-@@ -555,7 +555,7 @@ class IMDbMobileAccessSystem(IMDbHTTPAccessSystem):
-                     #date, notes = date_and_notes(date)
-                     # TODO: fix to handle real names.
-                     date_notes = date.split(' in ', 1)
--                    notes = u''
-+                    notes = ''
-                     date = date_notes[0]
-                     if len(date_notes) == 2:
-                         notes = date_notes[1]
-@@ -576,7 +576,7 @@ class IMDbMobileAccessSystem(IMDbHTTPAccessSystem):
-                 akas = akas.split(' | ')
-             else:
-                 akas = akas.split(' / ')
--            if akas: r['akas'] = filter(None, [x.strip() for x in akas])
-+            if akas: r['akas'] = [_f for _f in [x.strip() for x in akas] if _f]
-         hs = _findBetween(s, "rel='image_src'", '>', maxRes=1)
-         if not hs:
-             hs = _findBetween(s, 'rel="image_src"', '>', maxRes=1)
-@@ -607,7 +607,7 @@ class IMDbMobileAccessSystem(IMDbHTTPAccessSystem):
-         #if _parseChr:
-         #    ws.append(('filmography', 'filmography'))
-         for sect, sectName in ws:
--            raws = u''
-+            raws = ''
-             if sectName == 'self':
-                 sect = 'Self'
-             # Everything between the current section link and the end
-@@ -657,7 +657,7 @@ class IMDbMobileAccessSystem(IMDbHTTPAccessSystem):
-                 movieID = str(movieID[0])
-                 # Search the status.
-                 stidx = m.find('<i>')
--                status = u''
-+                status = ''
-                 if stidx != -1:
-                     stendidx = m.rfind('</i>')
-                     if stendidx != -1:
-@@ -691,7 +691,7 @@ class IMDbMobileAccessSystem(IMDbHTTPAccessSystem):
-             if vtag:
-                 try:
-                     vtag = unquote(str(vtag[0]))
--                    vtag = unicode(vtag, 'latin_1')
-+                    vtag = str(vtag, 'latin_1')
-                     r.update(analyze_name(vtag))
-                 except UnicodeEncodeError:
-                     pass
-@@ -725,7 +725,7 @@ class IMDbMobileAccessSystem(IMDbHTTPAccessSystem):
-         misc_sects[:] = [x for x in misc_sects if len(x) == 2]
-         for sect, data in misc_sects:
-             sect = sect.lower().replace(':', '').strip()
--            if d.has_key(sect) and sect != 'mini biography': continue
-+            if sect in d and sect != 'mini biography': continue
-             elif sect in ('spouse', 'nickname'): continue
-             if sect == 'salary': sect = 'salary history'
-             elif sect == 'where are they now': sect = 'where now'
-@@ -761,7 +761,7 @@ class IMDbMobileAccessSystem(IMDbHTTPAccessSystem):
-                         bioAuth = bio[:byidx].rstrip()
-                     else:
-                         bioAuth = 'Anonymous'
--                    bio = u'%s::%s' % (bioAuth, bio[byidx+23:].lstrip())
-+                    bio = '%s::%s' % (bioAuth, bio[byidx+23:].lstrip())
-                     ndata.append(bio)
-                 data[:] = ndata
-                 if 'mini biography' in d:
-diff --git a/imdb/parser/sql/__init__.py b/imdb/parser/sql/__init__.py
-index 4ab5adc..4dafc53 100644
---- a/imdb/parser/sql/__init__.py
-+++ b/imdb/parser/sql/__init__.py
-@@ -53,9 +53,9 @@ _aux_logger = logging.getLogger('imdbpy.parser.sql.aux')
- def titleVariations(title, fromPtdf=0):
-     """Build title variations useful for searches; if fromPtdf is true,
-     the input is assumed to be in the plain text data files format."""
--    if fromPtdf: title1 = u''
-+    if fromPtdf: title1 = ''
-     else: title1 = title
--    title2 = title3 = u''
-+    title2 = title3 = ''
-     if fromPtdf or re_year_index.search(title):
-         # If it appears to have a (year[/imdbIndex]) indication,
-         # assume that a long imdb canonical name was provided.
-@@ -73,13 +73,13 @@ def titleVariations(title, fromPtdf=0):
-         # Just a title.
-         # title1: the canonical title.
-         title1 = canonicalTitle(title)
--        title3 = u''
-+        title3 = ''
-     # title2 is title1 without the article, or title1 unchanged.
-     if title1:
-         title2 = title1
--        t2s = title2.split(u', ')
-+        t2s = title2.split(', ')
-         if t2s[-1].lower() in _unicodeArticles:
--            title2 = u', '.join(t2s[:-1])
-+            title2 = ', '.join(t2s[:-1])
-     _aux_logger.debug('title variations: 1:[%s] 2:[%s] 3:[%s]',
-                         title1, title2, title3)
-     return title1, title2, title3
-@@ -90,7 +90,7 @@ re_nameIndex = re.compile(r'\(([IVXLCDM]+)\)')
- def nameVariations(name, fromPtdf=0):
-     """Build name variations useful for searches; if fromPtdf is true,
-     the input is assumed to be in the plain text data files format."""
--    name1 = name2 = name3 = u''
-+    name1 = name2 = name3 = ''
-     if fromPtdf or re_nameIndex.search(name):
-         # We've a name with an (imdbIndex)
-         namedict = analyze_name(name, canonical=1)
-@@ -98,17 +98,17 @@ def nameVariations(name, fromPtdf=0):
-         name1 = namedict['name']
-         # name3 is the canonical name with the imdbIndex.
-         if fromPtdf:
--            if namedict.has_key('imdbIndex'):
-+            if 'imdbIndex' in namedict:
-                 name3 = name
-         else:
-             name3 = build_name(namedict, canonical=1)
-     else:
-         # name1 is the name in the canonical format.
-         name1 = canonicalName(name)
--        name3 = u''
-+        name3 = ''
-     # name2 is the name in the normal format, if it differs from name1.
-     name2 = normalizeName(name1)
--    if name1 == name2: name2 = u''
-+    if name1 == name2: name2 = ''
-     _aux_logger.debug('name variations: 1:[%s] 2:[%s] 3:[%s]',
-                         name1, name2, name3)
-     return name1, name2, name3
-@@ -173,10 +173,10 @@ def scan_names(name_list, name1, name2, name3, results=0, ro_thresold=None,
-         # XXX: on Symbian, here we get a str; not sure this is the
-         #      right place to fix it.
-         if isinstance(nil, str):
--            nil = unicode(nil, 'latin1', 'ignore')
-+            nil = str(nil, 'latin1', 'ignore')
-         # Distance with the canonical name.
-         ratios = [ratcliff(name1, nil, sm1) + 0.05]
--        namesurname = u''
-+        namesurname = ''
-         if not _scan_character:
-             nils = nil.split(', ', 1)
-             surname = nils[0]
-@@ -201,10 +201,10 @@ def scan_names(name_list, name1, name2, name3, results=0, ro_thresold=None,
-                         build_name(n_data, canonical=1), sm3) + 0.1)
-         ratio = max(ratios)
-         if ratio >= RO_THRESHOLD:
--            if resd.has_key(i):
-+            if i in resd:
-                 if ratio > resd[i][0]: resd[i] = (ratio, (i, n_data))
-             else: resd[i] = (ratio, (i, n_data))
--    res = resd.values()
-+    res = list(resd.values())
-     res.sort()
-     res.reverse()
-     if results > 0: res[:] = res[:results]
-@@ -250,7 +250,7 @@ def scan_titles(titles_list, title1, title2, title3, results=0,
-         # XXX: on Symbian, here we get a str; not sure this is the
-         #      right place to fix it.
-         if isinstance(til, str):
--            til = unicode(til, 'latin1', 'ignore')
-+            til = str(til, 'latin1', 'ignore')
-         # Distance with the canonical title (with or without article).
-         #   titleS      -> titleR
-         #   titleS, the -> titleR, the
-@@ -278,11 +278,11 @@ def scan_titles(titles_list, title1, title2, title3, results=0,
-                         build_title(t_data, canonical=1, ptdf=1), sm3) + 0.1)
-         ratio = max(ratios)
-         if ratio >= RO_THRESHOLD:
--            if resd.has_key(i):
-+            if i in resd:
-                 if ratio > resd[i][0]:
-                     resd[i] = (ratio, (i, t_data))
-             else: resd[i] = (ratio, (i, t_data))
--    res = resd.values()
-+    res = list(resd.values())
-     res.sort()
-     res.reverse()
-     if results > 0: res[:] = res[:results]
-@@ -303,7 +303,7 @@ def scan_company_names(name_list, name1, results=0, ro_thresold=None):
-         # XXX: on Symbian, here we get a str; not sure this is the
-         #      right place to fix it.
-         if isinstance(n, str):
--            n = unicode(n, 'latin1', 'ignore')
-+            n = str(n, 'latin1', 'ignore')
-         o_name = n
-         var = 0.0
-         if withoutCountry and n.endswith(']'):
-@@ -314,12 +314,12 @@ def scan_company_names(name_list, name1, results=0, ro_thresold=None):
-         # Distance with the company name.
-         ratio = ratcliff(name1, n, sm1) + var
-         if ratio >= RO_THRESHOLD:
--            if resd.has_key(i):
-+            if i in resd:
-                 if ratio > resd[i][0]: resd[i] = (ratio,
-                                             (i, analyze_company_name(o_name)))
-             else:
-                 resd[i] = (ratio, (i, analyze_company_name(o_name)))
--    res = resd.values()
-+    res = list(resd.values())
-     res.sort()
-     res.reverse()
-     if results > 0: res[:] = res[:results]
-@@ -417,8 +417,8 @@ def _reGroupDict(d, newgr):
-     and put it in the subsection (another dictionary) named
-     'laserdisc', using the key 'label'."""
-     r = {}
--    newgrks = newgr.keys()
--    for k, v in d.items():
-+    newgrks = list(newgr.keys())
-+    for k, v in list(d.items()):
-         if k in newgrks:
-             r.setdefault(newgr[k][0], {})[newgr[k][1]] = v
-             # A not-so-clearer version:
-@@ -434,7 +434,7 @@ def _groupListBy(l, index):
-     tmpd = {}
-     for item in l:
-         tmpd.setdefault(item[index], []).append(item)
--    res = tmpd.values()
-+    res = list(tmpd.values())
-     return res
- 
- 
-@@ -458,7 +458,7 @@ def get_movie_data(movieID, kindDict, fromAka=0, _table=None):
-             'season': m.seasonNr, 'episode': m.episodeNr}
-     if not fromAka:
-         if m.seriesYears is not None:
--            mdict['series years'] = unicode(m.seriesYears)
-+            mdict['series years'] = str(m.seriesYears)
-     if mdict['imdbIndex'] is None: del mdict['imdbIndex']
-     if mdict['year'] is None: del mdict['year']
-     else:
-@@ -507,7 +507,7 @@ def getSingleInfo(table, movieID, infoType, notAList=False):
-         info = r.info
-         note = r.note
-         if note:
--            info += u'::%s' % note
-+            info += '::%s' % note
-         retList.append(info)
-     if not retList:
-         return {}
-@@ -552,11 +552,11 @@ class IMDbSqlAccessSystem(IMDbBase):
-             mod = mod.strip().lower()
-             try:
-                 if mod == 'sqlalchemy':
--                    from alchemyadapter import getDBTables, NotFoundError, \
-+                    from .alchemyadapter import getDBTables, NotFoundError, \
-                                                 setConnection, AND, OR, IN, \
-                                                 ISNULL, CONTAINSSTRING, toUTF8
-                 elif mod == 'sqlobject':
--                    from objectadapter import getDBTables, NotFoundError, \
-+                    from .objectadapter import getDBTables, NotFoundError, \
-                                                 setConnection, AND, OR, IN, \
-                                                 ISNULL, CONTAINSSTRING, toUTF8
-                 else:
-@@ -578,7 +578,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-                 if _gotError:
-                     self._sql_logger.warn('falling back to "%s"' % mod)
-                 break
--            except ImportError, e:
-+            except ImportError as e:
-                 if idx+1 >= nrMods:
-                     raise IMDbError('unable to use any ORM in %s: %s' % (
-                                                     str(useORM), str(e)))
-@@ -593,7 +593,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-         self._sql_logger.debug('connecting to %s', uri)
-         try:
-             self._connection = setConnection(uri, DB_TABLES)
--        except AssertionError, e:
-+        except AssertionError as e:
-             raise IMDbDataAccessError( \
-                     'unable to connect to the database server; ' + \
-                     'complete message: "%s"' % str(e))
-@@ -642,11 +642,11 @@ class IMDbSqlAccessSystem(IMDbBase):
- 
-     def _findRefs(self, o, trefs, nrefs):
-         """Find titles or names references in strings."""
--        if isinstance(o, (unicode, str)):
-+        if isinstance(o, str):
-             for title in re_titleRef.findall(o):
-                 a_title = analyze_title(title, canonical=0)
-                 rtitle = build_title(a_title, ptdf=1)
--                if trefs.has_key(rtitle): continue
-+                if rtitle in trefs: continue
-                 movieID = self._getTitleID(rtitle)
-                 if movieID is None:
-                     movieID = self._getTitleID(title)
-@@ -655,7 +655,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-                 m = Movie(title=rtitle, movieID=movieID,
-                             accessSystem=self.accessSystem)
-                 trefs[rtitle] = m
--                rtitle2 = canonicalTitle(a_title.get('title', u''))
-+                rtitle2 = canonicalTitle(a_title.get('title', ''))
-                 if rtitle2 and rtitle2 != rtitle and rtitle2 != title:
-                     trefs[rtitle2] = m
-                 if title != rtitle:
-@@ -663,7 +663,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-             for name in re_nameRef.findall(o):
-                 a_name = analyze_name(name, canonical=1)
-                 rname = build_name(a_name, canonical=1)
--                if nrefs.has_key(rname): continue
-+                if rname in nrefs: continue
-                 personID = self._getNameID(rname)
-                 if personID is None:
-                     personID = self._getNameID(name)
-@@ -671,7 +671,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-                 p = Person(name=rname, personID=personID,
-                             accessSystem=self.accessSystem)
-                 nrefs[rname] = p
--                rname2 = normalizeName(a_name.get('name', u''))
-+                rname2 = normalizeName(a_name.get('name', ''))
-                 if rname2 and rname2 != rname:
-                     nrefs[rname2] = p
-                 if name != rname and name != rname2:
-@@ -680,7 +680,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-             for item in o:
-                 self._findRefs(item, trefs, nrefs)
-         elif isinstance(o, dict):
--            for value in o.values():
-+            for value in list(o.values()):
-                 self._findRefs(value, trefs, nrefs)
-         return (trefs, nrefs)
- 
-@@ -690,7 +690,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-         nrefs = {}
-         try:
-             return self._findRefs(o, trefs, nrefs)
--        except RuntimeError, e:
-+        except RuntimeError as e:
-             # Symbian/python 2.2 has a poor regexp implementation.
-             import warnings
-             warnings.warn('RuntimeError in '
-@@ -716,7 +716,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-                 try:
-                     lookup(e)
-                     lat1 = akatitle.encode('latin_1', 'replace')
--                    return unicode(lat1, e, 'replace')
-+                    return str(lat1, e, 'replace')
-                 except (LookupError, ValueError, TypeError):
-                     continue
-         return None
-@@ -726,7 +726,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-         if val is None:
-             return ISNULL(col)
-         else:
--            if isinstance(val, (int, long)):
-+            if isinstance(val, int):
-                 return col == val
-             else:
-                 return col == self.toUTF8(val)
-@@ -919,7 +919,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-         #                s_title = s_title_rebuilt
-         #else:
-         #    _episodes = False
--        if isinstance(s_title, unicode):
-+        if isinstance(s_title, str):
-             s_title = s_title.encode('ascii', 'ignore')
- 
-         soundexCode = soundex(s_title)
-@@ -978,7 +978,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-             q2 = [(q.movieID, get_movie_data(q.id, self._kind, fromAka=1))
-                     for q in AkaTitle.select(conditionAka)]
-             qr += q2
--        except NotFoundError, e:
-+        except NotFoundError as e:
-             raise IMDbDataAccessError( \
-                     'unable to search the database: "%s"' % str(e))
- 
-@@ -1025,7 +1025,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-         infosets = self.get_movie_infoset()
-         try:
-             res = get_movie_data(movieID, self._kind)
--        except NotFoundError, e:
-+        except NotFoundError as e:
-             raise IMDbDataAccessError( \
-                     'unable to get movieID "%s": "%s"' % (movieID, str(e)))
-         if not res:
-@@ -1051,9 +1051,9 @@ class IMDbSqlAccessSystem(IMDbBase):
-                     curRole = robj.name
-                     curRoleID = robj.id
-                 p = Person(personID=pdata[0], name=pdata[5],
--                            currentRole=curRole or u'',
-+                            currentRole=curRole or '',
-                             roleID=curRoleID,
--                            notes=pdata[2] or u'',
-+                            notes=pdata[2] or '',
-                             accessSystem='sql')
-                 if pdata[6]: p['imdbIndex'] = pdata[6]
-                 p.billingPos = pdata[3]
-@@ -1088,7 +1088,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-                     cDbTxt += ' %s' % cDb.countryCode
-                 company = Company(name=cDbTxt,
-                                 companyID=mdata[1],
--                                notes=mdata[2] or u'',
-+                                notes=mdata[2] or '',
-                                 accessSystem=self.accessSystem)
-                 res.setdefault(sect, []).append(company)
-         # AKA titles.
-@@ -1108,8 +1108,8 @@ class IMDbSqlAccessSystem(IMDbBase):
-             for cc in CompleteCast.select(CompleteCast.q.movieID == movieID)]
-         if compcast:
-             for entry in compcast:
--                val = unicode(entry[1])
--                res[u'complete %s' % entry[0]] = val
-+                val = str(entry[1])
-+                res['complete %s' % entry[0]] = val
-         # Movie connections.
-         mlinks = [[ml.linkedMovieID, self._link[ml.linkTypeID]]
-                     for ml in MovieLink.select(MovieLink.q.movieID == movieID)]
-@@ -1143,11 +1143,11 @@ class IMDbSqlAccessSystem(IMDbBase):
-                 if season not in episodes: episodes[season] = {}
-                 ep_number = episode_data.get('episode')
-                 if ep_number is None:
--                    ep_number = max((episodes[season].keys() or [0])) + 1
-+                    ep_number = max((list(episodes[season].keys()) or [0])) + 1
-                 episodes[season][ep_number] = m
-             res['episodes'] = episodes
--            res['number of episodes'] = sum([len(x) for x in episodes.values()])
--            res['number of seasons'] = len(episodes.keys())
-+            res['number of episodes'] = sum([len(x) for x in list(episodes.values())])
-+            res['number of seasons'] = len(list(episodes.keys()))
-         # Regroup laserdisc information.
-         res = _reGroupDict(res, self._moviesubs)
-         # Do some transformation to preserve consistency with other
-@@ -1212,7 +1212,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-         if not name: return []
-         s_name = analyze_name(name)['name']
-         if not s_name: return []
--        if isinstance(s_name, unicode):
-+        if isinstance(s_name, str):
-             s_name = s_name.encode('ascii', 'ignore')
-         soundexCode = soundex(s_name)
-         name1, name2, name3 = nameVariations(name)
-@@ -1237,7 +1237,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-             q2 = [(q.personID, {'name': q.name, 'imdbIndex': q.imdbIndex})
-                     for q in AkaName.select(conditionAka)]
-             qr += q2
--        except NotFoundError, e:
-+        except NotFoundError as e:
-             raise IMDbDataAccessError( \
-                     'unable to search the database: "%s"' % str(e))
- 
-@@ -1278,7 +1278,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-         infosets = self.get_person_infoset()
-         try:
-             p = Name.get(personID)
--        except NotFoundError, e:
-+        except NotFoundError as e:
-             raise IMDbDataAccessError( \
-                     'unable to get personID "%s": "%s"' % (personID, str(e)))
-         res = {'name': p.name, 'imdbIndex': p.imdbIndex}
-@@ -1298,7 +1298,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-             for mdata in group:
-                 duty = orig_duty = group[0][3]
-                 if duty not in seenDuties: seenDuties.append(orig_duty)
--                note = mdata[2] or u''
-+                note = mdata[2] or ''
-                 if 'episode of' in mdata[4]:
-                     duty = 'episodes'
-                     if orig_duty not in ('actor', 'actress'):
-@@ -1311,7 +1311,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-                     curRole = robj.name
-                     curRoleID = robj.id
-                 m = Movie(movieID=mdata[0], data=mdata[4],
--                            currentRole=curRole or u'',
-+                            currentRole=curRole or '',
-                             roleID=curRoleID,
-                             notes=note, accessSystem='sql')
-                 if duty != 'episodes':
-@@ -1380,7 +1380,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-         if not name: return []
-         s_name = analyze_name(name)['name']
-         if not s_name: return []
--        if isinstance(s_name, unicode):
-+        if isinstance(s_name, str):
-             s_name = s_name.encode('ascii', 'ignore')
-         s_name = normalizeName(s_name)
-         soundexCode = soundex(s_name)
-@@ -1413,7 +1413,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-         try:
-             qr = [(q.id, {'name': q.name, 'imdbIndex': q.imdbIndex})
-                     for q in CharName.select(condition)]
--        except NotFoundError, e:
-+        except NotFoundError as e:
-             raise IMDbDataAccessError( \
-                     'unable to search the database: "%s"' % str(e))
-         res = scan_names(qr, s_name, name2, '', results,
-@@ -1433,7 +1433,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-         infosets = self.get_character_infoset()
-         try:
-             c = CharName.get(characterID)
--        except NotFoundError, e:
-+        except NotFoundError as e:
-             raise IMDbDataAccessError( \
-                     'unable to get characterID "%s": "%s"' % (characterID, e))
-         res = {'name': c.name, 'imdbIndex': c.imdbIndex}
-@@ -1452,12 +1452,12 @@ class IMDbSqlAccessSystem(IMDbBase):
-         for f in filmodata:
-             curRole = None
-             curRoleID = f[1]
--            note = f[2] or u''
-+            note = f[2] or ''
-             if curRoleID is not None:
-                 robj = Name.get(curRoleID)
-                 curRole = robj.name
-             m = Movie(movieID=f[0], data=f[3],
--                        currentRole=curRole or u'',
-+                        currentRole=curRole or '',
-                         roleID=curRoleID, roleIsPerson=True,
-                         notes=note, accessSystem='sql')
-             fdata.append(m)
-@@ -1473,7 +1473,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-     def _search_company(self, name, results):
-         name = name.strip()
-         if not name: return []
--        if isinstance(name, unicode):
-+        if isinstance(name, str):
-             name = name.encode('ascii', 'ignore')
-         soundexCode = soundex(name)
-         # If the soundex is None, compare only with the first
-@@ -1488,7 +1488,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-         try:
-             qr = [(q.id, {'name': q.name, 'country': q.countryCode})
-                     for q in CompanyName.select(condition)]
--        except NotFoundError, e:
-+        except NotFoundError as e:
-             raise IMDbDataAccessError( \
-                     'unable to search the database: "%s"' % str(e))
-         qr[:] = [(x[0], build_company_name(x[1])) for x in qr]
-@@ -1509,7 +1509,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-         infosets = self.get_company_infoset()
-         try:
-             c = CompanyName.get(companyID)
--        except NotFoundError, e:
-+        except NotFoundError as e:
-             raise IMDbDataAccessError( \
-                     'unable to get companyID "%s": "%s"' % (companyID, e))
-         res = {'name': c.name, 'country': c.countryCode}
-@@ -1529,7 +1529,7 @@ class IMDbSqlAccessSystem(IMDbBase):
-             ctype = group[0][2]
-             for movieID, companyID, ctype, note, movieData in group:
-                 movie = Movie(data=movieData, movieID=movieID,
--                            notes=note or u'', accessSystem=self.accessSystem)
-+                            notes=note or '', accessSystem=self.accessSystem)
-                 res.setdefault(ctype, []).append(movie)
-             res.get(ctype, []).sort()
-         return {'data': res, 'info sets': infosets}
-diff --git a/imdb/parser/sql/alchemyadapter.py b/imdb/parser/sql/alchemyadapter.py
-index 9b5c79e..47e348e 100644
---- a/imdb/parser/sql/alchemyadapter.py
-+++ b/imdb/parser/sql/alchemyadapter.py
-@@ -39,7 +39,7 @@ except ImportError:
-                          'Keys will not be created.')
- 
- from imdb._exceptions import IMDbDataAccessError
--from dbschema import *
-+from .dbschema import *
- 
- # Used to convert table and column names.
- re_upper = re.compile(r'([A-Z])')
-@@ -128,7 +128,7 @@ class QAdapter(object):
- 
-     def __getattr__(self, name):
-         try: return getattr(self.table.c, self.colMap[name])
--        except KeyError, e: raise AttributeError("unable to get '%s'" % name)
-+        except KeyError as e: raise AttributeError("unable to get '%s'" % name)
- 
-     def __repr__(self):
-         return '<QAdapter(table=%s, colMap=%s) [id=%s]>' % \
-@@ -146,11 +146,11 @@ class RowAdapter(object):
-         if colMap is None:
-             colMap = {}
-         self.colMap = colMap
--        self.colMapKeys = colMap.keys()
-+        self.colMapKeys = list(colMap.keys())
- 
-     def __getattr__(self, name):
-         try: return getattr(self.row, self.colMap[name])
--        except KeyError, e: raise AttributeError("unable to get '%s'" % name)
-+        except KeyError as e: raise AttributeError("unable to get '%s'" % name)
- 
-     def __setattr__(self, name, value):
-         # FIXME: I can't even think about how much performances suffer,
-@@ -339,7 +339,7 @@ class TableAdapter(object):
-         #      db-level.
-         try:
-             idx.create()
--        except exc.OperationalError, e:
-+        except exc.OperationalError as e:
-             _alchemy_logger.warn('Skipping creation of the %s.%s index: %s' %
-                                 (self.sqlmeta.table, col.name, e))
- 
-@@ -388,7 +388,7 @@ class TableAdapter(object):
-     def __call__(self, *args, **kwds):
-         """To insert a new row with the syntax: TableClass(key=value, ...)"""
-         taArgs = {}
--        for key, value in kwds.items():
-+        for key, value in list(kwds.items()):
-             taArgs[self.colMap.get(key, key)] = value
-         self._ta_insert.execute(*args, **taArgs)
- 
-diff --git a/imdb/parser/sql/dbschema.py b/imdb/parser/sql/dbschema.py
-index 2f359fb..aa2a58c 100644
---- a/imdb/parser/sql/dbschema.py
-+++ b/imdb/parser/sql/dbschema.py
-@@ -88,8 +88,8 @@ class DBCol(object):
-             s += ', foreignKey="%s"' % self.foreignKey
-         for param in self.params:
-             val = self.params[param]
--            if isinstance(val, (unicode, str)):
--                val = u'"%s"' % val
-+            if isinstance(val, str):
-+                val = '"%s"' % val
-             s += ', %s=%s' % (param, val)
-         s += ')>'
-         return s
-@@ -106,7 +106,7 @@ class DBTable(object):
-     def __str__(self):
-         """Class representation."""
-         return '<DBTable %s (%d cols, %d values)>' % (self.name,
--                len(self.cols), sum([len(v) for v in self.values.values()]))
-+                len(self.cols), sum([len(v) for v in list(self.values.values())]))
- 
-     def __repr__(self):
-         """Class representation."""
-@@ -441,7 +441,7 @@ def createTables(tables, ifNotExists=True):
-                                     table._imdbpyName)
-             for key in table._imdbpySchema.values:
-                 for value in table._imdbpySchema.values[key]:
--                    table(**{key: unicode(value)})
-+                    table(**{key: str(value)})
- 
- def createIndexes(tables, ifNotExists=True):
-     """Create the indexes in the database.
-@@ -452,7 +452,7 @@ def createIndexes(tables, ifNotExists=True):
-                                 table._imdbpyName)
-         try:
-             table.addIndexes(ifNotExists)
--        except Exception, e:
-+        except Exception as e:
-             errors.append(e)
-             continue
-     return errors
-@@ -469,7 +469,7 @@ def createForeignKeys(tables, ifNotExists=True):
-                                 table._imdbpyName)
-         try:
-             table.addForeignKeys(mapTables, ifNotExists)
--        except Exception, e:
-+        except Exception as e:
-             errors.append(e)
-             continue
-     return errors
-diff --git a/imdb/parser/sql/objectadapter.py b/imdb/parser/sql/objectadapter.py
-index 9797104..ddc1676 100644
---- a/imdb/parser/sql/objectadapter.py
-+++ b/imdb/parser/sql/objectadapter.py
-@@ -26,7 +26,7 @@ import logging
- from sqlobject import *
- from sqlobject.sqlbuilder import ISNULL, ISNOTNULL, AND, OR, IN, CONTAINSSTRING
- 
--from dbschema import *
-+from .dbschema import *
- 
- _object_logger = logging.getLogger('imdbpy.parser.sql.object')
- 
-@@ -59,7 +59,7 @@ def addIndexes(cls, ifNotExists=True):
-             cls.sqlmeta.addIndex(idx)
-     try:
-         cls.createIndexes(ifNotExists)
--    except dberrors.OperationalError, e:
-+    except dberrors.OperationalError as e:
-         _object_logger.warn('Skipping creation of the %s.%s index: %s' %
-                             (cls.sqlmeta.table, col.name, e))
- addIndexes = classmethod(addIndexes)
-@@ -105,7 +105,7 @@ def _buildFakeFKTable(cls, fakeTableName):
- def addForeignKeys(cls, mapTables, ifNotExists=True):
-     """Create all required foreign keys."""
-     # Do not even try, if there are no FK, in this table.
--    if not filter(None, [col.foreignKey for col in cls._imdbpySchema.cols]):
-+    if not [_f for _f in [col.foreignKey for col in cls._imdbpySchema.cols] if _f]:
-         return
-     fakeTableName = 'myfaketable%s' % cls.sqlmeta.table
-     if fakeTableName in FAKE_TABLES_REPOSITORY:
-diff --git a/imdb/utils.py b/imdb/utils.py
-index 9c300b0..7ddcb68 100644
---- a/imdb/utils.py
-+++ b/imdb/utils.py
-@@ -21,7 +21,7 @@ along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- """
- 
--from __future__ import generators
-+
- import re
- import string
- import logging
-@@ -73,11 +73,11 @@ def canonicalName(name):
-     #      - Jr.: 8025
-     # Don't convert names already in the canonical format.
-     if name.find(', ') != -1: return name
--    if isinstance(name, unicode):
--        joiner = u'%s, %s'
--        sur_joiner = u'%s %s'
--        sur_space = u' %s'
--        space = u' '
-+    if isinstance(name, str):
-+        joiner = '%s, %s'
-+        sur_joiner = '%s %s'
-+        sur_space = ' %s'
-+        space = ' '
-     else:
-         joiner = '%s, %s'
-         sur_joiner = '%s %s'
-@@ -117,8 +117,8 @@ def canonicalName(name):
- 
- def normalizeName(name):
-     """Return a name in the normal "Name Surname" format."""
--    if isinstance(name, unicode):
--        joiner = u'%s %s'
-+    if isinstance(name, str):
-+        joiner = '%s %s'
-     else:
-         joiner = '%s %s'
-     sname = name.split(', ')
-@@ -195,7 +195,7 @@ def canonicalTitle(title, lang=None):
-     title portion, without year[/imdbIndex] or special markup.
-     The 'lang' argument can be used to specify the language of the title.
-     """
--    isUnicode = isinstance(title, unicode)
-+    isUnicode = isinstance(title, str)
-     articlesDicts = linguistics.articlesDictsForLang(lang)
-     try:
-         if title.split(', ')[-1].lower() in articlesDicts[isUnicode]:
-@@ -203,7 +203,7 @@ def canonicalTitle(title, lang=None):
-     except IndexError:
-         pass
-     if isUnicode:
--        _format = u'%s, %s'
-+        _format = '%s, %s'
-     else:
-         _format = '%s, %s'
-     ltitle = title.lower()
-@@ -234,7 +234,7 @@ def normalizeTitle(title, lang=None):
-     title portion, without year[/imdbIndex] or special markup.
-     The 'lang' argument can be used to specify the language of the title.
-     """
--    isUnicode = isinstance(title, unicode)
-+    isUnicode = isinstance(title, str)
-     stitle = title.split(', ')
-     articlesDicts = linguistics.articlesDictsForLang(lang)
-     if len(stitle) > 1 and stitle[-1].lower() in articlesDicts[isUnicode]:
-@@ -242,8 +242,8 @@ def normalizeTitle(title, lang=None):
-         if stitle[-1][-1] in ("'", '-'):
-             sep = ''
-         if isUnicode:
--            _format = u'%s%s%s'
--            _joiner = u', '
-+            _format = '%s%s%s'
-+            _joiner = ', '
-         else:
-             _format = '%s%s%s'
-             _joiner = ', '
-@@ -298,7 +298,7 @@ def is_series_episode(title):
- 
- 
- def analyze_title(title, canonical=None, canonicalSeries=None,
--                    canonicalEpisode=None, _emptyString=u''):
-+                    canonicalEpisode=None, _emptyString=''):
-     """Analyze the given title and return a dictionary with the
-     "stripped" title, the kind of the show ("movie", "tv series", etc.),
-     the year of production and the optional imdbIndex (a roman number
-@@ -352,7 +352,7 @@ def analyze_title(title, canonical=None, canonicalSeries=None,
-                 except (TypeError, ValueError):
-                     pass
-         episode_d = analyze_title(episode_or_year, canonical=canonicalEpisode)
--        episode_d['kind'] = u'episode'
-+        episode_d['kind'] = 'episode'
-         episode_d['episode of'] = series_d
-         if oad:
-             episode_d['original air date'] = oad[1:-1]
-@@ -381,19 +381,19 @@ def analyze_title(title, canonical=None, canonicalSeries=None,
-     #      video game:     5,490
-     #      More up-to-date statistics: http://us.imdb.com/database_statistics
-     if title.endswith('(TV)'):
--        kind = u'tv movie'
-+        kind = 'tv movie'
-         title = title[:-4].rstrip()
-     elif title.endswith('(V)'):
--        kind = u'video movie'
-+        kind = 'video movie'
-         title = title[:-3].rstrip()
-     elif title.endswith('(video)'):
--        kind = u'video movie'
-+        kind = 'video movie'
-         title = title[:-7].rstrip()
-     elif title.endswith('(mini)'):
--        kind = u'tv mini series'
-+        kind = 'tv mini series'
-         title = title[:-6].rstrip()
-     elif title.endswith('(VG)'):
--        kind = u'video game'
-+        kind = 'video game'
-         title = title[:-4].rstrip()
-     # Search for the year and the optional imdbIndex (a roman number).
-     yi = re_year_index.findall(title)
-@@ -403,17 +403,17 @@ def analyze_title(title, canonical=None, canonicalSeries=None,
-             yk, yiy, yii = yi[-1]
-             yi = [(yiy, yii)]
-             if yk == 'TV episode':
--                kind = u'episode'
-+                kind = 'episode'
-             elif yk == 'TV':
--                kind = u'tv movie'
-+                kind = 'tv movie'
-             elif yk == 'TV Series':
--                kind = u'tv series'
-+                kind = 'tv series'
-             elif yk == 'Video':
--                kind = u'video movie'
-+                kind = 'video movie'
-             elif yk == 'TV mini-series':
--                kind = u'tv mini series'
-+                kind = 'tv mini series'
-             elif yk == 'Video Game':
--                kind = u'video game'
-+                kind = 'video game'
-             title = re_remove_kind.sub('(', title)
-     if yi:
-         last_yi = yi[-1]
-@@ -428,10 +428,10 @@ def analyze_title(title, canonical=None, canonicalSeries=None,
-     # XXX: strip('"') is not used for compatibility with Python 2.0.
-     if title and title[0] == title[-1] == '"':
-         if not kind:
--            kind = u'tv series'
-+            kind = 'tv series'
-         title = title[1:-1].strip()
-     elif title.endswith('(TV series)'):
--        kind = u'tv series'
-+        kind = 'tv series'
-         title = title[:-11].rstrip()
-     if not title:
-         raise IMDbParserError('invalid title: "%s"' % original_t)
-@@ -443,7 +443,7 @@ def analyze_title(title, canonical=None, canonicalSeries=None,
-     # 'kind' is one in ('movie', 'episode', 'tv series', 'tv mini series',
-     #                   'tv movie', 'video movie', 'video game')
-     result['title'] = title
--    result['kind'] = kind or u'movie'
-+    result['kind'] = kind or 'movie'
-     if year and year != '????':
-         if '-' in year:
-             result['series years'] = year
-@@ -461,7 +461,7 @@ def analyze_title(title, canonical=None, canonicalSeries=None,
- 
- _web_format = '%d %B %Y'
- _ptdf_format = '(%Y-%m-%d)'
--def _convertTime(title, fromPTDFtoWEB=1, _emptyString=u''):
-+def _convertTime(title, fromPTDFtoWEB=1, _emptyString=''):
-     """Convert a time expressed in the pain text data files, to
-     the 'Episode dated ...' format used on the web site; if
-     fromPTDFtoWEB is false, the inverted conversion is applied."""
-@@ -470,13 +470,13 @@ def _convertTime(title, fromPTDFtoWEB=1, _emptyString=u''):
-             from_format = _ptdf_format
-             to_format = _web_format
-         else:
--            from_format = u'Episode dated %s' % _web_format
-+            from_format = 'Episode dated %s' % _web_format
-             to_format = _ptdf_format
-         t = strptime(title, from_format)
-         title = strftime(to_format, t)
-         if fromPTDFtoWEB:
-             if title[0] == '0': title = title[1:]
--            title = u'Episode dated %s' % title
-+            title = 'Episode dated %s' % title
-     except ValueError:
-         pass
-     if isinstance(_emptyString, str):
-@@ -489,7 +489,7 @@ def _convertTime(title, fromPTDFtoWEB=1, _emptyString=u''):
- 
- def build_title(title_dict, canonical=None, canonicalSeries=None,
-                 canonicalEpisode=None, ptdf=0, lang=None, _doYear=1,
--                _emptyString=u''):
-+                _emptyString=''):
-     """Given a dictionary that represents a "long" IMDb title,
-     return a string.
- 
-@@ -555,11 +555,11 @@ def build_title(title_dict, canonical=None, canonicalSeries=None,
-             title = normalizeTitle(title, lang=lang)
-     if pre_title:
-         title = '%s %s' % (pre_title, title)
--    if kind in (u'tv series', u'tv mini series'):
-+    if kind in ('tv series', 'tv mini series'):
-         title = '"%s"' % title
-     if _doYear:
-         imdbIndex = title_dict.get('imdbIndex')
--        year = title_dict.get('year') or u'????'
-+        year = title_dict.get('year') or '????'
-         if isinstance(_emptyString, str):
-             year = str(year)
-         title += ' (%s' % year
-@@ -582,7 +582,7 @@ def split_company_name_notes(name):
-     """Return two strings, the first representing the company name,
-     and the other representing the (optional) notes."""
-     name = name.strip()
--    notes = u''
-+    notes = ''
-     if name.endswith(')'):
-         fpidx = name.find('(')
-         if fpidx != -1:
-@@ -616,7 +616,7 @@ def analyze_company_name(name, stripNotes=False):
-     return result
- 
- 
--def build_company_name(name_dict, _emptyString=u''):
-+def build_company_name(name_dict, _emptyString=''):
-     """Given a dictionary that represents a "long" IMDb company name,
-     return a string.
-     """
-@@ -770,15 +770,15 @@ def modifyStrings(o, modFunct, titlesRefs, namesRefs, charactersRefs):
-     in a list), using the provided modFunct function and titlesRefs
-     namesRefs and charactersRefs references dictionaries."""
-     # Notice that it doesn't go any deeper than the first two levels in a list.
--    if isinstance(o, (unicode, str)):
-+    if isinstance(o, str):
-         return modFunct(o, titlesRefs, namesRefs, charactersRefs)
-     elif isinstance(o, (list, tuple, dict)):
-         _stillorig = 1
--        if isinstance(o, (list, tuple)): keys = xrange(len(o))
--        else: keys = o.keys()
-+        if isinstance(o, (list, tuple)): keys = range(len(o))
-+        else: keys = list(o.keys())
-         for i in keys:
-             v = o[i]
--            if isinstance(v, (unicode, str)):
-+            if isinstance(v, str):
-                 if _stillorig:
-                     o = copy(o)
-                     _stillorig = 0
-@@ -793,8 +793,8 @@ def date_and_notes(s):
-     """Parse (birth|death) date and notes; returns a tuple in the
-     form (date, notes)."""
-     s = s.strip()
--    if not s: return (u'', u'')
--    notes = u''
-+    if not s: return ('', '')
-+    notes = ''
-     if s[0].isdigit() or s.split()[0].lower() in ('c.', 'january', 'february',
-                                                 'march', 'april', 'may', 'june',
-                                                 'july', 'august', 'september',
-@@ -807,8 +807,8 @@ def date_and_notes(s):
-             s = s[:i]
-     else:
-         notes = s
--        s = u''
--    if s == '????': s = u''
-+        s = ''
-+    if s == '????': s = ''
-     return s, notes
- 
- 
-@@ -816,12 +816,12 @@ class RolesList(list):
-     """A list of Person or Character instances, used for the currentRole
-     property."""
-     def __unicode__(self):
--        return u' / '.join([unicode(x) for x in self])
-+        return ' / '.join([str(x) for x in self])
- 
-     def __str__(self):
-         # FIXME: does it make sense at all?  Return a unicode doesn't
-         #        seem right, in __str__.
--        return u' / '.join([unicode(x).encode('utf8') for x in self])
-+        return ' / '.join([str(x).encode('utf8') for x in self])
- 
- 
- # Replace & with &amp;, but only if it's not already part of a charref.
-@@ -836,7 +836,7 @@ def escape4xml(value):
-     value = _re_amp.sub('&amp;', value)
-     value = value.replace('"', '&quot;').replace("'", '&apos;')
-     value = value.replace('<', '&lt;').replace('>', '&gt;')
--    if isinstance(value, unicode):
-+    if isinstance(value, str):
-         value = value.encode('ascii', 'xmlcharrefreplace')
-     return value
- 
-@@ -847,9 +847,9 @@ def _refsToReplace(value, modFunct, titlesRefs, namesRefs, charactersRefs):
-     by the user-provided modFunct function, the second is the same
-     reference un-escaped."""
-     mRefs = []
--    for refRe, refTemplate in [(re_titleRef, u'_%s_ (qv)'),
--                                (re_nameRef, u"'%s' (qv)"),
--                                (re_characterRef, u'#%s# (qv)')]:
-+    for refRe, refTemplate in [(re_titleRef, '_%s_ (qv)'),
-+                                (re_nameRef, "'%s' (qv)"),
-+                                (re_characterRef, '#%s# (qv)')]:
-         theseRefs = []
-         for theRef in refRe.findall(value):
-             # refTemplate % theRef values don't change for a single
-@@ -877,7 +877,7 @@ def _handleTextNotes(s):
-     ssplit = s.split('::', 1)
-     if len(ssplit) == 1:
-         return s
--    return u'%s<notes>%s</notes>' % (ssplit[0], ssplit[1])
-+    return '%s<notes>%s</notes>' % (ssplit[0], ssplit[1])
- 
- 
- def _normalizeValue(value, withRefs=False, modFunct=None, titlesRefs=None,
-@@ -885,7 +885,7 @@ def _normalizeValue(value, withRefs=False, modFunct=None, titlesRefs=None,
-     """Replace some chars that can't be present in a XML text."""
-     # XXX: use s.encode(encoding, 'xmlcharrefreplace') ?  Probably not
-     #      a great idea: after all, returning a unicode is safe.
--    if isinstance(value, (unicode, str)):
-+    if isinstance(value, str):
-         if not withRefs:
-             value = _handleTextNotes(escape4xml(value))
-         else:
-@@ -899,7 +899,7 @@ def _normalizeValue(value, withRefs=False, modFunct=None, titlesRefs=None,
-                 for toReplace, replaceWith in replaceList:
-                     value = value.replace(toReplace, replaceWith)
-     else:
--        value = unicode(value)
-+        value = str(value)
-     return value
- 
- 
-@@ -914,7 +914,7 @@ def _tag4TON(ton, addAccessSystem=False, _containerOnly=False):
-     else:
-         value = ton.get('long imdb name') or ton.get('name', '')
-     value = _normalizeValue(value)
--    extras = u''
-+    extras = ''
-     crl = ton.currentRole
-     if crl:
-         if not isinstance(crl, list):
-@@ -925,33 +925,33 @@ def _tag4TON(ton, addAccessSystem=False, _containerOnly=False):
-             crValue = _normalizeValue(crValue)
-             crID = cr.getID()
-             if crID is not None:
--                extras += u'<current-role><%s id="%s">' \
--                            u'<name>%s</name></%s>' % (crTag, crID,
-+                extras += '<current-role><%s id="%s">' \
-+                            '<name>%s</name></%s>' % (crTag, crID,
-                                                         crValue, crTag)
-             else:
--                extras += u'<current-role><%s><name>%s</name></%s>' % \
-+                extras += '<current-role><%s><name>%s</name></%s>' % \
-                                (crTag, crValue, crTag)
-             if cr.notes:
--                extras += u'<notes>%s</notes>' % _normalizeValue(cr.notes)
--            extras += u'</current-role>'
-+                extras += '<notes>%s</notes>' % _normalizeValue(cr.notes)
-+            extras += '</current-role>'
-     theID = ton.getID()
-     if theID is not None:
--        beginTag = u'<%s id="%s"' % (tag, theID)
-+        beginTag = '<%s id="%s"' % (tag, theID)
-         if addAccessSystem and ton.accessSystem:
-             beginTag += ' access-system="%s"' % ton.accessSystem
-         if not _containerOnly:
--            beginTag += u'><%s>%s</%s>' % (what, value, what)
-+            beginTag += '><%s>%s</%s>' % (what, value, what)
-         else:
--            beginTag += u'>'
-+            beginTag += '>'
-     else:
-         if not _containerOnly:
--            beginTag = u'<%s><%s>%s</%s>' % (tag, what, value, what)
-+            beginTag = '<%s><%s>%s</%s>' % (tag, what, value, what)
-         else:
--            beginTag = u'<%s>' % tag
-+            beginTag = '<%s>' % tag
-     beginTag += extras
-     if ton.notes:
--        beginTag += u'<notes>%s</notes>' % _normalizeValue(ton.notes)
--    return (beginTag, u'</%s>' % tag)
-+        beginTag += '<notes>%s</notes>' % _normalizeValue(ton.notes)
-+    return (beginTag, '</%s>' % tag)
- 
- 
- TAGS_TO_MODIFY = {
-@@ -986,13 +986,13 @@ def _tagAttr(key, fullpath):
-         tagName, useTitle = TAGS_TO_MODIFY[fullpath]
-         if useTitle:
-             attrs['key'] = _escapedKey
--    elif not isinstance(key, unicode):
-+    elif not isinstance(key, str):
-         if isinstance(key, str):
--            tagName = unicode(key, 'ascii', 'ignore')
-+            tagName = str(key, 'ascii', 'ignore')
-         else:
-             strType = str(type(key)).replace("<type '", "").replace("'>", "")
-             attrs['keytype'] = strType
--            tagName = unicode(key)
-+            tagName = str(key)
-     else:
-         tagName = key
-     if isinstance(key, int):
-@@ -1011,7 +1011,7 @@ def _tagAttr(key, fullpath):
-         tagName = 'item'
-         _utils_logger.error('invalid tag: %s [%s]' % (_escapedKey, fullpath))
-         attrs['key'] = _escapedKey
--    return tagName, u' '.join([u'%s="%s"' % i for i in attrs.items()])
-+    return tagName, ' '.join(['%s="%s"' % i for i in list(attrs.items())])
- 
- 
- def _seq2xml(seq, _l=None, withRefs=False, modFunct=None,
-@@ -1032,17 +1032,17 @@ def _seq2xml(seq, _l=None, withRefs=False, modFunct=None,
-                 tagName = key.__class__.__name__.lower()
-             else:
-                 tagName, attrs = _tagAttr(key, fullpath)
--                openTag = u'<%s' % tagName
-+                openTag = '<%s' % tagName
-                 if attrs:
-                     openTag += ' %s' % attrs
-                 if _topLevel and key2infoset and key in key2infoset:
--                    openTag += u' infoset="%s"' % key2infoset[key]
-+                    openTag += ' infoset="%s"' % key2infoset[key]
-                 if isinstance(value, int):
-                     openTag += ' type="int"'
-                 elif isinstance(value, float):
-                     openTag += ' type="float"'
--                openTag += u'>'
--                closeTag = u'</%s>' % tagName
-+                openTag += '>'
-+                closeTag = '</%s>' % tagName
-             _l.append(openTag)
-             _seq2xml(value, _l, withRefs, modFunct, titlesRefs,
-                     namesRefs, charactersRefs, _topLevel=False,
-@@ -1050,11 +1050,11 @@ def _seq2xml(seq, _l=None, withRefs=False, modFunct=None,
-             _l.append(closeTag)
-     elif isinstance(seq, (list, tuple)):
-         tagName, attrs = _tagAttr('item', fullpath)
--        beginTag = u'<%s' % tagName
-+        beginTag = '<%s' % tagName
-         if attrs:
--            beginTag += u' %s' % attrs
-+            beginTag += ' %s' % attrs
-         #beginTag += u'>'
--        closeTag = u'</%s>' % tagName
-+        closeTag = '</%s>' % tagName
-         for item in seq:
-             if isinstance(item, _Container):
-                 _seq2xml(item, _l, withRefs, modFunct, titlesRefs,
-@@ -1067,7 +1067,7 @@ def _seq2xml(seq, _l=None, withRefs=False, modFunct=None,
-                     openTag += ' type="int"'
-                 elif isinstance(item, float):
-                     openTag += ' type="float"'
--                openTag += u'>'
-+                openTag += '>'
-                 _l.append(openTag)
-                 _seq2xml(item, _l, withRefs, modFunct, titlesRefs,
-                         namesRefs, charactersRefs, _topLevel=False,
-@@ -1086,7 +1086,7 @@ def _seq2xml(seq, _l=None, withRefs=False, modFunct=None,
-     return _l
- 
- 
--_xmlHead = u"""<?xml version="1.0"?>
-+_xmlHead = """<?xml version="1.0"?>
- <!DOCTYPE %s SYSTEM "http://imdbpy.sf.net/dtd/imdbpy{VERSION}.dtd">
- 
- """
-@@ -1111,8 +1111,8 @@ class _Container(object):
-     # Regular expression used to build the 'full-size (headshot|cover url)'.
-     _re_fullsizeURL = re.compile(r'\._V1\._SX(\d+)_SY(\d+)_')
- 
--    def __init__(self, myID=None, data=None, notes=u'',
--                currentRole=u'', roleID=None, roleIsPerson=False,
-+    def __init__(self, myID=None, data=None, notes='',
-+                currentRole='', roleID=None, roleIsPerson=False,
-                 accessSystem=None, titlesRefs=None, namesRefs=None,
-                 charactersRefs=None, modFunct=None, *args, **kwds):
-         """Initialize a Movie, Person, Character or Company object.
-@@ -1199,12 +1199,12 @@ class _Container(object):
-         """Return a Character or Person instance."""
-         if self.__role:
-             return self.__role
--        return self._roleClass(name=u'', accessSystem=self.accessSystem,
-+        return self._roleClass(name='', accessSystem=self.accessSystem,
-                                 modFunct=self.modFunct)
- 
-     def _set_currentRole(self, role):
-         """Set self.currentRole to a Character or Person instance."""
--        if isinstance(role, (unicode, str)):
-+        if isinstance(role, str):
-             if not role:
-                 self.__role = None
-             else:
-@@ -1213,7 +1213,7 @@ class _Container(object):
-         elif isinstance(role, (list, tuple)):
-             self.__role = RolesList()
-             for item in role:
--                if isinstance(item, (unicode, str)):
-+                if isinstance(item, str):
-                     self.__role.append(self._roleClass(name=item,
-                                         accessSystem=self.accessSystem,
-                                         modFunct=self.modFunct))
-@@ -1234,7 +1234,7 @@ class _Container(object):
-         """Reset the object."""
-         self.data = {}
-         self.myID = None
--        self.notes = u''
-+        self.notes = ''
-         self.titlesRefs = {}
-         self.namesRefs = {}
-         self.charactersRefs = {}
-@@ -1250,7 +1250,7 @@ class _Container(object):
-     def clear(self):
-         """Reset the dictionary."""
-         self.data.clear()
--        self.notes = u''
-+        self.notes = ''
-         self.titlesRefs = {}
-         self.namesRefs = {}
-         self.charactersRefs = {}
-@@ -1393,7 +1393,7 @@ class _Container(object):
-             if value is None:
-                 return None
-             tag = self.__class__.__name__.lower()
--            return u''.join(_seq2xml({key: value}, withRefs=withRefs,
-+            return ''.join(_seq2xml({key: value}, withRefs=withRefs,
-                                         modFunct=origModFunct,
-                                         titlesRefs=self.titlesRefs,
-                                         namesRefs=self.namesRefs,
-@@ -1409,14 +1409,14 @@ class _Container(object):
-         beginTag, endTag = _tag4TON(self, addAccessSystem=True,
-                                     _containerOnly=True)
-         resList = [beginTag]
--        for key in self.keys():
-+        for key in list(self.keys()):
-             value = self.getAsXML(key, _with_add_keys=_with_add_keys)
-             if not value:
-                 continue
-             resList.append(value)
-         resList.append(endTag)
-         head = _xmlHead % self.__class__.__name__.lower()
--        return head + u''.join(resList)
-+        return head + ''.join(resList)
- 
-     def _getitem(self, key):
-         """Handle special keys."""
-@@ -1436,7 +1436,7 @@ class _Container(object):
-             try:
-                 return modifyStrings(rawData, self.modFunct, self.titlesRefs,
-                                     self.namesRefs, self.charactersRefs)
--            except RuntimeError, e:
-+            except RuntimeError as e:
-                 # Symbian/python 2.2 has a poor regexp implementation.
-                 import warnings
-                 warnings.warn('RuntimeError in '
-@@ -1460,20 +1460,20 @@ class _Container(object):
- 
-     def keys(self):
-         """Return a list of valid keys."""
--        return self.data.keys() + self._additional_keys()
-+        return list(self.data.keys()) + self._additional_keys()
- 
-     def items(self):
-         """Return the items in the dictionary."""
--        return [(k, self.get(k)) for k in self.keys()]
-+        return [(k, self.get(k)) for k in list(self.keys())]
- 
-     # XXX: is this enough?
--    def iteritems(self): return self.data.iteritems()
--    def iterkeys(self): return self.data.iterkeys()
--    def itervalues(self): return self.data.itervalues()
-+    def iteritems(self): return iter(self.data.items())
-+    def iterkeys(self): return iter(self.data.keys())
-+    def itervalues(self): return iter(self.data.values())
- 
-     def values(self):
-         """Return the values in the dictionary."""
--        return [self.get(k) for k in self.keys()]
-+        return [self.get(k) for k in list(self.keys())]
- 
-     def has_key(self, key):
-         """Return true if a given section is defined."""
-@@ -1497,7 +1497,7 @@ class _Container(object):
-             return failobj
- 
-     def setdefault(self, key, failobj=None):
--        if not self.has_key(key):
-+        if key not in self:
-             self[key] = failobj
-         return self[key]
- 
-@@ -1526,7 +1526,7 @@ class _Container(object):
-         """Directly store the item with the given key."""
-         self.data[key] = item
- 
--    def __nonzero__(self):
-+    def __bool__(self):
-         """Return true if self.data contains something."""
-         if self.data: return 1
-         return 0
-@@ -1551,18 +1551,18 @@ def flatten(seq, toDescend=(list, dict, tuple), yieldDictKeys=0,
-         if isinstance(seq, (dict, _Container)):
-             if yieldDictKeys:
-                 # Yield also the keys of the dictionary.
--                for key in seq.iterkeys():
-+                for key in seq.keys():
-                     for k in flatten(key, toDescend=toDescend,
-                                 yieldDictKeys=yieldDictKeys,
-                                 onlyKeysType=onlyKeysType, scalar=scalar):
-                         if onlyKeysType and isinstance(k, onlyKeysType):
-                             yield k
--            for value in seq.itervalues():
-+            for value in seq.values():
-                 for v in flatten(value, toDescend=toDescend,
-                                 yieldDictKeys=yieldDictKeys,
-                                 onlyKeysType=onlyKeysType, scalar=scalar):
-                     yield v
--        elif not isinstance(seq, (str, unicode, int, float)):
-+        elif not isinstance(seq, (str, int, float)):
-             for item in seq:
-                 for i in flatten(item, toDescend=toDescend,
-                                 yieldDictKeys=yieldDictKeys,
-diff --git a/setup.py b/setup.py
-index 09640df..692ada6 100755
---- a/setup.py
-+++ b/setup.py
-@@ -124,7 +124,7 @@ params = {
-         'license': 'GPL',
-         'platforms': 'any',
-         'keywords': keywords,
--        'classifiers': filter(None, classifiers.split("\n")),
-+        'classifiers': [_f for _f in classifiers.split("\n") if _f],
-         'zip_safe': False, # XXX: I guess, at least...
-         # Download URLs.
-         'url': home_page,
-@@ -197,9 +197,9 @@ def runRebuildmo():
-         rebuildmo = imp.load_module('rebuildmo', *modInfo)
-         os.chdir(modulePath)
-         languages = rebuildmo.rebuildmo()
--        print 'Created locale for: %s.' % ' '.join(languages)
--    except Exception, e:
--        print 'ERROR: unable to rebuild .mo files; caught exception %s' % e
-+        print('Created locale for: %s.' % ' '.join(languages))
-+    except Exception as e:
-+        print('ERROR: unable to rebuild .mo files; caught exception %s' % e)
-     sys.path = path
-     os.chdir(cwd)
-     return languages
-@@ -236,6 +236,6 @@ try:
-         data_files.append((os.path.join(distutils.sysconfig.get_python_lib(), base_dir), files_found))
-     setuptools.setup(**params)
- except SystemExit:
--    print ERR_MSG
-+    print(ERR_MSG)
-     raise
- 

diff --git a/dev-python/imdbpy/imdbpy-4.9-r2.ebuild b/dev-python/imdbpy/imdbpy-4.9-r2.ebuild
deleted file mode 100644
index 05196d0..0000000
--- a/dev-python/imdbpy/imdbpy-4.9-r2.ebuild
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 1999-2013 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo-x86/dev-python/imdbpy/imdbpy-4.9-r1.ebuild,v 1.1 2013/06/23 17:43:11 idella4 Exp $
-
-EAPI=5
-PYTHON_COMPAT=( python{2_5,2_6,2_7,3_3} pypy2_0 )
-
-inherit distutils-r1
-
-MY_PN="IMDbPY"
-MY_P="${MY_PN}-${PV}"
-
-DESCRIPTION="Python package to access the IMDb movie database"
-HOMEPAGE="http://imdbpy.sourceforge.net/ http://pypi.python.org/pypi/IMDbPY"
-SRC_URI="mirror://pypi/${MY_PN:0:1}/${MY_PN}/${MY_P}.tar.gz"
-
-LICENSE="GPL-2"
-SLOT="0"
-KEYWORDS="~alpha ~amd64 ~ppc ~sparc ~x86"
-IUSE=""
-
-DEPEND="dev-python/setuptools[${PYTHON_USEDEP}]
-		dev-python/configparser"
-RDEPEND=""
-
-S="${WORKDIR}/${MY_PN}-${PV}"
-
-DISTUTILS_GLOBAL_OPTIONS=("*-jython --without-cutils")
-DOCS=( docs/FAQS.txt docs/imdbpy48.dtd docs/imdbpy.cfg )
-
-PATCHES=( "${FILESDIR}/updateToPython3.patch" "${FILESDIR}/${PN}-4.6-data_location.patch" )
-
-src_configure() {
-	distutils-r1_src_configure --without-cutils
-}
-
-python_install_all() {
-	local doc
-	for doc in docs/README*
-	do
-		DOCS=( "${DOCS[@]}" $doc )
-	done
-	distutils-r1_python_install_all
-}

diff --git a/dev-python/imdbpy/metadata.xml b/dev-python/imdbpy/metadata.xml
deleted file mode 100644
index 7cd64fc..0000000
--- a/dev-python/imdbpy/metadata.xml
+++ /dev/null
@@ -1,8 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
-<pkgmetadata>
-	<herd>mythtv</herd>
-	<upstream>
-		<remote-id type="pypi">IMDbPY</remote-id>
-	</upstream>
-</pkgmetadata>

diff --git a/media-gfx/blender/Manifest b/media-gfx/blender/Manifest
deleted file mode 100644
index c6db9de..0000000
--- a/media-gfx/blender/Manifest
+++ /dev/null
@@ -1,5 +0,0 @@
-AUX blender-2.78-eigen-3.3.1.patch 666 SHA256 35900be15f8bb0540050ffab9067122ba87433eecf896095e7ce1e889e54af5b SHA512 9a2ee48fa38e23a5b086514468bd1c96ee2b97228e95cdfaac3c5d3b2c21c4e47a0ced4306d7dc89268d63b60e551b655d6d627790215ad68791fe3f35717bf5 WHIRLPOOL b073ba691ada03ead560a9be8907b8c1111c1029fdc7e6a4f59c24f5771a34bc287800f9458d81488fa19faba662afd876693bd9933600e498b13d95c4f134c6
-AUX blender-fix-install-rules.patch 518 SHA256 4fbdd73c4bb20e316f6a02c7e6a33a90285db787aac5b3baf66394d256fe6e0f SHA512 5343f0e9e6bba2d15a38cb5db95a1aeff0a704c0762e558b9b74d88dd58e2fb5077289f0d8f25a61fa092d083f7db916d27c4642dfd7cf84d4c989258c3253ec WHIRLPOOL 6ae8c113ab1ccac5c1465e0deab3bd101922303c0f60ecdb4d1cbff7dd02d37c299d4897beb5239b60e8724e47b830ecd5b7f6045dd301847f2583d559bdacbb
-DIST blender-2.78a.tar.gz 44188123 SHA256 014a14b1ba00c0e651f106469b2e5dd444f11be5a7af48056f0ed59de90cceaf SHA512 57239b6ce473aaef114453f97a6e88535ec0a2f50cfecd221fa6bdcca6fe6559dcbefd48c3f20dc2235c195de13c59a1a155d5b64d777be31812d32e68be9370 WHIRLPOOL 85ea5ea5594716433a5f2f14233a489841598baf80170f01fff3946b4fa6ebc99086396a232f0f3c5f0e8c8d6c16ec27e98b7b58d79d289f5190214dc7efb677
-EBUILD blender-2.78a-r2.ebuild 8353 SHA256 c1c9119567498541d027fa46fa8904c2bc5b1911c8663d4a9aa4e4a94fcc81cc SHA512 c12a7c31babf8d837205238bd5e4f0affb53a2e61f6a53d327eeae4b08741e9e9073debfc3f95edb5ea277462ce1898b2cae398190ab94dc2745e91390af3da4 WHIRLPOOL 2395d88212d8c95a7f0c620795a2dabab8f942ac919cf7c4d05bfa885fd3265cd977f8944e7afaa5f2cdf0b6e432a050af395c1fe684f26eb7ebc829d2de2623
-MISC metadata.xml 3440 SHA256 45cba6ae08884bbca2a5a94993dde5abfd900ead160a2f7eec19a3f4779bb696 SHA512 75c1cfb4998e9bbd2c3d4836bfba2f273d90920a2a72f70b599ee4cdfdfbf02cf1f2508aa24df6c762e3c323000de0a04623b21caaf5843e235888d3906043a8 WHIRLPOOL f20d54a0f468ee3ed1c1d5eca5790f73963d1abceb7fd7ed2040d060882fdf40950e728eea5686f50f60124a1a8ce85c6c5a84da7f67de5660930ce6a3c69c53

diff --git a/media-gfx/blender/blender-2.78a-r2.ebuild b/media-gfx/blender/blender-2.78a-r2.ebuild
deleted file mode 100644
index 3235f0c..0000000
--- a/media-gfx/blender/blender-2.78a-r2.ebuild
+++ /dev/null
@@ -1,295 +0,0 @@
-# Copyright 1999-2017 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=6
-PYTHON_COMPAT=( python3_5 )
-
-inherit check-reqs cmake-utils fdo-mime flag-o-matic gnome2-utils \
-	pax-utils python-single-r1 toolchain-funcs versionator
-
-DESCRIPTION="3D Creation/Animation/Publishing System"
-HOMEPAGE="http://www.blender.org"
-
-SRC_URI="http://download.blender.org/source/${P}.tar.gz"
-
-# Blender can have letters in the version string,
-# so strip off the letter if it exists.
-MY_PV="$(get_version_component_range 1-2)"
-
-SLOT="0"
-LICENSE="|| ( GPL-2 BL )"
-KEYWORDS="~amd64 ~x86"
-IUSE="+boost +bullet +dds +elbeem +game-engine +openexr collada colorio \
-	cuda cycles debug doc ffmpeg fftw headless jack jemalloc jpeg2k libav \
-	llvm man ndof nls openal openimageio openmp opensubdiv openvdb \
-	player sdl sndfile test tiff valgrind"
-
-# OpenCL and nVidia performance is rubbish with Blender
-# If you have nVidia, use CUDA.
-REQUIRED_USE="${PYTHON_REQUIRED_USE}
-	player? ( game-engine !headless )
-	cuda? ( cycles )
-	cycles? ( boost openexr tiff openimageio )
-	colorio? ( boost )
-	openvdb? ( boost )
-	opensubdiv? ( cuda )
-	nls? ( boost )
-	openal? ( boost )
-	game-engine? ( boost )
-	?? ( ffmpeg libav )"
-
-# Since not using OpenCL with nVidia, depend on ATI binary
-# blobs as Cycles with OpenCL does not work with any open
-# source drivers.
-RDEPEND="${PYTHON_DEPS}
-	dev-libs/lzo:2
-	>=dev-python/numpy-1.10.1[${PYTHON_USEDEP}]
-	dev-python/requests[${PYTHON_USEDEP}]
-	media-libs/freetype
-	media-libs/glew:*
-	media-libs/libpng:0=
-	media-libs/libsamplerate
-	sys-libs/zlib
-	virtual/glu
-	virtual/jpeg:0=
-	virtual/libintl
-	virtual/opengl
-	boost? ( >=dev-libs/boost-1.62:=[nls?,threads(+)] )
-	collada? ( >=media-libs/opencollada-1.6.18:= )
-	colorio? ( >=media-libs/opencolorio-1.0.9-r2 )
-	cuda? ( =dev-util/nvidia-cuda-toolkit-8.0*:= )
-	ffmpeg? ( media-video/ffmpeg:=[x264,mp3,encode,theora,jpeg2k?] )
-	libav? ( >=media-video/libav-11.3:=[x264,mp3,encode,theora,jpeg2k?] )
-	fftw? ( sci-libs/fftw:3.0= )
-	!headless? (
-		x11-libs/libX11
-		x11-libs/libXi
-		x11-libs/libXxf86vm
-	)
-	jack? ( virtual/jack )
-	jemalloc? ( dev-libs/jemalloc:= )
-	jpeg2k? ( media-libs/openjpeg:0 )
-	llvm? ( sys-devel/llvm )
-	ndof? (
-		app-misc/spacenavd
-		dev-libs/libspnav
-	)
-	nls? ( virtual/libiconv )
-	openal? ( media-libs/openal )
-	openimageio? ( >=media-libs/openimageio-1.6.9 )
-	openexr? (
-		>=media-libs/ilmbase-2.2.0:=
-		>=media-libs/openexr-2.2.0:=
-	)
-	opensubdiv? ( media-libs/opensubdiv[cuda=] )
-	openvdb? (
-		media-gfx/openvdb[${PYTHON_USEDEP},abi3-compat(+),openvdb-compression(+)]
-		dev-cpp/tbb
-		>=dev-libs/c-blosc-1.5.2
-	)
-	sdl? ( media-libs/libsdl2[sound,joystick] )
-	sndfile? ( media-libs/libsndfile )
-	tiff? ( media-libs/tiff:0 )
-	valgrind? ( dev-util/valgrind )"
-
-DEPEND="${RDEPEND}
-	>=dev-cpp/eigen-3.2.8:3
-	nls? ( sys-devel/gettext )
-	doc? (
-		app-doc/doxygen[-nodot(-),dot(+),latex]
-		dev-python/sphinx[latex]
-	)"
-
-PATCHES=( "${FILESDIR}"/${PN}-fix-install-rules.patch
-	  "${FILESDIR}"/${PN}-2.78-eigen-3.3.1.patch )
-
-blender_check_requirements() {
-	[[ ${MERGE_TYPE} != binary ]] && use openmp && tc-check-openmp
-
-	if use doc; then
-		CHECKREQS_DISK_BUILD="4G" check-reqs_pkg_pretend
-	fi
-}
-
-pkg_pretend() {
-	blender_check_requirements
-}
-
-pkg_setup() {
-	blender_check_requirements
-	python-single-r1_pkg_setup
-}
-
-src_prepare() {
-	default
-
-	# we don't want static glew, but it's scattered across
-	# multiple files that differ from version to version
-	# !!!CHECK THIS SED ON EVERY VERSION BUMP!!!
-	local file
-	while IFS="" read -d $'\0' -r file ; do
-		sed -i -e '/-DGLEW_STATIC/d' "${file}" || die
-	done < <(find . -type f -name "CMakeLists.txt")
-
-	# Disable MS Windows help generation. The variable doesn't do what it
-	# sounds like.
-	sed -e "s|GENERATE_HTMLHELP      = YES|GENERATE_HTMLHELP      = NO|" \
-	    -i doc/doxygen/Doxyfile || die
-}
-
-src_configure() {
-	# FIX: forcing '-funsigned-char' fixes an anti-aliasing issue with menu
-	# shadows, see bug #276338 for reference
-	append-flags -funsigned-char
-	append-lfs-flags
-	append-cppflags -DOPENVDB_3_ABI_COMPATIBLE
-
-	local mycmakeargs=(
-		-DPYTHON_VERSION="${EPYTHON/python/}"
-		-DPYTHON_LIBRARY="$(python_get_library_path)"
-		-DPYTHON_INCLUDE_DIR="$(python_get_includedir)"
-		-DWITH_INSTALL_PORTABLE=OFF
-		-DWITH_PYTHON_INSTALL=OFF
-		-DWITH_PYTHON_INSTALL_NUMPY=OFF
-		-DWITH_STATIC_LIBS=OFF
-		-DWITH_SYSTEM_GLEW=ON
-		-DWITH_SYSTEM_OPENJPEG=ON
-		-DWITH_SYSTEM_EIGEN3=ON
-		-DWITH_SYSTEM_LZO=ON
-		-DWITH_C11=ON
-		-DWITH_CXX11=ON
-		-DWITH_BOOST=$(usex boost)
-		-DWITH_BULLET=$(usex bullet)
-		-DWITH_CODEC_FFMPEG=$(usex ffmpeg)
-		-DWITH_CODEC_SNDFILE=$(usex sndfile)
-		-DWITH_CUDA=$(usex cuda)
-		-DWITH_CYCLES_DEVICE_CUDA=$(usex cuda TRUE FALSE)
-		-DWITH_CYCLES=$(usex cycles)
-		-DWITH_CYCLES_OSL=OFF
-		-DWITH_LLVM=$(usex llvm)
-		-DWITH_FFTW3=$(usex fftw)
-		-DWITH_GAMEENGINE=$(usex game-engine)
-		-DWITH_HEADLESS=$(usex headless)
-		-DWITH_X11=$(usex !headless)
-		-DWITH_IMAGE_DDS=$(usex dds)
-		-DWITH_IMAGE_OPENEXR=$(usex openexr)
-		-DWITH_IMAGE_OPENJPEG=$(usex jpeg2k)
-		-DWITH_IMAGE_TIFF=$(usex tiff)
-		-DWITH_INPUT_NDOF=$(usex ndof)
-		-DWITH_INTERNATIONAL=$(usex nls)
-		-DWITH_JACK=$(usex jack)
-		-DWITH_MOD_FLUID=$(usex elbeem)
-		-DWITH_MOD_OCEANSIM=$(usex fftw)
-		-DWITH_OPENAL=$(usex openal)
-		-DWITH_OPENCL=OFF
-		-DWITH_CYCLES_DEVICE_OPENCL=OFF
-		-DWITH_OPENCOLORIO=$(usex colorio)
-		-DWITH_OPENCOLLADA=$(usex collada)
-		-DWITH_OPENIMAGEIO=$(usex openimageio)
-		-DWITH_OPENMP=$(usex openmp)
-		-DWITH_OPENSUBDIV=$(usex opensubdiv)
-		-DWITH_OPENVDB=$(usex openvdb)
-		-DWITH_OPENVDB_BLOSC=$(usex openvdb)
-		-DWITH_PLAYER=$(usex player)
-		-DWITH_SDL=$(usex sdl)
-		-DWITH_CXX_GUARDEDALLOC=$(usex debug)
-		-DWITH_ASSERT_ABORT=$(usex debug)
-		-DWITH_GTESTS=$(usex test)
-		-DWITH_DOC_MANPAGE=$(usex man)
-		-DWITH_MEM_JEMALLOC=$(usex jemalloc)
-		-DWITH_MEM_VALGRIND=$(usex valgrind)
-	)
-	cmake-utils_src_configure
-}
-
-src_compile() {
-	cmake-utils_src_compile
-
-	if use doc; then
-		# Workaround for binary drivers.
-		addpredict /dev/ati
-		addpredict /dev/nvidiactl
-
-		einfo "Generating Blender C/C++ API docs ..."
-		cd "${CMAKE_USE_DIR}"/doc/doxygen || die
-		doxygen -u Doxyfile || die
-		doxygen || die "doxygen failed to build API docs."
-
-		cd "${CMAKE_USE_DIR}" || die
-		einfo "Generating (BPY) Blender Python API docs ..."
-		"${BUILD_DIR}"/bin/blender --background --python doc/python_api/sphinx_doc_gen.py -noaudio || die "sphinx failed."
-
-		cd "${CMAKE_USE_DIR}"/doc/python_api || die
-		sphinx-build sphinx-in BPY_API || die "sphinx failed."
-	fi
-}
-
-src_test() {
-	if use test; then
-		einfo "Running Blender Unit Tests ..."
-		cd "${BUILD_DIR}"/bin/tests || die
-		local f
-		for f in *_test; do
-			./"${f}" || die
-		done
-	fi
-}
-
-src_install() {
-	# Pax mark blender for hardened support.
-	pax-mark m "${CMAKE_BUILD_DIR}"/bin/blender
-
-	if use doc; then
-		docinto "html/API/python"
-		dodoc -r "${CMAKE_USE_DIR}"/doc/python_api/BPY_API/.
-
-		docinto "html/API/blender"
-		dodoc -r "${CMAKE_USE_DIR}"/doc/doxygen/html/.
-	fi
-
-	cmake-utils_src_install
-
-	# fix doc installdir
-	docinto "html"
-	dodoc "${CMAKE_USE_DIR}"/release/text/readme.html
-	rm -r "${ED%/}"/usr/share/doc/blender || die
-
-	python_fix_shebang "${ED%/}/usr/bin/blender-thumbnailer.py"
-	python_optimize "${ED%/}/usr/share/blender/${MY_PV}/scripts"
-}
-
-pkg_preinst() {
-	gnome2_icon_savelist
-}
-
-pkg_postinst() {
-	elog
-	elog "Blender uses python integration. As such, may have some"
-	elog "inherit risks with running unknown python scripts."
-	elog
-	elog "It is recommended to change your blender temp directory"
-	elog "from /tmp to /home/user/tmp or another tmp file under your"
-	elog "home directory. This can be done by starting blender, then"
-	elog "dragging the main menu down do display all paths."
-	elog
-	ewarn
-	ewarn "This ebuild does not unbundle the massive amount of 3rd party"
-	ewarn "libraries which are shipped with blender. Note that"
-	ewarn "these have caused security issues in the past."
-	ewarn "If you are concerned about security, file a bug upstream:"
-	ewarn "  https://developer.blender.org/"
-	ewarn
-	gnome2_icon_cache_update
-	fdo-mime_desktop_database_update
-}
-
-pkg_postrm() {
-	gnome2_icon_cache_update
-	fdo-mime_desktop_database_update
-
-	ewarn ""
-	ewarn "You may want to remove the following directory."
-	ewarn "~/.config/${PN}/${MY_PV}/cache/"
-	ewarn "It may contain extra render kernels not tracked by portage"
-	ewarn ""
-}

diff --git a/media-gfx/blender/files/blender-2.78-eigen-3.3.1.patch b/media-gfx/blender/files/blender-2.78-eigen-3.3.1.patch
deleted file mode 100644
index 540aa6b..0000000
--- a/media-gfx/blender/files/blender-2.78-eigen-3.3.1.patch
+++ /dev/null
@@ -1,25 +0,0 @@
---- blender-2.78a-orig/extern/ceres/include/ceres/jet.h	2016-10-25 01:13:56.000000000 +1100
-+++ blender-2.78a/extern/ceres/include/ceres/jet.h	2017-01-11 13:27:24.708241265 +1100
-@@ -757,6 +757,7 @@
-   typedef ceres::Jet<T, N> Real;
-   typedef ceres::Jet<T, N> NonInteger;
-   typedef ceres::Jet<T, N> Nested;
-+  typedef ceres::Jet<T, N> Literal;
- 
-   static typename ceres::Jet<T, N> dummy_precision() {
-     return ceres::Jet<T, N>(1e-12);
-@@ -777,6 +778,14 @@
-     HasFloatingPoint = 1,
-     RequireInitialization = 1
-   };
-+  
-+  template<bool Vectorized>
-+  struct Div {
-+    enum {
-+      AVX = false,
-+      Cost = 1
-+    };
-+  };
- };
- 
- }  // namespace Eigen

diff --git a/media-gfx/blender/files/blender-fix-install-rules.patch b/media-gfx/blender/files/blender-fix-install-rules.patch
deleted file mode 100644
index e62aba8..0000000
--- a/media-gfx/blender/files/blender-fix-install-rules.patch
+++ /dev/null
@@ -1,16 +0,0 @@
-diff -purN a/source/creator/CMakeLists.txt b/source/creator/CMakeLists.txt
---- a/source/creator/CMakeLists.txt	2016-09-28 10:26:55.000000000 +0100
-+++ b/source/creator/CMakeLists.txt	2016-10-03 12:17:08.938928486 +0100
-@@ -328,12 +328,6 @@ endif()
- # Install Targets (Generic, All Platforms)
- 
- 
--# important to make a clean  install each time, else old scripts get loaded.
--install(
--	CODE
--	"file(REMOVE_RECURSE ${TARGETDIR_VER})"
--)
--
- if(WITH_PYTHON)
- 	# install(CODE "message(\"copying blender scripts...\")")
- 	

diff --git a/media-gfx/blender/metadata.xml b/media-gfx/blender/metadata.xml
deleted file mode 100644
index ef8d087..0000000
--- a/media-gfx/blender/metadata.xml
+++ /dev/null
@@ -1,101 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
-<pkgmetadata>
-	<maintainer restrict="&gt;=media-gfx/blender-2.77" type="person">
-		<email>j.scruggs@gmail.com</email>
-		<name>Jonathan Scruggs</name>
-	</maintainer>
-	<maintainer restrict="&gt;=media-gfx/blender-2.77" type="person">
-		<email>agrigo2001@yahoo.com.au</email>
-		<name>Adrian Grigo</name>
-	</maintainer>
-	<maintainer type="project">
-		<email>proxy-maint@gentoo.org</email>
-		<name>Proxy Maintainers</name>
-	</maintainer>
-	<maintainer type="project">
-		<email>graphics@gentoo.org</email>
-		<name>Gentoo Graphics Project</name>
-	</maintainer>
-
-	<longdescription>
-		Blender, the open source software for 3D modeling, animation,
-		rendering, post-production, interactive creation and playback.
-	</longdescription>
-
-	<use>
-		<flag name="boost">
-			Enable features depending on boost.
-		</flag>
-		<flag name="bullet">
-			Enable Bullet (Physics Engine).
-		</flag>
-		<flag name="collada">
-			Add support for Collada interchange format through
-			<pkg>media-libs/opencollada</pkg>.
-		</flag>
-		<flag name="colorio">
-			Enable OpenColorIO color management through
-			<pkg>media-libs/opencolorio</pkg>.
-		</flag>
-		<flag name="cuda">
-			Build cycles renderer with nVidia CUDA support.
-		</flag>
-		<flag name="cycles">
-			Build cycles renderer (requires <pkg>media-libs/openimageio</pkg> and
-			<pkg>dev-libs/boost</pkg>).
-		</flag>
-		<flag name="dds">
-			Adds DDS textures support to Blender.
-		</flag>
-		<flag name="doc">
-			Build C and Python API documentation directly from the source code.
-		</flag>
-		<flag name="elbeem">
-			Adds surface fluid simulation to Blender using El'Beem library.
-		</flag>
-		<flag name="game-engine" restrict="&gt;=media-gfx/blender-2.50">
-			Adds Game Engine support to Blender.
-		</flag>
-		<flag name="headless" restrict="&gt;=media-gfx/blender-2.77">
-			Build without graphical support (renderfarm, server mode only).
-		</flag>
-		<flag name="jemalloc" restrict="&gt;=media-gfx/blender-2.77">
-			Use <pkg>dev-libs/jemalloc</pkg> for memory allocation.
-		</flag>
-		<flag name="llvm" restrict="&gt;=media-gfx/blender-2.77">
-			Enable features depending on llvm.
-		</flag>
-		<flag name="man" restrict="&gt;=media-gfx/blender-2.77">
-			Build and install man pages.
-		</flag>
-		<flag name="ndof">
-			Enable NDOF input devices (SpaceNavigator and friends).
-		</flag>
-		<flag name="openimageio">Enable OpenImageIO Support</flag>
-		<flag name="opennl" restrict="&lt;=media-gfx/blender-2.73">
-			Enable use of Open Numerical Library
-		</flag>
-		<flag name="opensubdiv" restrict="&gt;=media-gfx/blender-2.77">
-			Add rendering support for OpenSubdiv from Dreamworks Animation
-			through <pkg>media-libs/opensubdiv</pkg>.
-		</flag>
-		<flag name="openvdb" restrict="&gt;=media-gfx/blender-2.77">
-			Add GPU preview rendering. Only works with nVidia cards.
-		</flag>
-		<flag name="player">
-			Build the Blender Player. This requires the Game engine.
-		</flag>
-		<flag name="redcode" restrict="&lt;=media-gfx/blender-2.73">
-			This flag adds support for the RED CODE digital camera format (5K HD
-			images *.r3d) - EXPERIMENTAL.
-		</flag>
-		<flag name="test" restrict="&gt;=media-gfx/blender-2.77">
-			Build the provided unit tests.
-		</flag>
-		<flag name="valgrind" restrict="&gt;=media-gfx/blender-2.77">
-			Add support for memory debugging using
-			<pkg>dev-util/valgrind</pkg>
-		</flag>
-	</use>
-</pkgmetadata>

diff --git a/media-sound/patchage/Manifest b/media-sound/patchage/Manifest
index 05dd597..ead99f8 100644
--- a/media-sound/patchage/Manifest
+++ b/media-sound/patchage/Manifest
@@ -1,4 +1,4 @@
-AUX patchage-0.5.0-desktop.patch 224 SHA256 0d078e63c5dbdde508be319e7180fa1694e7575414e0cdc062b0559d66da389c SHA512 ae000bc340d48a9bb89fc067b4b1abba3de39ef9be086c7eeffae71ddca6172ce500ea6ea9854fde8cc19df3117195cb6fdb4ecd0867aa69f332ac0a7d377b69 WHIRLPOOL 4edab48a5b35eba0eb0dbcd6545c09581f85c1910a9a81a16b22f8d8451c053e6567fa8aa5ec98e86a3b74b2639344239ec90508a625e0ac7846db22b3f6d004
-DIST patchage-1.0.0.tar.bz2 414300 SHA256 6b21d74ef1b54fa62be8d6ba65ca8b61c7b6b5230cc85e093527081239bfeda9 SHA512 0a2be0183257a34a68ec84e6fb17d29a3d8ba7dd54a05fcdd13784ac8f5621eb7a376f17d42168958f5e1a8dab8858a9c5c8c867aa1838736cc2b7775f75f510 WHIRLPOOL 86278e94a71069e86a92d3f00ae61a99aca710738df79345ffa47d797df340719dce452c19b0a6165420bba97f3fc7d27f918dc4e294e90bfe4158d7a4f333bb
-EBUILD patchage-1.0.0-r1.ebuild 1019 SHA256 0709005d9912203d932df8d63290337170f85592aa5887afde9c79c233b0e2c0 SHA512 505b76f877d740939e12849437403f6a76d6bc4d2864be955b6d2727b4a8e413a1236678fb97daf9c3d603d833453b0779d8838ab0db0b5cabcbb6b7901fcdf3 WHIRLPOOL 91f9c63d77b1f74045538462bd8ed2d0e0e152059c0a75a31a2e27164d20d99a8fab040f9a2bebbff23dd85e7a17ec95da89fec19130fbb5bc8e473679593193
-MISC metadata.xml 600 SHA256 328d5523796f70d83115dfb2ca81d1482e3f2b6fd3fecb4aad9173e1b3fc400f SHA512 d08bd05a57ca03d9f91c0904f9a1b92e332e4475fd6729feefb63c75df2c5ad6eebd718b6e7db8482819f463606dcca888400c1560dd9b2c9e426c26634ece77 WHIRLPOOL d2ae8940cafd4cc656938963bded9738c790a8022edb43f579f23014f05073d1ff7792a177683b760a3722c1c1634bdc656fd265195b2864f9d87402511734b9
+AUX patchage-0.5.0-desktop.patch 224 BLAKE2B 74bae167f890e5c03043f8906a18576183d2519141d5413780e03091c9b5432044185812d192c12073eadaab0823e8ef1d25c81d57e8614abd120440763d6776 SHA512 ae000bc340d48a9bb89fc067b4b1abba3de39ef9be086c7eeffae71ddca6172ce500ea6ea9854fde8cc19df3117195cb6fdb4ecd0867aa69f332ac0a7d377b69
+DIST patchage-1.0.0.tar.bz2 414300 BLAKE2B 4f2fd0a0911cb261508883b73305c2736143d9a165b854b8b4042a3c0f6454e1f79bc3b6cd5d28844e1cdeeaf7dd20effc164aa1151390d4c45af0a339c2ef5a SHA512 0a2be0183257a34a68ec84e6fb17d29a3d8ba7dd54a05fcdd13784ac8f5621eb7a376f17d42168958f5e1a8dab8858a9c5c8c867aa1838736cc2b7775f75f510
+EBUILD patchage-1.0.0-r2.ebuild 1019 BLAKE2B bfbc0c49d65fa91a40eeddef8896bd98bcfb0f0fe9f8a2965be4ed00a09bffef3eca7422107758a89d64bd5ce2d353ac24d156bc1ee302d574f338f68d7ddcf8 SHA512 505b76f877d740939e12849437403f6a76d6bc4d2864be955b6d2727b4a8e413a1236678fb97daf9c3d603d833453b0779d8838ab0db0b5cabcbb6b7901fcdf3
+MISC metadata.xml 600 BLAKE2B 89bfd8b0da00e90fdd9fb85ca8604c80de0e2a8e471309aec0022f31f844d06c914c7536a3928179256f85b313fefaa3dec8937159f7131a03838276cca86162 SHA512 d08bd05a57ca03d9f91c0904f9a1b92e332e4475fd6729feefb63c75df2c5ad6eebd718b6e7db8482819f463606dcca888400c1560dd9b2c9e426c26634ece77

diff --git a/media-sound/patchage/patchage-1.0.0-r1.ebuild b/media-sound/patchage/patchage-1.0.0-r2.ebuild
similarity index 100%
rename from media-sound/patchage/patchage-1.0.0-r1.ebuild
rename to media-sound/patchage/patchage-1.0.0-r2.ebuild

diff --git a/media-sound/rosegarden/Manifest b/media-sound/rosegarden/Manifest
deleted file mode 100644
index 8ed579c..0000000
--- a/media-sound/rosegarden/Manifest
+++ /dev/null
@@ -1,3 +0,0 @@
-DIST rosegarden-17.04.tar.bz2 6454603 SHA256 988a6141c5b0a8e85c029f650de78bf57100c4d778c22d0194b0692584640ece SHA512 23240522cba8cc3b5d0e3b29ee5b871c911c7634d74f65d04b353f59747bdf6a1bfd9985f16ab331ea2399a797e66b2ebd110e192bb52ba4df453d42d7b8f73b WHIRLPOOL 91b7d8d74578e8666de607f55cecdfc57dde9b9b4e2367d7b5a2f1d3ae76eaf3d0ef6b62d78ae4f9f080448019caf8e0580d5c1e30f56708c9b2dcc2c3113aa9
-EBUILD rosegarden-17.04-r1.ebuild 1202 SHA256 d904a72c4da845efa51ba8afbf3a80efa9b1dd4c1afa84d4baab6d5b96769932 SHA512 3be5bfa03535520bff740a632080947ca32af0e106994de97fc0277987ae808c283dd64a3b4fa4103d74e463ea866e5789a9bf0dc8b2df9d93dddc3802c8a6d8 WHIRLPOOL c8db3a68cb96715008db0dbf8ce17987fd1685a1990e17b1c8fd5caf3020ce173d357d695074df701026ce00fcb2c6d17691b31b11815d3d434950a3cc51bca4
-MISC metadata.xml 349 SHA256 97a83e4c15e9cdbac3ca20099643e4dd0a8ba32661aa2b87febcd48445aa1613 SHA512 1790ba05a4f13f358de6890c908b2f1eb0581143063b7237220dd05aba31d16d68f2cf6c4712a08894909b7de5306d592807e9f3171b66b72fd867bd339a0cee WHIRLPOOL ff9d9c24a41d18572aa6396d46f3c2a8646663b0bca8ec7d70459a9e975d10440d63c69ad37e6b4495615c6252ca07246afbaa957115c0d1642668dc976733c7

diff --git a/media-sound/rosegarden/metadata.xml b/media-sound/rosegarden/metadata.xml
deleted file mode 100644
index c399767..0000000
--- a/media-sound/rosegarden/metadata.xml
+++ /dev/null
@@ -1,11 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
-<pkgmetadata>
-  <maintainer type="project">
-    <email>proaudio@gentoo.org</email>
-    <name>Gentoo ProAudio Project</name>
-  </maintainer>
-  <upstream>
-    <remote-id type="sourceforge">rosegarden</remote-id>
-  </upstream>
-</pkgmetadata>

diff --git a/media-sound/rosegarden/rosegarden-17.04-r1.ebuild b/media-sound/rosegarden/rosegarden-17.04-r1.ebuild
deleted file mode 100644
index 5403601..0000000
--- a/media-sound/rosegarden/rosegarden-17.04-r1.ebuild
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 1999-2017 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=5
-inherit cmake-utils eutils fdo-mime gnome2-utils
-
-DESCRIPTION="MIDI and audio sequencer and notation editor"
-HOMEPAGE="http://www.rosegardenmusic.com/"
-SRC_URI="mirror://sourceforge/${PN}/${P}.tar.bz2"
-
-LICENSE="GPL-2"
-SLOT="0"
-KEYWORDS="~amd64 ~ppc ~x86"
-IUSE="lirc"
-
-RDEPEND="
-	dev-qt/qtgui:5
-	dev-qt/qtcore:5
-	dev-qt/qtwidgets:5
-	dev-qt/qtxml:5
-	dev-qt/qtnetwork:5
-	dev-qt/qtprintsupport:5
-	media-libs/ladspa-sdk:=
-	x11-libs/libSM:=
-	virtual/jack
-	media-libs/alsa-lib:=
-	>=media-libs/dssi-1.0.0:=
-	media-libs/liblo:=
-	media-libs/liblrdf:=
-	sci-libs/fftw:3.0
-	media-libs/libsamplerate:=
-	media-libs/libsndfile:=
-	sys-libs/zlib:=
-	lirc? ( app-misc/lirc:= )"
-DEPEND="${RDEPEND}
-	dev-qt/qttest:5
-	virtual/pkgconfig"
-
-src_configure() {
-	local mycmakeargs=(
-		"-DDISABLE_LIRC=$(usex lirc OFF ON)"
-	)
-	cmake-utils_src_configure
-}
-
-pkg_preinst() {
-	gnome2_icon_savelist
-}
-
-pkg_postinst() {
-	gnome2_icon_cache_update
-	fdo-mime_desktop_database_update
-	fdo-mime_mime_database_update
-}
-
-pkg_postrm() {
-	gnome2_icon_cache_update
-	fdo-mime_desktop_database_update
-	fdo-mime_mime_database_update
-}

diff --git a/media-video/dvdstyler/ChangeLog b/media-video/dvdstyler/ChangeLog
deleted file mode 100644
index 9d0d41c..0000000
--- a/media-video/dvdstyler/ChangeLog
+++ /dev/null
@@ -1,276 +0,0 @@
-# ChangeLog for media-video/dvdstyler
-# Copyright 1999-2012 Gentoo Foundation; Distributed under the GPL v2
-# $Header: /var/cvsroot/gentoo-x86/media-video/dvdstyler/ChangeLog,v 1.53 2012/05/05 08:58:59 jdhore Exp $
-
-  05 May 2012; Jeff Horelick <jdhore@gentoo.org> dvdstyler-2.1.ebuild:
-  dev-util/pkgconfig -> virtual/pkgconfig
-
-  23 Dec 2011; Samuli Suominen <ssuominen@gentoo.org> dvdstyler-2.1.ebuild:
-  Fix building with FFmpeg >= 0.9 wrt #395793 by Helmut Jarausch
-
-  22 Dec 2011; Samuli Suominen <ssuominen@gentoo.org>
-  -dvdstyler-2.0_rc1.ebuild, -dvdstyler-2.0_rc2.ebuild:
-  old
-
-*dvdstyler-2.1 (22 Dec 2011)
-
-  22 Dec 2011; Samuli Suominen <ssuominen@gentoo.org> +dvdstyler-2.1.ebuild:
-  Version bump. Missing -ljpeg (in wxVillaLib/ for imagjpg.cpp) wrt #367863 by
-  Diego Elio Pettenò.
-
-*dvdstyler-2.0_rc2 (16 Oct 2011)
-
-  16 Oct 2011; Samuli Suominen <ssuominen@gentoo.org>
-  +dvdstyler-2.0_rc2.ebuild:
-  Version bump.
-
-  16 Oct 2011; Samuli Suominen <ssuominen@gentoo.org> -dvdstyler-1.8.1.ebuild,
-  -files/dvdstyler-1.8.1-cast.patch:
-  old
-
-  09 Oct 2011; Samuli Suominen <ssuominen@gentoo.org> -dvdstyler-1.7.4.ebuild,
-  -files/dvdstyler-1.7.4-autoconf.patch, -dvdstyler-1.8.1_beta1.ebuild:
-  old
-
-*dvdstyler-2.0_rc1 (09 Oct 2011)
-
-  09 Oct 2011; Samuli Suominen <ssuominen@gentoo.org>
-  +dvdstyler-2.0_rc1.ebuild:
-  Version bump wrt #353478 by Thomas Rausch. This will also fix building with
-  recent FFmpeg wrt #374797 by Diego Elio Pettenò.
-
-  16 Sep 2011; Steve Dibb <beandog@gentoo.org> dvdstyler-1.7.4.ebuild,
-  dvdstyler-1.8.1_beta1.ebuild, dvdstyler-1.8.1.ebuild:
-  Use ffmpeg virtual, bug 362129
-
-  13 Aug 2011; Kacper Kowalik <xarthisius@gentoo.org> dvdstyler-1.7.4.ebuild,
-  dvdstyler-1.8.1_beta1.ebuild, dvdstyler-1.8.1.ebuild:
-  Dropped ppc keywords wrt #361737
-
-  20 Jul 2011; Pacho Ramos <pacho@gentoo.org> metadata.xml:
-  Drop maintainer due to retirement, bug #34534
-
-*dvdstyler-1.8.1 (18 Jul 2010)
-
-  18 Jul 2010; Daniel Black <dragonheart@gentoo.org>
-  +dvdstyler-1.8.1.ebuild, +files/dvdstyler-1.8.1-cast.patch:
-  version bump as per bug #327289.
-
-*dvdstyler-1.8.1_beta1 (28 Apr 2010)
-
-  28 Apr 2010; Daniel Pielmeier <billie@gentoo.org>
-  +dvdstyler-1.8.1_beta1.ebuild:
-  Version bump.
-
-  17 Feb 2010; Ryan Hill <dirtyepic@gentoo.org> dvdstyler-1.7.4.ebuild:
-  Fix wxsvg dependency to account for the removal of the ffmpeg USE flag in
-  later versions.
-
-*dvdstyler-1.7.4 (14 Oct 2009)
-
-  14 Oct 2009; Daniel Black <dragonheart@gentoo.org>
-  dvdstyler-1.7.4_rc1.ebuild, +dvdstyler-1.7.4.ebuild,
-  +files/dvdstyler-1.7.4-autoconf.patch:
-  add missing xmlto dependency as was noticed on irc #gentoo (by I forget
-  who). Added version bump
-
-*dvdstyler-1.7.4_rc1 (16 Sep 2009)
-
-  16 Sep 2009; Samuli Suominen <ssuominen@gentoo.org>
-  +dvdstyler-1.7.4_rc1.ebuild:
-  Version bump (#284035).
-
-*dvdstyler-1.7.3_beta3_p1 (05 Jul 2009)
-
-  05 Jul 2009; Samuli Suominen <ssuominen@gentoo.org>
-  +dvdstyler-1.7.3_beta3_p1.ebuild,
-  +files/dvdstyler-1.7.3_beta3_p1-link_to_wxgtk_adv.patch:
-  Version bump wrt #273422.
-
-*dvdstyler-1.7.3_beta2 (29 Apr 2009)
-
-  29 Apr 2009; Daniel Black <dragonheart@gentoo.org>
-  +dvdstyler-1.7.3_beta2.ebuild:
-  version bump - bug #265650 - ebuild by Fabio Correa
-
-*dvdstyler-1.7.2 (29 Apr 2009)
-
-  29 Apr 2009; Daniel Black <dragonheart@gentoo.org>
-  +files/dvdstyler-1.7.2-skipxmlvalidation.patch,
-  dvdstyler-1.7.2_beta4.ebuild, +dvdstyler-1.7.2.ebuild:
-  version bump as per bug #262454. Ebuild changes thanks to Fabio
-
-  16 Feb 2009; Daniel Black <dragonheart@gentoo.org>
-  -dvdstyler-1.5-r1.ebuild, -dvdstyler-1.5.1.ebuild,
-  -dvdstyler-1.5.1_p2.ebuild, -dvdstyler-1.7.2_beta3.ebuild:
-  removed as it no longer works - bug #25916 comment 0
-
-  16 Feb 2009; Daniel Black <dragonheart@gentoo.org>
-  -dvdstyler-1.7.2_beta3.ebuild, dvdstyler-1.7.2_beta4.ebuild:
-  fix minimum ffmpeg version - bug #257964 thanks Jordan Bradley. Fixed bug
-  #259160 - ffmpeg required USE=encoder to be useable. Thanks Ed Criscuolo
-
-*dvdstyler-1.7.2_beta4 (08 Feb 2009)
-
-  08 Feb 2009; Daniel Black <dragonheart@gentoo.org>
-  +dvdstyler-1.7.2_beta4.ebuild:
-  version bump as per bug #257121 thanks Daniel Pielmeier and Bernd Butscheidt
-
-*dvdstyler-1.7.2_beta3 (18 Jan 2009)
-*dvdstyler-1.7.1 (18 Jan 2009)
-
-  18 Jan 2009; Daniel Black <dragonheart@gentoo.org>
-  +dvdstyler-1.7.1.ebuild, +dvdstyler-1.7.2_beta3.ebuild:
-  version bump as per bug #215665. Thanks to all that waited so long and
-  provided feedback
-
-  03 Jan 2009; Christoph Mende <angelos@gentoo.org> dvdstyler-1.5.1.ebuild,
-  dvdstyler-1.5.1_p2.ebuild:
-  Bumped to EAPI2, changed netpbm dependency to include [jpeg], fixed wxGTK
-  dependency
-
-  01 Apr 2008; Daniel Black <dragonheart@gentoo.org>
-  dvdstyler-1.5-r1.ebuild, dvdstyler-1.5.1.ebuild,
-  dvdstyler-1.5.1_p2.ebuild:
-  remove duplicate desktop entry - bug #208806 thanks to Stefan Briesenick
-
-*dvdstyler-1.5.1_p2 (24 Dec 2007)
-
-  24 Dec 2007; Daniel Black <dragonheart@gentoo.org>
-  +dvdstyler-1.5.1_p2.ebuild:
-  version bump to fix some serious problems - bug #203057 thanks to Michael
-
-*dvdstyler-1.5.1 (16 Aug 2007)
-
-  16 Aug 2007; Daniel Black <dragonheart@gentoo.org>
-  +dvdstyler-1.5.1.ebuild:
-  version bump
-
-  24 Jul 2007; Daniel Black <dragonheart@gentoo.org>
-  -files/dvdstyler-1.4-gcc4.patch, -dvdstyler-1.4.ebuild,
-  -dvdstyler-1.5.ebuild:
-  old version cleanout
-
-  24 Jul 2007; Christian Faulhammer <opfer@gentoo.org>
-  dvdstyler-1.5-r1.ebuild:
-  stable x86, bug 185864
-
-  02 Jul 2007; Daniel Black <dragonheart@gentoo.org>
-  dvdstyler-1.5-r1.ebuild:
-  added missing media-libs/netpbm dependency as per bug #183848 thanks to
-  Paulo Aragao
-
-*dvdstyler-1.5-r1 (19 Jun 2007)
-
-  19 Jun 2007; Daniel Black <dragonheart@gentoo.org>
-  +dvdstyler-1.5-r1.ebuild:
-  better make_desktop entry thanks to Chris Mayo bug #181958
-
-*dvdstyler-1.5 (10 Jun 2007)
-
-  10 Jun 2007; Daniel Black <dragonheart@gentoo.org> +dvdstyler-1.5.ebuild:
-  version bump thanks to all supporters in bug #109894
-
-  03 Feb 2007; Diego Pettenò <flameeyes@gentoo.org> ChangeLog:
-  Regenerate digest in Manifest2 format.
-
-  29 Dec 2005; Mark Loeser <halcy0n@gentoo.org>
-  +files/dvdstyler-1.4-gcc4.patch, dvdstyler-1.4.ebuild:
-  Fix compilation with gcc-4.0; bug #113927
-
-  15 Dec 2005; Donnie Berkholz <spyderous@gentoo.org>; dvdstyler-1.4.ebuild:
-  Port to modular X. Remove virtual/x11, it comes in from gtk+.
-
-  07 Sep 2005; Daniel Black <dragonheart@gentoo.org> -dvdstyler-1.31.ebuild:
-  removed 1.31 as per bug #104872 to remove version problem. ~amd64 still has
-  issues according to Jim in bug #104872
-
-  05 Sep 2005; Daniel Black <dragonheart@gentoo.org> dvdstyler-1.31.ebuild:
-  drop x86 keyword to avoid downgrade - bug #104872
-
-  05 Sep 2005; Daniel Black <dragonheart@gentoo.org> dvdstyler-1.4.ebuild:
-  x86 stable on 1.4 plan to remove 1.31
-
-*dvdstyler-1.4 (04 Sep 2005)
-
-  04 Sep 2005; Daniel Black <dragonheart@gentoo.org> +dvdstyler-1.4.ebuild,
-  dvdstyler-1.31.ebuild, -dvdstyler-1.40.ebuild:
-  QA clean - removed obsolete build commands, corrected versioning, removed
-  INSTALL and README from installation. Inspired by flameeyes
-
-  08 Jun 2005; David Holm <dholm@gentoo.org> dvdstyler-1.40.ebuild:
-  Added to ~ppc.
-
-  07 Jun 2005; Jan Brinkmann <luckyduck@gentoo.org> dvdstyler-1.31.ebuild,
-  dvdstyler-1.40.ebuild:
-  We now depend on =wxGTK-2.4*, see #93746 for details.
-
-*dvdstyler-1.40 (07 Jun 2005)
-
-  07 Jun 2005; Jan Brinkmann <luckyduck@gentoo.org> +dvdstyler-1.40.ebuild:
-  New upstream release, fixes #95328.
-
-  22 Apr 2005; Jan Brinkmann <luckyduck@gentoo.org>
-  -files/1.1-AutoMakefile.patch, -files/1.1-code.patch,
-  -files/1.1-gcc34.patch, -files/1.3_beta-code.patch, -dvdstyler-1.1.ebuild,
-  dvdstyler-1.31.ebuild, -dvdstyler-1.3_beta.ebuild:
-  stable on amd64 and x86, did some cleanup
-
-  22 Apr 2005; Jan Brinkmann <luckyduck@gentoo.org> dvdstyler-1.31.ebuild:
-  stable on amd64 and x86
-
-  19 Apr 2005; Bryan Østergaard <kloeri@gentoo.org> metadata.xml:
-  Remove retired developer from metadata.xml.
-
-  30 Mar 2005; Jan Brinkmann <luckyduck@gentoo.org> dvdstyler-1.31.ebuild:
-  dvdstyler now generates desktop file using make_desktop_entry, fixes #82209
-
-*dvdstyler-1.31 (06 Feb 2005)
-
-  06 Feb 2005; <augustus@gentoo.org> +dvdstyler-1.31.ebuild:
-  Added new build 1.31.
-
-  15 Jan 2005; Jan Brinkmann <luckyduck@gentoo.org> dvdstyler-1.1.ebuild,
-  dvdstyler-1.3_beta.ebuild:
-  changed app-cdr/cdrtools dependency to virtual/cdrtools. see bug #77817.
-
-*dvdstyler-1.3_beta (21 Nov 2004)
-
-  21 Nov 2004; Daniel Black <dragonheart@gentoo.org>
-  +files/1.3_beta-code.patch, +dvdstyler-1.3_beta.ebuild:
-  version bump.
-
-*dvdstyler-1.1 (21 Nov 2004)
-
-  21 Nov 2004; Daniel Black <dragonheart@gentoo.org>
-  -files/1.05-AutoMakefile.patch, -files/1.05-code.patch,
-  -files/1.05-gcc34.patch, +files/1.1-AutoMakefile.patch,
-  +files/1.1-code.patch, +files/1.1-gcc34.patch, -dvdstyler-1.05-r1.ebuild,
-  +dvdstyler-1.1.ebuild:
-  version bump and cleanout. Ebuild and patches thanks to Philippe Weibel
-  <philippe.weibel@free.fr> and Patrick Dawson <pkdawson@gentoo.org> in bug
-  #47899.
-
-  18 Oct 2004; Danny van Dyk <kugelfang@gentoo.org>
-  dvdstyler-1.05-r1.ebuild:
-  Marked ~amd64.
-
-  04 Oct 2004; Patrick Dawson <pkdawson@gentoo.org> +files/1.05-gcc34.patch,
-  dvdstyler-1.05-r1.ebuild:
-  gcc-3.4 fix, closes bug #65983.
-
-*dvdstyler-1.05-r1 (05 Aug 2004)
-
-  05 Aug 2004; Daniel Black <dragonheart@gentoo.org>
-  files/1.05-AutoMakefile.patch, +dvdstyler-1.05-r1.ebuild,
-  -dvdstyler-1.05.ebuild:
-  Changed to use installed dev-libs/expat rather than one that came with the
-  package. Also moved silence.mp2 to the correct directory.
-
-*dvdstyler-1.05 (30 Jul 2004)
-
-  30 Jul 2004; Daniel Black <dragonheart@gentoo.org> +metadata.xml,
-  +dvdstyler-1.05.ebuild, +files/1.05-code.patch, 
-  +files/1.05-AutoMakefile.patch:
-  Initial import as per bug #47899. Thanks to Ian Haylock
-  <haylocki@v21mail.co.uk> for the bug.

diff --git a/media-video/dvdstyler/Manifest b/media-video/dvdstyler/Manifest
deleted file mode 100644
index 0e61149..0000000
--- a/media-video/dvdstyler/Manifest
+++ /dev/null
@@ -1,7 +0,0 @@
-AUX dvdstyler-1.7.4-autoconf.patch 3058 SHA256 b85ca996bb831f79d2e61497d09f19313db097207935d3727f145354f8108143 SHA512 64ec0f4b0718fc8b89dfd78691fffd2a50514ea4fe91a56985d3b775394204d91b34858a49e454a0722f1bc4ed3f98622315f51adacfea7ccb160c2e0af4f1c4 WHIRLPOOL aa123edc1e90d96329d8d7dd8fb487243501d10a374b6a23d7e89765c7bf9bfc254a679442cafc23ef43deb9873aa289f41c62411103e5268c1ff91f737eb78c
-AUX dvdstyler-1.8.1-cast.patch 437 SHA256 5efcb4b2f943e2e3ad5b4d0e5891c69655ec0aec912a45c61eec32b8c5862070 SHA512 1e825a9911508a2e43ab41b81cf511b6bd43f15b0394df96a89231e0d97c0bac1259701fcf3b71cac781db74925710d4c8082b4dac4605187b7d5d81aec2f734 WHIRLPOOL e5a4f82e1a6a0d9f6c63e7d653ee2e2958f3f8f559d4532675e452f1f1ac6ac1a6ff1b4ce134f6b3cb8b124f4112b01663657ab9400c578c7cbee1288c080207
-AUX dvdstyler-1.8.1-fix_enum_error.patch 562 SHA256 d9a11f38dd0245ff0ce687296798139100171f484a7c90b43ff8b37c201a8240 SHA512 af5cdf9d6686b510794c3077ed1ec4551aa48b198b3570db9d4948a5a01246ee496e1c8bdd7b96c9c23372d0cc6d9e415df79a52c4f0f4c2b6cf3050cfded34a WHIRLPOOL 978f08233c30aa52220e89da3e9587753caf372100362d4097f3e856eeddf3b49bb179201994199a2eede0ec1fe44b954465f07b9eeb8795bcbfa1b7e99e1ec7
-DIST DVDStyler-2.3.tar.bz2 4458253 SHA256 42030f845d24db229e6d5f74fc7ad3d0ec2f2fe6046657aa06c48356b94670b1 SHA512 f7005d0a04968ffa2250a6b298b6b9612dc70611619667455e728131a23ec53d1ee9212839f13426508e419b00aab4c646ff9722885940fef80418be3bc66e9a WHIRLPOOL b0ef1c4a49e54f80e8c9e70280676ba1dac216d5b8d81fac514d193b0fe63341473d0f9a20994a3bb8e2fb6960b6d8afae3809bbee81d1df490d23d95ff44c4d
-EBUILD dvdstyler-2.3.ebuild 1650 SHA256 20046e0f945e7d49343062e1f0865cd04b409a59616073d07732433f9c9b6bca SHA512 81b68756ae2994e40bdd76eaf7ebce0009d0239e1744244866c6e9fc2bfb53220829b085fa1bcba59e8e5eb20bdd70835127400e7b4c0c8aaf58f187ea6645c5 WHIRLPOOL 077e87e69df04243c2be9040685f1446c62ed3d5dc10fbabbdb6d70f11e71ac30536988e012c12c97c89afb38a790379cb6bac987a7be516f133dcc8e6bbccdc
-MISC ChangeLog 9870 SHA256 d06504f6cfe54236c1551ce3b265393ec95e32774e7fea0ce4a0d09ceecbb84a SHA512 5fa60e5faf5ef59ad026628775dfb4e2fbae886c269c20a1be60310757a97cb91325633a1039a3638ce26c8f1f048d865dfaae96e9a3aac1c530098f6da29397 WHIRLPOOL efe178a726ee10999665ff8e09e19125b2b642d3bc4033b543148fd7b490c5db691d4744226a03145bf7467a84c8817cbfe5ea88f5b37ff7b77511b9894328ea
-MISC metadata.xml 158 SHA256 f9dd4fb4a7f368e776ea0d7d394dad8f3a674e67f6103d6fb58a8a98971eeee4 SHA512 0bfb0265d3c42ba1958f8e589316112dba381e7701773668443cf0afe678a8d80675392efb2713e3d376b85b03a5b2bfb390c94411e0c6cf8a223b26aa9a04ca WHIRLPOOL 447b37b9f46058b1adc8d30a57460a8ab6b5ec149870c5ea385956cae9dd10a99b36e63526f2d13cabf13a4185f8b4e22a78156ab141a6e397bba298ab7efb2a

diff --git a/media-video/dvdstyler/dvdstyler-2.3.ebuild b/media-video/dvdstyler/dvdstyler-2.3.ebuild
deleted file mode 100644
index 3c0180b..0000000
--- a/media-video/dvdstyler/dvdstyler-2.3.ebuild
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 1999-2012 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo-x86/media-video/dvdstyler/dvdstyler-2.1.ebuild,v 1.4 2012/05/05 08:58:59 jdhore Exp $
-
-EAPI=4
-
-MY_P=${P/dvds/DVDS}
-WX_GTK_VER=2.8
-
-inherit wxwidgets
-
-DESCRIPTION="A cross-platform free DVD authoring application"
-HOMEPAGE="http://www.dvdstyler.org/"
-SRC_URI="mirror://sourceforge/${PN}/${MY_P}.tar.bz2"
-
-LICENSE="GPL-2"
-SLOT="0"
-KEYWORDS="~amd64 ~x86"
-IUSE="debug gnome kernel_linux"
-
-COMMON_DEPEND=">=app-cdr/dvd+rw-tools-7.1
-	>=media-libs/libexif-0.6.16
-	>=media-libs/wxsvg-1.1.9
-	>=media-video/dvdauthor-0.7.0
-	>=media-video/xine-ui-0.99.1
-	virtual/cdrtools
-	>=virtual/ffmpeg-0.6.90[encode]
-	virtual/jpeg
-	x11-libs/wxGTK:2.8[gstreamer,X]
-	gnome? ( >=gnome-base/libgnomeui-2 )
-	kernel_linux? ( sys-fs/udev )"
-RDEPEND="${COMMON_DEPEND}
-	>=app-cdr/dvdisaster-0.72.2"
-DEPEND="${COMMON_DEPEND}
-	app-arch/zip
-	app-text/xmlto
-	virtual/pkgconfig
-	sys-devel/gettext"
-
-S=${WORKDIR}/${MY_P}
-
-src_prepare() {
-	use gnome || sed -i -e '/PKG_CONFIG/s:libgnomeui-2.0:dIsAbLeAuToMaGiC&:' configure
-
-	# rmdir: failed to remove `tempfoobar': Directory not empty
-	sed -i -e '/rmdir "$$t"/d' docs/Makefile.in || die
-
-	sed -i -e 's:@LIBS@:& -ljpeg:' wxVillaLib/Makefile.in || die #367863
-
-	if has_version ">=media-video/ffmpeg-0.9"; then
-		sed -i -e '/^#i/s:vsink_buffer:buffersink:' src/mediatrc_ffmpeg.cpp || die #395793
-	fi
-}
-
-src_configure() {
-	econf \
-	 	--docdir=/usr/share/doc/${PF} \
-		$(use_enable debug) \
-		--with-wx-config=${WX_CONFIG}
-}
-
-src_install() {
-	default
-	rm -f "${ED}"usr/share/doc/${PF}/{COPYING*,INSTALL*}
-}

diff --git a/media-video/dvdstyler/files/dvdstyler-1.7.4-autoconf.patch b/media-video/dvdstyler/files/dvdstyler-1.7.4-autoconf.patch
deleted file mode 100644
index a9d6a2b..0000000
--- a/media-video/dvdstyler/files/dvdstyler-1.7.4-autoconf.patch
+++ /dev/null
@@ -1,77 +0,0 @@
---- /tmp/DVDStyler-1.7.4/./src/Makefile.am	2009-09-04 15:49:32.000000000 +1000
-+++ ./src/Makefile.am	2009-10-14 15:34:46.000000000 +1100
-@@ -7,7 +7,7 @@
-  VobListBox.cpp TitlePropDlg.cpp TitlesetManager.cpp\
-  BurnDlg.cpp ProgressDlg.cpp DirCtrl.cpp StatusBar.cpp MainWin.cpp dvdstyler.cpp
- 
--CXXFLAGS = @CXXFLAGS@ -I..
-+AM_CXXFLAGS = @CXXFLAGS@ -I..
- LDADD = ../wxVillaLib/libwxvilla.a
- 
- SUBDIRS = rc
---- /tmp/DVDStyler-1.7.4/./Makefile.am	2008-07-15 07:58:46.000000000 +1000
-+++ ./Makefile.am	2009-10-14 15:23:26.000000000 +1100
-@@ -1,6 +1,5 @@
- SUBDIRS = wxVillaLib src locale backgrounds buttons objects data docs
- ACLOCAL_AMFLAGS=-I.
--pkgdatadir=@datadir@/doc/@PACKAGE@
--pkgdata_DATA = AUTHORS COPYING INSTALL README ChangeLog
-+doc_DATA = AUTHORS COPYING INSTALL README ChangeLog
- run: all
- 	$(MAKE) -C src run
---- /tmp/DVDStyler-1.7.4/./wxVillaLib/Makefile.am	2007-12-31 09:51:35.000000000 +1100
-+++ ./wxVillaLib/Makefile.am	2009-10-14 15:35:32.000000000 +1100
-@@ -3,4 +3,4 @@
-  PipeExecute.cpp PropDlg.cpp SConv.cpp\
-  Thumbnails.cpp ThumbnailFactory.cpp\
-  utils.cpp VerticalToolbar.cpp
--CXXFLAGS = @CXXFLAGS@ @GNOMEUI2_CFLAGS@ @LIBEXIF_CFLAGS@ -DWX_SVG
-+AM_CXXFLAGS = @CXXFLAGS@ @GNOMEUI2_CFLAGS@ @LIBEXIF_CFLAGS@ -DWX_SVG
---- /tmp/DVDStyler-1.7.4/locale/Makefile.in	2009-09-19 18:46:38.000000000 +1000
-+++ locale/Makefile.in	2009-10-14 15:46:49.000000000 +1100
-@@ -4,8 +4,10 @@
- # building lang.mo files from the translated lang.po catalogs.
- 
- PROGNAME=dvdstyler
--LINGUAS=cs da de el es eu fr hu it ko mk nl pl pt pt_BR ru sk sl sr sv vi zh_TW
--LINGUAS_ALL=ca cs da de el es eu fr hu it ja ko mk nl pl pt pt_BR ru sk sl sr sv vi zh_TW
-+# ls *.po | cut -f1 -d . | xargs echo  >> Makefile.in          
-+LINGUAS=cs da de el es fr it ko mk pl pt pt_BR ru sl sr vi zh_TW
-+LINGUAS_ALL=cs da de el es fr it ko mk pl pt pt_BR ru sl sr vi zh_TW
-+
- 
- prefix=@prefix@
- datarootdir=@datarootdir@
---- /tmp/DVDStyler-1.7.4/docs/Makefile.am	2009-08-30 00:30:10.000000000 +1000
-+++ docs/Makefile.am	2009-10-14 16:08:10.000000000 +1100
-@@ -1,22 +1,19 @@
--pkgdatadir=@datadir@/man/man1
--pkgdata_DATA = dvdstyler.1
-+dist_man_MANS = dvdstyler.1
- 
- LINGUAS=en de
--docdir=@datadir@/@PACKAGE@/docs
- doc_DATA = $(LINGUAS:%=help_%.zip)
- 
- help%.zip: DVDStylerGuide%.xml
--	mkdir temp
--	(cd temp && $(XMLTO) --skip-validation htmlhelp ../DVDStylerGuide$*.xml && rm -f *.hh* && $(ZIP) ../help$*.zip *.html)
--	rm -f temp/*.html
--	rmdir temp
-+	t=`mktemp -d tempXXXXXXX`; \
-+	(cd "$$t" && $(XMLTO) --skip-validation htmlhelp ../DVDStylerGuide$*.xml && rm -f *.hh* && $(ZIP) ../help$*.zip *.html) ; \
-+	rm -f "$$t"/*.html ; \
-+	rmdir "$$t"
- 	$(ZIP) help$*.zip htmlhelp.hhp toc.hhc images/*.png
- 
- html: $(LINGUAS:%=html_%)
- 
- html%: DVDStylerGuide%.xml
--	rm -rf html$*
--	mkdir html$*
--	(cd html$* && $(XMLTO) --skip-validation htmlhelp ../DVDStylerGuide$*.xml && rm -f *.hh*)
--	mkdir html$*/images
-+	t=`mktemp -d htmlXXXXXX` ; \
-+	(cd "$$t" && $(XMLTO) --skip-validation htmlhelp ../DVDStylerGuide$*.xml && rm -f *.hh*) ; \
-+	mkdir "$$t"/images
- 	cp images/*.png html$*/images/

diff --git a/media-video/dvdstyler/files/dvdstyler-1.8.1-cast.patch b/media-video/dvdstyler/files/dvdstyler-1.8.1-cast.patch
deleted file mode 100644
index fb4bf90..0000000
--- a/media-video/dvdstyler/files/dvdstyler-1.8.1-cast.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- /tmp/imagjpg.cpp	2010-07-18 15:46:11.000000000 +1000
-+++ ./wxVillaLib/imagjpg.cpp	2010-07-18 15:46:29.000000000 +1000
-@@ -226,7 +226,7 @@
- 
- bool wxJPGHandler::LoadFile( wxImage *image, wxInputStream& stream, bool verbose, int WXUNUSED(index) )
- {
--    wxCHECK_MSG( image, false, "NULL image pointer" );
-+    wxCHECK_MSG( image, false, wxT("NULL image pointer") );
- 
-     struct jpeg_decompress_struct cinfo;
-     wx_error_mgr jerr;

diff --git a/media-video/dvdstyler/files/dvdstyler-1.8.1-fix_enum_error.patch b/media-video/dvdstyler/files/dvdstyler-1.8.1-fix_enum_error.patch
deleted file mode 100644
index a356310..0000000
--- a/media-video/dvdstyler/files/dvdstyler-1.8.1-fix_enum_error.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- DVDStyler-1.8.1/src/mediatrc_ffmpeg.cpp	2011-08-29 19:24:10.000000000 +0200
-+++ DVDStyler-1.8.1-new/src/mediatrc_ffmpeg.cpp	2011-08-29 19:24:56.000000000 +0200
-@@ -2242,7 +2242,7 @@
- 			} else
- 				big_picture.quality = (int) ost->st->quality;
- 			if (!me_threshold)
--				big_picture.pict_type = 0;
-+				big_picture.pict_type = AV_PICTURE_TYPE_NONE;
- //            big_picture.pts = AV_NOPTS_VALUE;
- 			big_picture.pts= ost->sync_opts;
- //            big_picture.pts= av_rescale(ost->sync_opts, AV_TIME_BASE*(int64_t)enc->time_base.num, enc->time_base.den);

diff --git a/media-video/dvdstyler/metadata.xml b/media-video/dvdstyler/metadata.xml
deleted file mode 100644
index d9df187..0000000
--- a/media-video/dvdstyler/metadata.xml
+++ /dev/null
@@ -1,5 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
-<pkgmetadata>
-<herd>video</herd>
-</pkgmetadata>

diff --git a/net-news/rssguard/Manifest b/net-news/rssguard/Manifest
deleted file mode 100644
index c2588da..0000000
--- a/net-news/rssguard/Manifest
+++ /dev/null
@@ -1,3 +0,0 @@
-DIST rssguard-3.4.0.tar.gz 5106275 SHA256 5c62e76fe8d3d0fe93234ddced8d97e871c7ff1d9c3cd28da853c8524cad3fd5 SHA512 9f71678354bff5d680234aa99f54452ce9185dc2a8ded835d30e264a9e13540b9b45bfe0e0234679629d8c85eebd3e25d0a94193cab7c9aac4b0d975d6c5ab61 WHIRLPOOL 9eae344e8231e41c3e4ef515d47dd0dd720ce4fdd68358d887780b2a6e618593ec6be98c7a436fb96ea66dedf64fb732e94556104b8582fd384640405c45a8a7
-EBUILD rssguard-3.4.0.ebuild 984 SHA256 2f00bf17a970eb5966f130ebaeff34a6cea1f592acd4550acfa18006a9967d5e SHA512 561b9982db3badc86cb9d5f8b1831264e15d38c2c59ca01eca3e0341121bf1eacd663c847fe22e6093cd80de523eaf4d53781f690f7cc66fbea7009d29a3b345 WHIRLPOOL ce1780567c3381cb5d9f83c48caf727d7ae867933d1e29f034cb5012b3b2d6c0c5582ba3d102f89c62c8599930bc40b40fa0073434e98855c3ee1f2c54f78c08
-EBUILD rssguard-9999.ebuild 984 SHA256 2f00bf17a970eb5966f130ebaeff34a6cea1f592acd4550acfa18006a9967d5e SHA512 561b9982db3badc86cb9d5f8b1831264e15d38c2c59ca01eca3e0341121bf1eacd663c847fe22e6093cd80de523eaf4d53781f690f7cc66fbea7009d29a3b345 WHIRLPOOL ce1780567c3381cb5d9f83c48caf727d7ae867933d1e29f034cb5012b3b2d6c0c5582ba3d102f89c62c8599930bc40b40fa0073434e98855c3ee1f2c54f78c08

diff --git a/net-news/rssguard/rssguard-3.4.0.ebuild b/net-news/rssguard/rssguard-3.4.0.ebuild
deleted file mode 100644
index 8873eff..0000000
--- a/net-news/rssguard/rssguard-3.4.0.ebuild
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 1999-2017 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=6
-
-inherit qmake-utils
-
-if [[ ${PV} == *9999 ]]; then
-	inherit git-r3
-	EGIT_REPO_URI="https://github.com/martinrotter/${PN}"
-	EGIT_SUBMODULES=()
-else
-	SRC_URI="https://github.com/martinrotter/${PN}/archive/${PV}.tar.gz -> ${P}.tar.gz"
-	KEYWORDS="~amd64"
-fi
-
-DESCRIPTION="Simple, light and easy-to-use RSS/ATOM feed aggregator developed using Qt framework"
-HOMEPAGE="https://github.com/martinrotter/rssguard"
-
-LICENSE="GPLv3"
-SLOT="0"
-IUSE="+webengine"
-
-# minimum Qt version required
-QT_PV="5.6.0:5"
-
-DEPEND=">=dev-qt/qtcore-${QT_PV}
-	>=dev-qt/qtgui-${QT_PV}
-	>=dev-qt/qtwidgets-${QT_PV}
-	>=dev-qt/qtsql-${QT_PV}
-	>=dev-qt/qtnetwork-${QT_PV}
-	>=dev-qt/qtxml-${QT_PV}
-	webengine? ( >=dev-qt/qtwebengine-${QT_PV}[widgets] )"
-RDEPEND="${DEPEND}"
-
-src_configure() {
-	use webengine && WEBENGINE="true" || WEBENGINE="false"
-	eqmake5 PREFIX="${D}" USE_WEBENGINE="${WEBENGINE}"
-}

diff --git a/net-news/rssguard/rssguard-9999.ebuild b/net-news/rssguard/rssguard-9999.ebuild
deleted file mode 100644
index 8873eff..0000000
--- a/net-news/rssguard/rssguard-9999.ebuild
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 1999-2017 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=6
-
-inherit qmake-utils
-
-if [[ ${PV} == *9999 ]]; then
-	inherit git-r3
-	EGIT_REPO_URI="https://github.com/martinrotter/${PN}"
-	EGIT_SUBMODULES=()
-else
-	SRC_URI="https://github.com/martinrotter/${PN}/archive/${PV}.tar.gz -> ${P}.tar.gz"
-	KEYWORDS="~amd64"
-fi
-
-DESCRIPTION="Simple, light and easy-to-use RSS/ATOM feed aggregator developed using Qt framework"
-HOMEPAGE="https://github.com/martinrotter/rssguard"
-
-LICENSE="GPLv3"
-SLOT="0"
-IUSE="+webengine"
-
-# minimum Qt version required
-QT_PV="5.6.0:5"
-
-DEPEND=">=dev-qt/qtcore-${QT_PV}
-	>=dev-qt/qtgui-${QT_PV}
-	>=dev-qt/qtwidgets-${QT_PV}
-	>=dev-qt/qtsql-${QT_PV}
-	>=dev-qt/qtnetwork-${QT_PV}
-	>=dev-qt/qtxml-${QT_PV}
-	webengine? ( >=dev-qt/qtwebengine-${QT_PV}[widgets] )"
-RDEPEND="${DEPEND}"
-
-src_configure() {
-	use webengine && WEBENGINE="true" || WEBENGINE="false"
-	eqmake5 PREFIX="${D}" USE_WEBENGINE="${WEBENGINE}"
-}

